gcc/
[official-gcc.git] / gcc / config / arm / arm.c
blob6c9a69594baa3887d050aec8ae3e35e1368e6646
1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
23 Boston, MA 02110-1301, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
55 #include "df.h"
57 /* Forward definitions of types. */
58 typedef struct minipool_node Mnode;
59 typedef struct minipool_fixup Mfix;
61 const struct attribute_spec arm_attribute_table[];
63 /* Forward function declarations. */
64 static arm_stack_offsets *arm_get_frame_offsets (void);
65 static void arm_add_gc_roots (void);
66 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
67 HOST_WIDE_INT, rtx, rtx, int, int);
68 static unsigned bit_count (unsigned long);
69 static int arm_address_register_rtx_p (rtx, int);
70 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
71 static int thumb2_legitimate_index_p (enum machine_mode, rtx, int);
72 static int thumb1_base_register_rtx_p (rtx, enum machine_mode, int);
73 inline static int thumb1_index_register_rtx_p (rtx, int);
74 static int thumb_far_jump_used_p (void);
75 static bool thumb_force_lr_save (void);
76 static unsigned long thumb1_compute_save_reg_mask (void);
77 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
78 static rtx emit_sfm (int, int);
79 static int arm_size_return_regs (void);
80 #ifndef AOF_ASSEMBLER
81 static bool arm_assemble_integer (rtx, unsigned int, int);
82 #endif
83 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
84 static arm_cc get_arm_condition_code (rtx);
85 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
86 static rtx is_jump_table (rtx);
87 static const char *output_multi_immediate (rtx *, const char *, const char *,
88 int, HOST_WIDE_INT);
89 static const char *shift_op (rtx, HOST_WIDE_INT *);
90 static struct machine_function *arm_init_machine_status (void);
91 static void thumb_exit (FILE *, int);
92 static rtx is_jump_table (rtx);
93 static HOST_WIDE_INT get_jump_table_size (rtx);
94 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
95 static Mnode *add_minipool_forward_ref (Mfix *);
96 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
97 static Mnode *add_minipool_backward_ref (Mfix *);
98 static void assign_minipool_offsets (Mfix *);
99 static void arm_print_value (FILE *, rtx);
100 static void dump_minipool (rtx);
101 static int arm_barrier_cost (rtx);
102 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
103 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
104 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
105 rtx);
106 static void arm_reorg (void);
107 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
108 static unsigned long arm_compute_save_reg0_reg12_mask (void);
109 static unsigned long arm_compute_save_reg_mask (void);
110 static unsigned long arm_isr_value (tree);
111 static unsigned long arm_compute_func_type (void);
112 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
113 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
114 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
115 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
116 #endif
117 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
118 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
119 static void thumb1_output_function_prologue (FILE *, HOST_WIDE_INT);
120 static int arm_comp_type_attributes (tree, tree);
121 static void arm_set_default_type_attributes (tree);
122 static int arm_adjust_cost (rtx, rtx, rtx, int);
123 static int count_insns_for_constant (HOST_WIDE_INT, int);
124 static int arm_get_strip_length (int);
125 static bool arm_function_ok_for_sibcall (tree, tree);
126 static void arm_internal_label (FILE *, const char *, unsigned long);
127 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
128 tree);
129 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
130 static bool arm_size_rtx_costs (rtx, int, int, int *);
131 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
132 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
133 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
134 static bool arm_9e_rtx_costs (rtx, int, int, int *);
135 static int arm_address_cost (rtx);
136 static bool arm_memory_load_p (rtx);
137 static bool arm_cirrus_insn_p (rtx);
138 static void cirrus_reorg (rtx);
139 static void arm_init_builtins (void);
140 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
141 static void arm_init_iwmmxt_builtins (void);
142 static rtx safe_vector_operand (rtx, enum machine_mode);
143 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
144 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
145 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
146 static void emit_constant_insn (rtx cond, rtx pattern);
147 static rtx emit_set_insn (rtx, rtx);
148 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
149 tree, bool);
151 #ifdef OBJECT_FORMAT_ELF
152 static void arm_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
153 static void arm_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
154 #endif
155 #ifndef ARM_PE
156 static void arm_encode_section_info (tree, rtx, int);
157 #endif
159 static void arm_file_end (void);
160 static void arm_file_start (void);
162 #ifdef AOF_ASSEMBLER
163 static void aof_globalize_label (FILE *, const char *);
164 static void aof_dump_imports (FILE *);
165 static void aof_dump_pic_table (FILE *);
166 static void aof_file_start (void);
167 static void aof_file_end (void);
168 static void aof_asm_init_sections (void);
169 #endif
170 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
171 tree, int *, int);
172 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
173 enum machine_mode, tree, bool);
174 static bool arm_promote_prototypes (tree);
175 static bool arm_default_short_enums (void);
176 static bool arm_align_anon_bitfield (void);
177 static bool arm_return_in_msb (tree);
178 static bool arm_must_pass_in_stack (enum machine_mode, tree);
179 #ifdef TARGET_UNWIND_INFO
180 static void arm_unwind_emit (FILE *, rtx);
181 static bool arm_output_ttype (rtx);
182 #endif
183 static void arm_dwarf_handle_frame_unspec (const char *, rtx, int);
185 static tree arm_cxx_guard_type (void);
186 static bool arm_cxx_guard_mask_bit (void);
187 static tree arm_get_cookie_size (tree);
188 static bool arm_cookie_has_size (void);
189 static bool arm_cxx_cdtor_returns_this (void);
190 static bool arm_cxx_key_method_may_be_inline (void);
191 static void arm_cxx_determine_class_data_visibility (tree);
192 static bool arm_cxx_class_data_always_comdat (void);
193 static bool arm_cxx_use_aeabi_atexit (void);
194 static void arm_init_libfuncs (void);
195 static bool arm_handle_option (size_t, const char *, int);
196 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
197 static bool arm_cannot_copy_insn_p (rtx);
198 static bool arm_tls_symbol_p (rtx x);
199 static void arm_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
202 /* Initialize the GCC target structure. */
203 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
204 #undef TARGET_MERGE_DECL_ATTRIBUTES
205 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
206 #endif
208 #undef TARGET_ATTRIBUTE_TABLE
209 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
211 #undef TARGET_ASM_FILE_START
212 #define TARGET_ASM_FILE_START arm_file_start
213 #undef TARGET_ASM_FILE_END
214 #define TARGET_ASM_FILE_END arm_file_end
216 #ifdef AOF_ASSEMBLER
217 #undef TARGET_ASM_BYTE_OP
218 #define TARGET_ASM_BYTE_OP "\tDCB\t"
219 #undef TARGET_ASM_ALIGNED_HI_OP
220 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
221 #undef TARGET_ASM_ALIGNED_SI_OP
222 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
223 #undef TARGET_ASM_GLOBALIZE_LABEL
224 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
225 #undef TARGET_ASM_FILE_START
226 #define TARGET_ASM_FILE_START aof_file_start
227 #undef TARGET_ASM_FILE_END
228 #define TARGET_ASM_FILE_END aof_file_end
229 #else
230 #undef TARGET_ASM_ALIGNED_SI_OP
231 #define TARGET_ASM_ALIGNED_SI_OP NULL
232 #undef TARGET_ASM_INTEGER
233 #define TARGET_ASM_INTEGER arm_assemble_integer
234 #endif
236 #undef TARGET_ASM_FUNCTION_PROLOGUE
237 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
239 #undef TARGET_ASM_FUNCTION_EPILOGUE
240 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
242 #undef TARGET_DEFAULT_TARGET_FLAGS
243 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
244 #undef TARGET_HANDLE_OPTION
245 #define TARGET_HANDLE_OPTION arm_handle_option
247 #undef TARGET_COMP_TYPE_ATTRIBUTES
248 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
250 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
251 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
253 #undef TARGET_SCHED_ADJUST_COST
254 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
256 #undef TARGET_ENCODE_SECTION_INFO
257 #ifdef ARM_PE
258 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
259 #else
260 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
261 #endif
263 #undef TARGET_STRIP_NAME_ENCODING
264 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
266 #undef TARGET_ASM_INTERNAL_LABEL
267 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
269 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
270 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
272 #undef TARGET_ASM_OUTPUT_MI_THUNK
273 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
274 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
275 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
277 /* This will be overridden in arm_override_options. */
278 #undef TARGET_RTX_COSTS
279 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
280 #undef TARGET_ADDRESS_COST
281 #define TARGET_ADDRESS_COST arm_address_cost
283 #undef TARGET_SHIFT_TRUNCATION_MASK
284 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
285 #undef TARGET_VECTOR_MODE_SUPPORTED_P
286 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
288 #undef TARGET_MACHINE_DEPENDENT_REORG
289 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
291 #undef TARGET_INIT_BUILTINS
292 #define TARGET_INIT_BUILTINS arm_init_builtins
293 #undef TARGET_EXPAND_BUILTIN
294 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
296 #undef TARGET_INIT_LIBFUNCS
297 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
299 #undef TARGET_PROMOTE_FUNCTION_ARGS
300 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
301 #undef TARGET_PROMOTE_FUNCTION_RETURN
302 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
303 #undef TARGET_PROMOTE_PROTOTYPES
304 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
305 #undef TARGET_PASS_BY_REFERENCE
306 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
307 #undef TARGET_ARG_PARTIAL_BYTES
308 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
310 #undef TARGET_SETUP_INCOMING_VARARGS
311 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
313 #undef TARGET_DEFAULT_SHORT_ENUMS
314 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
316 #undef TARGET_ALIGN_ANON_BITFIELD
317 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
319 #undef TARGET_NARROW_VOLATILE_BITFIELD
320 #define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
322 #undef TARGET_CXX_GUARD_TYPE
323 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
325 #undef TARGET_CXX_GUARD_MASK_BIT
326 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
328 #undef TARGET_CXX_GET_COOKIE_SIZE
329 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
331 #undef TARGET_CXX_COOKIE_HAS_SIZE
332 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
334 #undef TARGET_CXX_CDTOR_RETURNS_THIS
335 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
337 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
338 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
340 #undef TARGET_CXX_USE_AEABI_ATEXIT
341 #define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit
343 #undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
344 #define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
345 arm_cxx_determine_class_data_visibility
347 #undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
348 #define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat
350 #undef TARGET_RETURN_IN_MSB
351 #define TARGET_RETURN_IN_MSB arm_return_in_msb
353 #undef TARGET_MUST_PASS_IN_STACK
354 #define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
356 #ifdef TARGET_UNWIND_INFO
357 #undef TARGET_UNWIND_EMIT
358 #define TARGET_UNWIND_EMIT arm_unwind_emit
360 /* EABI unwinding tables use a different format for the typeinfo tables. */
361 #undef TARGET_ASM_TTYPE
362 #define TARGET_ASM_TTYPE arm_output_ttype
364 #undef TARGET_ARM_EABI_UNWINDER
365 #define TARGET_ARM_EABI_UNWINDER true
366 #endif /* TARGET_UNWIND_INFO */
368 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
369 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC arm_dwarf_handle_frame_unspec
371 #undef TARGET_CANNOT_COPY_INSN_P
372 #define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p
374 #ifdef HAVE_AS_TLS
375 #undef TARGET_HAVE_TLS
376 #define TARGET_HAVE_TLS true
377 #endif
379 #undef TARGET_CANNOT_FORCE_CONST_MEM
380 #define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p
382 #ifdef HAVE_AS_TLS
383 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
384 #define TARGET_ASM_OUTPUT_DWARF_DTPREL arm_output_dwarf_dtprel
385 #endif
387 struct gcc_target targetm = TARGET_INITIALIZER;
389 /* Obstack for minipool constant handling. */
390 static struct obstack minipool_obstack;
391 static char * minipool_startobj;
393 /* The maximum number of insns skipped which
394 will be conditionalised if possible. */
395 static int max_insns_skipped = 5;
397 extern FILE * asm_out_file;
399 /* True if we are currently building a constant table. */
400 int making_const_table;
402 /* Define the information needed to generate branch insns. This is
403 stored from the compare operation. */
404 rtx arm_compare_op0, arm_compare_op1;
406 /* The processor for which instructions should be scheduled. */
407 enum processor_type arm_tune = arm_none;
409 /* The default processor used if not overridden by commandline. */
410 static enum processor_type arm_default_cpu = arm_none;
412 /* Which floating point model to use. */
413 enum arm_fp_model arm_fp_model;
415 /* Which floating point hardware is available. */
416 enum fputype arm_fpu_arch;
418 /* Which floating point hardware to schedule for. */
419 enum fputype arm_fpu_tune;
421 /* Whether to use floating point hardware. */
422 enum float_abi_type arm_float_abi;
424 /* Which ABI to use. */
425 enum arm_abi_type arm_abi;
427 /* Which thread pointer model to use. */
428 enum arm_tp_type target_thread_pointer = TP_AUTO;
430 /* Used to parse -mstructure_size_boundary command line option. */
431 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
433 /* Used for Thumb call_via trampolines. */
434 rtx thumb_call_via_label[14];
435 static int thumb_call_reg_needed;
437 /* Bit values used to identify processor capabilities. */
438 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
439 #define FL_ARCH3M (1 << 1) /* Extended multiply */
440 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
441 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
442 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
443 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
444 #define FL_THUMB (1 << 6) /* Thumb aware */
445 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
446 #define FL_STRONG (1 << 8) /* StrongARM */
447 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
448 #define FL_XSCALE (1 << 10) /* XScale */
449 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
450 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
451 media instructions. */
452 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
453 #define FL_WBUF (1 << 14) /* Schedule for write buffer ops.
454 Note: ARM6 & 7 derivatives only. */
455 #define FL_ARCH6K (1 << 15) /* Architecture rel 6 K extensions. */
456 #define FL_THUMB2 (1 << 16) /* Thumb-2. */
457 #define FL_NOTM (1 << 17) /* Instructions not present in the 'M'
458 profile. */
459 #define FL_DIV (1 << 18) /* Hardware divide. */
460 #define FL_VFPV3 (1 << 19) /* Vector Floating Point V3. */
462 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
464 #define FL_FOR_ARCH2 FL_NOTM
465 #define FL_FOR_ARCH3 (FL_FOR_ARCH2 | FL_MODE32)
466 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
467 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
468 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
469 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
470 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
471 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
472 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
473 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
474 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
475 #define FL_FOR_ARCH6J FL_FOR_ARCH6
476 #define FL_FOR_ARCH6K (FL_FOR_ARCH6 | FL_ARCH6K)
477 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
478 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6K
479 #define FL_FOR_ARCH6T2 (FL_FOR_ARCH6 | FL_THUMB2)
480 #define FL_FOR_ARCH7 (FL_FOR_ARCH6T2 &~ FL_NOTM)
481 #define FL_FOR_ARCH7A (FL_FOR_ARCH7 | FL_NOTM)
482 #define FL_FOR_ARCH7R (FL_FOR_ARCH7A | FL_DIV)
483 #define FL_FOR_ARCH7M (FL_FOR_ARCH7 | FL_DIV)
485 /* The bits in this mask specify which
486 instructions we are allowed to generate. */
487 static unsigned long insn_flags = 0;
489 /* The bits in this mask specify which instruction scheduling options should
490 be used. */
491 static unsigned long tune_flags = 0;
493 /* The following are used in the arm.md file as equivalents to bits
494 in the above two flag variables. */
496 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
497 int arm_arch3m = 0;
499 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
500 int arm_arch4 = 0;
502 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
503 int arm_arch4t = 0;
505 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
506 int arm_arch5 = 0;
508 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
509 int arm_arch5e = 0;
511 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
512 int arm_arch6 = 0;
514 /* Nonzero if this chip supports the ARM 6K extensions. */
515 int arm_arch6k = 0;
517 /* Nonzero if instructions not present in the 'M' profile can be used. */
518 int arm_arch_notm = 0;
520 /* Nonzero if this chip can benefit from load scheduling. */
521 int arm_ld_sched = 0;
523 /* Nonzero if this chip is a StrongARM. */
524 int arm_tune_strongarm = 0;
526 /* Nonzero if this chip is a Cirrus variant. */
527 int arm_arch_cirrus = 0;
529 /* Nonzero if this chip supports Intel Wireless MMX technology. */
530 int arm_arch_iwmmxt = 0;
532 /* Nonzero if this chip is an XScale. */
533 int arm_arch_xscale = 0;
535 /* Nonzero if tuning for XScale */
536 int arm_tune_xscale = 0;
538 /* Nonzero if we want to tune for stores that access the write-buffer.
539 This typically means an ARM6 or ARM7 with MMU or MPU. */
540 int arm_tune_wbuf = 0;
542 /* Nonzero if generating Thumb instructions. */
543 int thumb_code = 0;
545 /* Nonzero if we should define __THUMB_INTERWORK__ in the
546 preprocessor.
547 XXX This is a bit of a hack, it's intended to help work around
548 problems in GLD which doesn't understand that armv5t code is
549 interworking clean. */
550 int arm_cpp_interwork = 0;
552 /* Nonzero if chip supports Thumb 2. */
553 int arm_arch_thumb2;
555 /* Nonzero if chip supports integer division instruction. */
556 int arm_arch_hwdiv;
558 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
559 must report the mode of the memory reference from PRINT_OPERAND to
560 PRINT_OPERAND_ADDRESS. */
561 enum machine_mode output_memory_reference_mode;
563 /* The register number to be used for the PIC offset register. */
564 unsigned arm_pic_register = INVALID_REGNUM;
566 /* Set to 1 when a return insn is output, this means that the epilogue
567 is not needed. */
568 int return_used_this_function;
570 /* Set to 1 after arm_reorg has started. Reset to start at the start of
571 the next function. */
572 static int after_arm_reorg = 0;
574 /* The maximum number of insns to be used when loading a constant. */
575 static int arm_constant_limit = 3;
577 /* For an explanation of these variables, see final_prescan_insn below. */
578 int arm_ccfsm_state;
579 /* arm_current_cc is also used for Thumb-2 cond_exec blocks. */
580 enum arm_cond_code arm_current_cc;
581 rtx arm_target_insn;
582 int arm_target_label;
583 /* The number of conditionally executed insns, including the current insn. */
584 int arm_condexec_count = 0;
585 /* A bitmask specifying the patterns for the IT block.
586 Zero means do not output an IT block before this insn. */
587 int arm_condexec_mask = 0;
588 /* The number of bits used in arm_condexec_mask. */
589 int arm_condexec_masklen = 0;
591 /* The condition codes of the ARM, and the inverse function. */
592 static const char * const arm_condition_codes[] =
594 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
595 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
598 #define ARM_LSL_NAME (TARGET_UNIFIED_ASM ? "lsl" : "asl")
599 #define streq(string1, string2) (strcmp (string1, string2) == 0)
601 #define THUMB2_WORK_REGS (0xff & ~( (1 << THUMB_HARD_FRAME_POINTER_REGNUM) \
602 | (1 << SP_REGNUM) | (1 << PC_REGNUM) \
603 | (1 << PIC_OFFSET_TABLE_REGNUM)))
605 /* Initialization code. */
607 struct processors
609 const char *const name;
610 enum processor_type core;
611 const char *arch;
612 const unsigned long flags;
613 bool (* rtx_costs) (rtx, int, int, int *);
616 /* Not all of these give usefully different compilation alternatives,
617 but there is no simple way of generalizing them. */
618 static const struct processors all_cores[] =
620 /* ARM Cores */
621 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
622 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
623 #include "arm-cores.def"
624 #undef ARM_CORE
625 {NULL, arm_none, NULL, 0, NULL}
628 static const struct processors all_architectures[] =
630 /* ARM Architectures */
631 /* We don't specify rtx_costs here as it will be figured out
632 from the core. */
634 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
635 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
636 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
637 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
638 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
639 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
640 implementations that support it, so we will leave it out for now. */
641 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
642 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
643 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
644 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
645 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
646 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
647 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
648 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
649 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
650 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
651 {"armv6t2", arm1156t2s, "6T2", FL_CO_PROC | FL_FOR_ARCH6T2, NULL},
652 {"armv7", cortexa8, "7", FL_CO_PROC | FL_FOR_ARCH7, NULL},
653 {"armv7-a", cortexa8, "7A", FL_CO_PROC | FL_FOR_ARCH7A, NULL},
654 {"armv7-r", cortexr4, "7R", FL_CO_PROC | FL_FOR_ARCH7R, NULL},
655 {"armv7-m", cortexm3, "7M", FL_CO_PROC | FL_FOR_ARCH7M, NULL},
656 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
657 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
658 {NULL, arm_none, NULL, 0 , NULL}
661 struct arm_cpu_select
663 const char * string;
664 const char * name;
665 const struct processors * processors;
668 /* This is a magic structure. The 'string' field is magically filled in
669 with a pointer to the value specified by the user on the command line
670 assuming that the user has specified such a value. */
672 static struct arm_cpu_select arm_select[] =
674 /* string name processors */
675 { NULL, "-mcpu=", all_cores },
676 { NULL, "-march=", all_architectures },
677 { NULL, "-mtune=", all_cores }
680 /* Defines representing the indexes into the above table. */
681 #define ARM_OPT_SET_CPU 0
682 #define ARM_OPT_SET_ARCH 1
683 #define ARM_OPT_SET_TUNE 2
685 /* The name of the preprocessor macro to define for this architecture. */
687 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
689 struct fpu_desc
691 const char * name;
692 enum fputype fpu;
696 /* Available values for -mfpu=. */
698 static const struct fpu_desc all_fpus[] =
700 {"fpa", FPUTYPE_FPA},
701 {"fpe2", FPUTYPE_FPA_EMU2},
702 {"fpe3", FPUTYPE_FPA_EMU2},
703 {"maverick", FPUTYPE_MAVERICK},
704 {"vfp", FPUTYPE_VFP},
705 {"vfp3", FPUTYPE_VFP3},
709 /* Floating point models used by the different hardware.
710 See fputype in arm.h. */
712 static const enum fputype fp_model_for_fpu[] =
714 /* No FP hardware. */
715 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
716 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
717 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
718 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
719 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
720 ARM_FP_MODEL_VFP, /* FPUTYPE_VFP */
721 ARM_FP_MODEL_VFP /* FPUTYPE_VFP3 */
725 struct float_abi
727 const char * name;
728 enum float_abi_type abi_type;
732 /* Available values for -mfloat-abi=. */
734 static const struct float_abi all_float_abis[] =
736 {"soft", ARM_FLOAT_ABI_SOFT},
737 {"softfp", ARM_FLOAT_ABI_SOFTFP},
738 {"hard", ARM_FLOAT_ABI_HARD}
742 struct abi_name
744 const char *name;
745 enum arm_abi_type abi_type;
749 /* Available values for -mabi=. */
751 static const struct abi_name arm_all_abis[] =
753 {"apcs-gnu", ARM_ABI_APCS},
754 {"atpcs", ARM_ABI_ATPCS},
755 {"aapcs", ARM_ABI_AAPCS},
756 {"iwmmxt", ARM_ABI_IWMMXT},
757 {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
760 /* Supported TLS relocations. */
762 enum tls_reloc {
763 TLS_GD32,
764 TLS_LDM32,
765 TLS_LDO32,
766 TLS_IE32,
767 TLS_LE32
770 /* Emit an insn that's a simple single-set. Both the operands must be known
771 to be valid. */
772 inline static rtx
773 emit_set_insn (rtx x, rtx y)
775 return emit_insn (gen_rtx_SET (VOIDmode, x, y));
778 /* Return the number of bits set in VALUE. */
779 static unsigned
780 bit_count (unsigned long value)
782 unsigned long count = 0;
784 while (value)
786 count++;
787 value &= value - 1; /* Clear the least-significant set bit. */
790 return count;
793 /* Set up library functions unique to ARM. */
795 static void
796 arm_init_libfuncs (void)
798 /* There are no special library functions unless we are using the
799 ARM BPABI. */
800 if (!TARGET_BPABI)
801 return;
803 /* The functions below are described in Section 4 of the "Run-Time
804 ABI for the ARM architecture", Version 1.0. */
806 /* Double-precision floating-point arithmetic. Table 2. */
807 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
808 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
809 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
810 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
811 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
813 /* Double-precision comparisons. Table 3. */
814 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
815 set_optab_libfunc (ne_optab, DFmode, NULL);
816 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
817 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
818 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
819 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
820 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
822 /* Single-precision floating-point arithmetic. Table 4. */
823 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
824 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
825 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
826 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
827 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
829 /* Single-precision comparisons. Table 5. */
830 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
831 set_optab_libfunc (ne_optab, SFmode, NULL);
832 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
833 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
834 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
835 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
836 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
838 /* Floating-point to integer conversions. Table 6. */
839 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
840 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
841 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
842 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
843 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
844 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
845 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
846 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
848 /* Conversions between floating types. Table 7. */
849 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
850 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
852 /* Integer to floating-point conversions. Table 8. */
853 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
854 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
855 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
856 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
857 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
858 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
859 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
860 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
862 /* Long long. Table 9. */
863 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
864 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
865 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
866 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
867 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
868 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
869 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
870 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
872 /* Integer (32/32->32) division. \S 4.3.1. */
873 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
874 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
876 /* The divmod functions are designed so that they can be used for
877 plain division, even though they return both the quotient and the
878 remainder. The quotient is returned in the usual location (i.e.,
879 r0 for SImode, {r0, r1} for DImode), just as would be expected
880 for an ordinary division routine. Because the AAPCS calling
881 conventions specify that all of { r0, r1, r2, r3 } are
882 callee-saved registers, there is no need to tell the compiler
883 explicitly that those registers are clobbered by these
884 routines. */
885 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
886 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
888 /* For SImode division the ABI provides div-without-mod routines,
889 which are faster. */
890 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
891 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");
893 /* We don't have mod libcalls. Fortunately gcc knows how to use the
894 divmod libcalls instead. */
895 set_optab_libfunc (smod_optab, DImode, NULL);
896 set_optab_libfunc (umod_optab, DImode, NULL);
897 set_optab_libfunc (smod_optab, SImode, NULL);
898 set_optab_libfunc (umod_optab, SImode, NULL);
901 /* Implement TARGET_HANDLE_OPTION. */
903 static bool
904 arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
906 switch (code)
908 case OPT_march_:
909 arm_select[1].string = arg;
910 return true;
912 case OPT_mcpu_:
913 arm_select[0].string = arg;
914 return true;
916 case OPT_mhard_float:
917 target_float_abi_name = "hard";
918 return true;
920 case OPT_msoft_float:
921 target_float_abi_name = "soft";
922 return true;
924 case OPT_mtune_:
925 arm_select[2].string = arg;
926 return true;
928 default:
929 return true;
933 /* Fix up any incompatible options that the user has specified.
934 This has now turned into a maze. */
935 void
936 arm_override_options (void)
938 unsigned i;
939 enum processor_type target_arch_cpu = arm_none;
941 /* Set up the flags based on the cpu/architecture selected by the user. */
942 for (i = ARRAY_SIZE (arm_select); i--;)
944 struct arm_cpu_select * ptr = arm_select + i;
946 if (ptr->string != NULL && ptr->string[0] != '\0')
948 const struct processors * sel;
950 for (sel = ptr->processors; sel->name != NULL; sel++)
951 if (streq (ptr->string, sel->name))
953 /* Set the architecture define. */
954 if (i != ARM_OPT_SET_TUNE)
955 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
957 /* Determine the processor core for which we should
958 tune code-generation. */
959 if (/* -mcpu= is a sensible default. */
960 i == ARM_OPT_SET_CPU
961 /* -mtune= overrides -mcpu= and -march=. */
962 || i == ARM_OPT_SET_TUNE)
963 arm_tune = (enum processor_type) (sel - ptr->processors);
965 /* Remember the CPU associated with this architecture.
966 If no other option is used to set the CPU type,
967 we'll use this to guess the most suitable tuning
968 options. */
969 if (i == ARM_OPT_SET_ARCH)
970 target_arch_cpu = sel->core;
972 if (i != ARM_OPT_SET_TUNE)
974 /* If we have been given an architecture and a processor
975 make sure that they are compatible. We only generate
976 a warning though, and we prefer the CPU over the
977 architecture. */
978 if (insn_flags != 0 && (insn_flags ^ sel->flags))
979 warning (0, "switch -mcpu=%s conflicts with -march= switch",
980 ptr->string);
982 insn_flags = sel->flags;
985 break;
988 if (sel->name == NULL)
989 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
993 /* Guess the tuning options from the architecture if necessary. */
994 if (arm_tune == arm_none)
995 arm_tune = target_arch_cpu;
997 /* If the user did not specify a processor, choose one for them. */
998 if (insn_flags == 0)
1000 const struct processors * sel;
1001 unsigned int sought;
1002 enum processor_type cpu;
1004 cpu = TARGET_CPU_DEFAULT;
1005 if (cpu == arm_none)
1007 #ifdef SUBTARGET_CPU_DEFAULT
1008 /* Use the subtarget default CPU if none was specified by
1009 configure. */
1010 cpu = SUBTARGET_CPU_DEFAULT;
1011 #endif
1012 /* Default to ARM6. */
1013 if (cpu == arm_none)
1014 cpu = arm6;
1016 sel = &all_cores[cpu];
1018 insn_flags = sel->flags;
1020 /* Now check to see if the user has specified some command line
1021 switch that require certain abilities from the cpu. */
1022 sought = 0;
1024 if (TARGET_INTERWORK || TARGET_THUMB)
1026 sought |= (FL_THUMB | FL_MODE32);
1028 /* There are no ARM processors that support both APCS-26 and
1029 interworking. Therefore we force FL_MODE26 to be removed
1030 from insn_flags here (if it was set), so that the search
1031 below will always be able to find a compatible processor. */
1032 insn_flags &= ~FL_MODE26;
1035 if (sought != 0 && ((sought & insn_flags) != sought))
1037 /* Try to locate a CPU type that supports all of the abilities
1038 of the default CPU, plus the extra abilities requested by
1039 the user. */
1040 for (sel = all_cores; sel->name != NULL; sel++)
1041 if ((sel->flags & sought) == (sought | insn_flags))
1042 break;
1044 if (sel->name == NULL)
1046 unsigned current_bit_count = 0;
1047 const struct processors * best_fit = NULL;
1049 /* Ideally we would like to issue an error message here
1050 saying that it was not possible to find a CPU compatible
1051 with the default CPU, but which also supports the command
1052 line options specified by the programmer, and so they
1053 ought to use the -mcpu=<name> command line option to
1054 override the default CPU type.
1056 If we cannot find a cpu that has both the
1057 characteristics of the default cpu and the given
1058 command line options we scan the array again looking
1059 for a best match. */
1060 for (sel = all_cores; sel->name != NULL; sel++)
1061 if ((sel->flags & sought) == sought)
1063 unsigned count;
1065 count = bit_count (sel->flags & insn_flags);
1067 if (count >= current_bit_count)
1069 best_fit = sel;
1070 current_bit_count = count;
1074 gcc_assert (best_fit);
1075 sel = best_fit;
1078 insn_flags = sel->flags;
1080 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
1081 arm_default_cpu = (enum processor_type) (sel - all_cores);
1082 if (arm_tune == arm_none)
1083 arm_tune = arm_default_cpu;
1086 /* The processor for which we should tune should now have been
1087 chosen. */
1088 gcc_assert (arm_tune != arm_none);
1090 tune_flags = all_cores[(int)arm_tune].flags;
1091 if (optimize_size)
1092 targetm.rtx_costs = arm_size_rtx_costs;
1093 else
1094 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
1096 /* Make sure that the processor choice does not conflict with any of the
1097 other command line choices. */
1098 if (TARGET_ARM && !(insn_flags & FL_NOTM))
1099 error ("target CPU does not support ARM mode");
1101 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
1103 warning (0, "target CPU does not support interworking" );
1104 target_flags &= ~MASK_INTERWORK;
1107 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
1109 warning (0, "target CPU does not support THUMB instructions");
1110 target_flags &= ~MASK_THUMB;
1113 if (TARGET_APCS_FRAME && TARGET_THUMB)
1115 /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
1116 target_flags &= ~MASK_APCS_FRAME;
1119 /* Callee super interworking implies thumb interworking. Adding
1120 this to the flags here simplifies the logic elsewhere. */
1121 if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
1122 target_flags |= MASK_INTERWORK;
1124 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
1125 from here where no function is being compiled currently. */
1126 if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
1127 warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
1129 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
1130 warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
1132 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
1133 warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");
1135 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
1137 warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
1138 target_flags |= MASK_APCS_FRAME;
1141 if (TARGET_POKE_FUNCTION_NAME)
1142 target_flags |= MASK_APCS_FRAME;
1144 if (TARGET_APCS_REENT && flag_pic)
1145 error ("-fpic and -mapcs-reent are incompatible");
1147 if (TARGET_APCS_REENT)
1148 warning (0, "APCS reentrant code not supported. Ignored");
1150 /* If this target is normally configured to use APCS frames, warn if they
1151 are turned off and debugging is turned on. */
1152 if (TARGET_ARM
1153 && write_symbols != NO_DEBUG
1154 && !TARGET_APCS_FRAME
1155 && (TARGET_DEFAULT & MASK_APCS_FRAME))
1156 warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
1158 if (TARGET_APCS_FLOAT)
1159 warning (0, "passing floating point arguments in fp regs not yet supported");
1161 /* Initialize boolean versions of the flags, for use in the arm.md file. */
1162 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
1163 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
1164 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
1165 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
1166 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
1167 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
1168 arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
1169 arm_arch_notm = (insn_flags & FL_NOTM) != 0;
1170 arm_arch_thumb2 = (insn_flags & FL_THUMB2) != 0;
1171 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
1172 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
1174 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
1175 arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
1176 thumb_code = (TARGET_ARM == 0);
1177 arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
1178 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
1179 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
1180 arm_arch_hwdiv = (insn_flags & FL_DIV) != 0;
1182 /* V5 code we generate is completely interworking capable, so we turn off
1183 TARGET_INTERWORK here to avoid many tests later on. */
1185 /* XXX However, we must pass the right pre-processor defines to CPP
1186 or GLD can get confused. This is a hack. */
1187 if (TARGET_INTERWORK)
1188 arm_cpp_interwork = 1;
1190 if (arm_arch5)
1191 target_flags &= ~MASK_INTERWORK;
1193 if (target_abi_name)
1195 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1197 if (streq (arm_all_abis[i].name, target_abi_name))
1199 arm_abi = arm_all_abis[i].abi_type;
1200 break;
1203 if (i == ARRAY_SIZE (arm_all_abis))
1204 error ("invalid ABI option: -mabi=%s", target_abi_name);
1206 else
1207 arm_abi = ARM_DEFAULT_ABI;
1209 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1210 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1212 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1213 error ("iwmmxt abi requires an iwmmxt capable cpu");
1215 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1216 if (target_fpu_name == NULL && target_fpe_name != NULL)
1218 if (streq (target_fpe_name, "2"))
1219 target_fpu_name = "fpe2";
1220 else if (streq (target_fpe_name, "3"))
1221 target_fpu_name = "fpe3";
1222 else
1223 error ("invalid floating point emulation option: -mfpe=%s",
1224 target_fpe_name);
1226 if (target_fpu_name != NULL)
1228 /* The user specified a FPU. */
1229 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1231 if (streq (all_fpus[i].name, target_fpu_name))
1233 arm_fpu_arch = all_fpus[i].fpu;
1234 arm_fpu_tune = arm_fpu_arch;
1235 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1236 break;
1239 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1240 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1242 else
1244 #ifdef FPUTYPE_DEFAULT
1245 /* Use the default if it is specified for this platform. */
1246 arm_fpu_arch = FPUTYPE_DEFAULT;
1247 arm_fpu_tune = FPUTYPE_DEFAULT;
1248 #else
1249 /* Pick one based on CPU type. */
1250 /* ??? Some targets assume FPA is the default.
1251 if ((insn_flags & FL_VFP) != 0)
1252 arm_fpu_arch = FPUTYPE_VFP;
1253 else
1255 if (arm_arch_cirrus)
1256 arm_fpu_arch = FPUTYPE_MAVERICK;
1257 else
1258 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1259 #endif
1260 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1261 arm_fpu_tune = FPUTYPE_FPA;
1262 else
1263 arm_fpu_tune = arm_fpu_arch;
1264 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1265 gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
1268 if (target_float_abi_name != NULL)
1270 /* The user specified a FP ABI. */
1271 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1273 if (streq (all_float_abis[i].name, target_float_abi_name))
1275 arm_float_abi = all_float_abis[i].abi_type;
1276 break;
1279 if (i == ARRAY_SIZE (all_float_abis))
1280 error ("invalid floating point abi: -mfloat-abi=%s",
1281 target_float_abi_name);
1283 else
1284 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1286 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1287 sorry ("-mfloat-abi=hard and VFP");
1289 /* FPA and iWMMXt are incompatible because the insn encodings overlap.
1290 VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
1291 will ever exist. GCC makes no attempt to support this combination. */
1292 if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
1293 sorry ("iWMMXt and hardware floating point");
1295 /* ??? iWMMXt insn patterns need auditing for Thumb-2. */
1296 if (TARGET_THUMB2 && TARGET_IWMMXT)
1297 sorry ("Thumb-2 iWMMXt");
1299 /* If soft-float is specified then don't use FPU. */
1300 if (TARGET_SOFT_FLOAT)
1301 arm_fpu_arch = FPUTYPE_NONE;
1303 /* For arm2/3 there is no need to do any scheduling if there is only
1304 a floating point emulator, or we are doing software floating-point. */
1305 if ((TARGET_SOFT_FLOAT
1306 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1307 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1308 && (tune_flags & FL_MODE32) == 0)
1309 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1311 if (target_thread_switch)
1313 if (strcmp (target_thread_switch, "soft") == 0)
1314 target_thread_pointer = TP_SOFT;
1315 else if (strcmp (target_thread_switch, "auto") == 0)
1316 target_thread_pointer = TP_AUTO;
1317 else if (strcmp (target_thread_switch, "cp15") == 0)
1318 target_thread_pointer = TP_CP15;
1319 else
1320 error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
1323 /* Use the cp15 method if it is available. */
1324 if (target_thread_pointer == TP_AUTO)
1326 if (arm_arch6k && !TARGET_THUMB)
1327 target_thread_pointer = TP_CP15;
1328 else
1329 target_thread_pointer = TP_SOFT;
1332 if (TARGET_HARD_TP && TARGET_THUMB1)
1333 error ("can not use -mtp=cp15 with 16-bit Thumb");
1335 /* Override the default structure alignment for AAPCS ABI. */
1336 if (TARGET_AAPCS_BASED)
1337 arm_structure_size_boundary = 8;
1339 if (structure_size_string != NULL)
1341 int size = strtol (structure_size_string, NULL, 0);
1343 if (size == 8 || size == 32
1344 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1345 arm_structure_size_boundary = size;
1346 else
1347 warning (0, "structure size boundary can only be set to %s",
1348 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1351 if (!TARGET_ARM && TARGET_VXWORKS_RTP && flag_pic)
1353 error ("RTP PIC is incompatible with Thumb");
1354 flag_pic = 0;
1357 /* If stack checking is disabled, we can use r10 as the PIC register,
1358 which keeps r9 available. The EABI specifies r9 as the PIC register. */
1359 if (flag_pic && TARGET_SINGLE_PIC_BASE)
1361 if (TARGET_VXWORKS_RTP)
1362 warning (0, "RTP PIC is incompatible with -msingle-pic-base");
1363 arm_pic_register = (TARGET_APCS_STACK || TARGET_AAPCS_BASED) ? 9 : 10;
1366 if (flag_pic && TARGET_VXWORKS_RTP)
1367 arm_pic_register = 9;
1369 if (arm_pic_register_string != NULL)
1371 int pic_register = decode_reg_name (arm_pic_register_string);
1373 if (!flag_pic)
1374 warning (0, "-mpic-register= is useless without -fpic");
1376 /* Prevent the user from choosing an obviously stupid PIC register. */
1377 else if (pic_register < 0 || call_used_regs[pic_register]
1378 || pic_register == HARD_FRAME_POINTER_REGNUM
1379 || pic_register == STACK_POINTER_REGNUM
1380 || pic_register >= PC_REGNUM
1381 || (TARGET_VXWORKS_RTP
1382 && (unsigned int) pic_register != arm_pic_register))
1383 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1384 else
1385 arm_pic_register = pic_register;
1388 /* ??? We might want scheduling for thumb2. */
1389 if (TARGET_THUMB && flag_schedule_insns)
1391 /* Don't warn since it's on by default in -O2. */
1392 flag_schedule_insns = 0;
1395 if (optimize_size)
1397 arm_constant_limit = 1;
1399 /* If optimizing for size, bump the number of instructions that we
1400 are prepared to conditionally execute (even on a StrongARM). */
1401 max_insns_skipped = 6;
1403 else
1405 /* For processors with load scheduling, it never costs more than
1406 2 cycles to load a constant, and the load scheduler may well
1407 reduce that to 1. */
1408 if (arm_ld_sched)
1409 arm_constant_limit = 1;
1411 /* On XScale the longer latency of a load makes it more difficult
1412 to achieve a good schedule, so it's faster to synthesize
1413 constants that can be done in two insns. */
1414 if (arm_tune_xscale)
1415 arm_constant_limit = 2;
1417 /* StrongARM has early execution of branches, so a sequence
1418 that is worth skipping is shorter. */
1419 if (arm_tune_strongarm)
1420 max_insns_skipped = 3;
1423 /* Register global variables with the garbage collector. */
1424 arm_add_gc_roots ();
1427 static void
1428 arm_add_gc_roots (void)
1430 gcc_obstack_init(&minipool_obstack);
1431 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1434 /* A table of known ARM exception types.
1435 For use with the interrupt function attribute. */
1437 typedef struct
1439 const char *const arg;
1440 const unsigned long return_value;
1442 isr_attribute_arg;
1444 static const isr_attribute_arg isr_attribute_args [] =
1446 { "IRQ", ARM_FT_ISR },
1447 { "irq", ARM_FT_ISR },
1448 { "FIQ", ARM_FT_FIQ },
1449 { "fiq", ARM_FT_FIQ },
1450 { "ABORT", ARM_FT_ISR },
1451 { "abort", ARM_FT_ISR },
1452 { "ABORT", ARM_FT_ISR },
1453 { "abort", ARM_FT_ISR },
1454 { "UNDEF", ARM_FT_EXCEPTION },
1455 { "undef", ARM_FT_EXCEPTION },
1456 { "SWI", ARM_FT_EXCEPTION },
1457 { "swi", ARM_FT_EXCEPTION },
1458 { NULL, ARM_FT_NORMAL }
1461 /* Returns the (interrupt) function type of the current
1462 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1464 static unsigned long
1465 arm_isr_value (tree argument)
1467 const isr_attribute_arg * ptr;
1468 const char * arg;
1470 if (!arm_arch_notm)
1471 return ARM_FT_NORMAL | ARM_FT_STACKALIGN;
1473 /* No argument - default to IRQ. */
1474 if (argument == NULL_TREE)
1475 return ARM_FT_ISR;
1477 /* Get the value of the argument. */
1478 if (TREE_VALUE (argument) == NULL_TREE
1479 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1480 return ARM_FT_UNKNOWN;
1482 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1484 /* Check it against the list of known arguments. */
1485 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1486 if (streq (arg, ptr->arg))
1487 return ptr->return_value;
1489 /* An unrecognized interrupt type. */
1490 return ARM_FT_UNKNOWN;
1493 /* Computes the type of the current function. */
1495 static unsigned long
1496 arm_compute_func_type (void)
1498 unsigned long type = ARM_FT_UNKNOWN;
1499 tree a;
1500 tree attr;
1502 gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
1504 /* Decide if the current function is volatile. Such functions
1505 never return, and many memory cycles can be saved by not storing
1506 register values that will never be needed again. This optimization
1507 was added to speed up context switching in a kernel application. */
1508 if (optimize > 0
1509 && (TREE_NOTHROW (current_function_decl)
1510 || !(flag_unwind_tables
1511 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
1512 && TREE_THIS_VOLATILE (current_function_decl))
1513 type |= ARM_FT_VOLATILE;
1515 if (cfun->static_chain_decl != NULL)
1516 type |= ARM_FT_NESTED;
1518 attr = DECL_ATTRIBUTES (current_function_decl);
1520 a = lookup_attribute ("naked", attr);
1521 if (a != NULL_TREE)
1522 type |= ARM_FT_NAKED;
1524 a = lookup_attribute ("isr", attr);
1525 if (a == NULL_TREE)
1526 a = lookup_attribute ("interrupt", attr);
1528 if (a == NULL_TREE)
1529 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1530 else
1531 type |= arm_isr_value (TREE_VALUE (a));
1533 return type;
1536 /* Returns the type of the current function. */
1538 unsigned long
1539 arm_current_func_type (void)
1541 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1542 cfun->machine->func_type = arm_compute_func_type ();
1544 return cfun->machine->func_type;
1547 /* Return 1 if it is possible to return using a single instruction.
1548 If SIBLING is non-null, this is a test for a return before a sibling
1549 call. SIBLING is the call insn, so we can examine its register usage. */
1552 use_return_insn (int iscond, rtx sibling)
1554 int regno;
1555 unsigned int func_type;
1556 unsigned long saved_int_regs;
1557 unsigned HOST_WIDE_INT stack_adjust;
1558 arm_stack_offsets *offsets;
1560 /* Never use a return instruction before reload has run. */
1561 if (!reload_completed)
1562 return 0;
1564 func_type = arm_current_func_type ();
1566 /* Naked, volatile and stack alignment functions need special
1567 consideration. */
1568 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED | ARM_FT_STACKALIGN))
1569 return 0;
1571 /* So do interrupt functions that use the frame pointer and Thumb
1572 interrupt functions. */
1573 if (IS_INTERRUPT (func_type) && (frame_pointer_needed || TARGET_THUMB))
1574 return 0;
1576 offsets = arm_get_frame_offsets ();
1577 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1579 /* As do variadic functions. */
1580 if (current_function_pretend_args_size
1581 || cfun->machine->uses_anonymous_args
1582 /* Or if the function calls __builtin_eh_return () */
1583 || current_function_calls_eh_return
1584 /* Or if the function calls alloca */
1585 || current_function_calls_alloca
1586 /* Or if there is a stack adjustment. However, if the stack pointer
1587 is saved on the stack, we can use a pre-incrementing stack load. */
1588 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1589 return 0;
1591 saved_int_regs = arm_compute_save_reg_mask ();
1593 /* Unfortunately, the insn
1595 ldmib sp, {..., sp, ...}
1597 triggers a bug on most SA-110 based devices, such that the stack
1598 pointer won't be correctly restored if the instruction takes a
1599 page fault. We work around this problem by popping r3 along with
1600 the other registers, since that is never slower than executing
1601 another instruction.
1603 We test for !arm_arch5 here, because code for any architecture
1604 less than this could potentially be run on one of the buggy
1605 chips. */
1606 if (stack_adjust == 4 && !arm_arch5 && TARGET_ARM)
1608 /* Validate that r3 is a call-clobbered register (always true in
1609 the default abi) ... */
1610 if (!call_used_regs[3])
1611 return 0;
1613 /* ... that it isn't being used for a return value ... */
1614 if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
1615 return 0;
1617 /* ... or for a tail-call argument ... */
1618 if (sibling)
1620 gcc_assert (GET_CODE (sibling) == CALL_INSN);
1622 if (find_regno_fusage (sibling, USE, 3))
1623 return 0;
1626 /* ... and that there are no call-saved registers in r0-r2
1627 (always true in the default ABI). */
1628 if (saved_int_regs & 0x7)
1629 return 0;
1632 /* Can't be done if interworking with Thumb, and any registers have been
1633 stacked. */
1634 if (TARGET_INTERWORK && saved_int_regs != 0 && !IS_INTERRUPT(func_type))
1635 return 0;
1637 /* On StrongARM, conditional returns are expensive if they aren't
1638 taken and multiple registers have been stacked. */
1639 if (iscond && arm_tune_strongarm)
1641 /* Conditional return when just the LR is stored is a simple
1642 conditional-load instruction, that's not expensive. */
1643 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1644 return 0;
1646 if (flag_pic
1647 && arm_pic_register != INVALID_REGNUM
1648 && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
1649 return 0;
1652 /* If there are saved registers but the LR isn't saved, then we need
1653 two instructions for the return. */
1654 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1655 return 0;
1657 /* Can't be done if any of the FPA regs are pushed,
1658 since this also requires an insn. */
1659 if (TARGET_HARD_FLOAT && TARGET_FPA)
1660 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1661 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
1662 return 0;
1664 /* Likewise VFP regs. */
1665 if (TARGET_HARD_FLOAT && TARGET_VFP)
1666 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1667 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
1668 return 0;
1670 if (TARGET_REALLY_IWMMXT)
1671 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1672 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
1673 return 0;
1675 return 1;
1678 /* Return TRUE if int I is a valid immediate ARM constant. */
1681 const_ok_for_arm (HOST_WIDE_INT i)
1683 int lowbit;
1685 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1686 be all zero, or all one. */
1687 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1688 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1689 != ((~(unsigned HOST_WIDE_INT) 0)
1690 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1691 return FALSE;
1693 i &= (unsigned HOST_WIDE_INT) 0xffffffff;
1695 /* Fast return for 0 and small values. We must do this for zero, since
1696 the code below can't handle that one case. */
1697 if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
1698 return TRUE;
1700 /* Get the number of trailing zeros. */
1701 lowbit = ffs((int) i) - 1;
1703 /* Only even shifts are allowed in ARM mode so round down to the
1704 nearest even number. */
1705 if (TARGET_ARM)
1706 lowbit &= ~1;
1708 if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
1709 return TRUE;
1711 if (TARGET_ARM)
1713 /* Allow rotated constants in ARM mode. */
1714 if (lowbit <= 4
1715 && ((i & ~0xc000003f) == 0
1716 || (i & ~0xf000000f) == 0
1717 || (i & ~0xfc000003) == 0))
1718 return TRUE;
1720 else
1722 HOST_WIDE_INT v;
1724 /* Allow repeated pattern. */
1725 v = i & 0xff;
1726 v |= v << 16;
1727 if (i == v || i == (v | (v << 8)))
1728 return TRUE;
1731 return FALSE;
1734 /* Return true if I is a valid constant for the operation CODE. */
1735 static int
1736 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1738 if (const_ok_for_arm (i))
1739 return 1;
1741 switch (code)
1743 case PLUS:
1744 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1746 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1747 case XOR:
1748 case IOR:
1749 return 0;
1751 case AND:
1752 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1754 default:
1755 gcc_unreachable ();
1759 /* Emit a sequence of insns to handle a large constant.
1760 CODE is the code of the operation required, it can be any of SET, PLUS,
1761 IOR, AND, XOR, MINUS;
1762 MODE is the mode in which the operation is being performed;
1763 VAL is the integer to operate on;
1764 SOURCE is the other operand (a register, or a null-pointer for SET);
1765 SUBTARGETS means it is safe to create scratch registers if that will
1766 either produce a simpler sequence, or we will want to cse the values.
1767 Return value is the number of insns emitted. */
1769 /* ??? Tweak this for thumb2. */
1771 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1772 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1774 rtx cond;
1776 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1777 cond = COND_EXEC_TEST (PATTERN (insn));
1778 else
1779 cond = NULL_RTX;
1781 if (subtargets || code == SET
1782 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1783 && REGNO (target) != REGNO (source)))
1785 /* After arm_reorg has been called, we can't fix up expensive
1786 constants by pushing them into memory so we must synthesize
1787 them in-line, regardless of the cost. This is only likely to
1788 be more costly on chips that have load delay slots and we are
1789 compiling without running the scheduler (so no splitting
1790 occurred before the final instruction emission).
1792 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1794 if (!after_arm_reorg
1795 && !cond
1796 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1797 1, 0)
1798 > arm_constant_limit + (code != SET)))
1800 if (code == SET)
1802 /* Currently SET is the only monadic value for CODE, all
1803 the rest are diadic. */
1804 emit_set_insn (target, GEN_INT (val));
1805 return 1;
1807 else
1809 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1811 emit_set_insn (temp, GEN_INT (val));
1812 /* For MINUS, the value is subtracted from, since we never
1813 have subtraction of a constant. */
1814 if (code == MINUS)
1815 emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
1816 else
1817 emit_set_insn (target,
1818 gen_rtx_fmt_ee (code, mode, source, temp));
1819 return 2;
1824 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1828 /* Return the number of ARM instructions required to synthesize the given
1829 constant. */
1830 static int
1831 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1833 HOST_WIDE_INT temp1;
1834 int num_insns = 0;
1837 int end;
1839 if (i <= 0)
1840 i += 32;
1841 if (remainder & (3 << (i - 2)))
1843 end = i - 8;
1844 if (end < 0)
1845 end += 32;
1846 temp1 = remainder & ((0x0ff << end)
1847 | ((i < end) ? (0xff >> (32 - end)) : 0));
1848 remainder &= ~temp1;
1849 num_insns++;
1850 i -= 6;
1852 i -= 2;
1853 } while (remainder);
1854 return num_insns;
1857 /* Emit an instruction with the indicated PATTERN. If COND is
1858 non-NULL, conditionalize the execution of the instruction on COND
1859 being true. */
1861 static void
1862 emit_constant_insn (rtx cond, rtx pattern)
1864 if (cond)
1865 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1866 emit_insn (pattern);
1869 /* As above, but extra parameter GENERATE which, if clear, suppresses
1870 RTL generation. */
1871 /* ??? This needs more work for thumb2. */
1873 static int
1874 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1875 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1876 int generate)
1878 int can_invert = 0;
1879 int can_negate = 0;
1880 int can_negate_initial = 0;
1881 int can_shift = 0;
1882 int i;
1883 int num_bits_set = 0;
1884 int set_sign_bit_copies = 0;
1885 int clear_sign_bit_copies = 0;
1886 int clear_zero_bit_copies = 0;
1887 int set_zero_bit_copies = 0;
1888 int insns = 0;
1889 unsigned HOST_WIDE_INT temp1, temp2;
1890 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1892 /* Find out which operations are safe for a given CODE. Also do a quick
1893 check for degenerate cases; these can occur when DImode operations
1894 are split. */
1895 switch (code)
1897 case SET:
1898 can_invert = 1;
1899 can_shift = 1;
1900 can_negate = 1;
1901 break;
1903 case PLUS:
1904 can_negate = 1;
1905 can_negate_initial = 1;
1906 break;
1908 case IOR:
1909 if (remainder == 0xffffffff)
1911 if (generate)
1912 emit_constant_insn (cond,
1913 gen_rtx_SET (VOIDmode, target,
1914 GEN_INT (ARM_SIGN_EXTEND (val))));
1915 return 1;
1917 if (remainder == 0)
1919 if (reload_completed && rtx_equal_p (target, source))
1920 return 0;
1921 if (generate)
1922 emit_constant_insn (cond,
1923 gen_rtx_SET (VOIDmode, target, source));
1924 return 1;
1926 break;
1928 case AND:
1929 if (remainder == 0)
1931 if (generate)
1932 emit_constant_insn (cond,
1933 gen_rtx_SET (VOIDmode, target, const0_rtx));
1934 return 1;
1936 if (remainder == 0xffffffff)
1938 if (reload_completed && rtx_equal_p (target, source))
1939 return 0;
1940 if (generate)
1941 emit_constant_insn (cond,
1942 gen_rtx_SET (VOIDmode, target, source));
1943 return 1;
1945 can_invert = 1;
1946 break;
1948 case XOR:
1949 if (remainder == 0)
1951 if (reload_completed && rtx_equal_p (target, source))
1952 return 0;
1953 if (generate)
1954 emit_constant_insn (cond,
1955 gen_rtx_SET (VOIDmode, target, source));
1956 return 1;
1959 /* We don't know how to handle other cases yet. */
1960 gcc_assert (remainder == 0xffffffff);
1962 if (generate)
1963 emit_constant_insn (cond,
1964 gen_rtx_SET (VOIDmode, target,
1965 gen_rtx_NOT (mode, source)));
1966 return 1;
1968 case MINUS:
1969 /* We treat MINUS as (val - source), since (source - val) is always
1970 passed as (source + (-val)). */
1971 if (remainder == 0)
1973 if (generate)
1974 emit_constant_insn (cond,
1975 gen_rtx_SET (VOIDmode, target,
1976 gen_rtx_NEG (mode, source)));
1977 return 1;
1979 if (const_ok_for_arm (val))
1981 if (generate)
1982 emit_constant_insn (cond,
1983 gen_rtx_SET (VOIDmode, target,
1984 gen_rtx_MINUS (mode, GEN_INT (val),
1985 source)));
1986 return 1;
1988 can_negate = 1;
1990 break;
1992 default:
1993 gcc_unreachable ();
1996 /* If we can do it in one insn get out quickly. */
1997 if (const_ok_for_arm (val)
1998 || (can_negate_initial && const_ok_for_arm (-val))
1999 || (can_invert && const_ok_for_arm (~val)))
2001 if (generate)
2002 emit_constant_insn (cond,
2003 gen_rtx_SET (VOIDmode, target,
2004 (source
2005 ? gen_rtx_fmt_ee (code, mode, source,
2006 GEN_INT (val))
2007 : GEN_INT (val))));
2008 return 1;
2011 /* Calculate a few attributes that may be useful for specific
2012 optimizations. */
2013 for (i = 31; i >= 0; i--)
2015 if ((remainder & (1 << i)) == 0)
2016 clear_sign_bit_copies++;
2017 else
2018 break;
2021 for (i = 31; i >= 0; i--)
2023 if ((remainder & (1 << i)) != 0)
2024 set_sign_bit_copies++;
2025 else
2026 break;
2029 for (i = 0; i <= 31; i++)
2031 if ((remainder & (1 << i)) == 0)
2032 clear_zero_bit_copies++;
2033 else
2034 break;
2037 for (i = 0; i <= 31; i++)
2039 if ((remainder & (1 << i)) != 0)
2040 set_zero_bit_copies++;
2041 else
2042 break;
2045 switch (code)
2047 case SET:
2048 /* See if we can use movw. */
2049 if (arm_arch_thumb2 && (remainder & 0xffff0000) == 0)
2051 if (generate)
2052 emit_constant_insn (cond, gen_rtx_SET (VOIDmode, target,
2053 GEN_INT (val)));
2054 return 1;
2057 /* See if we can do this by sign_extending a constant that is known
2058 to be negative. This is a good, way of doing it, since the shift
2059 may well merge into a subsequent insn. */
2060 if (set_sign_bit_copies > 1)
2062 if (const_ok_for_arm
2063 (temp1 = ARM_SIGN_EXTEND (remainder
2064 << (set_sign_bit_copies - 1))))
2066 if (generate)
2068 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2069 emit_constant_insn (cond,
2070 gen_rtx_SET (VOIDmode, new_src,
2071 GEN_INT (temp1)));
2072 emit_constant_insn (cond,
2073 gen_ashrsi3 (target, new_src,
2074 GEN_INT (set_sign_bit_copies - 1)));
2076 return 2;
2078 /* For an inverted constant, we will need to set the low bits,
2079 these will be shifted out of harm's way. */
2080 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
2081 if (const_ok_for_arm (~temp1))
2083 if (generate)
2085 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2086 emit_constant_insn (cond,
2087 gen_rtx_SET (VOIDmode, new_src,
2088 GEN_INT (temp1)));
2089 emit_constant_insn (cond,
2090 gen_ashrsi3 (target, new_src,
2091 GEN_INT (set_sign_bit_copies - 1)));
2093 return 2;
2097 /* See if we can calculate the value as the difference between two
2098 valid immediates. */
2099 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
2101 int topshift = clear_sign_bit_copies & ~1;
2103 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
2104 & (0xff000000 >> topshift));
2106 /* If temp1 is zero, then that means the 9 most significant
2107 bits of remainder were 1 and we've caused it to overflow.
2108 When topshift is 0 we don't need to do anything since we
2109 can borrow from 'bit 32'. */
2110 if (temp1 == 0 && topshift != 0)
2111 temp1 = 0x80000000 >> (topshift - 1);
2113 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
2115 if (const_ok_for_arm (temp2))
2117 if (generate)
2119 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2120 emit_constant_insn (cond,
2121 gen_rtx_SET (VOIDmode, new_src,
2122 GEN_INT (temp1)));
2123 emit_constant_insn (cond,
2124 gen_addsi3 (target, new_src,
2125 GEN_INT (-temp2)));
2128 return 2;
2132 /* See if we can generate this by setting the bottom (or the top)
2133 16 bits, and then shifting these into the other half of the
2134 word. We only look for the simplest cases, to do more would cost
2135 too much. Be careful, however, not to generate this when the
2136 alternative would take fewer insns. */
2137 if (val & 0xffff0000)
2139 temp1 = remainder & 0xffff0000;
2140 temp2 = remainder & 0x0000ffff;
2142 /* Overlaps outside this range are best done using other methods. */
2143 for (i = 9; i < 24; i++)
2145 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2146 && !const_ok_for_arm (temp2))
2148 rtx new_src = (subtargets
2149 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2150 : target);
2151 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2152 source, subtargets, generate);
2153 source = new_src;
2154 if (generate)
2155 emit_constant_insn
2156 (cond,
2157 gen_rtx_SET
2158 (VOIDmode, target,
2159 gen_rtx_IOR (mode,
2160 gen_rtx_ASHIFT (mode, source,
2161 GEN_INT (i)),
2162 source)));
2163 return insns + 1;
2167 /* Don't duplicate cases already considered. */
2168 for (i = 17; i < 24; i++)
2170 if (((temp1 | (temp1 >> i)) == remainder)
2171 && !const_ok_for_arm (temp1))
2173 rtx new_src = (subtargets
2174 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2175 : target);
2176 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
2177 source, subtargets, generate);
2178 source = new_src;
2179 if (generate)
2180 emit_constant_insn
2181 (cond,
2182 gen_rtx_SET (VOIDmode, target,
2183 gen_rtx_IOR
2184 (mode,
2185 gen_rtx_LSHIFTRT (mode, source,
2186 GEN_INT (i)),
2187 source)));
2188 return insns + 1;
2192 break;
2194 case IOR:
2195 case XOR:
2196 /* If we have IOR or XOR, and the constant can be loaded in a
2197 single instruction, and we can find a temporary to put it in,
2198 then this can be done in two instructions instead of 3-4. */
2199 if (subtargets
2200 /* TARGET can't be NULL if SUBTARGETS is 0 */
2201 || (reload_completed && !reg_mentioned_p (target, source)))
2203 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2205 if (generate)
2207 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2209 emit_constant_insn (cond,
2210 gen_rtx_SET (VOIDmode, sub,
2211 GEN_INT (val)));
2212 emit_constant_insn (cond,
2213 gen_rtx_SET (VOIDmode, target,
2214 gen_rtx_fmt_ee (code, mode,
2215 source, sub)));
2217 return 2;
2221 if (code == XOR)
2222 break;
2224 if (set_sign_bit_copies > 8
2225 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2227 if (generate)
2229 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2230 rtx shift = GEN_INT (set_sign_bit_copies);
2232 emit_constant_insn
2233 (cond,
2234 gen_rtx_SET (VOIDmode, sub,
2235 gen_rtx_NOT (mode,
2236 gen_rtx_ASHIFT (mode,
2237 source,
2238 shift))));
2239 emit_constant_insn
2240 (cond,
2241 gen_rtx_SET (VOIDmode, target,
2242 gen_rtx_NOT (mode,
2243 gen_rtx_LSHIFTRT (mode, sub,
2244 shift))));
2246 return 2;
2249 if (set_zero_bit_copies > 8
2250 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2252 if (generate)
2254 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2255 rtx shift = GEN_INT (set_zero_bit_copies);
2257 emit_constant_insn
2258 (cond,
2259 gen_rtx_SET (VOIDmode, sub,
2260 gen_rtx_NOT (mode,
2261 gen_rtx_LSHIFTRT (mode,
2262 source,
2263 shift))));
2264 emit_constant_insn
2265 (cond,
2266 gen_rtx_SET (VOIDmode, target,
2267 gen_rtx_NOT (mode,
2268 gen_rtx_ASHIFT (mode, sub,
2269 shift))));
2271 return 2;
2274 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2276 if (generate)
2278 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2279 emit_constant_insn (cond,
2280 gen_rtx_SET (VOIDmode, sub,
2281 gen_rtx_NOT (mode, source)));
2282 source = sub;
2283 if (subtargets)
2284 sub = gen_reg_rtx (mode);
2285 emit_constant_insn (cond,
2286 gen_rtx_SET (VOIDmode, sub,
2287 gen_rtx_AND (mode, source,
2288 GEN_INT (temp1))));
2289 emit_constant_insn (cond,
2290 gen_rtx_SET (VOIDmode, target,
2291 gen_rtx_NOT (mode, sub)));
2293 return 3;
2295 break;
2297 case AND:
2298 /* See if two shifts will do 2 or more insn's worth of work. */
2299 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2301 HOST_WIDE_INT shift_mask = ((0xffffffff
2302 << (32 - clear_sign_bit_copies))
2303 & 0xffffffff);
2305 if ((remainder | shift_mask) != 0xffffffff)
2307 if (generate)
2309 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2310 insns = arm_gen_constant (AND, mode, cond,
2311 remainder | shift_mask,
2312 new_src, source, subtargets, 1);
2313 source = new_src;
2315 else
2317 rtx targ = subtargets ? NULL_RTX : target;
2318 insns = arm_gen_constant (AND, mode, cond,
2319 remainder | shift_mask,
2320 targ, source, subtargets, 0);
2324 if (generate)
2326 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2327 rtx shift = GEN_INT (clear_sign_bit_copies);
2329 emit_insn (gen_ashlsi3 (new_src, source, shift));
2330 emit_insn (gen_lshrsi3 (target, new_src, shift));
2333 return insns + 2;
2336 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2338 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2340 if ((remainder | shift_mask) != 0xffffffff)
2342 if (generate)
2344 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2346 insns = arm_gen_constant (AND, mode, cond,
2347 remainder | shift_mask,
2348 new_src, source, subtargets, 1);
2349 source = new_src;
2351 else
2353 rtx targ = subtargets ? NULL_RTX : target;
2355 insns = arm_gen_constant (AND, mode, cond,
2356 remainder | shift_mask,
2357 targ, source, subtargets, 0);
2361 if (generate)
2363 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2364 rtx shift = GEN_INT (clear_zero_bit_copies);
2366 emit_insn (gen_lshrsi3 (new_src, source, shift));
2367 emit_insn (gen_ashlsi3 (target, new_src, shift));
2370 return insns + 2;
2373 break;
2375 default:
2376 break;
2379 for (i = 0; i < 32; i++)
2380 if (remainder & (1 << i))
2381 num_bits_set++;
2383 if (code == AND || (can_invert && num_bits_set > 16))
2384 remainder = (~remainder) & 0xffffffff;
2385 else if (code == PLUS && num_bits_set > 16)
2386 remainder = (-remainder) & 0xffffffff;
2387 else
2389 can_invert = 0;
2390 can_negate = 0;
2393 /* Now try and find a way of doing the job in either two or three
2394 instructions.
2395 We start by looking for the largest block of zeros that are aligned on
2396 a 2-bit boundary, we then fill up the temps, wrapping around to the
2397 top of the word when we drop off the bottom.
2398 In the worst case this code should produce no more than four insns.
2399 Thumb-2 constants are shifted, not rotated, so the MSB is always the
2400 best place to start. */
2402 /* ??? Use thumb2 replicated constants when the high and low halfwords are
2403 the same. */
2405 int best_start = 0;
2406 if (!TARGET_THUMB2)
2408 int best_consecutive_zeros = 0;
2410 for (i = 0; i < 32; i += 2)
2412 int consecutive_zeros = 0;
2414 if (!(remainder & (3 << i)))
2416 while ((i < 32) && !(remainder & (3 << i)))
2418 consecutive_zeros += 2;
2419 i += 2;
2421 if (consecutive_zeros > best_consecutive_zeros)
2423 best_consecutive_zeros = consecutive_zeros;
2424 best_start = i - consecutive_zeros;
2426 i -= 2;
2430 /* So long as it won't require any more insns to do so, it's
2431 desirable to emit a small constant (in bits 0...9) in the last
2432 insn. This way there is more chance that it can be combined with
2433 a later addressing insn to form a pre-indexed load or store
2434 operation. Consider:
2436 *((volatile int *)0xe0000100) = 1;
2437 *((volatile int *)0xe0000110) = 2;
2439 We want this to wind up as:
2441 mov rA, #0xe0000000
2442 mov rB, #1
2443 str rB, [rA, #0x100]
2444 mov rB, #2
2445 str rB, [rA, #0x110]
2447 rather than having to synthesize both large constants from scratch.
2449 Therefore, we calculate how many insns would be required to emit
2450 the constant starting from `best_start', and also starting from
2451 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2452 yield a shorter sequence, we may as well use zero. */
2453 if (best_start != 0
2454 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2455 && (count_insns_for_constant (remainder, 0) <=
2456 count_insns_for_constant (remainder, best_start)))
2457 best_start = 0;
2460 /* Now start emitting the insns. */
2461 i = best_start;
2464 int end;
2466 if (i <= 0)
2467 i += 32;
2468 if (remainder & (3 << (i - 2)))
2470 end = i - 8;
2471 if (end < 0)
2472 end += 32;
2473 temp1 = remainder & ((0x0ff << end)
2474 | ((i < end) ? (0xff >> (32 - end)) : 0));
2475 remainder &= ~temp1;
2477 if (generate)
2479 rtx new_src, temp1_rtx;
2481 if (code == SET || code == MINUS)
2483 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2484 if (can_invert && code != MINUS)
2485 temp1 = ~temp1;
2487 else
2489 if (remainder && subtargets)
2490 new_src = gen_reg_rtx (mode);
2491 else
2492 new_src = target;
2493 if (can_invert)
2494 temp1 = ~temp1;
2495 else if (can_negate)
2496 temp1 = -temp1;
2499 temp1 = trunc_int_for_mode (temp1, mode);
2500 temp1_rtx = GEN_INT (temp1);
2502 if (code == SET)
2504 else if (code == MINUS)
2505 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2506 else
2507 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2509 emit_constant_insn (cond,
2510 gen_rtx_SET (VOIDmode, new_src,
2511 temp1_rtx));
2512 source = new_src;
2515 if (code == SET)
2517 can_invert = 0;
2518 code = PLUS;
2520 else if (code == MINUS)
2521 code = PLUS;
2523 insns++;
2524 if (TARGET_ARM)
2525 i -= 6;
2526 else
2527 i -= 7;
2529 /* Arm allows rotates by a multiple of two. Thumb-2 allows arbitrary
2530 shifts. */
2531 if (TARGET_ARM)
2532 i -= 2;
2533 else
2534 i--;
2536 while (remainder);
2539 return insns;
2542 /* Canonicalize a comparison so that we are more likely to recognize it.
2543 This can be done for a few constant compares, where we can make the
2544 immediate value easier to load. */
2546 enum rtx_code
2547 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2548 rtx * op1)
2550 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2551 unsigned HOST_WIDE_INT maxval;
2552 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2554 switch (code)
2556 case EQ:
2557 case NE:
2558 return code;
2560 case GT:
2561 case LE:
2562 if (i != maxval
2563 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2565 *op1 = GEN_INT (i + 1);
2566 return code == GT ? GE : LT;
2568 break;
2570 case GE:
2571 case LT:
2572 if (i != ~maxval
2573 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2575 *op1 = GEN_INT (i - 1);
2576 return code == GE ? GT : LE;
2578 break;
2580 case GTU:
2581 case LEU:
2582 if (i != ~((unsigned HOST_WIDE_INT) 0)
2583 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2585 *op1 = GEN_INT (i + 1);
2586 return code == GTU ? GEU : LTU;
2588 break;
2590 case GEU:
2591 case LTU:
2592 if (i != 0
2593 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2595 *op1 = GEN_INT (i - 1);
2596 return code == GEU ? GTU : LEU;
2598 break;
2600 default:
2601 gcc_unreachable ();
2604 return code;
2608 /* Define how to find the value returned by a function. */
2611 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2613 enum machine_mode mode;
2614 int unsignedp ATTRIBUTE_UNUSED;
2615 rtx r ATTRIBUTE_UNUSED;
2617 mode = TYPE_MODE (type);
2618 /* Promote integer types. */
2619 if (INTEGRAL_TYPE_P (type))
2620 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2622 /* Promotes small structs returned in a register to full-word size
2623 for big-endian AAPCS. */
2624 if (arm_return_in_msb (type))
2626 HOST_WIDE_INT size = int_size_in_bytes (type);
2627 if (size % UNITS_PER_WORD != 0)
2629 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2630 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2634 return LIBCALL_VALUE(mode);
2637 /* Determine the amount of memory needed to store the possible return
2638 registers of an untyped call. */
2640 arm_apply_result_size (void)
2642 int size = 16;
2644 if (TARGET_ARM)
2646 if (TARGET_HARD_FLOAT_ABI)
2648 if (TARGET_FPA)
2649 size += 12;
2650 if (TARGET_MAVERICK)
2651 size += 8;
2653 if (TARGET_IWMMXT_ABI)
2654 size += 8;
2657 return size;
2660 /* Decide whether a type should be returned in memory (true)
2661 or in a register (false). This is called by the macro
2662 RETURN_IN_MEMORY. */
2664 arm_return_in_memory (tree type)
2666 HOST_WIDE_INT size;
2668 if (!AGGREGATE_TYPE_P (type) &&
2669 (TREE_CODE (type) != VECTOR_TYPE) &&
2670 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2671 /* All simple types are returned in registers.
2672 For AAPCS, complex types are treated the same as aggregates. */
2673 return 0;
2675 size = int_size_in_bytes (type);
2677 if (arm_abi != ARM_ABI_APCS)
2679 /* ATPCS and later return aggregate types in memory only if they are
2680 larger than a word (or are variable size). */
2681 return (size < 0 || size > UNITS_PER_WORD);
2684 /* To maximize backwards compatibility with previous versions of gcc,
2685 return vectors up to 4 words in registers. */
2686 if (TREE_CODE (type) == VECTOR_TYPE)
2687 return (size < 0 || size > (4 * UNITS_PER_WORD));
2689 /* For the arm-wince targets we choose to be compatible with Microsoft's
2690 ARM and Thumb compilers, which always return aggregates in memory. */
2691 #ifndef ARM_WINCE
2692 /* All structures/unions bigger than one word are returned in memory.
2693 Also catch the case where int_size_in_bytes returns -1. In this case
2694 the aggregate is either huge or of variable size, and in either case
2695 we will want to return it via memory and not in a register. */
2696 if (size < 0 || size > UNITS_PER_WORD)
2697 return 1;
2699 if (TREE_CODE (type) == RECORD_TYPE)
2701 tree field;
2703 /* For a struct the APCS says that we only return in a register
2704 if the type is 'integer like' and every addressable element
2705 has an offset of zero. For practical purposes this means
2706 that the structure can have at most one non bit-field element
2707 and that this element must be the first one in the structure. */
2709 /* Find the first field, ignoring non FIELD_DECL things which will
2710 have been created by C++. */
2711 for (field = TYPE_FIELDS (type);
2712 field && TREE_CODE (field) != FIELD_DECL;
2713 field = TREE_CHAIN (field))
2714 continue;
2716 if (field == NULL)
2717 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2719 /* Check that the first field is valid for returning in a register. */
2721 /* ... Floats are not allowed */
2722 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2723 return 1;
2725 /* ... Aggregates that are not themselves valid for returning in
2726 a register are not allowed. */
2727 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2728 return 1;
2730 /* Now check the remaining fields, if any. Only bitfields are allowed,
2731 since they are not addressable. */
2732 for (field = TREE_CHAIN (field);
2733 field;
2734 field = TREE_CHAIN (field))
2736 if (TREE_CODE (field) != FIELD_DECL)
2737 continue;
2739 if (!DECL_BIT_FIELD_TYPE (field))
2740 return 1;
2743 return 0;
2746 if (TREE_CODE (type) == UNION_TYPE)
2748 tree field;
2750 /* Unions can be returned in registers if every element is
2751 integral, or can be returned in an integer register. */
2752 for (field = TYPE_FIELDS (type);
2753 field;
2754 field = TREE_CHAIN (field))
2756 if (TREE_CODE (field) != FIELD_DECL)
2757 continue;
2759 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2760 return 1;
2762 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2763 return 1;
2766 return 0;
2768 #endif /* not ARM_WINCE */
2770 /* Return all other types in memory. */
2771 return 1;
2774 /* Indicate whether or not words of a double are in big-endian order. */
2777 arm_float_words_big_endian (void)
2779 if (TARGET_MAVERICK)
2780 return 0;
2782 /* For FPA, float words are always big-endian. For VFP, floats words
2783 follow the memory system mode. */
2785 if (TARGET_FPA)
2787 return 1;
2790 if (TARGET_VFP)
2791 return (TARGET_BIG_END ? 1 : 0);
2793 return 1;
2796 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2797 for a call to a function whose data type is FNTYPE.
2798 For a library call, FNTYPE is NULL. */
2799 void
2800 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2801 rtx libname ATTRIBUTE_UNUSED,
2802 tree fndecl ATTRIBUTE_UNUSED)
2804 /* On the ARM, the offset starts at 0. */
2805 pcum->nregs = 0;
2806 pcum->iwmmxt_nregs = 0;
2807 pcum->can_split = true;
2809 /* Varargs vectors are treated the same as long long.
2810 named_count avoids having to change the way arm handles 'named' */
2811 pcum->named_count = 0;
2812 pcum->nargs = 0;
2814 if (TARGET_REALLY_IWMMXT && fntype)
2816 tree fn_arg;
2818 for (fn_arg = TYPE_ARG_TYPES (fntype);
2819 fn_arg;
2820 fn_arg = TREE_CHAIN (fn_arg))
2821 pcum->named_count += 1;
2823 if (! pcum->named_count)
2824 pcum->named_count = INT_MAX;
2829 /* Return true if mode/type need doubleword alignment. */
2830 bool
2831 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2833 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2834 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2838 /* Determine where to put an argument to a function.
2839 Value is zero to push the argument on the stack,
2840 or a hard register in which to store the argument.
2842 MODE is the argument's machine mode.
2843 TYPE is the data type of the argument (as a tree).
2844 This is null for libcalls where that information may
2845 not be available.
2846 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2847 the preceding args and about the function being called.
2848 NAMED is nonzero if this argument is a named parameter
2849 (otherwise it is an extra parameter matching an ellipsis). */
2852 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2853 tree type, int named)
2855 int nregs;
2857 /* Varargs vectors are treated the same as long long.
2858 named_count avoids having to change the way arm handles 'named' */
2859 if (TARGET_IWMMXT_ABI
2860 && arm_vector_mode_supported_p (mode)
2861 && pcum->named_count > pcum->nargs + 1)
2863 if (pcum->iwmmxt_nregs <= 9)
2864 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2865 else
2867 pcum->can_split = false;
2868 return NULL_RTX;
2872 /* Put doubleword aligned quantities in even register pairs. */
2873 if (pcum->nregs & 1
2874 && ARM_DOUBLEWORD_ALIGN
2875 && arm_needs_doubleword_align (mode, type))
2876 pcum->nregs++;
2878 if (mode == VOIDmode)
2879 /* Pick an arbitrary value for operand 2 of the call insn. */
2880 return const0_rtx;
2882 /* Only allow splitting an arg between regs and memory if all preceding
2883 args were allocated to regs. For args passed by reference we only count
2884 the reference pointer. */
2885 if (pcum->can_split)
2886 nregs = 1;
2887 else
2888 nregs = ARM_NUM_REGS2 (mode, type);
2890 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2891 return NULL_RTX;
2893 return gen_rtx_REG (mode, pcum->nregs);
2896 static int
2897 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2898 tree type, bool named ATTRIBUTE_UNUSED)
2900 int nregs = pcum->nregs;
2902 if (arm_vector_mode_supported_p (mode))
2903 return 0;
2905 if (NUM_ARG_REGS > nregs
2906 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2907 && pcum->can_split)
2908 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2910 return 0;
2913 /* Variable sized types are passed by reference. This is a GCC
2914 extension to the ARM ABI. */
2916 static bool
2917 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2918 enum machine_mode mode ATTRIBUTE_UNUSED,
2919 tree type, bool named ATTRIBUTE_UNUSED)
2921 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2924 /* Encode the current state of the #pragma [no_]long_calls. */
2925 typedef enum
2927 OFF, /* No #pragma [no_]long_calls is in effect. */
2928 LONG, /* #pragma long_calls is in effect. */
2929 SHORT /* #pragma no_long_calls is in effect. */
2930 } arm_pragma_enum;
2932 static arm_pragma_enum arm_pragma_long_calls = OFF;
2934 void
2935 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2937 arm_pragma_long_calls = LONG;
2940 void
2941 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2943 arm_pragma_long_calls = SHORT;
2946 void
2947 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2949 arm_pragma_long_calls = OFF;
2952 /* Table of machine attributes. */
2953 const struct attribute_spec arm_attribute_table[] =
2955 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2956 /* Function calls made to this symbol must be done indirectly, because
2957 it may lie outside of the 26 bit addressing range of a normal function
2958 call. */
2959 { "long_call", 0, 0, false, true, true, NULL },
2960 /* Whereas these functions are always known to reside within the 26 bit
2961 addressing range. */
2962 { "short_call", 0, 0, false, true, true, NULL },
2963 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2964 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2965 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2966 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2967 #ifdef ARM_PE
2968 /* ARM/PE has three new attributes:
2969 interfacearm - ?
2970 dllexport - for exporting a function/variable that will live in a dll
2971 dllimport - for importing a function/variable from a dll
2973 Microsoft allows multiple declspecs in one __declspec, separating
2974 them with spaces. We do NOT support this. Instead, use __declspec
2975 multiple times.
2977 { "dllimport", 0, 0, true, false, false, NULL },
2978 { "dllexport", 0, 0, true, false, false, NULL },
2979 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2980 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2981 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2982 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2983 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2984 #endif
2985 { NULL, 0, 0, false, false, false, NULL }
2988 /* Handle an attribute requiring a FUNCTION_DECL;
2989 arguments as in struct attribute_spec.handler. */
2990 static tree
2991 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2992 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2994 if (TREE_CODE (*node) != FUNCTION_DECL)
2996 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2997 IDENTIFIER_POINTER (name));
2998 *no_add_attrs = true;
3001 return NULL_TREE;
3004 /* Handle an "interrupt" or "isr" attribute;
3005 arguments as in struct attribute_spec.handler. */
3006 static tree
3007 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
3008 bool *no_add_attrs)
3010 if (DECL_P (*node))
3012 if (TREE_CODE (*node) != FUNCTION_DECL)
3014 warning (OPT_Wattributes, "%qs attribute only applies to functions",
3015 IDENTIFIER_POINTER (name));
3016 *no_add_attrs = true;
3018 /* FIXME: the argument if any is checked for type attributes;
3019 should it be checked for decl ones? */
3021 else
3023 if (TREE_CODE (*node) == FUNCTION_TYPE
3024 || TREE_CODE (*node) == METHOD_TYPE)
3026 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
3028 warning (OPT_Wattributes, "%qs attribute ignored",
3029 IDENTIFIER_POINTER (name));
3030 *no_add_attrs = true;
3033 else if (TREE_CODE (*node) == POINTER_TYPE
3034 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
3035 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
3036 && arm_isr_value (args) != ARM_FT_UNKNOWN)
3038 *node = build_variant_type_copy (*node);
3039 TREE_TYPE (*node) = build_type_attribute_variant
3040 (TREE_TYPE (*node),
3041 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
3042 *no_add_attrs = true;
3044 else
3046 /* Possibly pass this attribute on from the type to a decl. */
3047 if (flags & ((int) ATTR_FLAG_DECL_NEXT
3048 | (int) ATTR_FLAG_FUNCTION_NEXT
3049 | (int) ATTR_FLAG_ARRAY_NEXT))
3051 *no_add_attrs = true;
3052 return tree_cons (name, args, NULL_TREE);
3054 else
3056 warning (OPT_Wattributes, "%qs attribute ignored",
3057 IDENTIFIER_POINTER (name));
3062 return NULL_TREE;
3065 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
3066 /* Handle the "notshared" attribute. This attribute is another way of
3067 requesting hidden visibility. ARM's compiler supports
3068 "__declspec(notshared)"; we support the same thing via an
3069 attribute. */
3071 static tree
3072 arm_handle_notshared_attribute (tree *node,
3073 tree name ATTRIBUTE_UNUSED,
3074 tree args ATTRIBUTE_UNUSED,
3075 int flags ATTRIBUTE_UNUSED,
3076 bool *no_add_attrs)
3078 tree decl = TYPE_NAME (*node);
3080 if (decl)
3082 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
3083 DECL_VISIBILITY_SPECIFIED (decl) = 1;
3084 *no_add_attrs = false;
3086 return NULL_TREE;
3088 #endif
3090 /* Return 0 if the attributes for two types are incompatible, 1 if they
3091 are compatible, and 2 if they are nearly compatible (which causes a
3092 warning to be generated). */
3093 static int
3094 arm_comp_type_attributes (tree type1, tree type2)
3096 int l1, l2, s1, s2;
3098 /* Check for mismatch of non-default calling convention. */
3099 if (TREE_CODE (type1) != FUNCTION_TYPE)
3100 return 1;
3102 /* Check for mismatched call attributes. */
3103 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
3104 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
3105 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
3106 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
3108 /* Only bother to check if an attribute is defined. */
3109 if (l1 | l2 | s1 | s2)
3111 /* If one type has an attribute, the other must have the same attribute. */
3112 if ((l1 != l2) || (s1 != s2))
3113 return 0;
3115 /* Disallow mixed attributes. */
3116 if ((l1 & s2) || (l2 & s1))
3117 return 0;
3120 /* Check for mismatched ISR attribute. */
3121 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
3122 if (! l1)
3123 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
3124 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
3125 if (! l2)
3126 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
3127 if (l1 != l2)
3128 return 0;
3130 return 1;
3133 /* Assigns default attributes to newly defined type. This is used to
3134 set short_call/long_call attributes for function types of
3135 functions defined inside corresponding #pragma scopes. */
3136 static void
3137 arm_set_default_type_attributes (tree type)
3139 /* Add __attribute__ ((long_call)) to all functions, when
3140 inside #pragma long_calls or __attribute__ ((short_call)),
3141 when inside #pragma no_long_calls. */
3142 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
3144 tree type_attr_list, attr_name;
3145 type_attr_list = TYPE_ATTRIBUTES (type);
3147 if (arm_pragma_long_calls == LONG)
3148 attr_name = get_identifier ("long_call");
3149 else if (arm_pragma_long_calls == SHORT)
3150 attr_name = get_identifier ("short_call");
3151 else
3152 return;
3154 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
3155 TYPE_ATTRIBUTES (type) = type_attr_list;
3159 /* Return true if DECL is known to be linked into section SECTION. */
3161 static bool
3162 arm_function_in_section_p (tree decl, section *section)
3164 /* We can only be certain about functions defined in the same
3165 compilation unit. */
3166 if (!TREE_STATIC (decl))
3167 return false;
3169 /* Make sure that SYMBOL always binds to the definition in this
3170 compilation unit. */
3171 if (!targetm.binds_local_p (decl))
3172 return false;
3174 /* If DECL_SECTION_NAME is set, assume it is trustworthy. */
3175 if (!DECL_SECTION_NAME (decl))
3177 /* Only cater for unit-at-a-time mode, where we know that the user
3178 cannot later specify a section for DECL. */
3179 if (!flag_unit_at_a_time)
3180 return false;
3182 /* Make sure that we will not create a unique section for DECL. */
3183 if (flag_function_sections || DECL_ONE_ONLY (decl))
3184 return false;
3187 return function_section (decl) == section;
3190 /* Return nonzero if a 32-bit "long_call" should be generated for
3191 a call from the current function to DECL. We generate a long_call
3192 if the function:
3194 a. has an __attribute__((long call))
3195 or b. is within the scope of a #pragma long_calls
3196 or c. the -mlong-calls command line switch has been specified
3198 However we do not generate a long call if the function:
3200 d. has an __attribute__ ((short_call))
3201 or e. is inside the scope of a #pragma no_long_calls
3202 or f. is defined in the same section as the current function. */
3204 bool
3205 arm_is_long_call_p (tree decl)
3207 tree attrs;
3209 if (!decl)
3210 return TARGET_LONG_CALLS;
3212 attrs = TYPE_ATTRIBUTES (TREE_TYPE (decl));
3213 if (lookup_attribute ("short_call", attrs))
3214 return false;
3216 /* For "f", be conservative, and only cater for cases in which the
3217 whole of the current function is placed in the same section. */
3218 if (!flag_reorder_blocks_and_partition
3219 && arm_function_in_section_p (decl, current_function_section ()))
3220 return false;
3222 if (lookup_attribute ("long_call", attrs))
3223 return true;
3225 return TARGET_LONG_CALLS;
3228 /* Return nonzero if it is ok to make a tail-call to DECL. */
3229 static bool
3230 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3232 unsigned long func_type;
3234 if (cfun->machine->sibcall_blocked)
3235 return false;
3237 /* Never tailcall something for which we have no decl, or if we
3238 are in Thumb mode. */
3239 if (decl == NULL || TARGET_THUMB)
3240 return false;
3242 /* The PIC register is live on entry to VxWorks PLT entries, so we
3243 must make the call before restoring the PIC register. */
3244 if (TARGET_VXWORKS_RTP && flag_pic && !targetm.binds_local_p (decl))
3245 return false;
3247 /* Cannot tail-call to long calls, since these are out of range of
3248 a branch instruction. */
3249 if (arm_is_long_call_p (decl))
3250 return false;
3252 /* If we are interworking and the function is not declared static
3253 then we can't tail-call it unless we know that it exists in this
3254 compilation unit (since it might be a Thumb routine). */
3255 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3256 return false;
3258 func_type = arm_current_func_type ();
3259 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3260 if (IS_INTERRUPT (func_type))
3261 return false;
3263 /* Never tailcall if function may be called with a misaligned SP. */
3264 if (IS_STACKALIGN (func_type))
3265 return false;
3267 /* Everything else is ok. */
3268 return true;
3272 /* Addressing mode support functions. */
3274 /* Return nonzero if X is a legitimate immediate operand when compiling
3275 for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
3277 legitimate_pic_operand_p (rtx x)
3279 if (GET_CODE (x) == SYMBOL_REF
3280 || (GET_CODE (x) == CONST
3281 && GET_CODE (XEXP (x, 0)) == PLUS
3282 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
3283 return 0;
3285 return 1;
3288 /* Record that the current function needs a PIC register. Initialize
3289 cfun->machine->pic_reg if we have not already done so. */
3291 static void
3292 require_pic_register (void)
3294 /* A lot of the logic here is made obscure by the fact that this
3295 routine gets called as part of the rtx cost estimation process.
3296 We don't want those calls to affect any assumptions about the real
3297 function; and further, we can't call entry_of_function() until we
3298 start the real expansion process. */
3299 if (!current_function_uses_pic_offset_table)
3301 gcc_assert (!no_new_pseudos);
3302 if (arm_pic_register != INVALID_REGNUM)
3304 cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
3306 /* Play games to avoid marking the function as needing pic
3307 if we are being called as part of the cost-estimation
3308 process. */
3309 if (current_ir_type () != IR_GIMPLE)
3310 current_function_uses_pic_offset_table = 1;
3312 else
3314 rtx seq;
3316 cfun->machine->pic_reg = gen_reg_rtx (Pmode);
3318 /* Play games to avoid marking the function as needing pic
3319 if we are being called as part of the cost-estimation
3320 process. */
3321 if (current_ir_type () != IR_GIMPLE)
3323 current_function_uses_pic_offset_table = 1;
3324 start_sequence ();
3326 arm_load_pic_register (0UL);
3328 seq = get_insns ();
3329 end_sequence ();
3330 emit_insn_after (seq, entry_of_function ());
3337 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3339 if (GET_CODE (orig) == SYMBOL_REF
3340 || GET_CODE (orig) == LABEL_REF)
3342 #ifndef AOF_ASSEMBLER
3343 rtx pic_ref, address;
3344 #endif
3345 rtx insn;
3346 int subregs = 0;
3348 /* If this function doesn't have a pic register, create one now. */
3349 require_pic_register ();
3351 if (reg == 0)
3353 gcc_assert (!no_new_pseudos);
3354 reg = gen_reg_rtx (Pmode);
3356 subregs = 1;
3359 #ifdef AOF_ASSEMBLER
3360 /* The AOF assembler can generate relocations for these directly, and
3361 understands that the PIC register has to be added into the offset. */
3362 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3363 #else
3364 if (subregs)
3365 address = gen_reg_rtx (Pmode);
3366 else
3367 address = reg;
3369 if (TARGET_ARM)
3370 emit_insn (gen_pic_load_addr_arm (address, orig));
3371 else if (TARGET_THUMB2)
3372 emit_insn (gen_pic_load_addr_thumb2 (address, orig));
3373 else /* TARGET_THUMB1 */
3374 emit_insn (gen_pic_load_addr_thumb1 (address, orig));
3376 /* VxWorks does not impose a fixed gap between segments; the run-time
3377 gap can be different from the object-file gap. We therefore can't
3378 use GOTOFF unless we are absolutely sure that the symbol is in the
3379 same segment as the GOT. Unfortunately, the flexibility of linker
3380 scripts means that we can't be sure of that in general, so assume
3381 that GOTOFF is never valid on VxWorks. */
3382 if ((GET_CODE (orig) == LABEL_REF
3383 || (GET_CODE (orig) == SYMBOL_REF &&
3384 SYMBOL_REF_LOCAL_P (orig)))
3385 && NEED_GOT_RELOC
3386 && !TARGET_VXWORKS_RTP)
3387 pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
3388 else
3390 pic_ref = gen_const_mem (Pmode,
3391 gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
3392 address));
3395 insn = emit_move_insn (reg, pic_ref);
3396 #endif
3397 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3398 by loop. */
3399 set_unique_reg_note (insn, REG_EQUAL, orig);
3401 return reg;
3403 else if (GET_CODE (orig) == CONST)
3405 rtx base, offset;
3407 if (GET_CODE (XEXP (orig, 0)) == PLUS
3408 && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
3409 return orig;
3411 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
3412 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
3413 return orig;
3415 if (reg == 0)
3417 gcc_assert (!no_new_pseudos);
3418 reg = gen_reg_rtx (Pmode);
3421 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3423 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3424 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3425 base == reg ? 0 : reg);
3427 if (GET_CODE (offset) == CONST_INT)
3429 /* The base register doesn't really matter, we only want to
3430 test the index for the appropriate mode. */
3431 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3433 gcc_assert (!no_new_pseudos);
3434 offset = force_reg (Pmode, offset);
3437 if (GET_CODE (offset) == CONST_INT)
3438 return plus_constant (base, INTVAL (offset));
3441 if (GET_MODE_SIZE (mode) > 4
3442 && (GET_MODE_CLASS (mode) == MODE_INT
3443 || TARGET_SOFT_FLOAT))
3445 emit_insn (gen_addsi3 (reg, base, offset));
3446 return reg;
3449 return gen_rtx_PLUS (Pmode, base, offset);
3452 return orig;
3456 /* Find a spare register to use during the prolog of a function. */
3458 static int
3459 thumb_find_work_register (unsigned long pushed_regs_mask)
3461 int reg;
3463 /* Check the argument registers first as these are call-used. The
3464 register allocation order means that sometimes r3 might be used
3465 but earlier argument registers might not, so check them all. */
3466 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3467 if (!df_regs_ever_live_p (reg))
3468 return reg;
3470 /* Before going on to check the call-saved registers we can try a couple
3471 more ways of deducing that r3 is available. The first is when we are
3472 pushing anonymous arguments onto the stack and we have less than 4
3473 registers worth of fixed arguments(*). In this case r3 will be part of
3474 the variable argument list and so we can be sure that it will be
3475 pushed right at the start of the function. Hence it will be available
3476 for the rest of the prologue.
3477 (*): ie current_function_pretend_args_size is greater than 0. */
3478 if (cfun->machine->uses_anonymous_args
3479 && current_function_pretend_args_size > 0)
3480 return LAST_ARG_REGNUM;
3482 /* The other case is when we have fixed arguments but less than 4 registers
3483 worth. In this case r3 might be used in the body of the function, but
3484 it is not being used to convey an argument into the function. In theory
3485 we could just check current_function_args_size to see how many bytes are
3486 being passed in argument registers, but it seems that it is unreliable.
3487 Sometimes it will have the value 0 when in fact arguments are being
3488 passed. (See testcase execute/20021111-1.c for an example). So we also
3489 check the args_info.nregs field as well. The problem with this field is
3490 that it makes no allowances for arguments that are passed to the
3491 function but which are not used. Hence we could miss an opportunity
3492 when a function has an unused argument in r3. But it is better to be
3493 safe than to be sorry. */
3494 if (! cfun->machine->uses_anonymous_args
3495 && current_function_args_size >= 0
3496 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3497 && cfun->args_info.nregs < 4)
3498 return LAST_ARG_REGNUM;
3500 /* Otherwise look for a call-saved register that is going to be pushed. */
3501 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3502 if (pushed_regs_mask & (1 << reg))
3503 return reg;
3505 if (TARGET_THUMB2)
3507 /* Thumb-2 can use high regs. */
3508 for (reg = FIRST_HI_REGNUM; reg < 15; reg ++)
3509 if (pushed_regs_mask & (1 << reg))
3510 return reg;
3512 /* Something went wrong - thumb_compute_save_reg_mask()
3513 should have arranged for a suitable register to be pushed. */
3514 gcc_unreachable ();
3517 static GTY(()) int pic_labelno;
3519 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3520 low register. */
3522 void
3523 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3525 #ifndef AOF_ASSEMBLER
3526 rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx, pic_reg;
3527 rtx global_offset_table;
3529 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3530 return;
3532 gcc_assert (flag_pic);
3534 pic_reg = cfun->machine->pic_reg;
3535 if (TARGET_VXWORKS_RTP)
3537 pic_rtx = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE);
3538 pic_rtx = gen_rtx_CONST (Pmode, pic_rtx);
3539 emit_insn (gen_pic_load_addr_arm (pic_reg, pic_rtx));
3541 emit_insn (gen_rtx_SET (Pmode, pic_reg, gen_rtx_MEM (Pmode, pic_reg)));
3543 pic_tmp = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
3544 emit_insn (gen_pic_offset_arm (pic_reg, pic_reg, pic_tmp));
3546 else
3548 /* We use an UNSPEC rather than a LABEL_REF because this label
3549 never appears in the code stream. */
3551 labelno = GEN_INT (pic_labelno++);
3552 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3553 l1 = gen_rtx_CONST (VOIDmode, l1);
3555 global_offset_table
3556 = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3557 /* On the ARM the PC register contains 'dot + 8' at the time of the
3558 addition, on the Thumb it is 'dot + 4'. */
3559 pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
3560 if (GOT_PCREL)
3562 pic_tmp2 = gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx);
3563 pic_tmp2 = gen_rtx_CONST (VOIDmode, pic_tmp2);
3565 else
3566 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3568 pic_rtx = gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp);
3569 pic_rtx = gen_rtx_CONST (Pmode, pic_rtx);
3571 if (TARGET_ARM)
3573 emit_insn (gen_pic_load_addr_arm (pic_reg, pic_rtx));
3574 emit_insn (gen_pic_add_dot_plus_eight (pic_reg, pic_reg, labelno));
3576 else if (TARGET_THUMB2)
3578 /* Thumb-2 only allows very limited access to the PC. Calculate the
3579 address in a temporary register. */
3580 if (arm_pic_register != INVALID_REGNUM)
3582 pic_tmp = gen_rtx_REG (SImode,
3583 thumb_find_work_register (saved_regs));
3585 else
3587 gcc_assert (!no_new_pseudos);
3588 pic_tmp = gen_reg_rtx (Pmode);
3591 emit_insn (gen_pic_load_addr_thumb2 (pic_reg, pic_rtx));
3592 emit_insn (gen_pic_load_dot_plus_four (pic_tmp, labelno));
3593 emit_insn (gen_addsi3 (pic_reg, pic_reg, pic_tmp));
3595 else /* TARGET_THUMB1 */
3597 if (arm_pic_register != INVALID_REGNUM
3598 && REGNO (pic_reg) > LAST_LO_REGNUM)
3600 /* We will have pushed the pic register, so we should always be
3601 able to find a work register. */
3602 pic_tmp = gen_rtx_REG (SImode,
3603 thumb_find_work_register (saved_regs));
3604 emit_insn (gen_pic_load_addr_thumb1 (pic_tmp, pic_rtx));
3605 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3607 else
3608 emit_insn (gen_pic_load_addr_thumb1 (pic_reg, pic_rtx));
3609 emit_insn (gen_pic_add_dot_plus_four (pic_reg, pic_reg, labelno));
3613 /* Need to emit this whether or not we obey regdecls,
3614 since setjmp/longjmp can cause life info to screw up. */
3615 emit_insn (gen_rtx_USE (VOIDmode, pic_reg));
3616 #endif /* AOF_ASSEMBLER */
3620 /* Return nonzero if X is valid as an ARM state addressing register. */
3621 static int
3622 arm_address_register_rtx_p (rtx x, int strict_p)
3624 int regno;
3626 if (GET_CODE (x) != REG)
3627 return 0;
3629 regno = REGNO (x);
3631 if (strict_p)
3632 return ARM_REGNO_OK_FOR_BASE_P (regno);
3634 return (regno <= LAST_ARM_REGNUM
3635 || regno >= FIRST_PSEUDO_REGISTER
3636 || regno == FRAME_POINTER_REGNUM
3637 || regno == ARG_POINTER_REGNUM);
3640 /* Return TRUE if this rtx is the difference of a symbol and a label,
3641 and will reduce to a PC-relative relocation in the object file.
3642 Expressions like this can be left alone when generating PIC, rather
3643 than forced through the GOT. */
3644 static int
3645 pcrel_constant_p (rtx x)
3647 if (GET_CODE (x) == MINUS)
3648 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
3650 return FALSE;
3653 /* Return nonzero if X is a valid ARM state address operand. */
3655 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3656 int strict_p)
3658 bool use_ldrd;
3659 enum rtx_code code = GET_CODE (x);
3661 if (arm_address_register_rtx_p (x, strict_p))
3662 return 1;
3664 use_ldrd = (TARGET_LDRD
3665 && (mode == DImode
3666 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3668 if (code == POST_INC || code == PRE_DEC
3669 || ((code == PRE_INC || code == POST_DEC)
3670 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3671 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3673 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3674 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3675 && GET_CODE (XEXP (x, 1)) == PLUS
3676 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3678 rtx addend = XEXP (XEXP (x, 1), 1);
3680 /* Don't allow ldrd post increment by register because it's hard
3681 to fixup invalid register choices. */
3682 if (use_ldrd
3683 && GET_CODE (x) == POST_MODIFY
3684 && GET_CODE (addend) == REG)
3685 return 0;
3687 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3688 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3691 /* After reload constants split into minipools will have addresses
3692 from a LABEL_REF. */
3693 else if (reload_completed
3694 && (code == LABEL_REF
3695 || (code == CONST
3696 && GET_CODE (XEXP (x, 0)) == PLUS
3697 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3698 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3699 return 1;
3701 else if (mode == TImode)
3702 return 0;
3704 else if (code == PLUS)
3706 rtx xop0 = XEXP (x, 0);
3707 rtx xop1 = XEXP (x, 1);
3709 return ((arm_address_register_rtx_p (xop0, strict_p)
3710 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3711 || (arm_address_register_rtx_p (xop1, strict_p)
3712 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3715 #if 0
3716 /* Reload currently can't handle MINUS, so disable this for now */
3717 else if (GET_CODE (x) == MINUS)
3719 rtx xop0 = XEXP (x, 0);
3720 rtx xop1 = XEXP (x, 1);
3722 return (arm_address_register_rtx_p (xop0, strict_p)
3723 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3725 #endif
3727 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3728 && code == SYMBOL_REF
3729 && CONSTANT_POOL_ADDRESS_P (x)
3730 && ! (flag_pic
3731 && symbol_mentioned_p (get_pool_constant (x))
3732 && ! pcrel_constant_p (get_pool_constant (x))))
3733 return 1;
3735 return 0;
3738 /* Return nonzero if X is a valid Thumb-2 address operand. */
3740 thumb2_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3742 bool use_ldrd;
3743 enum rtx_code code = GET_CODE (x);
3745 if (arm_address_register_rtx_p (x, strict_p))
3746 return 1;
3748 use_ldrd = (TARGET_LDRD
3749 && (mode == DImode
3750 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3752 if (code == POST_INC || code == PRE_DEC
3753 || ((code == PRE_INC || code == POST_DEC)
3754 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3755 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3757 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3758 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3759 && GET_CODE (XEXP (x, 1)) == PLUS
3760 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3762 /* Thumb-2 only has autoincrement by constant. */
3763 rtx addend = XEXP (XEXP (x, 1), 1);
3764 HOST_WIDE_INT offset;
3766 if (GET_CODE (addend) != CONST_INT)
3767 return 0;
3769 offset = INTVAL(addend);
3770 if (GET_MODE_SIZE (mode) <= 4)
3771 return (offset > -256 && offset < 256);
3773 return (use_ldrd && offset > -1024 && offset < 1024
3774 && (offset & 3) == 0);
3777 /* After reload constants split into minipools will have addresses
3778 from a LABEL_REF. */
3779 else if (reload_completed
3780 && (code == LABEL_REF
3781 || (code == CONST
3782 && GET_CODE (XEXP (x, 0)) == PLUS
3783 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3784 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3785 return 1;
3787 else if (mode == TImode)
3788 return 0;
3790 else if (code == PLUS)
3792 rtx xop0 = XEXP (x, 0);
3793 rtx xop1 = XEXP (x, 1);
3795 return ((arm_address_register_rtx_p (xop0, strict_p)
3796 && thumb2_legitimate_index_p (mode, xop1, strict_p))
3797 || (arm_address_register_rtx_p (xop1, strict_p)
3798 && thumb2_legitimate_index_p (mode, xop0, strict_p)));
3801 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3802 && code == SYMBOL_REF
3803 && CONSTANT_POOL_ADDRESS_P (x)
3804 && ! (flag_pic
3805 && symbol_mentioned_p (get_pool_constant (x))
3806 && ! pcrel_constant_p (get_pool_constant (x))))
3807 return 1;
3809 return 0;
3812 /* Return nonzero if INDEX is valid for an address index operand in
3813 ARM state. */
3814 static int
3815 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3816 int strict_p)
3818 HOST_WIDE_INT range;
3819 enum rtx_code code = GET_CODE (index);
3821 /* Standard coprocessor addressing modes. */
3822 if (TARGET_HARD_FLOAT
3823 && (TARGET_FPA || TARGET_MAVERICK)
3824 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3825 || (TARGET_MAVERICK && mode == DImode)))
3826 return (code == CONST_INT && INTVAL (index) < 1024
3827 && INTVAL (index) > -1024
3828 && (INTVAL (index) & 3) == 0);
3830 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3831 return (code == CONST_INT
3832 && INTVAL (index) < 1024
3833 && INTVAL (index) > -1024
3834 && (INTVAL (index) & 3) == 0);
3836 if (arm_address_register_rtx_p (index, strict_p)
3837 && (GET_MODE_SIZE (mode) <= 4))
3838 return 1;
3840 if (mode == DImode || mode == DFmode)
3842 if (code == CONST_INT)
3844 HOST_WIDE_INT val = INTVAL (index);
3846 if (TARGET_LDRD)
3847 return val > -256 && val < 256;
3848 else
3849 return val > -4096 && val < 4092;
3852 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3855 if (GET_MODE_SIZE (mode) <= 4
3856 && ! (arm_arch4
3857 && (mode == HImode
3858 || (mode == QImode && outer == SIGN_EXTEND))))
3860 if (code == MULT)
3862 rtx xiop0 = XEXP (index, 0);
3863 rtx xiop1 = XEXP (index, 1);
3865 return ((arm_address_register_rtx_p (xiop0, strict_p)
3866 && power_of_two_operand (xiop1, SImode))
3867 || (arm_address_register_rtx_p (xiop1, strict_p)
3868 && power_of_two_operand (xiop0, SImode)));
3870 else if (code == LSHIFTRT || code == ASHIFTRT
3871 || code == ASHIFT || code == ROTATERT)
3873 rtx op = XEXP (index, 1);
3875 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3876 && GET_CODE (op) == CONST_INT
3877 && INTVAL (op) > 0
3878 && INTVAL (op) <= 31);
3882 /* For ARM v4 we may be doing a sign-extend operation during the
3883 load. */
3884 if (arm_arch4)
3886 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3887 range = 256;
3888 else
3889 range = 4096;
3891 else
3892 range = (mode == HImode) ? 4095 : 4096;
3894 return (code == CONST_INT
3895 && INTVAL (index) < range
3896 && INTVAL (index) > -range);
3899 /* Return true if OP is a valid index scaling factor for Thumb-2 address
3900 index operand. i.e. 1, 2, 4 or 8. */
3901 static bool
3902 thumb2_index_mul_operand (rtx op)
3904 HOST_WIDE_INT val;
3906 if (GET_CODE(op) != CONST_INT)
3907 return false;
3909 val = INTVAL(op);
3910 return (val == 1 || val == 2 || val == 4 || val == 8);
3913 /* Return nonzero if INDEX is a valid Thumb-2 address index operand. */
3914 static int
3915 thumb2_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
3917 enum rtx_code code = GET_CODE (index);
3919 /* ??? Combine arm and thumb2 coprocessor addressing modes. */
3920 /* Standard coprocessor addressing modes. */
3921 if (TARGET_HARD_FLOAT
3922 && (TARGET_FPA || TARGET_MAVERICK)
3923 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3924 || (TARGET_MAVERICK && mode == DImode)))
3925 return (code == CONST_INT && INTVAL (index) < 1024
3926 && INTVAL (index) > -1024
3927 && (INTVAL (index) & 3) == 0);
3929 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3931 /* For DImode assume values will usually live in core regs
3932 and only allow LDRD addressing modes. */
3933 if (!TARGET_LDRD || mode != DImode)
3934 return (code == CONST_INT
3935 && INTVAL (index) < 1024
3936 && INTVAL (index) > -1024
3937 && (INTVAL (index) & 3) == 0);
3940 if (arm_address_register_rtx_p (index, strict_p)
3941 && (GET_MODE_SIZE (mode) <= 4))
3942 return 1;
3944 if (mode == DImode || mode == DFmode)
3946 HOST_WIDE_INT val = INTVAL (index);
3947 /* ??? Can we assume ldrd for thumb2? */
3948 /* Thumb-2 ldrd only has reg+const addressing modes. */
3949 if (code != CONST_INT)
3950 return 0;
3952 /* ldrd supports offsets of +-1020.
3953 However the ldr fallback does not. */
3954 return val > -256 && val < 256 && (val & 3) == 0;
3957 if (code == MULT)
3959 rtx xiop0 = XEXP (index, 0);
3960 rtx xiop1 = XEXP (index, 1);
3962 return ((arm_address_register_rtx_p (xiop0, strict_p)
3963 && thumb2_index_mul_operand (xiop1))
3964 || (arm_address_register_rtx_p (xiop1, strict_p)
3965 && thumb2_index_mul_operand (xiop0)));
3967 else if (code == ASHIFT)
3969 rtx op = XEXP (index, 1);
3971 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3972 && GET_CODE (op) == CONST_INT
3973 && INTVAL (op) > 0
3974 && INTVAL (op) <= 3);
3977 return (code == CONST_INT
3978 && INTVAL (index) < 4096
3979 && INTVAL (index) > -256);
3982 /* Return nonzero if X is valid as a 16-bit Thumb state base register. */
3983 static int
3984 thumb1_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3986 int regno;
3988 if (GET_CODE (x) != REG)
3989 return 0;
3991 regno = REGNO (x);
3993 if (strict_p)
3994 return THUMB1_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3996 return (regno <= LAST_LO_REGNUM
3997 || regno > LAST_VIRTUAL_REGISTER
3998 || regno == FRAME_POINTER_REGNUM
3999 || (GET_MODE_SIZE (mode) >= 4
4000 && (regno == STACK_POINTER_REGNUM
4001 || regno >= FIRST_PSEUDO_REGISTER
4002 || x == hard_frame_pointer_rtx
4003 || x == arg_pointer_rtx)));
4006 /* Return nonzero if x is a legitimate index register. This is the case
4007 for any base register that can access a QImode object. */
4008 inline static int
4009 thumb1_index_register_rtx_p (rtx x, int strict_p)
4011 return thumb1_base_register_rtx_p (x, QImode, strict_p);
4014 /* Return nonzero if x is a legitimate 16-bit Thumb-state address.
4016 The AP may be eliminated to either the SP or the FP, so we use the
4017 least common denominator, e.g. SImode, and offsets from 0 to 64.
4019 ??? Verify whether the above is the right approach.
4021 ??? Also, the FP may be eliminated to the SP, so perhaps that
4022 needs special handling also.
4024 ??? Look at how the mips16 port solves this problem. It probably uses
4025 better ways to solve some of these problems.
4027 Although it is not incorrect, we don't accept QImode and HImode
4028 addresses based on the frame pointer or arg pointer until the
4029 reload pass starts. This is so that eliminating such addresses
4030 into stack based ones won't produce impossible code. */
4032 thumb1_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
4034 /* ??? Not clear if this is right. Experiment. */
4035 if (GET_MODE_SIZE (mode) < 4
4036 && !(reload_in_progress || reload_completed)
4037 && (reg_mentioned_p (frame_pointer_rtx, x)
4038 || reg_mentioned_p (arg_pointer_rtx, x)
4039 || reg_mentioned_p (virtual_incoming_args_rtx, x)
4040 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
4041 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
4042 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
4043 return 0;
4045 /* Accept any base register. SP only in SImode or larger. */
4046 else if (thumb1_base_register_rtx_p (x, mode, strict_p))
4047 return 1;
4049 /* This is PC relative data before arm_reorg runs. */
4050 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
4051 && GET_CODE (x) == SYMBOL_REF
4052 && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
4053 return 1;
4055 /* This is PC relative data after arm_reorg runs. */
4056 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
4057 && (GET_CODE (x) == LABEL_REF
4058 || (GET_CODE (x) == CONST
4059 && GET_CODE (XEXP (x, 0)) == PLUS
4060 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
4061 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
4062 return 1;
4064 /* Post-inc indexing only supported for SImode and larger. */
4065 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
4066 && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p))
4067 return 1;
4069 else if (GET_CODE (x) == PLUS)
4071 /* REG+REG address can be any two index registers. */
4072 /* We disallow FRAME+REG addressing since we know that FRAME
4073 will be replaced with STACK, and SP relative addressing only
4074 permits SP+OFFSET. */
4075 if (GET_MODE_SIZE (mode) <= 4
4076 && XEXP (x, 0) != frame_pointer_rtx
4077 && XEXP (x, 1) != frame_pointer_rtx
4078 && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
4079 && thumb1_index_register_rtx_p (XEXP (x, 1), strict_p))
4080 return 1;
4082 /* REG+const has 5-7 bit offset for non-SP registers. */
4083 else if ((thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
4084 || XEXP (x, 0) == arg_pointer_rtx)
4085 && GET_CODE (XEXP (x, 1)) == CONST_INT
4086 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4087 return 1;
4089 /* REG+const has 10-bit offset for SP, but only SImode and
4090 larger is supported. */
4091 /* ??? Should probably check for DI/DFmode overflow here
4092 just like GO_IF_LEGITIMATE_OFFSET does. */
4093 else if (GET_CODE (XEXP (x, 0)) == REG
4094 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
4095 && GET_MODE_SIZE (mode) >= 4
4096 && GET_CODE (XEXP (x, 1)) == CONST_INT
4097 && INTVAL (XEXP (x, 1)) >= 0
4098 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
4099 && (INTVAL (XEXP (x, 1)) & 3) == 0)
4100 return 1;
4102 else if (GET_CODE (XEXP (x, 0)) == REG
4103 && (REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
4104 || REGNO (XEXP (x, 0)) == ARG_POINTER_REGNUM
4105 || (REGNO (XEXP (x, 0)) >= FIRST_VIRTUAL_REGISTER
4106 && REGNO (XEXP (x, 0)) <= LAST_VIRTUAL_REGISTER))
4107 && GET_MODE_SIZE (mode) >= 4
4108 && GET_CODE (XEXP (x, 1)) == CONST_INT
4109 && (INTVAL (XEXP (x, 1)) & 3) == 0)
4110 return 1;
4113 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
4114 && GET_MODE_SIZE (mode) == 4
4115 && GET_CODE (x) == SYMBOL_REF
4116 && CONSTANT_POOL_ADDRESS_P (x)
4117 && ! (flag_pic
4118 && symbol_mentioned_p (get_pool_constant (x))
4119 && ! pcrel_constant_p (get_pool_constant (x))))
4120 return 1;
4122 return 0;
4125 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
4126 instruction of mode MODE. */
4128 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
4130 switch (GET_MODE_SIZE (mode))
4132 case 1:
4133 return val >= 0 && val < 32;
4135 case 2:
4136 return val >= 0 && val < 64 && (val & 1) == 0;
4138 default:
4139 return (val >= 0
4140 && (val + GET_MODE_SIZE (mode)) <= 128
4141 && (val & 3) == 0);
4145 /* Build the SYMBOL_REF for __tls_get_addr. */
4147 static GTY(()) rtx tls_get_addr_libfunc;
4149 static rtx
4150 get_tls_get_addr (void)
4152 if (!tls_get_addr_libfunc)
4153 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
4154 return tls_get_addr_libfunc;
4157 static rtx
4158 arm_load_tp (rtx target)
4160 if (!target)
4161 target = gen_reg_rtx (SImode);
4163 if (TARGET_HARD_TP)
4165 /* Can return in any reg. */
4166 emit_insn (gen_load_tp_hard (target));
4168 else
4170 /* Always returned in r0. Immediately copy the result into a pseudo,
4171 otherwise other uses of r0 (e.g. setting up function arguments) may
4172 clobber the value. */
4174 rtx tmp;
4176 emit_insn (gen_load_tp_soft ());
4178 tmp = gen_rtx_REG (SImode, 0);
4179 emit_move_insn (target, tmp);
4181 return target;
4184 static rtx
4185 load_tls_operand (rtx x, rtx reg)
4187 rtx tmp;
4189 if (reg == NULL_RTX)
4190 reg = gen_reg_rtx (SImode);
4192 tmp = gen_rtx_CONST (SImode, x);
4194 emit_move_insn (reg, tmp);
4196 return reg;
4199 static rtx
4200 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
4202 rtx insns, label, labelno, sum;
4204 start_sequence ();
4206 labelno = GEN_INT (pic_labelno++);
4207 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
4208 label = gen_rtx_CONST (VOIDmode, label);
4210 sum = gen_rtx_UNSPEC (Pmode,
4211 gen_rtvec (4, x, GEN_INT (reloc), label,
4212 GEN_INT (TARGET_ARM ? 8 : 4)),
4213 UNSPEC_TLS);
4214 reg = load_tls_operand (sum, reg);
4216 if (TARGET_ARM)
4217 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
4218 else if (TARGET_THUMB2)
4220 rtx tmp;
4221 /* Thumb-2 only allows very limited access to the PC. Calculate
4222 the address in a temporary register. */
4223 tmp = gen_reg_rtx (SImode);
4224 emit_insn (gen_pic_load_dot_plus_four (tmp, labelno));
4225 emit_insn (gen_addsi3(reg, reg, tmp));
4227 else /* TARGET_THUMB1 */
4228 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
4230 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
4231 Pmode, 1, reg, Pmode);
4233 insns = get_insns ();
4234 end_sequence ();
4236 return insns;
4240 legitimize_tls_address (rtx x, rtx reg)
4242 rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
4243 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
4245 switch (model)
4247 case TLS_MODEL_GLOBAL_DYNAMIC:
4248 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
4249 dest = gen_reg_rtx (Pmode);
4250 emit_libcall_block (insns, dest, ret, x);
4251 return dest;
4253 case TLS_MODEL_LOCAL_DYNAMIC:
4254 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
4256 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
4257 share the LDM result with other LD model accesses. */
4258 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
4259 UNSPEC_TLS);
4260 dest = gen_reg_rtx (Pmode);
4261 emit_libcall_block (insns, dest, ret, eqv);
4263 /* Load the addend. */
4264 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
4265 UNSPEC_TLS);
4266 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
4267 return gen_rtx_PLUS (Pmode, dest, addend);
4269 case TLS_MODEL_INITIAL_EXEC:
4270 labelno = GEN_INT (pic_labelno++);
4271 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
4272 label = gen_rtx_CONST (VOIDmode, label);
4273 sum = gen_rtx_UNSPEC (Pmode,
4274 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
4275 GEN_INT (TARGET_ARM ? 8 : 4)),
4276 UNSPEC_TLS);
4277 reg = load_tls_operand (sum, reg);
4279 if (TARGET_ARM)
4280 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
4281 else if (TARGET_THUMB2)
4283 rtx tmp;
4284 /* Thumb-2 only allows very limited access to the PC. Calculate
4285 the address in a temporary register. */
4286 tmp = gen_reg_rtx (SImode);
4287 emit_insn (gen_pic_load_dot_plus_four (tmp, labelno));
4288 emit_insn (gen_addsi3(reg, reg, tmp));
4289 emit_move_insn (reg, gen_const_mem (SImode, reg));
4291 else
4293 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
4294 emit_move_insn (reg, gen_const_mem (SImode, reg));
4297 tp = arm_load_tp (NULL_RTX);
4299 return gen_rtx_PLUS (Pmode, tp, reg);
4301 case TLS_MODEL_LOCAL_EXEC:
4302 tp = arm_load_tp (NULL_RTX);
4304 reg = gen_rtx_UNSPEC (Pmode,
4305 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
4306 UNSPEC_TLS);
4307 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
4309 return gen_rtx_PLUS (Pmode, tp, reg);
4311 default:
4312 abort ();
4316 /* Try machine-dependent ways of modifying an illegitimate address
4317 to be legitimate. If we find one, return the new, valid address. */
4319 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4321 if (arm_tls_symbol_p (x))
4322 return legitimize_tls_address (x, NULL_RTX);
4324 if (GET_CODE (x) == PLUS)
4326 rtx xop0 = XEXP (x, 0);
4327 rtx xop1 = XEXP (x, 1);
4329 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
4330 xop0 = force_reg (SImode, xop0);
4332 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
4333 xop1 = force_reg (SImode, xop1);
4335 if (ARM_BASE_REGISTER_RTX_P (xop0)
4336 && GET_CODE (xop1) == CONST_INT)
4338 HOST_WIDE_INT n, low_n;
4339 rtx base_reg, val;
4340 n = INTVAL (xop1);
4342 /* VFP addressing modes actually allow greater offsets, but for
4343 now we just stick with the lowest common denominator. */
4344 if (mode == DImode
4345 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
4347 low_n = n & 0x0f;
4348 n &= ~0x0f;
4349 if (low_n > 4)
4351 n += 16;
4352 low_n -= 16;
4355 else
4357 low_n = ((mode) == TImode ? 0
4358 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
4359 n -= low_n;
4362 base_reg = gen_reg_rtx (SImode);
4363 val = force_operand (plus_constant (xop0, n), NULL_RTX);
4364 emit_move_insn (base_reg, val);
4365 x = plus_constant (base_reg, low_n);
4367 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4368 x = gen_rtx_PLUS (SImode, xop0, xop1);
4371 /* XXX We don't allow MINUS any more -- see comment in
4372 arm_legitimate_address_p (). */
4373 else if (GET_CODE (x) == MINUS)
4375 rtx xop0 = XEXP (x, 0);
4376 rtx xop1 = XEXP (x, 1);
4378 if (CONSTANT_P (xop0))
4379 xop0 = force_reg (SImode, xop0);
4381 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
4382 xop1 = force_reg (SImode, xop1);
4384 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4385 x = gen_rtx_MINUS (SImode, xop0, xop1);
4388 /* Make sure to take full advantage of the pre-indexed addressing mode
4389 with absolute addresses which often allows for the base register to
4390 be factorized for multiple adjacent memory references, and it might
4391 even allows for the mini pool to be avoided entirely. */
4392 else if (GET_CODE (x) == CONST_INT && optimize > 0)
4394 unsigned int bits;
4395 HOST_WIDE_INT mask, base, index;
4396 rtx base_reg;
4398 /* ldr and ldrb can use a 12-bit index, ldrsb and the rest can only
4399 use a 8-bit index. So let's use a 12-bit index for SImode only and
4400 hope that arm_gen_constant will enable ldrb to use more bits. */
4401 bits = (mode == SImode) ? 12 : 8;
4402 mask = (1 << bits) - 1;
4403 base = INTVAL (x) & ~mask;
4404 index = INTVAL (x) & mask;
4405 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
4407 /* It'll most probably be more efficient to generate the base
4408 with more bits set and use a negative index instead. */
4409 base |= mask;
4410 index -= mask;
4412 base_reg = force_reg (SImode, GEN_INT (base));
4413 x = plus_constant (base_reg, index);
4416 if (flag_pic)
4418 /* We need to find and carefully transform any SYMBOL and LABEL
4419 references; so go back to the original address expression. */
4420 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4422 if (new_x != orig_x)
4423 x = new_x;
4426 return x;
4430 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4431 to be legitimate. If we find one, return the new, valid address. */
4433 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4435 if (arm_tls_symbol_p (x))
4436 return legitimize_tls_address (x, NULL_RTX);
4438 if (GET_CODE (x) == PLUS
4439 && GET_CODE (XEXP (x, 1)) == CONST_INT
4440 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
4441 || INTVAL (XEXP (x, 1)) < 0))
4443 rtx xop0 = XEXP (x, 0);
4444 rtx xop1 = XEXP (x, 1);
4445 HOST_WIDE_INT offset = INTVAL (xop1);
4447 /* Try and fold the offset into a biasing of the base register and
4448 then offsetting that. Don't do this when optimizing for space
4449 since it can cause too many CSEs. */
4450 if (optimize_size && offset >= 0
4451 && offset < 256 + 31 * GET_MODE_SIZE (mode))
4453 HOST_WIDE_INT delta;
4455 if (offset >= 256)
4456 delta = offset - (256 - GET_MODE_SIZE (mode));
4457 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
4458 delta = 31 * GET_MODE_SIZE (mode);
4459 else
4460 delta = offset & (~31 * GET_MODE_SIZE (mode));
4462 xop0 = force_operand (plus_constant (xop0, offset - delta),
4463 NULL_RTX);
4464 x = plus_constant (xop0, delta);
4466 else if (offset < 0 && offset > -256)
4467 /* Small negative offsets are best done with a subtract before the
4468 dereference, forcing these into a register normally takes two
4469 instructions. */
4470 x = force_operand (x, NULL_RTX);
4471 else
4473 /* For the remaining cases, force the constant into a register. */
4474 xop1 = force_reg (SImode, xop1);
4475 x = gen_rtx_PLUS (SImode, xop0, xop1);
4478 else if (GET_CODE (x) == PLUS
4479 && s_register_operand (XEXP (x, 1), SImode)
4480 && !s_register_operand (XEXP (x, 0), SImode))
4482 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
4484 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
4487 if (flag_pic)
4489 /* We need to find and carefully transform any SYMBOL and LABEL
4490 references; so go back to the original address expression. */
4491 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4493 if (new_x != orig_x)
4494 x = new_x;
4497 return x;
4501 thumb_legitimize_reload_address (rtx *x_p,
4502 enum machine_mode mode,
4503 int opnum, int type,
4504 int ind_levels ATTRIBUTE_UNUSED)
4506 rtx x = *x_p;
4508 if (GET_CODE (x) == PLUS
4509 && GET_MODE_SIZE (mode) < 4
4510 && REG_P (XEXP (x, 0))
4511 && XEXP (x, 0) == stack_pointer_rtx
4512 && GET_CODE (XEXP (x, 1)) == CONST_INT
4513 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4515 rtx orig_x = x;
4517 x = copy_rtx (x);
4518 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4519 Pmode, VOIDmode, 0, 0, opnum, type);
4520 return x;
4523 /* If both registers are hi-regs, then it's better to reload the
4524 entire expression rather than each register individually. That
4525 only requires one reload register rather than two. */
4526 if (GET_CODE (x) == PLUS
4527 && REG_P (XEXP (x, 0))
4528 && REG_P (XEXP (x, 1))
4529 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
4530 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
4532 rtx orig_x = x;
4534 x = copy_rtx (x);
4535 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4536 Pmode, VOIDmode, 0, 0, opnum, type);
4537 return x;
4540 return NULL;
4543 /* Test for various thread-local symbols. */
4545 /* Return TRUE if X is a thread-local symbol. */
4547 static bool
4548 arm_tls_symbol_p (rtx x)
4550 if (! TARGET_HAVE_TLS)
4551 return false;
4553 if (GET_CODE (x) != SYMBOL_REF)
4554 return false;
4556 return SYMBOL_REF_TLS_MODEL (x) != 0;
4559 /* Helper for arm_tls_referenced_p. */
4561 static int
4562 arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4564 if (GET_CODE (*x) == SYMBOL_REF)
4565 return SYMBOL_REF_TLS_MODEL (*x) != 0;
4567 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
4568 TLS offsets, not real symbol references. */
4569 if (GET_CODE (*x) == UNSPEC
4570 && XINT (*x, 1) == UNSPEC_TLS)
4571 return -1;
4573 return 0;
4576 /* Return TRUE if X contains any TLS symbol references. */
4578 bool
4579 arm_tls_referenced_p (rtx x)
4581 if (! TARGET_HAVE_TLS)
4582 return false;
4584 return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
4587 #define REG_OR_SUBREG_REG(X) \
4588 (GET_CODE (X) == REG \
4589 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
4591 #define REG_OR_SUBREG_RTX(X) \
4592 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
4594 #ifndef COSTS_N_INSNS
4595 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
4596 #endif
4597 static inline int
4598 thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
4600 enum machine_mode mode = GET_MODE (x);
4602 switch (code)
4604 case ASHIFT:
4605 case ASHIFTRT:
4606 case LSHIFTRT:
4607 case ROTATERT:
4608 case PLUS:
4609 case MINUS:
4610 case COMPARE:
4611 case NEG:
4612 case NOT:
4613 return COSTS_N_INSNS (1);
4615 case MULT:
4616 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4618 int cycles = 0;
4619 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
4621 while (i)
4623 i >>= 2;
4624 cycles++;
4626 return COSTS_N_INSNS (2) + cycles;
4628 return COSTS_N_INSNS (1) + 16;
4630 case SET:
4631 return (COSTS_N_INSNS (1)
4632 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
4633 + GET_CODE (SET_DEST (x)) == MEM));
4635 case CONST_INT:
4636 if (outer == SET)
4638 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
4639 return 0;
4640 if (thumb_shiftable_const (INTVAL (x)))
4641 return COSTS_N_INSNS (2);
4642 return COSTS_N_INSNS (3);
4644 else if ((outer == PLUS || outer == COMPARE)
4645 && INTVAL (x) < 256 && INTVAL (x) > -256)
4646 return 0;
4647 else if (outer == AND
4648 && INTVAL (x) < 256 && INTVAL (x) >= -256)
4649 return COSTS_N_INSNS (1);
4650 else if (outer == ASHIFT || outer == ASHIFTRT
4651 || outer == LSHIFTRT)
4652 return 0;
4653 return COSTS_N_INSNS (2);
4655 case CONST:
4656 case CONST_DOUBLE:
4657 case LABEL_REF:
4658 case SYMBOL_REF:
4659 return COSTS_N_INSNS (3);
4661 case UDIV:
4662 case UMOD:
4663 case DIV:
4664 case MOD:
4665 return 100;
4667 case TRUNCATE:
4668 return 99;
4670 case AND:
4671 case XOR:
4672 case IOR:
4673 /* XXX guess. */
4674 return 8;
4676 case MEM:
4677 /* XXX another guess. */
4678 /* Memory costs quite a lot for the first word, but subsequent words
4679 load at the equivalent of a single insn each. */
4680 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4681 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4682 ? 4 : 0));
4684 case IF_THEN_ELSE:
4685 /* XXX a guess. */
4686 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4687 return 14;
4688 return 2;
4690 case ZERO_EXTEND:
4691 /* XXX still guessing. */
4692 switch (GET_MODE (XEXP (x, 0)))
4694 case QImode:
4695 return (1 + (mode == DImode ? 4 : 0)
4696 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4698 case HImode:
4699 return (4 + (mode == DImode ? 4 : 0)
4700 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4702 case SImode:
4703 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4705 default:
4706 return 99;
4709 default:
4710 return 99;
4715 /* Worker routine for arm_rtx_costs. */
4716 /* ??? This needs updating for thumb2. */
4717 static inline int
4718 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4720 enum machine_mode mode = GET_MODE (x);
4721 enum rtx_code subcode;
4722 int extra_cost;
4724 switch (code)
4726 case MEM:
4727 /* Memory costs quite a lot for the first word, but subsequent words
4728 load at the equivalent of a single insn each. */
4729 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4730 + (GET_CODE (x) == SYMBOL_REF
4731 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4733 case DIV:
4734 case MOD:
4735 case UDIV:
4736 case UMOD:
4737 return optimize_size ? COSTS_N_INSNS (2) : 100;
4739 case ROTATE:
4740 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4741 return 4;
4742 /* Fall through */
4743 case ROTATERT:
4744 if (mode != SImode)
4745 return 8;
4746 /* Fall through */
4747 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4748 if (mode == DImode)
4749 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4750 + ((GET_CODE (XEXP (x, 0)) == REG
4751 || (GET_CODE (XEXP (x, 0)) == SUBREG
4752 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4753 ? 0 : 8));
4754 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4755 || (GET_CODE (XEXP (x, 0)) == SUBREG
4756 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4757 ? 0 : 4)
4758 + ((GET_CODE (XEXP (x, 1)) == REG
4759 || (GET_CODE (XEXP (x, 1)) == SUBREG
4760 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4761 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4762 ? 0 : 4));
4764 case MINUS:
4765 if (GET_CODE (XEXP (x, 1)) == MULT && mode == SImode && arm_arch_thumb2)
4767 extra_cost = rtx_cost (XEXP (x, 1), code);
4768 if (!REG_OR_SUBREG_REG (XEXP (x, 0)))
4769 extra_cost += 4 * ARM_NUM_REGS (mode);
4770 return extra_cost;
4773 if (mode == DImode)
4774 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4775 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4776 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4777 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4778 ? 0 : 8));
4780 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4781 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4782 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4783 && arm_const_double_rtx (XEXP (x, 1))))
4784 ? 0 : 8)
4785 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4786 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4787 && arm_const_double_rtx (XEXP (x, 0))))
4788 ? 0 : 8));
4790 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4791 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4792 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4793 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4794 || subcode == ASHIFTRT || subcode == LSHIFTRT
4795 || subcode == ROTATE || subcode == ROTATERT
4796 || (subcode == MULT
4797 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4798 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4799 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4800 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4801 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4802 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4803 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4804 return 1;
4805 /* Fall through */
4807 case PLUS:
4808 if (GET_CODE (XEXP (x, 0)) == MULT)
4810 extra_cost = rtx_cost (XEXP (x, 0), code);
4811 if (!REG_OR_SUBREG_REG (XEXP (x, 1)))
4812 extra_cost += 4 * ARM_NUM_REGS (mode);
4813 return extra_cost;
4816 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4817 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4818 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4819 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4820 && arm_const_double_rtx (XEXP (x, 1))))
4821 ? 0 : 8));
4823 /* Fall through */
4824 case AND: case XOR: case IOR:
4825 extra_cost = 0;
4827 /* Normally the frame registers will be spilt into reg+const during
4828 reload, so it is a bad idea to combine them with other instructions,
4829 since then they might not be moved outside of loops. As a compromise
4830 we allow integration with ops that have a constant as their second
4831 operand. */
4832 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4833 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4834 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4835 || (REG_OR_SUBREG_REG (XEXP (x, 0))
4836 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
4837 extra_cost = 4;
4839 if (mode == DImode)
4840 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4841 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4842 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4843 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4844 ? 0 : 8));
4846 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4847 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4848 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4849 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4850 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4851 ? 0 : 4));
4853 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4854 return (1 + extra_cost
4855 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4856 || subcode == LSHIFTRT || subcode == ASHIFTRT
4857 || subcode == ROTATE || subcode == ROTATERT
4858 || (subcode == MULT
4859 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4860 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4861 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4862 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4863 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4864 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4865 ? 0 : 4));
4867 return 8;
4869 case MULT:
4870 /* This should have been handled by the CPU specific routines. */
4871 gcc_unreachable ();
4873 case TRUNCATE:
4874 if (arm_arch3m && mode == SImode
4875 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4876 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4877 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4878 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4879 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4880 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4881 return 8;
4882 return 99;
4884 case NEG:
4885 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4886 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4887 /* Fall through */
4888 case NOT:
4889 if (mode == DImode)
4890 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4892 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4894 case IF_THEN_ELSE:
4895 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4896 return 14;
4897 return 2;
4899 case COMPARE:
4900 return 1;
4902 case ABS:
4903 return 4 + (mode == DImode ? 4 : 0);
4905 case SIGN_EXTEND:
4906 /* ??? value extensions are cheaper on armv6. */
4907 if (GET_MODE (XEXP (x, 0)) == QImode)
4908 return (4 + (mode == DImode ? 4 : 0)
4909 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4910 /* Fall through */
4911 case ZERO_EXTEND:
4912 switch (GET_MODE (XEXP (x, 0)))
4914 case QImode:
4915 return (1 + (mode == DImode ? 4 : 0)
4916 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4918 case HImode:
4919 return (4 + (mode == DImode ? 4 : 0)
4920 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4922 case SImode:
4923 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4925 case V8QImode:
4926 case V4HImode:
4927 case V2SImode:
4928 case V4QImode:
4929 case V2HImode:
4930 return 1;
4932 default:
4933 gcc_unreachable ();
4935 gcc_unreachable ();
4937 case CONST_INT:
4938 if (const_ok_for_arm (INTVAL (x)))
4939 return outer == SET ? 2 : -1;
4940 else if (outer == AND
4941 && const_ok_for_arm (~INTVAL (x)))
4942 return -1;
4943 else if ((outer == COMPARE
4944 || outer == PLUS || outer == MINUS)
4945 && const_ok_for_arm (-INTVAL (x)))
4946 return -1;
4947 else
4948 return 5;
4950 case CONST:
4951 case LABEL_REF:
4952 case SYMBOL_REF:
4953 return 6;
4955 case CONST_DOUBLE:
4956 if (arm_const_double_rtx (x) || vfp3_const_double_rtx (x))
4957 return outer == SET ? 2 : -1;
4958 else if ((outer == COMPARE || outer == PLUS)
4959 && neg_const_double_rtx_ok_for_fpa (x))
4960 return -1;
4961 return 7;
4963 default:
4964 return 99;
4968 /* RTX costs when optimizing for size. */
4969 static bool
4970 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4972 enum machine_mode mode = GET_MODE (x);
4974 if (TARGET_THUMB)
4976 /* XXX TBD. For now, use the standard costs. */
4977 *total = thumb1_rtx_costs (x, code, outer_code);
4978 return true;
4981 switch (code)
4983 case MEM:
4984 /* A memory access costs 1 insn if the mode is small, or the address is
4985 a single register, otherwise it costs one insn per word. */
4986 if (REG_P (XEXP (x, 0)))
4987 *total = COSTS_N_INSNS (1);
4988 else
4989 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4990 return true;
4992 case DIV:
4993 case MOD:
4994 case UDIV:
4995 case UMOD:
4996 /* Needs a libcall, so it costs about this. */
4997 *total = COSTS_N_INSNS (2);
4998 return false;
5000 case ROTATE:
5001 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
5003 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
5004 return true;
5006 /* Fall through */
5007 case ROTATERT:
5008 case ASHIFT:
5009 case LSHIFTRT:
5010 case ASHIFTRT:
5011 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
5013 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
5014 return true;
5016 else if (mode == SImode)
5018 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
5019 /* Slightly disparage register shifts, but not by much. */
5020 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5021 *total += 1 + rtx_cost (XEXP (x, 1), code);
5022 return true;
5025 /* Needs a libcall. */
5026 *total = COSTS_N_INSNS (2);
5027 return false;
5029 case MINUS:
5030 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5032 *total = COSTS_N_INSNS (1);
5033 return false;
5036 if (mode == SImode)
5038 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
5039 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
5041 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
5042 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
5043 || subcode1 == ROTATE || subcode1 == ROTATERT
5044 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
5045 || subcode1 == ASHIFTRT)
5047 /* It's just the cost of the two operands. */
5048 *total = 0;
5049 return false;
5052 *total = COSTS_N_INSNS (1);
5053 return false;
5056 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5057 return false;
5059 case PLUS:
5060 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5062 *total = COSTS_N_INSNS (1);
5063 return false;
5066 /* Fall through */
5067 case AND: case XOR: case IOR:
5068 if (mode == SImode)
5070 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
5072 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
5073 || subcode == LSHIFTRT || subcode == ASHIFTRT
5074 || (code == AND && subcode == NOT))
5076 /* It's just the cost of the two operands. */
5077 *total = 0;
5078 return false;
5082 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5083 return false;
5085 case MULT:
5086 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5087 return false;
5089 case NEG:
5090 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5091 *total = COSTS_N_INSNS (1);
5092 /* Fall through */
5093 case NOT:
5094 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5096 return false;
5098 case IF_THEN_ELSE:
5099 *total = 0;
5100 return false;
5102 case COMPARE:
5103 if (cc_register (XEXP (x, 0), VOIDmode))
5104 * total = 0;
5105 else
5106 *total = COSTS_N_INSNS (1);
5107 return false;
5109 case ABS:
5110 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5111 *total = COSTS_N_INSNS (1);
5112 else
5113 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
5114 return false;
5116 case SIGN_EXTEND:
5117 *total = 0;
5118 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
5120 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
5121 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
5123 if (mode == DImode)
5124 *total += COSTS_N_INSNS (1);
5125 return false;
5127 case ZERO_EXTEND:
5128 *total = 0;
5129 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
5131 switch (GET_MODE (XEXP (x, 0)))
5133 case QImode:
5134 *total += COSTS_N_INSNS (1);
5135 break;
5137 case HImode:
5138 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
5140 case SImode:
5141 break;
5143 default:
5144 *total += COSTS_N_INSNS (2);
5148 if (mode == DImode)
5149 *total += COSTS_N_INSNS (1);
5151 return false;
5153 case CONST_INT:
5154 if (const_ok_for_arm (INTVAL (x)))
5155 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
5156 else if (const_ok_for_arm (~INTVAL (x)))
5157 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
5158 else if (const_ok_for_arm (-INTVAL (x)))
5160 if (outer_code == COMPARE || outer_code == PLUS
5161 || outer_code == MINUS)
5162 *total = 0;
5163 else
5164 *total = COSTS_N_INSNS (1);
5166 else
5167 *total = COSTS_N_INSNS (2);
5168 return true;
5170 case CONST:
5171 case LABEL_REF:
5172 case SYMBOL_REF:
5173 *total = COSTS_N_INSNS (2);
5174 return true;
5176 case CONST_DOUBLE:
5177 *total = COSTS_N_INSNS (4);
5178 return true;
5180 default:
5181 if (mode != VOIDmode)
5182 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5183 else
5184 *total = COSTS_N_INSNS (4); /* How knows? */
5185 return false;
5189 /* RTX costs for cores with a slow MUL implementation. Thumb-2 is not
5190 supported on any "slowmul" cores, so it can be ignored. */
5192 static bool
5193 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
5195 enum machine_mode mode = GET_MODE (x);
5197 if (TARGET_THUMB)
5199 *total = thumb1_rtx_costs (x, code, outer_code);
5200 return true;
5203 switch (code)
5205 case MULT:
5206 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5207 || mode == DImode)
5209 *total = 30;
5210 return true;
5213 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5215 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5216 & (unsigned HOST_WIDE_INT) 0xffffffff);
5217 int cost, const_ok = const_ok_for_arm (i);
5218 int j, booth_unit_size;
5220 /* Tune as appropriate. */
5221 cost = const_ok ? 4 : 8;
5222 booth_unit_size = 2;
5223 for (j = 0; i && j < 32; j += booth_unit_size)
5225 i >>= booth_unit_size;
5226 cost += 2;
5229 *total = cost;
5230 return true;
5233 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5234 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5235 return true;
5237 default:
5238 *total = arm_rtx_costs_1 (x, code, outer_code);
5239 return true;
5244 /* RTX cost for cores with a fast multiply unit (M variants). */
5246 static bool
5247 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
5249 enum machine_mode mode = GET_MODE (x);
5251 if (TARGET_THUMB1)
5253 *total = thumb1_rtx_costs (x, code, outer_code);
5254 return true;
5257 /* ??? should thumb2 use different costs? */
5258 switch (code)
5260 case MULT:
5261 /* There is no point basing this on the tuning, since it is always the
5262 fast variant if it exists at all. */
5263 if (mode == DImode
5264 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5265 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5266 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5268 *total = 8;
5269 return true;
5273 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5274 || mode == DImode)
5276 *total = 30;
5277 return true;
5280 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5282 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5283 & (unsigned HOST_WIDE_INT) 0xffffffff);
5284 int cost, const_ok = const_ok_for_arm (i);
5285 int j, booth_unit_size;
5287 /* Tune as appropriate. */
5288 cost = const_ok ? 4 : 8;
5289 booth_unit_size = 8;
5290 for (j = 0; i && j < 32; j += booth_unit_size)
5292 i >>= booth_unit_size;
5293 cost += 2;
5296 *total = cost;
5297 return true;
5300 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5301 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5302 return true;
5304 default:
5305 *total = arm_rtx_costs_1 (x, code, outer_code);
5306 return true;
5311 /* RTX cost for XScale CPUs. Thumb-2 is not supported on any xscale cores,
5312 so it can be ignored. */
5314 static bool
5315 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
5317 enum machine_mode mode = GET_MODE (x);
5319 if (TARGET_THUMB)
5321 *total = thumb1_rtx_costs (x, code, outer_code);
5322 return true;
5325 switch (code)
5327 case MULT:
5328 /* There is no point basing this on the tuning, since it is always the
5329 fast variant if it exists at all. */
5330 if (mode == DImode
5331 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5332 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5333 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5335 *total = 8;
5336 return true;
5340 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5341 || mode == DImode)
5343 *total = 30;
5344 return true;
5347 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5349 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5350 & (unsigned HOST_WIDE_INT) 0xffffffff);
5351 int cost, const_ok = const_ok_for_arm (i);
5352 unsigned HOST_WIDE_INT masked_const;
5354 /* The cost will be related to two insns.
5355 First a load of the constant (MOV or LDR), then a multiply. */
5356 cost = 2;
5357 if (! const_ok)
5358 cost += 1; /* LDR is probably more expensive because
5359 of longer result latency. */
5360 masked_const = i & 0xffff8000;
5361 if (masked_const != 0 && masked_const != 0xffff8000)
5363 masked_const = i & 0xf8000000;
5364 if (masked_const == 0 || masked_const == 0xf8000000)
5365 cost += 1;
5366 else
5367 cost += 2;
5369 *total = cost;
5370 return true;
5373 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5374 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5375 return true;
5377 case COMPARE:
5378 /* A COMPARE of a MULT is slow on XScale; the muls instruction
5379 will stall until the multiplication is complete. */
5380 if (GET_CODE (XEXP (x, 0)) == MULT)
5381 *total = 4 + rtx_cost (XEXP (x, 0), code);
5382 else
5383 *total = arm_rtx_costs_1 (x, code, outer_code);
5384 return true;
5386 default:
5387 *total = arm_rtx_costs_1 (x, code, outer_code);
5388 return true;
5393 /* RTX costs for 9e (and later) cores. */
5395 static bool
5396 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
5398 enum machine_mode mode = GET_MODE (x);
5399 int nonreg_cost;
5400 int cost;
5402 if (TARGET_THUMB1)
5404 switch (code)
5406 case MULT:
5407 *total = COSTS_N_INSNS (3);
5408 return true;
5410 default:
5411 *total = thumb1_rtx_costs (x, code, outer_code);
5412 return true;
5416 switch (code)
5418 case MULT:
5419 /* There is no point basing this on the tuning, since it is always the
5420 fast variant if it exists at all. */
5421 if (mode == DImode
5422 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5423 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5424 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5426 *total = 3;
5427 return true;
5431 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5433 *total = 30;
5434 return true;
5436 if (mode == DImode)
5438 cost = 7;
5439 nonreg_cost = 8;
5441 else
5443 cost = 2;
5444 nonreg_cost = 4;
5448 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
5449 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
5450 return true;
5452 default:
5453 *total = arm_rtx_costs_1 (x, code, outer_code);
5454 return true;
5457 /* All address computations that can be done are free, but rtx cost returns
5458 the same for practically all of them. So we weight the different types
5459 of address here in the order (most pref first):
5460 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
5461 static inline int
5462 arm_arm_address_cost (rtx x)
5464 enum rtx_code c = GET_CODE (x);
5466 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
5467 return 0;
5468 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
5469 return 10;
5471 if (c == PLUS || c == MINUS)
5473 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5474 return 2;
5476 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
5477 return 3;
5479 return 4;
5482 return 6;
5485 static inline int
5486 arm_thumb_address_cost (rtx x)
5488 enum rtx_code c = GET_CODE (x);
5490 if (c == REG)
5491 return 1;
5492 if (c == PLUS
5493 && GET_CODE (XEXP (x, 0)) == REG
5494 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5495 return 1;
5497 return 2;
5500 static int
5501 arm_address_cost (rtx x)
5503 return TARGET_32BIT ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
5506 static int
5507 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
5509 rtx i_pat, d_pat;
5511 /* Some true dependencies can have a higher cost depending
5512 on precisely how certain input operands are used. */
5513 if (arm_tune_xscale
5514 && REG_NOTE_KIND (link) == 0
5515 && recog_memoized (insn) >= 0
5516 && recog_memoized (dep) >= 0)
5518 int shift_opnum = get_attr_shift (insn);
5519 enum attr_type attr_type = get_attr_type (dep);
5521 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5522 operand for INSN. If we have a shifted input operand and the
5523 instruction we depend on is another ALU instruction, then we may
5524 have to account for an additional stall. */
5525 if (shift_opnum != 0
5526 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
5528 rtx shifted_operand;
5529 int opno;
5531 /* Get the shifted operand. */
5532 extract_insn (insn);
5533 shifted_operand = recog_data.operand[shift_opnum];
5535 /* Iterate over all the operands in DEP. If we write an operand
5536 that overlaps with SHIFTED_OPERAND, then we have increase the
5537 cost of this dependency. */
5538 extract_insn (dep);
5539 preprocess_constraints ();
5540 for (opno = 0; opno < recog_data.n_operands; opno++)
5542 /* We can ignore strict inputs. */
5543 if (recog_data.operand_type[opno] == OP_IN)
5544 continue;
5546 if (reg_overlap_mentioned_p (recog_data.operand[opno],
5547 shifted_operand))
5548 return 2;
5553 /* XXX This is not strictly true for the FPA. */
5554 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
5555 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
5556 return 0;
5558 /* Call insns don't incur a stall, even if they follow a load. */
5559 if (REG_NOTE_KIND (link) == 0
5560 && GET_CODE (insn) == CALL_INSN)
5561 return 1;
5563 if ((i_pat = single_set (insn)) != NULL
5564 && GET_CODE (SET_SRC (i_pat)) == MEM
5565 && (d_pat = single_set (dep)) != NULL
5566 && GET_CODE (SET_DEST (d_pat)) == MEM)
5568 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
5569 /* This is a load after a store, there is no conflict if the load reads
5570 from a cached area. Assume that loads from the stack, and from the
5571 constant pool are cached, and that others will miss. This is a
5572 hack. */
5574 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
5575 || reg_mentioned_p (stack_pointer_rtx, src_mem)
5576 || reg_mentioned_p (frame_pointer_rtx, src_mem)
5577 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
5578 return 1;
5581 return cost;
5584 static int fp_consts_inited = 0;
5586 /* Only zero is valid for VFP. Other values are also valid for FPA. */
5587 static const char * const strings_fp[8] =
5589 "0", "1", "2", "3",
5590 "4", "5", "0.5", "10"
5593 static REAL_VALUE_TYPE values_fp[8];
5595 static void
5596 init_fp_table (void)
5598 int i;
5599 REAL_VALUE_TYPE r;
5601 if (TARGET_VFP)
5602 fp_consts_inited = 1;
5603 else
5604 fp_consts_inited = 8;
5606 for (i = 0; i < fp_consts_inited; i++)
5608 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
5609 values_fp[i] = r;
5613 /* Return TRUE if rtx X is a valid immediate FP constant. */
5615 arm_const_double_rtx (rtx x)
5617 REAL_VALUE_TYPE r;
5618 int i;
5620 if (!fp_consts_inited)
5621 init_fp_table ();
5623 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5624 if (REAL_VALUE_MINUS_ZERO (r))
5625 return 0;
5627 for (i = 0; i < fp_consts_inited; i++)
5628 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5629 return 1;
5631 return 0;
5634 /* Return TRUE if rtx X is a valid immediate FPA constant. */
5636 neg_const_double_rtx_ok_for_fpa (rtx x)
5638 REAL_VALUE_TYPE r;
5639 int i;
5641 if (!fp_consts_inited)
5642 init_fp_table ();
5644 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5645 r = REAL_VALUE_NEGATE (r);
5646 if (REAL_VALUE_MINUS_ZERO (r))
5647 return 0;
5649 for (i = 0; i < 8; i++)
5650 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5651 return 1;
5653 return 0;
5657 /* VFPv3 has a fairly wide range of representable immediates, formed from
5658 "quarter-precision" floating-point values. These can be evaluated using this
5659 formula (with ^ for exponentiation):
5661 -1^s * n * 2^-r
5663 Where 's' is a sign bit (0/1), 'n' and 'r' are integers such that
5664 16 <= n <= 31 and 0 <= r <= 7.
5666 These values are mapped onto an 8-bit integer ABCDEFGH s.t.
5668 - A (most-significant) is the sign bit.
5669 - BCD are the exponent (encoded as r XOR 3).
5670 - EFGH are the mantissa (encoded as n - 16).
5673 /* Return an integer index for a VFPv3 immediate operand X suitable for the
5674 fconst[sd] instruction, or -1 if X isn't suitable. */
5675 static int
5676 vfp3_const_double_index (rtx x)
5678 REAL_VALUE_TYPE r, m;
5679 int sign, exponent;
5680 unsigned HOST_WIDE_INT mantissa, mant_hi;
5681 unsigned HOST_WIDE_INT mask;
5682 int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
5684 if (!TARGET_VFP3 || GET_CODE (x) != CONST_DOUBLE)
5685 return -1;
5687 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5689 /* We can't represent these things, so detect them first. */
5690 if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r) || REAL_VALUE_MINUS_ZERO (r))
5691 return -1;
5693 /* Extract sign, exponent and mantissa. */
5694 sign = REAL_VALUE_NEGATIVE (r) ? 1 : 0;
5695 r = REAL_VALUE_ABS (r);
5696 exponent = REAL_EXP (&r);
5697 /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the
5698 highest (sign) bit, with a fixed binary point at bit point_pos.
5699 WARNING: If there's ever a VFP version which uses more than 2 * H_W_I - 1
5700 bits for the mantissa, this may fail (low bits would be lost). */
5701 real_ldexp (&m, &r, point_pos - exponent);
5702 REAL_VALUE_TO_INT (&mantissa, &mant_hi, m);
5704 /* If there are bits set in the low part of the mantissa, we can't
5705 represent this value. */
5706 if (mantissa != 0)
5707 return -1;
5709 /* Now make it so that mantissa contains the most-significant bits, and move
5710 the point_pos to indicate that the least-significant bits have been
5711 discarded. */
5712 point_pos -= HOST_BITS_PER_WIDE_INT;
5713 mantissa = mant_hi;
5715 /* We can permit four significant bits of mantissa only, plus a high bit
5716 which is always 1. */
5717 mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1;
5718 if ((mantissa & mask) != 0)
5719 return -1;
5721 /* Now we know the mantissa is in range, chop off the unneeded bits. */
5722 mantissa >>= point_pos - 5;
5724 /* The mantissa may be zero. Disallow that case. (It's possible to load the
5725 floating-point immediate zero with Neon using an integer-zero load, but
5726 that case is handled elsewhere.) */
5727 if (mantissa == 0)
5728 return -1;
5730 gcc_assert (mantissa >= 16 && mantissa <= 31);
5732 /* The value of 5 here would be 4 if GCC used IEEE754-like encoding (where
5733 normalised significands are in the range [1, 2). (Our mantissa is shifted
5734 left 4 places at this point relative to normalised IEEE754 values). GCC
5735 internally uses [0.5, 1) (see real.c), so the exponent returned from
5736 REAL_EXP must be altered. */
5737 exponent = 5 - exponent;
5739 if (exponent < 0 || exponent > 7)
5740 return -1;
5742 /* Sign, mantissa and exponent are now in the correct form to plug into the
5743 formulae described in the comment above. */
5744 return (sign << 7) | ((exponent ^ 3) << 4) | (mantissa - 16);
5747 /* Return TRUE if rtx X is a valid immediate VFPv3 constant. */
5749 vfp3_const_double_rtx (rtx x)
5751 if (!TARGET_VFP3)
5752 return 0;
5754 return vfp3_const_double_index (x) != -1;
5758 /* Predicates for `match_operand' and `match_operator'. */
5760 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5762 cirrus_memory_offset (rtx op)
5764 /* Reject eliminable registers. */
5765 if (! (reload_in_progress || reload_completed)
5766 && ( reg_mentioned_p (frame_pointer_rtx, op)
5767 || reg_mentioned_p (arg_pointer_rtx, op)
5768 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5769 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5770 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5771 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5772 return 0;
5774 if (GET_CODE (op) == MEM)
5776 rtx ind;
5778 ind = XEXP (op, 0);
5780 /* Match: (mem (reg)). */
5781 if (GET_CODE (ind) == REG)
5782 return 1;
5784 /* Match:
5785 (mem (plus (reg)
5786 (const))). */
5787 if (GET_CODE (ind) == PLUS
5788 && GET_CODE (XEXP (ind, 0)) == REG
5789 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5790 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
5791 return 1;
5794 return 0;
5797 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5798 WB is true if full writeback address modes are allowed and is false
5799 if limited writeback address modes (POST_INC and PRE_DEC) are
5800 allowed. */
5803 arm_coproc_mem_operand (rtx op, bool wb)
5805 rtx ind;
5807 /* Reject eliminable registers. */
5808 if (! (reload_in_progress || reload_completed)
5809 && ( reg_mentioned_p (frame_pointer_rtx, op)
5810 || reg_mentioned_p (arg_pointer_rtx, op)
5811 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5812 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5813 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5814 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5815 return FALSE;
5817 /* Constants are converted into offsets from labels. */
5818 if (GET_CODE (op) != MEM)
5819 return FALSE;
5821 ind = XEXP (op, 0);
5823 if (reload_completed
5824 && (GET_CODE (ind) == LABEL_REF
5825 || (GET_CODE (ind) == CONST
5826 && GET_CODE (XEXP (ind, 0)) == PLUS
5827 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5828 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5829 return TRUE;
5831 /* Match: (mem (reg)). */
5832 if (GET_CODE (ind) == REG)
5833 return arm_address_register_rtx_p (ind, 0);
5835 /* Autoincremment addressing modes. POST_INC and PRE_DEC are
5836 acceptable in any case (subject to verification by
5837 arm_address_register_rtx_p). We need WB to be true to accept
5838 PRE_INC and POST_DEC. */
5839 if (GET_CODE (ind) == POST_INC
5840 || GET_CODE (ind) == PRE_DEC
5841 || (wb
5842 && (GET_CODE (ind) == PRE_INC
5843 || GET_CODE (ind) == POST_DEC)))
5844 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5846 if (wb
5847 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5848 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5849 && GET_CODE (XEXP (ind, 1)) == PLUS
5850 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5851 ind = XEXP (ind, 1);
5853 /* Match:
5854 (plus (reg)
5855 (const)). */
5856 if (GET_CODE (ind) == PLUS
5857 && GET_CODE (XEXP (ind, 0)) == REG
5858 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5859 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5860 && INTVAL (XEXP (ind, 1)) > -1024
5861 && INTVAL (XEXP (ind, 1)) < 1024
5862 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5863 return TRUE;
5865 return FALSE;
5868 /* Return true if X is a register that will be eliminated later on. */
5870 arm_eliminable_register (rtx x)
5872 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5873 || REGNO (x) == ARG_POINTER_REGNUM
5874 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5875 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5878 /* Return GENERAL_REGS if a scratch register required to reload x to/from
5879 coprocessor registers. Otherwise return NO_REGS. */
5881 enum reg_class
5882 coproc_secondary_reload_class (enum machine_mode mode, rtx x, bool wb)
5884 if (arm_coproc_mem_operand (x, wb) || s_register_operand (x, mode))
5885 return NO_REGS;
5887 return GENERAL_REGS;
5890 /* Values which must be returned in the most-significant end of the return
5891 register. */
5893 static bool
5894 arm_return_in_msb (tree valtype)
5896 return (TARGET_AAPCS_BASED
5897 && BYTES_BIG_ENDIAN
5898 && (AGGREGATE_TYPE_P (valtype)
5899 || TREE_CODE (valtype) == COMPLEX_TYPE));
5902 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5903 Use by the Cirrus Maverick code which has to workaround
5904 a hardware bug triggered by such instructions. */
5905 static bool
5906 arm_memory_load_p (rtx insn)
5908 rtx body, lhs, rhs;;
5910 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5911 return false;
5913 body = PATTERN (insn);
5915 if (GET_CODE (body) != SET)
5916 return false;
5918 lhs = XEXP (body, 0);
5919 rhs = XEXP (body, 1);
5921 lhs = REG_OR_SUBREG_RTX (lhs);
5923 /* If the destination is not a general purpose
5924 register we do not have to worry. */
5925 if (GET_CODE (lhs) != REG
5926 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5927 return false;
5929 /* As well as loads from memory we also have to react
5930 to loads of invalid constants which will be turned
5931 into loads from the minipool. */
5932 return (GET_CODE (rhs) == MEM
5933 || GET_CODE (rhs) == SYMBOL_REF
5934 || note_invalid_constants (insn, -1, false));
5937 /* Return TRUE if INSN is a Cirrus instruction. */
5938 static bool
5939 arm_cirrus_insn_p (rtx insn)
5941 enum attr_cirrus attr;
5943 /* get_attr cannot accept USE or CLOBBER. */
5944 if (!insn
5945 || GET_CODE (insn) != INSN
5946 || GET_CODE (PATTERN (insn)) == USE
5947 || GET_CODE (PATTERN (insn)) == CLOBBER)
5948 return 0;
5950 attr = get_attr_cirrus (insn);
5952 return attr != CIRRUS_NOT;
5955 /* Cirrus reorg for invalid instruction combinations. */
5956 static void
5957 cirrus_reorg (rtx first)
5959 enum attr_cirrus attr;
5960 rtx body = PATTERN (first);
5961 rtx t;
5962 int nops;
5964 /* Any branch must be followed by 2 non Cirrus instructions. */
5965 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5967 nops = 0;
5968 t = next_nonnote_insn (first);
5970 if (arm_cirrus_insn_p (t))
5971 ++ nops;
5973 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5974 ++ nops;
5976 while (nops --)
5977 emit_insn_after (gen_nop (), first);
5979 return;
5982 /* (float (blah)) is in parallel with a clobber. */
5983 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5984 body = XVECEXP (body, 0, 0);
5986 if (GET_CODE (body) == SET)
5988 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5990 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5991 be followed by a non Cirrus insn. */
5992 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5994 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5995 emit_insn_after (gen_nop (), first);
5997 return;
5999 else if (arm_memory_load_p (first))
6001 unsigned int arm_regno;
6003 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
6004 ldr/cfmv64hr combination where the Rd field is the same
6005 in both instructions must be split with a non Cirrus
6006 insn. Example:
6008 ldr r0, blah
6010 cfmvsr mvf0, r0. */
6012 /* Get Arm register number for ldr insn. */
6013 if (GET_CODE (lhs) == REG)
6014 arm_regno = REGNO (lhs);
6015 else
6017 gcc_assert (GET_CODE (rhs) == REG);
6018 arm_regno = REGNO (rhs);
6021 /* Next insn. */
6022 first = next_nonnote_insn (first);
6024 if (! arm_cirrus_insn_p (first))
6025 return;
6027 body = PATTERN (first);
6029 /* (float (blah)) is in parallel with a clobber. */
6030 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
6031 body = XVECEXP (body, 0, 0);
6033 if (GET_CODE (body) == FLOAT)
6034 body = XEXP (body, 0);
6036 if (get_attr_cirrus (first) == CIRRUS_MOVE
6037 && GET_CODE (XEXP (body, 1)) == REG
6038 && arm_regno == REGNO (XEXP (body, 1)))
6039 emit_insn_after (gen_nop (), first);
6041 return;
6045 /* get_attr cannot accept USE or CLOBBER. */
6046 if (!first
6047 || GET_CODE (first) != INSN
6048 || GET_CODE (PATTERN (first)) == USE
6049 || GET_CODE (PATTERN (first)) == CLOBBER)
6050 return;
6052 attr = get_attr_cirrus (first);
6054 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
6055 must be followed by a non-coprocessor instruction. */
6056 if (attr == CIRRUS_COMPARE)
6058 nops = 0;
6060 t = next_nonnote_insn (first);
6062 if (arm_cirrus_insn_p (t))
6063 ++ nops;
6065 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
6066 ++ nops;
6068 while (nops --)
6069 emit_insn_after (gen_nop (), first);
6071 return;
6075 /* Return TRUE if X references a SYMBOL_REF. */
6077 symbol_mentioned_p (rtx x)
6079 const char * fmt;
6080 int i;
6082 if (GET_CODE (x) == SYMBOL_REF)
6083 return 1;
6085 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
6086 are constant offsets, not symbols. */
6087 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
6088 return 0;
6090 fmt = GET_RTX_FORMAT (GET_CODE (x));
6092 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6094 if (fmt[i] == 'E')
6096 int j;
6098 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6099 if (symbol_mentioned_p (XVECEXP (x, i, j)))
6100 return 1;
6102 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
6103 return 1;
6106 return 0;
6109 /* Return TRUE if X references a LABEL_REF. */
6111 label_mentioned_p (rtx x)
6113 const char * fmt;
6114 int i;
6116 if (GET_CODE (x) == LABEL_REF)
6117 return 1;
6119 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
6120 instruction, but they are constant offsets, not symbols. */
6121 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
6122 return 0;
6124 fmt = GET_RTX_FORMAT (GET_CODE (x));
6125 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6127 if (fmt[i] == 'E')
6129 int j;
6131 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6132 if (label_mentioned_p (XVECEXP (x, i, j)))
6133 return 1;
6135 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
6136 return 1;
6139 return 0;
6143 tls_mentioned_p (rtx x)
6145 switch (GET_CODE (x))
6147 case CONST:
6148 return tls_mentioned_p (XEXP (x, 0));
6150 case UNSPEC:
6151 if (XINT (x, 1) == UNSPEC_TLS)
6152 return 1;
6154 default:
6155 return 0;
6159 /* Must not copy a SET whose source operand is PC-relative. */
6161 static bool
6162 arm_cannot_copy_insn_p (rtx insn)
6164 rtx pat = PATTERN (insn);
6166 if (GET_CODE (pat) == SET)
6168 rtx rhs = SET_SRC (pat);
6170 if (GET_CODE (rhs) == UNSPEC
6171 && XINT (rhs, 1) == UNSPEC_PIC_BASE)
6172 return TRUE;
6174 if (GET_CODE (rhs) == MEM
6175 && GET_CODE (XEXP (rhs, 0)) == UNSPEC
6176 && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
6177 return TRUE;
6180 return FALSE;
6183 enum rtx_code
6184 minmax_code (rtx x)
6186 enum rtx_code code = GET_CODE (x);
6188 switch (code)
6190 case SMAX:
6191 return GE;
6192 case SMIN:
6193 return LE;
6194 case UMIN:
6195 return LEU;
6196 case UMAX:
6197 return GEU;
6198 default:
6199 gcc_unreachable ();
6203 /* Return 1 if memory locations are adjacent. */
6205 adjacent_mem_locations (rtx a, rtx b)
6207 /* We don't guarantee to preserve the order of these memory refs. */
6208 if (volatile_refs_p (a) || volatile_refs_p (b))
6209 return 0;
6211 if ((GET_CODE (XEXP (a, 0)) == REG
6212 || (GET_CODE (XEXP (a, 0)) == PLUS
6213 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
6214 && (GET_CODE (XEXP (b, 0)) == REG
6215 || (GET_CODE (XEXP (b, 0)) == PLUS
6216 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
6218 HOST_WIDE_INT val0 = 0, val1 = 0;
6219 rtx reg0, reg1;
6220 int val_diff;
6222 if (GET_CODE (XEXP (a, 0)) == PLUS)
6224 reg0 = XEXP (XEXP (a, 0), 0);
6225 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
6227 else
6228 reg0 = XEXP (a, 0);
6230 if (GET_CODE (XEXP (b, 0)) == PLUS)
6232 reg1 = XEXP (XEXP (b, 0), 0);
6233 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
6235 else
6236 reg1 = XEXP (b, 0);
6238 /* Don't accept any offset that will require multiple
6239 instructions to handle, since this would cause the
6240 arith_adjacentmem pattern to output an overlong sequence. */
6241 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
6242 return 0;
6244 /* Don't allow an eliminable register: register elimination can make
6245 the offset too large. */
6246 if (arm_eliminable_register (reg0))
6247 return 0;
6249 val_diff = val1 - val0;
6251 if (arm_ld_sched)
6253 /* If the target has load delay slots, then there's no benefit
6254 to using an ldm instruction unless the offset is zero and
6255 we are optimizing for size. */
6256 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
6257 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
6258 && (val_diff == 4 || val_diff == -4));
6261 return ((REGNO (reg0) == REGNO (reg1))
6262 && (val_diff == 4 || val_diff == -4));
6265 return 0;
6269 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6270 HOST_WIDE_INT *load_offset)
6272 int unsorted_regs[4];
6273 HOST_WIDE_INT unsorted_offsets[4];
6274 int order[4];
6275 int base_reg = -1;
6276 int i;
6278 /* Can only handle 2, 3, or 4 insns at present,
6279 though could be easily extended if required. */
6280 gcc_assert (nops >= 2 && nops <= 4);
6282 /* Loop over the operands and check that the memory references are
6283 suitable (i.e. immediate offsets from the same base register). At
6284 the same time, extract the target register, and the memory
6285 offsets. */
6286 for (i = 0; i < nops; i++)
6288 rtx reg;
6289 rtx offset;
6291 /* Convert a subreg of a mem into the mem itself. */
6292 if (GET_CODE (operands[nops + i]) == SUBREG)
6293 operands[nops + i] = alter_subreg (operands + (nops + i));
6295 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6297 /* Don't reorder volatile memory references; it doesn't seem worth
6298 looking for the case where the order is ok anyway. */
6299 if (MEM_VOLATILE_P (operands[nops + i]))
6300 return 0;
6302 offset = const0_rtx;
6304 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6305 || (GET_CODE (reg) == SUBREG
6306 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6307 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6308 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6309 == REG)
6310 || (GET_CODE (reg) == SUBREG
6311 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6312 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6313 == CONST_INT)))
6315 if (i == 0)
6317 base_reg = REGNO (reg);
6318 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6319 ? REGNO (operands[i])
6320 : REGNO (SUBREG_REG (operands[i])));
6321 order[0] = 0;
6323 else
6325 if (base_reg != (int) REGNO (reg))
6326 /* Not addressed from the same base register. */
6327 return 0;
6329 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6330 ? REGNO (operands[i])
6331 : REGNO (SUBREG_REG (operands[i])));
6332 if (unsorted_regs[i] < unsorted_regs[order[0]])
6333 order[0] = i;
6336 /* If it isn't an integer register, or if it overwrites the
6337 base register but isn't the last insn in the list, then
6338 we can't do this. */
6339 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
6340 || (i != nops - 1 && unsorted_regs[i] == base_reg))
6341 return 0;
6343 unsorted_offsets[i] = INTVAL (offset);
6345 else
6346 /* Not a suitable memory address. */
6347 return 0;
6350 /* All the useful information has now been extracted from the
6351 operands into unsorted_regs and unsorted_offsets; additionally,
6352 order[0] has been set to the lowest numbered register in the
6353 list. Sort the registers into order, and check that the memory
6354 offsets are ascending and adjacent. */
6356 for (i = 1; i < nops; i++)
6358 int j;
6360 order[i] = order[i - 1];
6361 for (j = 0; j < nops; j++)
6362 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6363 && (order[i] == order[i - 1]
6364 || unsorted_regs[j] < unsorted_regs[order[i]]))
6365 order[i] = j;
6367 /* Have we found a suitable register? if not, one must be used more
6368 than once. */
6369 if (order[i] == order[i - 1])
6370 return 0;
6372 /* Is the memory address adjacent and ascending? */
6373 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6374 return 0;
6377 if (base)
6379 *base = base_reg;
6381 for (i = 0; i < nops; i++)
6382 regs[i] = unsorted_regs[order[i]];
6384 *load_offset = unsorted_offsets[order[0]];
6387 if (unsorted_offsets[order[0]] == 0)
6388 return 1; /* ldmia */
6390 if (TARGET_ARM && unsorted_offsets[order[0]] == 4)
6391 return 2; /* ldmib */
6393 if (TARGET_ARM && unsorted_offsets[order[nops - 1]] == 0)
6394 return 3; /* ldmda */
6396 if (unsorted_offsets[order[nops - 1]] == -4)
6397 return 4; /* ldmdb */
6399 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
6400 if the offset isn't small enough. The reason 2 ldrs are faster
6401 is because these ARMs are able to do more than one cache access
6402 in a single cycle. The ARM9 and StrongARM have Harvard caches,
6403 whilst the ARM8 has a double bandwidth cache. This means that
6404 these cores can do both an instruction fetch and a data fetch in
6405 a single cycle, so the trick of calculating the address into a
6406 scratch register (one of the result regs) and then doing a load
6407 multiple actually becomes slower (and no smaller in code size).
6408 That is the transformation
6410 ldr rd1, [rbase + offset]
6411 ldr rd2, [rbase + offset + 4]
6415 add rd1, rbase, offset
6416 ldmia rd1, {rd1, rd2}
6418 produces worse code -- '3 cycles + any stalls on rd2' instead of
6419 '2 cycles + any stalls on rd2'. On ARMs with only one cache
6420 access per cycle, the first sequence could never complete in less
6421 than 6 cycles, whereas the ldm sequence would only take 5 and
6422 would make better use of sequential accesses if not hitting the
6423 cache.
6425 We cheat here and test 'arm_ld_sched' which we currently know to
6426 only be true for the ARM8, ARM9 and StrongARM. If this ever
6427 changes, then the test below needs to be reworked. */
6428 if (nops == 2 && arm_ld_sched)
6429 return 0;
6431 /* Can't do it without setting up the offset, only do this if it takes
6432 no more than one insn. */
6433 return (const_ok_for_arm (unsorted_offsets[order[0]])
6434 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
6437 const char *
6438 emit_ldm_seq (rtx *operands, int nops)
6440 int regs[4];
6441 int base_reg;
6442 HOST_WIDE_INT offset;
6443 char buf[100];
6444 int i;
6446 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6448 case 1:
6449 strcpy (buf, "ldm%(ia%)\t");
6450 break;
6452 case 2:
6453 strcpy (buf, "ldm%(ib%)\t");
6454 break;
6456 case 3:
6457 strcpy (buf, "ldm%(da%)\t");
6458 break;
6460 case 4:
6461 strcpy (buf, "ldm%(db%)\t");
6462 break;
6464 case 5:
6465 if (offset >= 0)
6466 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6467 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6468 (long) offset);
6469 else
6470 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6471 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6472 (long) -offset);
6473 output_asm_insn (buf, operands);
6474 base_reg = regs[0];
6475 strcpy (buf, "ldm%(ia%)\t");
6476 break;
6478 default:
6479 gcc_unreachable ();
6482 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6483 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6485 for (i = 1; i < nops; i++)
6486 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6487 reg_names[regs[i]]);
6489 strcat (buf, "}\t%@ phole ldm");
6491 output_asm_insn (buf, operands);
6492 return "";
6496 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6497 HOST_WIDE_INT * load_offset)
6499 int unsorted_regs[4];
6500 HOST_WIDE_INT unsorted_offsets[4];
6501 int order[4];
6502 int base_reg = -1;
6503 int i;
6505 /* Can only handle 2, 3, or 4 insns at present, though could be easily
6506 extended if required. */
6507 gcc_assert (nops >= 2 && nops <= 4);
6509 /* Loop over the operands and check that the memory references are
6510 suitable (i.e. immediate offsets from the same base register). At
6511 the same time, extract the target register, and the memory
6512 offsets. */
6513 for (i = 0; i < nops; i++)
6515 rtx reg;
6516 rtx offset;
6518 /* Convert a subreg of a mem into the mem itself. */
6519 if (GET_CODE (operands[nops + i]) == SUBREG)
6520 operands[nops + i] = alter_subreg (operands + (nops + i));
6522 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6524 /* Don't reorder volatile memory references; it doesn't seem worth
6525 looking for the case where the order is ok anyway. */
6526 if (MEM_VOLATILE_P (operands[nops + i]))
6527 return 0;
6529 offset = const0_rtx;
6531 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6532 || (GET_CODE (reg) == SUBREG
6533 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6534 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6535 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6536 == REG)
6537 || (GET_CODE (reg) == SUBREG
6538 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6539 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6540 == CONST_INT)))
6542 if (i == 0)
6544 base_reg = REGNO (reg);
6545 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6546 ? REGNO (operands[i])
6547 : REGNO (SUBREG_REG (operands[i])));
6548 order[0] = 0;
6550 else
6552 if (base_reg != (int) REGNO (reg))
6553 /* Not addressed from the same base register. */
6554 return 0;
6556 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6557 ? REGNO (operands[i])
6558 : REGNO (SUBREG_REG (operands[i])));
6559 if (unsorted_regs[i] < unsorted_regs[order[0]])
6560 order[0] = i;
6563 /* If it isn't an integer register, then we can't do this. */
6564 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
6565 return 0;
6567 unsorted_offsets[i] = INTVAL (offset);
6569 else
6570 /* Not a suitable memory address. */
6571 return 0;
6574 /* All the useful information has now been extracted from the
6575 operands into unsorted_regs and unsorted_offsets; additionally,
6576 order[0] has been set to the lowest numbered register in the
6577 list. Sort the registers into order, and check that the memory
6578 offsets are ascending and adjacent. */
6580 for (i = 1; i < nops; i++)
6582 int j;
6584 order[i] = order[i - 1];
6585 for (j = 0; j < nops; j++)
6586 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6587 && (order[i] == order[i - 1]
6588 || unsorted_regs[j] < unsorted_regs[order[i]]))
6589 order[i] = j;
6591 /* Have we found a suitable register? if not, one must be used more
6592 than once. */
6593 if (order[i] == order[i - 1])
6594 return 0;
6596 /* Is the memory address adjacent and ascending? */
6597 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6598 return 0;
6601 if (base)
6603 *base = base_reg;
6605 for (i = 0; i < nops; i++)
6606 regs[i] = unsorted_regs[order[i]];
6608 *load_offset = unsorted_offsets[order[0]];
6611 if (unsorted_offsets[order[0]] == 0)
6612 return 1; /* stmia */
6614 if (unsorted_offsets[order[0]] == 4)
6615 return 2; /* stmib */
6617 if (unsorted_offsets[order[nops - 1]] == 0)
6618 return 3; /* stmda */
6620 if (unsorted_offsets[order[nops - 1]] == -4)
6621 return 4; /* stmdb */
6623 return 0;
6626 const char *
6627 emit_stm_seq (rtx *operands, int nops)
6629 int regs[4];
6630 int base_reg;
6631 HOST_WIDE_INT offset;
6632 char buf[100];
6633 int i;
6635 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6637 case 1:
6638 strcpy (buf, "stm%(ia%)\t");
6639 break;
6641 case 2:
6642 strcpy (buf, "stm%(ib%)\t");
6643 break;
6645 case 3:
6646 strcpy (buf, "stm%(da%)\t");
6647 break;
6649 case 4:
6650 strcpy (buf, "stm%(db%)\t");
6651 break;
6653 default:
6654 gcc_unreachable ();
6657 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6658 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6660 for (i = 1; i < nops; i++)
6661 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6662 reg_names[regs[i]]);
6664 strcat (buf, "}\t%@ phole stm");
6666 output_asm_insn (buf, operands);
6667 return "";
6670 /* Routines for use in generating RTL. */
6673 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
6674 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6676 HOST_WIDE_INT offset = *offsetp;
6677 int i = 0, j;
6678 rtx result;
6679 int sign = up ? 1 : -1;
6680 rtx mem, addr;
6682 /* XScale has load-store double instructions, but they have stricter
6683 alignment requirements than load-store multiple, so we cannot
6684 use them.
6686 For XScale ldm requires 2 + NREGS cycles to complete and blocks
6687 the pipeline until completion.
6689 NREGS CYCLES
6695 An ldr instruction takes 1-3 cycles, but does not block the
6696 pipeline.
6698 NREGS CYCLES
6699 1 1-3
6700 2 2-6
6701 3 3-9
6702 4 4-12
6704 Best case ldr will always win. However, the more ldr instructions
6705 we issue, the less likely we are to be able to schedule them well.
6706 Using ldr instructions also increases code size.
6708 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6709 for counts of 3 or 4 regs. */
6710 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6712 rtx seq;
6714 start_sequence ();
6716 for (i = 0; i < count; i++)
6718 addr = plus_constant (from, i * 4 * sign);
6719 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6720 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
6721 offset += 4 * sign;
6724 if (write_back)
6726 emit_move_insn (from, plus_constant (from, count * 4 * sign));
6727 *offsetp = offset;
6730 seq = get_insns ();
6731 end_sequence ();
6733 return seq;
6736 result = gen_rtx_PARALLEL (VOIDmode,
6737 rtvec_alloc (count + (write_back ? 1 : 0)));
6738 if (write_back)
6740 XVECEXP (result, 0, 0)
6741 = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
6742 i = 1;
6743 count++;
6746 for (j = 0; i < count; i++, j++)
6748 addr = plus_constant (from, j * 4 * sign);
6749 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6750 XVECEXP (result, 0, i)
6751 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
6752 offset += 4 * sign;
6755 if (write_back)
6756 *offsetp = offset;
6758 return result;
6762 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
6763 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6765 HOST_WIDE_INT offset = *offsetp;
6766 int i = 0, j;
6767 rtx result;
6768 int sign = up ? 1 : -1;
6769 rtx mem, addr;
6771 /* See arm_gen_load_multiple for discussion of
6772 the pros/cons of ldm/stm usage for XScale. */
6773 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6775 rtx seq;
6777 start_sequence ();
6779 for (i = 0; i < count; i++)
6781 addr = plus_constant (to, i * 4 * sign);
6782 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6783 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6784 offset += 4 * sign;
6787 if (write_back)
6789 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6790 *offsetp = offset;
6793 seq = get_insns ();
6794 end_sequence ();
6796 return seq;
6799 result = gen_rtx_PARALLEL (VOIDmode,
6800 rtvec_alloc (count + (write_back ? 1 : 0)));
6801 if (write_back)
6803 XVECEXP (result, 0, 0)
6804 = gen_rtx_SET (VOIDmode, to,
6805 plus_constant (to, count * 4 * sign));
6806 i = 1;
6807 count++;
6810 for (j = 0; i < count; i++, j++)
6812 addr = plus_constant (to, j * 4 * sign);
6813 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6814 XVECEXP (result, 0, i)
6815 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6816 offset += 4 * sign;
6819 if (write_back)
6820 *offsetp = offset;
6822 return result;
6826 arm_gen_movmemqi (rtx *operands)
6828 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6829 HOST_WIDE_INT srcoffset, dstoffset;
6830 int i;
6831 rtx src, dst, srcbase, dstbase;
6832 rtx part_bytes_reg = NULL;
6833 rtx mem;
6835 if (GET_CODE (operands[2]) != CONST_INT
6836 || GET_CODE (operands[3]) != CONST_INT
6837 || INTVAL (operands[2]) > 64
6838 || INTVAL (operands[3]) & 3)
6839 return 0;
6841 dstbase = operands[0];
6842 srcbase = operands[1];
6844 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
6845 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
6847 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6848 out_words_to_go = INTVAL (operands[2]) / 4;
6849 last_bytes = INTVAL (operands[2]) & 3;
6850 dstoffset = srcoffset = 0;
6852 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6853 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6855 for (i = 0; in_words_to_go >= 2; i+=4)
6857 if (in_words_to_go > 4)
6858 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6859 srcbase, &srcoffset));
6860 else
6861 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6862 FALSE, srcbase, &srcoffset));
6864 if (out_words_to_go)
6866 if (out_words_to_go > 4)
6867 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6868 dstbase, &dstoffset));
6869 else if (out_words_to_go != 1)
6870 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6871 dst, TRUE,
6872 (last_bytes == 0
6873 ? FALSE : TRUE),
6874 dstbase, &dstoffset));
6875 else
6877 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6878 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6879 if (last_bytes != 0)
6881 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6882 dstoffset += 4;
6887 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6888 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6891 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6892 if (out_words_to_go)
6894 rtx sreg;
6896 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6897 sreg = copy_to_reg (mem);
6899 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6900 emit_move_insn (mem, sreg);
6901 in_words_to_go--;
6903 gcc_assert (!in_words_to_go); /* Sanity check */
6906 if (in_words_to_go)
6908 gcc_assert (in_words_to_go > 0);
6910 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6911 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6914 gcc_assert (!last_bytes || part_bytes_reg);
6916 if (BYTES_BIG_ENDIAN && last_bytes)
6918 rtx tmp = gen_reg_rtx (SImode);
6920 /* The bytes we want are in the top end of the word. */
6921 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6922 GEN_INT (8 * (4 - last_bytes))));
6923 part_bytes_reg = tmp;
6925 while (last_bytes)
6927 mem = adjust_automodify_address (dstbase, QImode,
6928 plus_constant (dst, last_bytes - 1),
6929 dstoffset + last_bytes - 1);
6930 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6932 if (--last_bytes)
6934 tmp = gen_reg_rtx (SImode);
6935 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6936 part_bytes_reg = tmp;
6941 else
6943 if (last_bytes > 1)
6945 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6946 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6947 last_bytes -= 2;
6948 if (last_bytes)
6950 rtx tmp = gen_reg_rtx (SImode);
6951 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6952 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6953 part_bytes_reg = tmp;
6954 dstoffset += 2;
6958 if (last_bytes)
6960 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6961 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6965 return 1;
6968 /* Select a dominance comparison mode if possible for a test of the general
6969 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6970 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6971 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6972 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6973 In all cases OP will be either EQ or NE, but we don't need to know which
6974 here. If we are unable to support a dominance comparison we return
6975 CC mode. This will then fail to match for the RTL expressions that
6976 generate this call. */
6977 enum machine_mode
6978 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6980 enum rtx_code cond1, cond2;
6981 int swapped = 0;
6983 /* Currently we will probably get the wrong result if the individual
6984 comparisons are not simple. This also ensures that it is safe to
6985 reverse a comparison if necessary. */
6986 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6987 != CCmode)
6988 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6989 != CCmode))
6990 return CCmode;
6992 /* The if_then_else variant of this tests the second condition if the
6993 first passes, but is true if the first fails. Reverse the first
6994 condition to get a true "inclusive-or" expression. */
6995 if (cond_or == DOM_CC_NX_OR_Y)
6996 cond1 = reverse_condition (cond1);
6998 /* If the comparisons are not equal, and one doesn't dominate the other,
6999 then we can't do this. */
7000 if (cond1 != cond2
7001 && !comparison_dominates_p (cond1, cond2)
7002 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
7003 return CCmode;
7005 if (swapped)
7007 enum rtx_code temp = cond1;
7008 cond1 = cond2;
7009 cond2 = temp;
7012 switch (cond1)
7014 case EQ:
7015 if (cond_or == DOM_CC_X_AND_Y)
7016 return CC_DEQmode;
7018 switch (cond2)
7020 case EQ: return CC_DEQmode;
7021 case LE: return CC_DLEmode;
7022 case LEU: return CC_DLEUmode;
7023 case GE: return CC_DGEmode;
7024 case GEU: return CC_DGEUmode;
7025 default: gcc_unreachable ();
7028 case LT:
7029 if (cond_or == DOM_CC_X_AND_Y)
7030 return CC_DLTmode;
7032 switch (cond2)
7034 case LT:
7035 return CC_DLTmode;
7036 case LE:
7037 return CC_DLEmode;
7038 case NE:
7039 return CC_DNEmode;
7040 default:
7041 gcc_unreachable ();
7044 case GT:
7045 if (cond_or == DOM_CC_X_AND_Y)
7046 return CC_DGTmode;
7048 switch (cond2)
7050 case GT:
7051 return CC_DGTmode;
7052 case GE:
7053 return CC_DGEmode;
7054 case NE:
7055 return CC_DNEmode;
7056 default:
7057 gcc_unreachable ();
7060 case LTU:
7061 if (cond_or == DOM_CC_X_AND_Y)
7062 return CC_DLTUmode;
7064 switch (cond2)
7066 case LTU:
7067 return CC_DLTUmode;
7068 case LEU:
7069 return CC_DLEUmode;
7070 case NE:
7071 return CC_DNEmode;
7072 default:
7073 gcc_unreachable ();
7076 case GTU:
7077 if (cond_or == DOM_CC_X_AND_Y)
7078 return CC_DGTUmode;
7080 switch (cond2)
7082 case GTU:
7083 return CC_DGTUmode;
7084 case GEU:
7085 return CC_DGEUmode;
7086 case NE:
7087 return CC_DNEmode;
7088 default:
7089 gcc_unreachable ();
7092 /* The remaining cases only occur when both comparisons are the
7093 same. */
7094 case NE:
7095 gcc_assert (cond1 == cond2);
7096 return CC_DNEmode;
7098 case LE:
7099 gcc_assert (cond1 == cond2);
7100 return CC_DLEmode;
7102 case GE:
7103 gcc_assert (cond1 == cond2);
7104 return CC_DGEmode;
7106 case LEU:
7107 gcc_assert (cond1 == cond2);
7108 return CC_DLEUmode;
7110 case GEU:
7111 gcc_assert (cond1 == cond2);
7112 return CC_DGEUmode;
7114 default:
7115 gcc_unreachable ();
7119 enum machine_mode
7120 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
7122 /* All floating point compares return CCFP if it is an equality
7123 comparison, and CCFPE otherwise. */
7124 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
7126 switch (op)
7128 case EQ:
7129 case NE:
7130 case UNORDERED:
7131 case ORDERED:
7132 case UNLT:
7133 case UNLE:
7134 case UNGT:
7135 case UNGE:
7136 case UNEQ:
7137 case LTGT:
7138 return CCFPmode;
7140 case LT:
7141 case LE:
7142 case GT:
7143 case GE:
7144 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
7145 return CCFPmode;
7146 return CCFPEmode;
7148 default:
7149 gcc_unreachable ();
7153 /* A compare with a shifted operand. Because of canonicalization, the
7154 comparison will have to be swapped when we emit the assembler. */
7155 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
7156 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
7157 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
7158 || GET_CODE (x) == ROTATERT))
7159 return CC_SWPmode;
7161 /* This operation is performed swapped, but since we only rely on the Z
7162 flag we don't need an additional mode. */
7163 if (GET_MODE (y) == SImode && REG_P (y)
7164 && GET_CODE (x) == NEG
7165 && (op == EQ || op == NE))
7166 return CC_Zmode;
7168 /* This is a special case that is used by combine to allow a
7169 comparison of a shifted byte load to be split into a zero-extend
7170 followed by a comparison of the shifted integer (only valid for
7171 equalities and unsigned inequalities). */
7172 if (GET_MODE (x) == SImode
7173 && GET_CODE (x) == ASHIFT
7174 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
7175 && GET_CODE (XEXP (x, 0)) == SUBREG
7176 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
7177 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
7178 && (op == EQ || op == NE
7179 || op == GEU || op == GTU || op == LTU || op == LEU)
7180 && GET_CODE (y) == CONST_INT)
7181 return CC_Zmode;
7183 /* A construct for a conditional compare, if the false arm contains
7184 0, then both conditions must be true, otherwise either condition
7185 must be true. Not all conditions are possible, so CCmode is
7186 returned if it can't be done. */
7187 if (GET_CODE (x) == IF_THEN_ELSE
7188 && (XEXP (x, 2) == const0_rtx
7189 || XEXP (x, 2) == const1_rtx)
7190 && COMPARISON_P (XEXP (x, 0))
7191 && COMPARISON_P (XEXP (x, 1)))
7192 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
7193 INTVAL (XEXP (x, 2)));
7195 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
7196 if (GET_CODE (x) == AND
7197 && COMPARISON_P (XEXP (x, 0))
7198 && COMPARISON_P (XEXP (x, 1)))
7199 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
7200 DOM_CC_X_AND_Y);
7202 if (GET_CODE (x) == IOR
7203 && COMPARISON_P (XEXP (x, 0))
7204 && COMPARISON_P (XEXP (x, 1)))
7205 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
7206 DOM_CC_X_OR_Y);
7208 /* An operation (on Thumb) where we want to test for a single bit.
7209 This is done by shifting that bit up into the top bit of a
7210 scratch register; we can then branch on the sign bit. */
7211 if (TARGET_THUMB1
7212 && GET_MODE (x) == SImode
7213 && (op == EQ || op == NE)
7214 && GET_CODE (x) == ZERO_EXTRACT
7215 && XEXP (x, 1) == const1_rtx)
7216 return CC_Nmode;
7218 /* An operation that sets the condition codes as a side-effect, the
7219 V flag is not set correctly, so we can only use comparisons where
7220 this doesn't matter. (For LT and GE we can use "mi" and "pl"
7221 instead.) */
7222 /* ??? Does the ZERO_EXTRACT case really apply to thumb2? */
7223 if (GET_MODE (x) == SImode
7224 && y == const0_rtx
7225 && (op == EQ || op == NE || op == LT || op == GE)
7226 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
7227 || GET_CODE (x) == AND || GET_CODE (x) == IOR
7228 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
7229 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
7230 || GET_CODE (x) == LSHIFTRT
7231 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
7232 || GET_CODE (x) == ROTATERT
7233 || (TARGET_32BIT && GET_CODE (x) == ZERO_EXTRACT)))
7234 return CC_NOOVmode;
7236 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
7237 return CC_Zmode;
7239 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
7240 && GET_CODE (x) == PLUS
7241 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
7242 return CC_Cmode;
7244 return CCmode;
7247 /* X and Y are two things to compare using CODE. Emit the compare insn and
7248 return the rtx for register 0 in the proper mode. FP means this is a
7249 floating point compare: I don't think that it is needed on the arm. */
7251 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
7253 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
7254 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
7256 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
7258 return cc_reg;
7261 /* Generate a sequence of insns that will generate the correct return
7262 address mask depending on the physical architecture that the program
7263 is running on. */
7265 arm_gen_return_addr_mask (void)
7267 rtx reg = gen_reg_rtx (Pmode);
7269 emit_insn (gen_return_addr_mask (reg));
7270 return reg;
7273 void
7274 arm_reload_in_hi (rtx *operands)
7276 rtx ref = operands[1];
7277 rtx base, scratch;
7278 HOST_WIDE_INT offset = 0;
7280 if (GET_CODE (ref) == SUBREG)
7282 offset = SUBREG_BYTE (ref);
7283 ref = SUBREG_REG (ref);
7286 if (GET_CODE (ref) == REG)
7288 /* We have a pseudo which has been spilt onto the stack; there
7289 are two cases here: the first where there is a simple
7290 stack-slot replacement and a second where the stack-slot is
7291 out of range, or is used as a subreg. */
7292 if (reg_equiv_mem[REGNO (ref)])
7294 ref = reg_equiv_mem[REGNO (ref)];
7295 base = find_replacement (&XEXP (ref, 0));
7297 else
7298 /* The slot is out of range, or was dressed up in a SUBREG. */
7299 base = reg_equiv_address[REGNO (ref)];
7301 else
7302 base = find_replacement (&XEXP (ref, 0));
7304 /* Handle the case where the address is too complex to be offset by 1. */
7305 if (GET_CODE (base) == MINUS
7306 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
7308 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7310 emit_set_insn (base_plus, base);
7311 base = base_plus;
7313 else if (GET_CODE (base) == PLUS)
7315 /* The addend must be CONST_INT, or we would have dealt with it above. */
7316 HOST_WIDE_INT hi, lo;
7318 offset += INTVAL (XEXP (base, 1));
7319 base = XEXP (base, 0);
7321 /* Rework the address into a legal sequence of insns. */
7322 /* Valid range for lo is -4095 -> 4095 */
7323 lo = (offset >= 0
7324 ? (offset & 0xfff)
7325 : -((-offset) & 0xfff));
7327 /* Corner case, if lo is the max offset then we would be out of range
7328 once we have added the additional 1 below, so bump the msb into the
7329 pre-loading insn(s). */
7330 if (lo == 4095)
7331 lo &= 0x7ff;
7333 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7334 ^ (HOST_WIDE_INT) 0x80000000)
7335 - (HOST_WIDE_INT) 0x80000000);
7337 gcc_assert (hi + lo == offset);
7339 if (hi != 0)
7341 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7343 /* Get the base address; addsi3 knows how to handle constants
7344 that require more than one insn. */
7345 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7346 base = base_plus;
7347 offset = lo;
7351 /* Operands[2] may overlap operands[0] (though it won't overlap
7352 operands[1]), that's why we asked for a DImode reg -- so we can
7353 use the bit that does not overlap. */
7354 if (REGNO (operands[2]) == REGNO (operands[0]))
7355 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7356 else
7357 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
7359 emit_insn (gen_zero_extendqisi2 (scratch,
7360 gen_rtx_MEM (QImode,
7361 plus_constant (base,
7362 offset))));
7363 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
7364 gen_rtx_MEM (QImode,
7365 plus_constant (base,
7366 offset + 1))));
7367 if (!BYTES_BIG_ENDIAN)
7368 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
7369 gen_rtx_IOR (SImode,
7370 gen_rtx_ASHIFT
7371 (SImode,
7372 gen_rtx_SUBREG (SImode, operands[0], 0),
7373 GEN_INT (8)),
7374 scratch));
7375 else
7376 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
7377 gen_rtx_IOR (SImode,
7378 gen_rtx_ASHIFT (SImode, scratch,
7379 GEN_INT (8)),
7380 gen_rtx_SUBREG (SImode, operands[0], 0)));
7383 /* Handle storing a half-word to memory during reload by synthesizing as two
7384 byte stores. Take care not to clobber the input values until after we
7385 have moved them somewhere safe. This code assumes that if the DImode
7386 scratch in operands[2] overlaps either the input value or output address
7387 in some way, then that value must die in this insn (we absolutely need
7388 two scratch registers for some corner cases). */
7389 void
7390 arm_reload_out_hi (rtx *operands)
7392 rtx ref = operands[0];
7393 rtx outval = operands[1];
7394 rtx base, scratch;
7395 HOST_WIDE_INT offset = 0;
7397 if (GET_CODE (ref) == SUBREG)
7399 offset = SUBREG_BYTE (ref);
7400 ref = SUBREG_REG (ref);
7403 if (GET_CODE (ref) == REG)
7405 /* We have a pseudo which has been spilt onto the stack; there
7406 are two cases here: the first where there is a simple
7407 stack-slot replacement and a second where the stack-slot is
7408 out of range, or is used as a subreg. */
7409 if (reg_equiv_mem[REGNO (ref)])
7411 ref = reg_equiv_mem[REGNO (ref)];
7412 base = find_replacement (&XEXP (ref, 0));
7414 else
7415 /* The slot is out of range, or was dressed up in a SUBREG. */
7416 base = reg_equiv_address[REGNO (ref)];
7418 else
7419 base = find_replacement (&XEXP (ref, 0));
7421 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
7423 /* Handle the case where the address is too complex to be offset by 1. */
7424 if (GET_CODE (base) == MINUS
7425 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
7427 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7429 /* Be careful not to destroy OUTVAL. */
7430 if (reg_overlap_mentioned_p (base_plus, outval))
7432 /* Updating base_plus might destroy outval, see if we can
7433 swap the scratch and base_plus. */
7434 if (!reg_overlap_mentioned_p (scratch, outval))
7436 rtx tmp = scratch;
7437 scratch = base_plus;
7438 base_plus = tmp;
7440 else
7442 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7444 /* Be conservative and copy OUTVAL into the scratch now,
7445 this should only be necessary if outval is a subreg
7446 of something larger than a word. */
7447 /* XXX Might this clobber base? I can't see how it can,
7448 since scratch is known to overlap with OUTVAL, and
7449 must be wider than a word. */
7450 emit_insn (gen_movhi (scratch_hi, outval));
7451 outval = scratch_hi;
7455 emit_set_insn (base_plus, base);
7456 base = base_plus;
7458 else if (GET_CODE (base) == PLUS)
7460 /* The addend must be CONST_INT, or we would have dealt with it above. */
7461 HOST_WIDE_INT hi, lo;
7463 offset += INTVAL (XEXP (base, 1));
7464 base = XEXP (base, 0);
7466 /* Rework the address into a legal sequence of insns. */
7467 /* Valid range for lo is -4095 -> 4095 */
7468 lo = (offset >= 0
7469 ? (offset & 0xfff)
7470 : -((-offset) & 0xfff));
7472 /* Corner case, if lo is the max offset then we would be out of range
7473 once we have added the additional 1 below, so bump the msb into the
7474 pre-loading insn(s). */
7475 if (lo == 4095)
7476 lo &= 0x7ff;
7478 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7479 ^ (HOST_WIDE_INT) 0x80000000)
7480 - (HOST_WIDE_INT) 0x80000000);
7482 gcc_assert (hi + lo == offset);
7484 if (hi != 0)
7486 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7488 /* Be careful not to destroy OUTVAL. */
7489 if (reg_overlap_mentioned_p (base_plus, outval))
7491 /* Updating base_plus might destroy outval, see if we
7492 can swap the scratch and base_plus. */
7493 if (!reg_overlap_mentioned_p (scratch, outval))
7495 rtx tmp = scratch;
7496 scratch = base_plus;
7497 base_plus = tmp;
7499 else
7501 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7503 /* Be conservative and copy outval into scratch now,
7504 this should only be necessary if outval is a
7505 subreg of something larger than a word. */
7506 /* XXX Might this clobber base? I can't see how it
7507 can, since scratch is known to overlap with
7508 outval. */
7509 emit_insn (gen_movhi (scratch_hi, outval));
7510 outval = scratch_hi;
7514 /* Get the base address; addsi3 knows how to handle constants
7515 that require more than one insn. */
7516 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7517 base = base_plus;
7518 offset = lo;
7522 if (BYTES_BIG_ENDIAN)
7524 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7525 plus_constant (base, offset + 1)),
7526 gen_lowpart (QImode, outval)));
7527 emit_insn (gen_lshrsi3 (scratch,
7528 gen_rtx_SUBREG (SImode, outval, 0),
7529 GEN_INT (8)));
7530 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7531 gen_lowpart (QImode, scratch)));
7533 else
7535 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7536 gen_lowpart (QImode, outval)));
7537 emit_insn (gen_lshrsi3 (scratch,
7538 gen_rtx_SUBREG (SImode, outval, 0),
7539 GEN_INT (8)));
7540 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7541 plus_constant (base, offset + 1)),
7542 gen_lowpart (QImode, scratch)));
7546 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
7547 (padded to the size of a word) should be passed in a register. */
7549 static bool
7550 arm_must_pass_in_stack (enum machine_mode mode, tree type)
7552 if (TARGET_AAPCS_BASED)
7553 return must_pass_in_stack_var_size (mode, type);
7554 else
7555 return must_pass_in_stack_var_size_or_pad (mode, type);
7559 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
7560 Return true if an argument passed on the stack should be padded upwards,
7561 i.e. if the least-significant byte has useful data.
7562 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
7563 aggregate types are placed in the lowest memory address. */
7565 bool
7566 arm_pad_arg_upward (enum machine_mode mode, tree type)
7568 if (!TARGET_AAPCS_BASED)
7569 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
7571 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
7572 return false;
7574 return true;
7578 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
7579 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
7580 byte of the register has useful data, and return the opposite if the
7581 most significant byte does.
7582 For AAPCS, small aggregates and small complex types are always padded
7583 upwards. */
7585 bool
7586 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
7587 tree type, int first ATTRIBUTE_UNUSED)
7589 if (TARGET_AAPCS_BASED
7590 && BYTES_BIG_ENDIAN
7591 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
7592 && int_size_in_bytes (type) <= 4)
7593 return true;
7595 /* Otherwise, use default padding. */
7596 return !BYTES_BIG_ENDIAN;
7600 /* Print a symbolic form of X to the debug file, F. */
7601 static void
7602 arm_print_value (FILE *f, rtx x)
7604 switch (GET_CODE (x))
7606 case CONST_INT:
7607 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
7608 return;
7610 case CONST_DOUBLE:
7611 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
7612 return;
7614 case CONST_VECTOR:
7616 int i;
7618 fprintf (f, "<");
7619 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
7621 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
7622 if (i < (CONST_VECTOR_NUNITS (x) - 1))
7623 fputc (',', f);
7625 fprintf (f, ">");
7627 return;
7629 case CONST_STRING:
7630 fprintf (f, "\"%s\"", XSTR (x, 0));
7631 return;
7633 case SYMBOL_REF:
7634 fprintf (f, "`%s'", XSTR (x, 0));
7635 return;
7637 case LABEL_REF:
7638 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
7639 return;
7641 case CONST:
7642 arm_print_value (f, XEXP (x, 0));
7643 return;
7645 case PLUS:
7646 arm_print_value (f, XEXP (x, 0));
7647 fprintf (f, "+");
7648 arm_print_value (f, XEXP (x, 1));
7649 return;
7651 case PC:
7652 fprintf (f, "pc");
7653 return;
7655 default:
7656 fprintf (f, "????");
7657 return;
7661 /* Routines for manipulation of the constant pool. */
7663 /* Arm instructions cannot load a large constant directly into a
7664 register; they have to come from a pc relative load. The constant
7665 must therefore be placed in the addressable range of the pc
7666 relative load. Depending on the precise pc relative load
7667 instruction the range is somewhere between 256 bytes and 4k. This
7668 means that we often have to dump a constant inside a function, and
7669 generate code to branch around it.
7671 It is important to minimize this, since the branches will slow
7672 things down and make the code larger.
7674 Normally we can hide the table after an existing unconditional
7675 branch so that there is no interruption of the flow, but in the
7676 worst case the code looks like this:
7678 ldr rn, L1
7680 b L2
7681 align
7682 L1: .long value
7686 ldr rn, L3
7688 b L4
7689 align
7690 L3: .long value
7694 We fix this by performing a scan after scheduling, which notices
7695 which instructions need to have their operands fetched from the
7696 constant table and builds the table.
7698 The algorithm starts by building a table of all the constants that
7699 need fixing up and all the natural barriers in the function (places
7700 where a constant table can be dropped without breaking the flow).
7701 For each fixup we note how far the pc-relative replacement will be
7702 able to reach and the offset of the instruction into the function.
7704 Having built the table we then group the fixes together to form
7705 tables that are as large as possible (subject to addressing
7706 constraints) and emit each table of constants after the last
7707 barrier that is within range of all the instructions in the group.
7708 If a group does not contain a barrier, then we forcibly create one
7709 by inserting a jump instruction into the flow. Once the table has
7710 been inserted, the insns are then modified to reference the
7711 relevant entry in the pool.
7713 Possible enhancements to the algorithm (not implemented) are:
7715 1) For some processors and object formats, there may be benefit in
7716 aligning the pools to the start of cache lines; this alignment
7717 would need to be taken into account when calculating addressability
7718 of a pool. */
7720 /* These typedefs are located at the start of this file, so that
7721 they can be used in the prototypes there. This comment is to
7722 remind readers of that fact so that the following structures
7723 can be understood more easily.
7725 typedef struct minipool_node Mnode;
7726 typedef struct minipool_fixup Mfix; */
7728 struct minipool_node
7730 /* Doubly linked chain of entries. */
7731 Mnode * next;
7732 Mnode * prev;
7733 /* The maximum offset into the code that this entry can be placed. While
7734 pushing fixes for forward references, all entries are sorted in order
7735 of increasing max_address. */
7736 HOST_WIDE_INT max_address;
7737 /* Similarly for an entry inserted for a backwards ref. */
7738 HOST_WIDE_INT min_address;
7739 /* The number of fixes referencing this entry. This can become zero
7740 if we "unpush" an entry. In this case we ignore the entry when we
7741 come to emit the code. */
7742 int refcount;
7743 /* The offset from the start of the minipool. */
7744 HOST_WIDE_INT offset;
7745 /* The value in table. */
7746 rtx value;
7747 /* The mode of value. */
7748 enum machine_mode mode;
7749 /* The size of the value. With iWMMXt enabled
7750 sizes > 4 also imply an alignment of 8-bytes. */
7751 int fix_size;
7754 struct minipool_fixup
7756 Mfix * next;
7757 rtx insn;
7758 HOST_WIDE_INT address;
7759 rtx * loc;
7760 enum machine_mode mode;
7761 int fix_size;
7762 rtx value;
7763 Mnode * minipool;
7764 HOST_WIDE_INT forwards;
7765 HOST_WIDE_INT backwards;
7768 /* Fixes less than a word need padding out to a word boundary. */
7769 #define MINIPOOL_FIX_SIZE(mode) \
7770 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
7772 static Mnode * minipool_vector_head;
7773 static Mnode * minipool_vector_tail;
7774 static rtx minipool_vector_label;
7775 static int minipool_pad;
7777 /* The linked list of all minipool fixes required for this function. */
7778 Mfix * minipool_fix_head;
7779 Mfix * minipool_fix_tail;
7780 /* The fix entry for the current minipool, once it has been placed. */
7781 Mfix * minipool_barrier;
7783 /* Determines if INSN is the start of a jump table. Returns the end
7784 of the TABLE or NULL_RTX. */
7785 static rtx
7786 is_jump_table (rtx insn)
7788 rtx table;
7790 if (GET_CODE (insn) == JUMP_INSN
7791 && JUMP_LABEL (insn) != NULL
7792 && ((table = next_real_insn (JUMP_LABEL (insn)))
7793 == next_real_insn (insn))
7794 && table != NULL
7795 && GET_CODE (table) == JUMP_INSN
7796 && (GET_CODE (PATTERN (table)) == ADDR_VEC
7797 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
7798 return table;
7800 return NULL_RTX;
7803 #ifndef JUMP_TABLES_IN_TEXT_SECTION
7804 #define JUMP_TABLES_IN_TEXT_SECTION 0
7805 #endif
7807 static HOST_WIDE_INT
7808 get_jump_table_size (rtx insn)
7810 /* ADDR_VECs only take room if read-only data does into the text
7811 section. */
7812 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
7814 rtx body = PATTERN (insn);
7815 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7816 HOST_WIDE_INT size;
7817 HOST_WIDE_INT modesize;
7819 modesize = GET_MODE_SIZE (GET_MODE (body));
7820 size = modesize * XVECLEN (body, elt);
7821 switch (modesize)
7823 case 1:
7824 /* Round up size of TBB table to a halfword boundary. */
7825 size = (size + 1) & ~(HOST_WIDE_INT)1;
7826 break;
7827 case 2:
7828 /* No padding necessary for TBH. */
7829 break;
7830 case 4:
7831 /* Add two bytes for alignment on Thumb. */
7832 if (TARGET_THUMB)
7833 size += 2;
7834 break;
7835 default:
7836 gcc_unreachable ();
7838 return size;
7841 return 0;
7844 /* Move a minipool fix MP from its current location to before MAX_MP.
7845 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7846 constraints may need updating. */
7847 static Mnode *
7848 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7849 HOST_WIDE_INT max_address)
7851 /* The code below assumes these are different. */
7852 gcc_assert (mp != max_mp);
7854 if (max_mp == NULL)
7856 if (max_address < mp->max_address)
7857 mp->max_address = max_address;
7859 else
7861 if (max_address > max_mp->max_address - mp->fix_size)
7862 mp->max_address = max_mp->max_address - mp->fix_size;
7863 else
7864 mp->max_address = max_address;
7866 /* Unlink MP from its current position. Since max_mp is non-null,
7867 mp->prev must be non-null. */
7868 mp->prev->next = mp->next;
7869 if (mp->next != NULL)
7870 mp->next->prev = mp->prev;
7871 else
7872 minipool_vector_tail = mp->prev;
7874 /* Re-insert it before MAX_MP. */
7875 mp->next = max_mp;
7876 mp->prev = max_mp->prev;
7877 max_mp->prev = mp;
7879 if (mp->prev != NULL)
7880 mp->prev->next = mp;
7881 else
7882 minipool_vector_head = mp;
7885 /* Save the new entry. */
7886 max_mp = mp;
7888 /* Scan over the preceding entries and adjust their addresses as
7889 required. */
7890 while (mp->prev != NULL
7891 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7893 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7894 mp = mp->prev;
7897 return max_mp;
7900 /* Add a constant to the minipool for a forward reference. Returns the
7901 node added or NULL if the constant will not fit in this pool. */
7902 static Mnode *
7903 add_minipool_forward_ref (Mfix *fix)
7905 /* If set, max_mp is the first pool_entry that has a lower
7906 constraint than the one we are trying to add. */
7907 Mnode * max_mp = NULL;
7908 HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
7909 Mnode * mp;
7911 /* If the minipool starts before the end of FIX->INSN then this FIX
7912 can not be placed into the current pool. Furthermore, adding the
7913 new constant pool entry may cause the pool to start FIX_SIZE bytes
7914 earlier. */
7915 if (minipool_vector_head &&
7916 (fix->address + get_attr_length (fix->insn)
7917 >= minipool_vector_head->max_address - fix->fix_size))
7918 return NULL;
7920 /* Scan the pool to see if a constant with the same value has
7921 already been added. While we are doing this, also note the
7922 location where we must insert the constant if it doesn't already
7923 exist. */
7924 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7926 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7927 && fix->mode == mp->mode
7928 && (GET_CODE (fix->value) != CODE_LABEL
7929 || (CODE_LABEL_NUMBER (fix->value)
7930 == CODE_LABEL_NUMBER (mp->value)))
7931 && rtx_equal_p (fix->value, mp->value))
7933 /* More than one fix references this entry. */
7934 mp->refcount++;
7935 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7938 /* Note the insertion point if necessary. */
7939 if (max_mp == NULL
7940 && mp->max_address > max_address)
7941 max_mp = mp;
7943 /* If we are inserting an 8-bytes aligned quantity and
7944 we have not already found an insertion point, then
7945 make sure that all such 8-byte aligned quantities are
7946 placed at the start of the pool. */
7947 if (ARM_DOUBLEWORD_ALIGN
7948 && max_mp == NULL
7949 && fix->fix_size == 8
7950 && mp->fix_size != 8)
7952 max_mp = mp;
7953 max_address = mp->max_address;
7957 /* The value is not currently in the minipool, so we need to create
7958 a new entry for it. If MAX_MP is NULL, the entry will be put on
7959 the end of the list since the placement is less constrained than
7960 any existing entry. Otherwise, we insert the new fix before
7961 MAX_MP and, if necessary, adjust the constraints on the other
7962 entries. */
7963 mp = XNEW (Mnode);
7964 mp->fix_size = fix->fix_size;
7965 mp->mode = fix->mode;
7966 mp->value = fix->value;
7967 mp->refcount = 1;
7968 /* Not yet required for a backwards ref. */
7969 mp->min_address = -65536;
7971 if (max_mp == NULL)
7973 mp->max_address = max_address;
7974 mp->next = NULL;
7975 mp->prev = minipool_vector_tail;
7977 if (mp->prev == NULL)
7979 minipool_vector_head = mp;
7980 minipool_vector_label = gen_label_rtx ();
7982 else
7983 mp->prev->next = mp;
7985 minipool_vector_tail = mp;
7987 else
7989 if (max_address > max_mp->max_address - mp->fix_size)
7990 mp->max_address = max_mp->max_address - mp->fix_size;
7991 else
7992 mp->max_address = max_address;
7994 mp->next = max_mp;
7995 mp->prev = max_mp->prev;
7996 max_mp->prev = mp;
7997 if (mp->prev != NULL)
7998 mp->prev->next = mp;
7999 else
8000 minipool_vector_head = mp;
8003 /* Save the new entry. */
8004 max_mp = mp;
8006 /* Scan over the preceding entries and adjust their addresses as
8007 required. */
8008 while (mp->prev != NULL
8009 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
8011 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
8012 mp = mp->prev;
8015 return max_mp;
8018 static Mnode *
8019 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
8020 HOST_WIDE_INT min_address)
8022 HOST_WIDE_INT offset;
8024 /* The code below assumes these are different. */
8025 gcc_assert (mp != min_mp);
8027 if (min_mp == NULL)
8029 if (min_address > mp->min_address)
8030 mp->min_address = min_address;
8032 else
8034 /* We will adjust this below if it is too loose. */
8035 mp->min_address = min_address;
8037 /* Unlink MP from its current position. Since min_mp is non-null,
8038 mp->next must be non-null. */
8039 mp->next->prev = mp->prev;
8040 if (mp->prev != NULL)
8041 mp->prev->next = mp->next;
8042 else
8043 minipool_vector_head = mp->next;
8045 /* Reinsert it after MIN_MP. */
8046 mp->prev = min_mp;
8047 mp->next = min_mp->next;
8048 min_mp->next = mp;
8049 if (mp->next != NULL)
8050 mp->next->prev = mp;
8051 else
8052 minipool_vector_tail = mp;
8055 min_mp = mp;
8057 offset = 0;
8058 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
8060 mp->offset = offset;
8061 if (mp->refcount > 0)
8062 offset += mp->fix_size;
8064 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
8065 mp->next->min_address = mp->min_address + mp->fix_size;
8068 return min_mp;
8071 /* Add a constant to the minipool for a backward reference. Returns the
8072 node added or NULL if the constant will not fit in this pool.
8074 Note that the code for insertion for a backwards reference can be
8075 somewhat confusing because the calculated offsets for each fix do
8076 not take into account the size of the pool (which is still under
8077 construction. */
8078 static Mnode *
8079 add_minipool_backward_ref (Mfix *fix)
8081 /* If set, min_mp is the last pool_entry that has a lower constraint
8082 than the one we are trying to add. */
8083 Mnode *min_mp = NULL;
8084 /* This can be negative, since it is only a constraint. */
8085 HOST_WIDE_INT min_address = fix->address - fix->backwards;
8086 Mnode *mp;
8088 /* If we can't reach the current pool from this insn, or if we can't
8089 insert this entry at the end of the pool without pushing other
8090 fixes out of range, then we don't try. This ensures that we
8091 can't fail later on. */
8092 if (min_address >= minipool_barrier->address
8093 || (minipool_vector_tail->min_address + fix->fix_size
8094 >= minipool_barrier->address))
8095 return NULL;
8097 /* Scan the pool to see if a constant with the same value has
8098 already been added. While we are doing this, also note the
8099 location where we must insert the constant if it doesn't already
8100 exist. */
8101 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
8103 if (GET_CODE (fix->value) == GET_CODE (mp->value)
8104 && fix->mode == mp->mode
8105 && (GET_CODE (fix->value) != CODE_LABEL
8106 || (CODE_LABEL_NUMBER (fix->value)
8107 == CODE_LABEL_NUMBER (mp->value)))
8108 && rtx_equal_p (fix->value, mp->value)
8109 /* Check that there is enough slack to move this entry to the
8110 end of the table (this is conservative). */
8111 && (mp->max_address
8112 > (minipool_barrier->address
8113 + minipool_vector_tail->offset
8114 + minipool_vector_tail->fix_size)))
8116 mp->refcount++;
8117 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
8120 if (min_mp != NULL)
8121 mp->min_address += fix->fix_size;
8122 else
8124 /* Note the insertion point if necessary. */
8125 if (mp->min_address < min_address)
8127 /* For now, we do not allow the insertion of 8-byte alignment
8128 requiring nodes anywhere but at the start of the pool. */
8129 if (ARM_DOUBLEWORD_ALIGN
8130 && fix->fix_size == 8 && mp->fix_size != 8)
8131 return NULL;
8132 else
8133 min_mp = mp;
8135 else if (mp->max_address
8136 < minipool_barrier->address + mp->offset + fix->fix_size)
8138 /* Inserting before this entry would push the fix beyond
8139 its maximum address (which can happen if we have
8140 re-located a forwards fix); force the new fix to come
8141 after it. */
8142 min_mp = mp;
8143 min_address = mp->min_address + fix->fix_size;
8145 /* If we are inserting an 8-bytes aligned quantity and
8146 we have not already found an insertion point, then
8147 make sure that all such 8-byte aligned quantities are
8148 placed at the start of the pool. */
8149 else if (ARM_DOUBLEWORD_ALIGN
8150 && min_mp == NULL
8151 && fix->fix_size == 8
8152 && mp->fix_size < 8)
8154 min_mp = mp;
8155 min_address = mp->min_address + fix->fix_size;
8160 /* We need to create a new entry. */
8161 mp = XNEW (Mnode);
8162 mp->fix_size = fix->fix_size;
8163 mp->mode = fix->mode;
8164 mp->value = fix->value;
8165 mp->refcount = 1;
8166 mp->max_address = minipool_barrier->address + 65536;
8168 mp->min_address = min_address;
8170 if (min_mp == NULL)
8172 mp->prev = NULL;
8173 mp->next = minipool_vector_head;
8175 if (mp->next == NULL)
8177 minipool_vector_tail = mp;
8178 minipool_vector_label = gen_label_rtx ();
8180 else
8181 mp->next->prev = mp;
8183 minipool_vector_head = mp;
8185 else
8187 mp->next = min_mp->next;
8188 mp->prev = min_mp;
8189 min_mp->next = mp;
8191 if (mp->next != NULL)
8192 mp->next->prev = mp;
8193 else
8194 minipool_vector_tail = mp;
8197 /* Save the new entry. */
8198 min_mp = mp;
8200 if (mp->prev)
8201 mp = mp->prev;
8202 else
8203 mp->offset = 0;
8205 /* Scan over the following entries and adjust their offsets. */
8206 while (mp->next != NULL)
8208 if (mp->next->min_address < mp->min_address + mp->fix_size)
8209 mp->next->min_address = mp->min_address + mp->fix_size;
8211 if (mp->refcount)
8212 mp->next->offset = mp->offset + mp->fix_size;
8213 else
8214 mp->next->offset = mp->offset;
8216 mp = mp->next;
8219 return min_mp;
8222 static void
8223 assign_minipool_offsets (Mfix *barrier)
8225 HOST_WIDE_INT offset = 0;
8226 Mnode *mp;
8228 minipool_barrier = barrier;
8230 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
8232 mp->offset = offset;
8234 if (mp->refcount > 0)
8235 offset += mp->fix_size;
8239 /* Output the literal table */
8240 static void
8241 dump_minipool (rtx scan)
8243 Mnode * mp;
8244 Mnode * nmp;
8245 int align64 = 0;
8247 if (ARM_DOUBLEWORD_ALIGN)
8248 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
8249 if (mp->refcount > 0 && mp->fix_size == 8)
8251 align64 = 1;
8252 break;
8255 if (dump_file)
8256 fprintf (dump_file,
8257 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
8258 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
8260 scan = emit_label_after (gen_label_rtx (), scan);
8261 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
8262 scan = emit_label_after (minipool_vector_label, scan);
8264 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
8266 if (mp->refcount > 0)
8268 if (dump_file)
8270 fprintf (dump_file,
8271 ";; Offset %u, min %ld, max %ld ",
8272 (unsigned) mp->offset, (unsigned long) mp->min_address,
8273 (unsigned long) mp->max_address);
8274 arm_print_value (dump_file, mp->value);
8275 fputc ('\n', dump_file);
8278 switch (mp->fix_size)
8280 #ifdef HAVE_consttable_1
8281 case 1:
8282 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
8283 break;
8285 #endif
8286 #ifdef HAVE_consttable_2
8287 case 2:
8288 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
8289 break;
8291 #endif
8292 #ifdef HAVE_consttable_4
8293 case 4:
8294 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
8295 break;
8297 #endif
8298 #ifdef HAVE_consttable_8
8299 case 8:
8300 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
8301 break;
8303 #endif
8304 default:
8305 gcc_unreachable ();
8309 nmp = mp->next;
8310 free (mp);
8313 minipool_vector_head = minipool_vector_tail = NULL;
8314 scan = emit_insn_after (gen_consttable_end (), scan);
8315 scan = emit_barrier_after (scan);
8318 /* Return the cost of forcibly inserting a barrier after INSN. */
8319 static int
8320 arm_barrier_cost (rtx insn)
8322 /* Basing the location of the pool on the loop depth is preferable,
8323 but at the moment, the basic block information seems to be
8324 corrupt by this stage of the compilation. */
8325 int base_cost = 50;
8326 rtx next = next_nonnote_insn (insn);
8328 if (next != NULL && GET_CODE (next) == CODE_LABEL)
8329 base_cost -= 20;
8331 switch (GET_CODE (insn))
8333 case CODE_LABEL:
8334 /* It will always be better to place the table before the label, rather
8335 than after it. */
8336 return 50;
8338 case INSN:
8339 case CALL_INSN:
8340 return base_cost;
8342 case JUMP_INSN:
8343 return base_cost - 10;
8345 default:
8346 return base_cost + 10;
8350 /* Find the best place in the insn stream in the range
8351 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
8352 Create the barrier by inserting a jump and add a new fix entry for
8353 it. */
8354 static Mfix *
8355 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
8357 HOST_WIDE_INT count = 0;
8358 rtx barrier;
8359 rtx from = fix->insn;
8360 /* The instruction after which we will insert the jump. */
8361 rtx selected = NULL;
8362 int selected_cost;
8363 /* The address at which the jump instruction will be placed. */
8364 HOST_WIDE_INT selected_address;
8365 Mfix * new_fix;
8366 HOST_WIDE_INT max_count = max_address - fix->address;
8367 rtx label = gen_label_rtx ();
8369 selected_cost = arm_barrier_cost (from);
8370 selected_address = fix->address;
8372 while (from && count < max_count)
8374 rtx tmp;
8375 int new_cost;
8377 /* This code shouldn't have been called if there was a natural barrier
8378 within range. */
8379 gcc_assert (GET_CODE (from) != BARRIER);
8381 /* Count the length of this insn. */
8382 count += get_attr_length (from);
8384 /* If there is a jump table, add its length. */
8385 tmp = is_jump_table (from);
8386 if (tmp != NULL)
8388 count += get_jump_table_size (tmp);
8390 /* Jump tables aren't in a basic block, so base the cost on
8391 the dispatch insn. If we select this location, we will
8392 still put the pool after the table. */
8393 new_cost = arm_barrier_cost (from);
8395 if (count < max_count
8396 && (!selected || new_cost <= selected_cost))
8398 selected = tmp;
8399 selected_cost = new_cost;
8400 selected_address = fix->address + count;
8403 /* Continue after the dispatch table. */
8404 from = NEXT_INSN (tmp);
8405 continue;
8408 new_cost = arm_barrier_cost (from);
8410 if (count < max_count
8411 && (!selected || new_cost <= selected_cost))
8413 selected = from;
8414 selected_cost = new_cost;
8415 selected_address = fix->address + count;
8418 from = NEXT_INSN (from);
8421 /* Make sure that we found a place to insert the jump. */
8422 gcc_assert (selected);
8424 /* Create a new JUMP_INSN that branches around a barrier. */
8425 from = emit_jump_insn_after (gen_jump (label), selected);
8426 JUMP_LABEL (from) = label;
8427 barrier = emit_barrier_after (from);
8428 emit_label_after (label, barrier);
8430 /* Create a minipool barrier entry for the new barrier. */
8431 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
8432 new_fix->insn = barrier;
8433 new_fix->address = selected_address;
8434 new_fix->next = fix->next;
8435 fix->next = new_fix;
8437 return new_fix;
8440 /* Record that there is a natural barrier in the insn stream at
8441 ADDRESS. */
8442 static void
8443 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
8445 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
8447 fix->insn = insn;
8448 fix->address = address;
8450 fix->next = NULL;
8451 if (minipool_fix_head != NULL)
8452 minipool_fix_tail->next = fix;
8453 else
8454 minipool_fix_head = fix;
8456 minipool_fix_tail = fix;
8459 /* Record INSN, which will need fixing up to load a value from the
8460 minipool. ADDRESS is the offset of the insn since the start of the
8461 function; LOC is a pointer to the part of the insn which requires
8462 fixing; VALUE is the constant that must be loaded, which is of type
8463 MODE. */
8464 static void
8465 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
8466 enum machine_mode mode, rtx value)
8468 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
8470 #ifdef AOF_ASSEMBLER
8471 /* PIC symbol references need to be converted into offsets into the
8472 based area. */
8473 /* XXX This shouldn't be done here. */
8474 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
8475 value = aof_pic_entry (value);
8476 #endif /* AOF_ASSEMBLER */
8478 fix->insn = insn;
8479 fix->address = address;
8480 fix->loc = loc;
8481 fix->mode = mode;
8482 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
8483 fix->value = value;
8484 fix->forwards = get_attr_pool_range (insn);
8485 fix->backwards = get_attr_neg_pool_range (insn);
8486 fix->minipool = NULL;
8488 /* If an insn doesn't have a range defined for it, then it isn't
8489 expecting to be reworked by this code. Better to stop now than
8490 to generate duff assembly code. */
8491 gcc_assert (fix->forwards || fix->backwards);
8493 /* If an entry requires 8-byte alignment then assume all constant pools
8494 require 4 bytes of padding. Trying to do this later on a per-pool
8495 basis is awkward because existing pool entries have to be modified. */
8496 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
8497 minipool_pad = 4;
8499 if (dump_file)
8501 fprintf (dump_file,
8502 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
8503 GET_MODE_NAME (mode),
8504 INSN_UID (insn), (unsigned long) address,
8505 -1 * (long)fix->backwards, (long)fix->forwards);
8506 arm_print_value (dump_file, fix->value);
8507 fprintf (dump_file, "\n");
8510 /* Add it to the chain of fixes. */
8511 fix->next = NULL;
8513 if (minipool_fix_head != NULL)
8514 minipool_fix_tail->next = fix;
8515 else
8516 minipool_fix_head = fix;
8518 minipool_fix_tail = fix;
8521 /* Return the cost of synthesizing a 64-bit constant VAL inline.
8522 Returns the number of insns needed, or 99 if we don't know how to
8523 do it. */
8525 arm_const_double_inline_cost (rtx val)
8527 rtx lowpart, highpart;
8528 enum machine_mode mode;
8530 mode = GET_MODE (val);
8532 if (mode == VOIDmode)
8533 mode = DImode;
8535 gcc_assert (GET_MODE_SIZE (mode) == 8);
8537 lowpart = gen_lowpart (SImode, val);
8538 highpart = gen_highpart_mode (SImode, mode, val);
8540 gcc_assert (GET_CODE (lowpart) == CONST_INT);
8541 gcc_assert (GET_CODE (highpart) == CONST_INT);
8543 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
8544 NULL_RTX, NULL_RTX, 0, 0)
8545 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
8546 NULL_RTX, NULL_RTX, 0, 0));
8549 /* Return true if it is worthwhile to split a 64-bit constant into two
8550 32-bit operations. This is the case if optimizing for size, or
8551 if we have load delay slots, or if one 32-bit part can be done with
8552 a single data operation. */
8553 bool
8554 arm_const_double_by_parts (rtx val)
8556 enum machine_mode mode = GET_MODE (val);
8557 rtx part;
8559 if (optimize_size || arm_ld_sched)
8560 return true;
8562 if (mode == VOIDmode)
8563 mode = DImode;
8565 part = gen_highpart_mode (SImode, mode, val);
8567 gcc_assert (GET_CODE (part) == CONST_INT);
8569 if (const_ok_for_arm (INTVAL (part))
8570 || const_ok_for_arm (~INTVAL (part)))
8571 return true;
8573 part = gen_lowpart (SImode, val);
8575 gcc_assert (GET_CODE (part) == CONST_INT);
8577 if (const_ok_for_arm (INTVAL (part))
8578 || const_ok_for_arm (~INTVAL (part)))
8579 return true;
8581 return false;
8584 /* Scan INSN and note any of its operands that need fixing.
8585 If DO_PUSHES is false we do not actually push any of the fixups
8586 needed. The function returns TRUE if any fixups were needed/pushed.
8587 This is used by arm_memory_load_p() which needs to know about loads
8588 of constants that will be converted into minipool loads. */
8589 static bool
8590 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
8592 bool result = false;
8593 int opno;
8595 extract_insn (insn);
8597 if (!constrain_operands (1))
8598 fatal_insn_not_found (insn);
8600 if (recog_data.n_alternatives == 0)
8601 return false;
8603 /* Fill in recog_op_alt with information about the constraints of
8604 this insn. */
8605 preprocess_constraints ();
8607 for (opno = 0; opno < recog_data.n_operands; opno++)
8609 /* Things we need to fix can only occur in inputs. */
8610 if (recog_data.operand_type[opno] != OP_IN)
8611 continue;
8613 /* If this alternative is a memory reference, then any mention
8614 of constants in this alternative is really to fool reload
8615 into allowing us to accept one there. We need to fix them up
8616 now so that we output the right code. */
8617 if (recog_op_alt[opno][which_alternative].memory_ok)
8619 rtx op = recog_data.operand[opno];
8621 if (CONSTANT_P (op))
8623 if (do_pushes)
8624 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
8625 recog_data.operand_mode[opno], op);
8626 result = true;
8628 else if (GET_CODE (op) == MEM
8629 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
8630 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
8632 if (do_pushes)
8634 rtx cop = avoid_constant_pool_reference (op);
8636 /* Casting the address of something to a mode narrower
8637 than a word can cause avoid_constant_pool_reference()
8638 to return the pool reference itself. That's no good to
8639 us here. Lets just hope that we can use the
8640 constant pool value directly. */
8641 if (op == cop)
8642 cop = get_pool_constant (XEXP (op, 0));
8644 push_minipool_fix (insn, address,
8645 recog_data.operand_loc[opno],
8646 recog_data.operand_mode[opno], cop);
8649 result = true;
8654 return result;
8657 /* Gcc puts the pool in the wrong place for ARM, since we can only
8658 load addresses a limited distance around the pc. We do some
8659 special munging to move the constant pool values to the correct
8660 point in the code. */
8661 static void
8662 arm_reorg (void)
8664 rtx insn;
8665 HOST_WIDE_INT address = 0;
8666 Mfix * fix;
8668 minipool_fix_head = minipool_fix_tail = NULL;
8670 /* The first insn must always be a note, or the code below won't
8671 scan it properly. */
8672 insn = get_insns ();
8673 gcc_assert (GET_CODE (insn) == NOTE);
8674 minipool_pad = 0;
8676 /* Scan all the insns and record the operands that will need fixing. */
8677 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
8679 if (TARGET_CIRRUS_FIX_INVALID_INSNS
8680 && (arm_cirrus_insn_p (insn)
8681 || GET_CODE (insn) == JUMP_INSN
8682 || arm_memory_load_p (insn)))
8683 cirrus_reorg (insn);
8685 if (GET_CODE (insn) == BARRIER)
8686 push_minipool_barrier (insn, address);
8687 else if (INSN_P (insn))
8689 rtx table;
8691 note_invalid_constants (insn, address, true);
8692 address += get_attr_length (insn);
8694 /* If the insn is a vector jump, add the size of the table
8695 and skip the table. */
8696 if ((table = is_jump_table (insn)) != NULL)
8698 address += get_jump_table_size (table);
8699 insn = table;
8704 fix = minipool_fix_head;
8706 /* Now scan the fixups and perform the required changes. */
8707 while (fix)
8709 Mfix * ftmp;
8710 Mfix * fdel;
8711 Mfix * last_added_fix;
8712 Mfix * last_barrier = NULL;
8713 Mfix * this_fix;
8715 /* Skip any further barriers before the next fix. */
8716 while (fix && GET_CODE (fix->insn) == BARRIER)
8717 fix = fix->next;
8719 /* No more fixes. */
8720 if (fix == NULL)
8721 break;
8723 last_added_fix = NULL;
8725 for (ftmp = fix; ftmp; ftmp = ftmp->next)
8727 if (GET_CODE (ftmp->insn) == BARRIER)
8729 if (ftmp->address >= minipool_vector_head->max_address)
8730 break;
8732 last_barrier = ftmp;
8734 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
8735 break;
8737 last_added_fix = ftmp; /* Keep track of the last fix added. */
8740 /* If we found a barrier, drop back to that; any fixes that we
8741 could have reached but come after the barrier will now go in
8742 the next mini-pool. */
8743 if (last_barrier != NULL)
8745 /* Reduce the refcount for those fixes that won't go into this
8746 pool after all. */
8747 for (fdel = last_barrier->next;
8748 fdel && fdel != ftmp;
8749 fdel = fdel->next)
8751 fdel->minipool->refcount--;
8752 fdel->minipool = NULL;
8755 ftmp = last_barrier;
8757 else
8759 /* ftmp is first fix that we can't fit into this pool and
8760 there no natural barriers that we could use. Insert a
8761 new barrier in the code somewhere between the previous
8762 fix and this one, and arrange to jump around it. */
8763 HOST_WIDE_INT max_address;
8765 /* The last item on the list of fixes must be a barrier, so
8766 we can never run off the end of the list of fixes without
8767 last_barrier being set. */
8768 gcc_assert (ftmp);
8770 max_address = minipool_vector_head->max_address;
8771 /* Check that there isn't another fix that is in range that
8772 we couldn't fit into this pool because the pool was
8773 already too large: we need to put the pool before such an
8774 instruction. The pool itself may come just after the
8775 fix because create_fix_barrier also allows space for a
8776 jump instruction. */
8777 if (ftmp->address < max_address)
8778 max_address = ftmp->address + 1;
8780 last_barrier = create_fix_barrier (last_added_fix, max_address);
8783 assign_minipool_offsets (last_barrier);
8785 while (ftmp)
8787 if (GET_CODE (ftmp->insn) != BARRIER
8788 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
8789 == NULL))
8790 break;
8792 ftmp = ftmp->next;
8795 /* Scan over the fixes we have identified for this pool, fixing them
8796 up and adding the constants to the pool itself. */
8797 for (this_fix = fix; this_fix && ftmp != this_fix;
8798 this_fix = this_fix->next)
8799 if (GET_CODE (this_fix->insn) != BARRIER)
8801 rtx addr
8802 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
8803 minipool_vector_label),
8804 this_fix->minipool->offset);
8805 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
8808 dump_minipool (last_barrier->insn);
8809 fix = ftmp;
8812 /* From now on we must synthesize any constants that we can't handle
8813 directly. This can happen if the RTL gets split during final
8814 instruction generation. */
8815 after_arm_reorg = 1;
8817 /* Free the minipool memory. */
8818 obstack_free (&minipool_obstack, minipool_startobj);
8821 /* Routines to output assembly language. */
8823 /* If the rtx is the correct value then return the string of the number.
8824 In this way we can ensure that valid double constants are generated even
8825 when cross compiling. */
8826 const char *
8827 fp_immediate_constant (rtx x)
8829 REAL_VALUE_TYPE r;
8830 int i;
8832 if (!fp_consts_inited)
8833 init_fp_table ();
8835 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8836 for (i = 0; i < 8; i++)
8837 if (REAL_VALUES_EQUAL (r, values_fp[i]))
8838 return strings_fp[i];
8840 gcc_unreachable ();
8843 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
8844 static const char *
8845 fp_const_from_val (REAL_VALUE_TYPE *r)
8847 int i;
8849 if (!fp_consts_inited)
8850 init_fp_table ();
8852 for (i = 0; i < 8; i++)
8853 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
8854 return strings_fp[i];
8856 gcc_unreachable ();
8859 /* Output the operands of a LDM/STM instruction to STREAM.
8860 MASK is the ARM register set mask of which only bits 0-15 are important.
8861 REG is the base register, either the frame pointer or the stack pointer,
8862 INSTR is the possibly suffixed load or store instruction.
8863 RFE is nonzero if the instruction should also copy spsr to cpsr. */
8865 static void
8866 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
8867 unsigned long mask, int rfe)
8869 unsigned i;
8870 bool not_first = FALSE;
8872 gcc_assert (!rfe || (mask & (1 << PC_REGNUM)));
8873 fputc ('\t', stream);
8874 asm_fprintf (stream, instr, reg);
8875 fputc ('{', stream);
8877 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8878 if (mask & (1 << i))
8880 if (not_first)
8881 fprintf (stream, ", ");
8883 asm_fprintf (stream, "%r", i);
8884 not_first = TRUE;
8887 if (rfe)
8888 fprintf (stream, "}^\n");
8889 else
8890 fprintf (stream, "}\n");
8894 /* Output a FLDMD instruction to STREAM.
8895 BASE if the register containing the address.
8896 REG and COUNT specify the register range.
8897 Extra registers may be added to avoid hardware bugs.
8899 We output FLDMD even for ARMv5 VFP implementations. Although
8900 FLDMD is technically not supported until ARMv6, it is believed
8901 that all VFP implementations support its use in this context. */
8903 static void
8904 vfp_output_fldmd (FILE * stream, unsigned int base, int reg, int count)
8906 int i;
8908 /* Workaround ARM10 VFPr1 bug. */
8909 if (count == 2 && !arm_arch6)
8911 if (reg == 15)
8912 reg--;
8913 count++;
8916 /* FLDMD may not load more than 16 doubleword registers at a time. Split the
8917 load into multiple parts if we have to handle more than 16 registers. */
8918 if (count > 16)
8920 vfp_output_fldmd (stream, base, reg, 16);
8921 vfp_output_fldmd (stream, base, reg + 16, count - 16);
8922 return;
8925 fputc ('\t', stream);
8926 asm_fprintf (stream, "fldmfdd\t%r!, {", base);
8928 for (i = reg; i < reg + count; i++)
8930 if (i > reg)
8931 fputs (", ", stream);
8932 asm_fprintf (stream, "d%d", i);
8934 fputs ("}\n", stream);
8939 /* Output the assembly for a store multiple. */
8941 const char *
8942 vfp_output_fstmd (rtx * operands)
8944 char pattern[100];
8945 int p;
8946 int base;
8947 int i;
8949 strcpy (pattern, "fstmfdd\t%m0!, {%P1");
8950 p = strlen (pattern);
8952 gcc_assert (GET_CODE (operands[1]) == REG);
8954 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8955 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8957 p += sprintf (&pattern[p], ", d%d", base + i);
8959 strcpy (&pattern[p], "}");
8961 output_asm_insn (pattern, operands);
8962 return "";
8966 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
8967 number of bytes pushed. */
8969 static int
8970 vfp_emit_fstmd (int base_reg, int count)
8972 rtx par;
8973 rtx dwarf;
8974 rtx tmp, reg;
8975 int i;
8977 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8978 register pairs are stored by a store multiple insn. We avoid this
8979 by pushing an extra pair. */
8980 if (count == 2 && !arm_arch6)
8982 if (base_reg == LAST_VFP_REGNUM - 3)
8983 base_reg -= 2;
8984 count++;
8987 /* FSTMD may not store more than 16 doubleword registers at once. Split
8988 larger stores into multiple parts (up to a maximum of two, in
8989 practice). */
8990 if (count > 16)
8992 int saved;
8993 /* NOTE: base_reg is an internal register number, so each D register
8994 counts as 2. */
8995 saved = vfp_emit_fstmd (base_reg + 32, count - 16);
8996 saved += vfp_emit_fstmd (base_reg, 16);
8997 return saved;
9000 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9001 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9003 reg = gen_rtx_REG (DFmode, base_reg);
9004 base_reg += 2;
9006 XVECEXP (par, 0, 0)
9007 = gen_rtx_SET (VOIDmode,
9008 gen_frame_mem (BLKmode,
9009 gen_rtx_PRE_DEC (BLKmode,
9010 stack_pointer_rtx)),
9011 gen_rtx_UNSPEC (BLKmode,
9012 gen_rtvec (1, reg),
9013 UNSPEC_PUSH_MULT));
9015 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9016 plus_constant (stack_pointer_rtx, -(count * 8)));
9017 RTX_FRAME_RELATED_P (tmp) = 1;
9018 XVECEXP (dwarf, 0, 0) = tmp;
9020 tmp = gen_rtx_SET (VOIDmode,
9021 gen_frame_mem (DFmode, stack_pointer_rtx),
9022 reg);
9023 RTX_FRAME_RELATED_P (tmp) = 1;
9024 XVECEXP (dwarf, 0, 1) = tmp;
9026 for (i = 1; i < count; i++)
9028 reg = gen_rtx_REG (DFmode, base_reg);
9029 base_reg += 2;
9030 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9032 tmp = gen_rtx_SET (VOIDmode,
9033 gen_frame_mem (DFmode,
9034 plus_constant (stack_pointer_rtx,
9035 i * 8)),
9036 reg);
9037 RTX_FRAME_RELATED_P (tmp) = 1;
9038 XVECEXP (dwarf, 0, i + 1) = tmp;
9041 par = emit_insn (par);
9042 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9043 REG_NOTES (par));
9044 RTX_FRAME_RELATED_P (par) = 1;
9046 return count * 8;
9049 /* Emit a call instruction with pattern PAT. ADDR is the address of
9050 the call target. */
9052 void
9053 arm_emit_call_insn (rtx pat, rtx addr)
9055 rtx insn;
9057 insn = emit_call_insn (pat);
9059 /* The PIC register is live on entry to VxWorks PIC PLT entries.
9060 If the call might use such an entry, add a use of the PIC register
9061 to the instruction's CALL_INSN_FUNCTION_USAGE. */
9062 if (TARGET_VXWORKS_RTP
9063 && flag_pic
9064 && GET_CODE (addr) == SYMBOL_REF
9065 && (SYMBOL_REF_DECL (addr)
9066 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
9067 : !SYMBOL_REF_LOCAL_P (addr)))
9069 require_pic_register ();
9070 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), cfun->machine->pic_reg);
9074 /* Output a 'call' insn. */
9075 const char *
9076 output_call (rtx *operands)
9078 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
9080 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
9081 if (REGNO (operands[0]) == LR_REGNUM)
9083 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
9084 output_asm_insn ("mov%?\t%0, %|lr", operands);
9087 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
9089 if (TARGET_INTERWORK || arm_arch4t)
9090 output_asm_insn ("bx%?\t%0", operands);
9091 else
9092 output_asm_insn ("mov%?\t%|pc, %0", operands);
9094 return "";
9097 /* Output a 'call' insn that is a reference in memory. */
9098 const char *
9099 output_call_mem (rtx *operands)
9101 if (TARGET_INTERWORK && !arm_arch5)
9103 output_asm_insn ("ldr%?\t%|ip, %0", operands);
9104 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
9105 output_asm_insn ("bx%?\t%|ip", operands);
9107 else if (regno_use_in (LR_REGNUM, operands[0]))
9109 /* LR is used in the memory address. We load the address in the
9110 first instruction. It's safe to use IP as the target of the
9111 load since the call will kill it anyway. */
9112 output_asm_insn ("ldr%?\t%|ip, %0", operands);
9113 if (arm_arch5)
9114 output_asm_insn ("blx%?\t%|ip", operands);
9115 else
9117 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
9118 if (arm_arch4t)
9119 output_asm_insn ("bx%?\t%|ip", operands);
9120 else
9121 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
9124 else
9126 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
9127 output_asm_insn ("ldr%?\t%|pc, %0", operands);
9130 return "";
9134 /* Output a move from arm registers to an fpa registers.
9135 OPERANDS[0] is an fpa register.
9136 OPERANDS[1] is the first registers of an arm register pair. */
9137 const char *
9138 output_mov_long_double_fpa_from_arm (rtx *operands)
9140 int arm_reg0 = REGNO (operands[1]);
9141 rtx ops[3];
9143 gcc_assert (arm_reg0 != IP_REGNUM);
9145 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9146 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9147 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
9149 output_asm_insn ("stm%(fd%)\t%|sp!, {%0, %1, %2}", ops);
9150 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
9152 return "";
9155 /* Output a move from an fpa register to arm registers.
9156 OPERANDS[0] is the first registers of an arm register pair.
9157 OPERANDS[1] is an fpa register. */
9158 const char *
9159 output_mov_long_double_arm_from_fpa (rtx *operands)
9161 int arm_reg0 = REGNO (operands[0]);
9162 rtx ops[3];
9164 gcc_assert (arm_reg0 != IP_REGNUM);
9166 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9167 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9168 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
9170 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
9171 output_asm_insn ("ldm%(fd%)\t%|sp!, {%0, %1, %2}", ops);
9172 return "";
9175 /* Output a move from arm registers to arm registers of a long double
9176 OPERANDS[0] is the destination.
9177 OPERANDS[1] is the source. */
9178 const char *
9179 output_mov_long_double_arm_from_arm (rtx *operands)
9181 /* We have to be careful here because the two might overlap. */
9182 int dest_start = REGNO (operands[0]);
9183 int src_start = REGNO (operands[1]);
9184 rtx ops[2];
9185 int i;
9187 if (dest_start < src_start)
9189 for (i = 0; i < 3; i++)
9191 ops[0] = gen_rtx_REG (SImode, dest_start + i);
9192 ops[1] = gen_rtx_REG (SImode, src_start + i);
9193 output_asm_insn ("mov%?\t%0, %1", ops);
9196 else
9198 for (i = 2; i >= 0; i--)
9200 ops[0] = gen_rtx_REG (SImode, dest_start + i);
9201 ops[1] = gen_rtx_REG (SImode, src_start + i);
9202 output_asm_insn ("mov%?\t%0, %1", ops);
9206 return "";
9210 /* Output a move from arm registers to an fpa registers.
9211 OPERANDS[0] is an fpa register.
9212 OPERANDS[1] is the first registers of an arm register pair. */
9213 const char *
9214 output_mov_double_fpa_from_arm (rtx *operands)
9216 int arm_reg0 = REGNO (operands[1]);
9217 rtx ops[2];
9219 gcc_assert (arm_reg0 != IP_REGNUM);
9221 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9222 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9223 output_asm_insn ("stm%(fd%)\t%|sp!, {%0, %1}", ops);
9224 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
9225 return "";
9228 /* Output a move from an fpa register to arm registers.
9229 OPERANDS[0] is the first registers of an arm register pair.
9230 OPERANDS[1] is an fpa register. */
9231 const char *
9232 output_mov_double_arm_from_fpa (rtx *operands)
9234 int arm_reg0 = REGNO (operands[0]);
9235 rtx ops[2];
9237 gcc_assert (arm_reg0 != IP_REGNUM);
9239 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9240 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9241 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
9242 output_asm_insn ("ldm%(fd%)\t%|sp!, {%0, %1}", ops);
9243 return "";
9246 /* Output a move between double words.
9247 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
9248 or MEM<-REG and all MEMs must be offsettable addresses. */
9249 const char *
9250 output_move_double (rtx *operands)
9252 enum rtx_code code0 = GET_CODE (operands[0]);
9253 enum rtx_code code1 = GET_CODE (operands[1]);
9254 rtx otherops[3];
9256 if (code0 == REG)
9258 int reg0 = REGNO (operands[0]);
9260 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
9262 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
9264 switch (GET_CODE (XEXP (operands[1], 0)))
9266 case REG:
9267 output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
9268 break;
9270 case PRE_INC:
9271 gcc_assert (TARGET_LDRD);
9272 output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
9273 break;
9275 case PRE_DEC:
9276 if (TARGET_LDRD)
9277 output_asm_insn ("ldr%(d%)\t%0, [%m1, #-8]!", operands);
9278 else
9279 output_asm_insn ("ldm%(db%)\t%m1!, %M0", operands);
9280 break;
9282 case POST_INC:
9283 output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands);
9284 break;
9286 case POST_DEC:
9287 gcc_assert (TARGET_LDRD);
9288 output_asm_insn ("ldr%(d%)\t%0, [%m1], #-8", operands);
9289 break;
9291 case PRE_MODIFY:
9292 case POST_MODIFY:
9293 otherops[0] = operands[0];
9294 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
9295 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
9297 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
9299 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
9301 /* Registers overlap so split out the increment. */
9302 output_asm_insn ("add%?\t%1, %1, %2", otherops);
9303 output_asm_insn ("ldr%(d%)\t%0, [%1] @split", otherops);
9305 else
9307 /* IWMMXT allows offsets larger than ldrd can handle,
9308 fix these up with a pair of ldr. */
9309 if (GET_CODE (otherops[2]) == CONST_INT
9310 && (INTVAL(otherops[2]) <= -256
9311 || INTVAL(otherops[2]) >= 256))
9313 output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
9314 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
9315 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
9317 else
9318 output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
9321 else
9323 /* IWMMXT allows offsets larger than ldrd can handle,
9324 fix these up with a pair of ldr. */
9325 if (GET_CODE (otherops[2]) == CONST_INT
9326 && (INTVAL(otherops[2]) <= -256
9327 || INTVAL(otherops[2]) >= 256))
9329 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
9330 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
9331 otherops[0] = operands[0];
9332 output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
9334 else
9335 /* We only allow constant increments, so this is safe. */
9336 output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
9338 break;
9340 case LABEL_REF:
9341 case CONST:
9342 output_asm_insn ("adr%?\t%0, %1", operands);
9343 output_asm_insn ("ldm%(ia%)\t%0, %M0", operands);
9344 break;
9346 /* ??? This needs checking for thumb2. */
9347 default:
9348 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
9349 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
9351 otherops[0] = operands[0];
9352 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
9353 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
9355 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
9357 if (GET_CODE (otherops[2]) == CONST_INT)
9359 switch ((int) INTVAL (otherops[2]))
9361 case -8:
9362 output_asm_insn ("ldm%(db%)\t%1, %M0", otherops);
9363 return "";
9364 case -4:
9365 if (TARGET_THUMB2)
9366 break;
9367 output_asm_insn ("ldm%(da%)\t%1, %M0", otherops);
9368 return "";
9369 case 4:
9370 if (TARGET_THUMB2)
9371 break;
9372 output_asm_insn ("ldm%(ib%)\t%1, %M0", otherops);
9373 return "";
9376 if (TARGET_LDRD
9377 && (GET_CODE (otherops[2]) == REG
9378 || (GET_CODE (otherops[2]) == CONST_INT
9379 && INTVAL (otherops[2]) > -256
9380 && INTVAL (otherops[2]) < 256)))
9382 if (reg_overlap_mentioned_p (otherops[0],
9383 otherops[2]))
9385 /* Swap base and index registers over to
9386 avoid a conflict. */
9387 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
9388 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
9390 /* If both registers conflict, it will usually
9391 have been fixed by a splitter. */
9392 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
9394 output_asm_insn ("add%?\t%1, %1, %2", otherops);
9395 output_asm_insn ("ldr%(d%)\t%0, [%1]",
9396 otherops);
9398 else
9399 output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops);
9400 return "";
9403 if (GET_CODE (otherops[2]) == CONST_INT)
9405 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
9406 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
9407 else
9408 output_asm_insn ("add%?\t%0, %1, %2", otherops);
9410 else
9411 output_asm_insn ("add%?\t%0, %1, %2", otherops);
9413 else
9414 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
9416 return "ldm%(ia%)\t%0, %M0";
9418 else
9420 otherops[1] = adjust_address (operands[1], SImode, 4);
9421 /* Take care of overlapping base/data reg. */
9422 if (reg_mentioned_p (operands[0], operands[1]))
9424 output_asm_insn ("ldr%?\t%0, %1", otherops);
9425 output_asm_insn ("ldr%?\t%0, %1", operands);
9427 else
9429 output_asm_insn ("ldr%?\t%0, %1", operands);
9430 output_asm_insn ("ldr%?\t%0, %1", otherops);
9435 else
9437 /* Constraints should ensure this. */
9438 gcc_assert (code0 == MEM && code1 == REG);
9439 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
9441 switch (GET_CODE (XEXP (operands[0], 0)))
9443 case REG:
9444 output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
9445 break;
9447 case PRE_INC:
9448 gcc_assert (TARGET_LDRD);
9449 output_asm_insn ("str%(d%)\t%1, [%m0, #8]!", operands);
9450 break;
9452 case PRE_DEC:
9453 if (TARGET_LDRD)
9454 output_asm_insn ("str%(d%)\t%1, [%m0, #-8]!", operands);
9455 else
9456 output_asm_insn ("stm%(db%)\t%m0!, %M1", operands);
9457 break;
9459 case POST_INC:
9460 output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands);
9461 break;
9463 case POST_DEC:
9464 gcc_assert (TARGET_LDRD);
9465 output_asm_insn ("str%(d%)\t%1, [%m0], #-8", operands);
9466 break;
9468 case PRE_MODIFY:
9469 case POST_MODIFY:
9470 otherops[0] = operands[1];
9471 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
9472 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
9474 /* IWMMXT allows offsets larger than ldrd can handle,
9475 fix these up with a pair of ldr. */
9476 if (GET_CODE (otherops[2]) == CONST_INT
9477 && (INTVAL(otherops[2]) <= -256
9478 || INTVAL(otherops[2]) >= 256))
9480 rtx reg1;
9481 reg1 = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
9482 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
9484 output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
9485 otherops[0] = reg1;
9486 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
9488 else
9490 otherops[0] = reg1;
9491 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
9492 otherops[0] = operands[1];
9493 output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
9496 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
9497 output_asm_insn ("str%(d%)\t%0, [%1, %2]!", otherops);
9498 else
9499 output_asm_insn ("str%(d%)\t%0, [%1], %2", otherops);
9500 break;
9502 case PLUS:
9503 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
9504 if (GET_CODE (otherops[2]) == CONST_INT)
9506 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
9508 case -8:
9509 output_asm_insn ("stm%(db%)\t%m0, %M1", operands);
9510 return "";
9512 case -4:
9513 if (TARGET_THUMB2)
9514 break;
9515 output_asm_insn ("stm%(da%)\t%m0, %M1", operands);
9516 return "";
9518 case 4:
9519 if (TARGET_THUMB2)
9520 break;
9521 output_asm_insn ("stm%(ib%)\t%m0, %M1", operands);
9522 return "";
9525 if (TARGET_LDRD
9526 && (GET_CODE (otherops[2]) == REG
9527 || (GET_CODE (otherops[2]) == CONST_INT
9528 && INTVAL (otherops[2]) > -256
9529 && INTVAL (otherops[2]) < 256)))
9531 otherops[0] = operands[1];
9532 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
9533 output_asm_insn ("str%(d%)\t%0, [%1, %2]", otherops);
9534 return "";
9536 /* Fall through */
9538 default:
9539 otherops[0] = adjust_address (operands[0], SImode, 4);
9540 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
9541 output_asm_insn ("str%?\t%1, %0", operands);
9542 output_asm_insn ("str%?\t%1, %0", otherops);
9546 return "";
9549 /* Output a VFP load or store instruction. */
9551 const char *
9552 output_move_vfp (rtx *operands)
9554 rtx reg, mem, addr, ops[2];
9555 int load = REG_P (operands[0]);
9556 int dp = GET_MODE_SIZE (GET_MODE (operands[0])) == 8;
9557 int integer_p = GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT;
9558 const char *template;
9559 char buff[50];
9561 reg = operands[!load];
9562 mem = operands[load];
9564 gcc_assert (REG_P (reg));
9565 gcc_assert (IS_VFP_REGNUM (REGNO (reg)));
9566 gcc_assert (GET_MODE (reg) == SFmode
9567 || GET_MODE (reg) == DFmode
9568 || GET_MODE (reg) == SImode
9569 || GET_MODE (reg) == DImode);
9570 gcc_assert (MEM_P (mem));
9572 addr = XEXP (mem, 0);
9574 switch (GET_CODE (addr))
9576 case PRE_DEC:
9577 template = "f%smdb%c%%?\t%%0!, {%%%s1}%s";
9578 ops[0] = XEXP (addr, 0);
9579 ops[1] = reg;
9580 break;
9582 case POST_INC:
9583 template = "f%smia%c%%?\t%%0!, {%%%s1}%s";
9584 ops[0] = XEXP (addr, 0);
9585 ops[1] = reg;
9586 break;
9588 default:
9589 template = "f%s%c%%?\t%%%s0, %%1%s";
9590 ops[0] = reg;
9591 ops[1] = mem;
9592 break;
9595 sprintf (buff, template,
9596 load ? "ld" : "st",
9597 dp ? 'd' : 's',
9598 dp ? "P" : "",
9599 integer_p ? "\t%@ int" : "");
9600 output_asm_insn (buff, ops);
9602 return "";
9605 /* Output an ADD r, s, #n where n may be too big for one instruction.
9606 If adding zero to one register, output nothing. */
9607 const char *
9608 output_add_immediate (rtx *operands)
9610 HOST_WIDE_INT n = INTVAL (operands[2]);
9612 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
9614 if (n < 0)
9615 output_multi_immediate (operands,
9616 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
9617 -n);
9618 else
9619 output_multi_immediate (operands,
9620 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
9624 return "";
9627 /* Output a multiple immediate operation.
9628 OPERANDS is the vector of operands referred to in the output patterns.
9629 INSTR1 is the output pattern to use for the first constant.
9630 INSTR2 is the output pattern to use for subsequent constants.
9631 IMMED_OP is the index of the constant slot in OPERANDS.
9632 N is the constant value. */
9633 static const char *
9634 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
9635 int immed_op, HOST_WIDE_INT n)
9637 #if HOST_BITS_PER_WIDE_INT > 32
9638 n &= 0xffffffff;
9639 #endif
9641 if (n == 0)
9643 /* Quick and easy output. */
9644 operands[immed_op] = const0_rtx;
9645 output_asm_insn (instr1, operands);
9647 else
9649 int i;
9650 const char * instr = instr1;
9652 /* Note that n is never zero here (which would give no output). */
9653 for (i = 0; i < 32; i += 2)
9655 if (n & (3 << i))
9657 operands[immed_op] = GEN_INT (n & (255 << i));
9658 output_asm_insn (instr, operands);
9659 instr = instr2;
9660 i += 6;
9665 return "";
9668 /* Return the name of a shifter operation. */
9669 static const char *
9670 arm_shift_nmem(enum rtx_code code)
9672 switch (code)
9674 case ASHIFT:
9675 return ARM_LSL_NAME;
9677 case ASHIFTRT:
9678 return "asr";
9680 case LSHIFTRT:
9681 return "lsr";
9683 case ROTATERT:
9684 return "ror";
9686 default:
9687 abort();
9691 /* Return the appropriate ARM instruction for the operation code.
9692 The returned result should not be overwritten. OP is the rtx of the
9693 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
9694 was shifted. */
9695 const char *
9696 arithmetic_instr (rtx op, int shift_first_arg)
9698 switch (GET_CODE (op))
9700 case PLUS:
9701 return "add";
9703 case MINUS:
9704 return shift_first_arg ? "rsb" : "sub";
9706 case IOR:
9707 return "orr";
9709 case XOR:
9710 return "eor";
9712 case AND:
9713 return "and";
9715 case ASHIFT:
9716 case ASHIFTRT:
9717 case LSHIFTRT:
9718 case ROTATERT:
9719 return arm_shift_nmem(GET_CODE(op));
9721 default:
9722 gcc_unreachable ();
9726 /* Ensure valid constant shifts and return the appropriate shift mnemonic
9727 for the operation code. The returned result should not be overwritten.
9728 OP is the rtx code of the shift.
9729 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
9730 shift. */
9731 static const char *
9732 shift_op (rtx op, HOST_WIDE_INT *amountp)
9734 const char * mnem;
9735 enum rtx_code code = GET_CODE (op);
9737 switch (GET_CODE (XEXP (op, 1)))
9739 case REG:
9740 case SUBREG:
9741 *amountp = -1;
9742 break;
9744 case CONST_INT:
9745 *amountp = INTVAL (XEXP (op, 1));
9746 break;
9748 default:
9749 gcc_unreachable ();
9752 switch (code)
9754 case ROTATE:
9755 gcc_assert (*amountp != -1);
9756 *amountp = 32 - *amountp;
9757 code = ROTATERT;
9759 /* Fall through. */
9761 case ASHIFT:
9762 case ASHIFTRT:
9763 case LSHIFTRT:
9764 case ROTATERT:
9765 mnem = arm_shift_nmem(code);
9766 break;
9768 case MULT:
9769 /* We never have to worry about the amount being other than a
9770 power of 2, since this case can never be reloaded from a reg. */
9771 gcc_assert (*amountp != -1);
9772 *amountp = int_log2 (*amountp);
9773 return ARM_LSL_NAME;
9775 default:
9776 gcc_unreachable ();
9779 if (*amountp != -1)
9781 /* This is not 100% correct, but follows from the desire to merge
9782 multiplication by a power of 2 with the recognizer for a
9783 shift. >=32 is not a valid shift for "lsl", so we must try and
9784 output a shift that produces the correct arithmetical result.
9785 Using lsr #32 is identical except for the fact that the carry bit
9786 is not set correctly if we set the flags; but we never use the
9787 carry bit from such an operation, so we can ignore that. */
9788 if (code == ROTATERT)
9789 /* Rotate is just modulo 32. */
9790 *amountp &= 31;
9791 else if (*amountp != (*amountp & 31))
9793 if (code == ASHIFT)
9794 mnem = "lsr";
9795 *amountp = 32;
9798 /* Shifts of 0 are no-ops. */
9799 if (*amountp == 0)
9800 return NULL;
9803 return mnem;
9806 /* Obtain the shift from the POWER of two. */
9808 static HOST_WIDE_INT
9809 int_log2 (HOST_WIDE_INT power)
9811 HOST_WIDE_INT shift = 0;
9813 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
9815 gcc_assert (shift <= 31);
9816 shift++;
9819 return shift;
9822 /* Output a .ascii pseudo-op, keeping track of lengths. This is
9823 because /bin/as is horribly restrictive. The judgement about
9824 whether or not each character is 'printable' (and can be output as
9825 is) or not (and must be printed with an octal escape) must be made
9826 with reference to the *host* character set -- the situation is
9827 similar to that discussed in the comments above pp_c_char in
9828 c-pretty-print.c. */
9830 #define MAX_ASCII_LEN 51
9832 void
9833 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
9835 int i;
9836 int len_so_far = 0;
9838 fputs ("\t.ascii\t\"", stream);
9840 for (i = 0; i < len; i++)
9842 int c = p[i];
9844 if (len_so_far >= MAX_ASCII_LEN)
9846 fputs ("\"\n\t.ascii\t\"", stream);
9847 len_so_far = 0;
9850 if (ISPRINT (c))
9852 if (c == '\\' || c == '\"')
9854 putc ('\\', stream);
9855 len_so_far++;
9857 putc (c, stream);
9858 len_so_far++;
9860 else
9862 fprintf (stream, "\\%03o", c);
9863 len_so_far += 4;
9867 fputs ("\"\n", stream);
9870 /* Compute the register save mask for registers 0 through 12
9871 inclusive. This code is used by arm_compute_save_reg_mask. */
9873 static unsigned long
9874 arm_compute_save_reg0_reg12_mask (void)
9876 unsigned long func_type = arm_current_func_type ();
9877 unsigned long save_reg_mask = 0;
9878 unsigned int reg;
9880 if (IS_INTERRUPT (func_type))
9882 unsigned int max_reg;
9883 /* Interrupt functions must not corrupt any registers,
9884 even call clobbered ones. If this is a leaf function
9885 we can just examine the registers used by the RTL, but
9886 otherwise we have to assume that whatever function is
9887 called might clobber anything, and so we have to save
9888 all the call-clobbered registers as well. */
9889 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
9890 /* FIQ handlers have registers r8 - r12 banked, so
9891 we only need to check r0 - r7, Normal ISRs only
9892 bank r14 and r15, so we must check up to r12.
9893 r13 is the stack pointer which is always preserved,
9894 so we do not need to consider it here. */
9895 max_reg = 7;
9896 else
9897 max_reg = 12;
9899 for (reg = 0; reg <= max_reg; reg++)
9900 if (df_regs_ever_live_p (reg)
9901 || (! current_function_is_leaf && call_used_regs[reg]))
9902 save_reg_mask |= (1 << reg);
9904 /* Also save the pic base register if necessary. */
9905 if (flag_pic
9906 && !TARGET_SINGLE_PIC_BASE
9907 && arm_pic_register != INVALID_REGNUM
9908 && current_function_uses_pic_offset_table)
9909 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9911 else
9913 /* In arm mode we handle r11 (FP) as a special case. */
9914 unsigned last_reg = TARGET_ARM ? 10 : 11;
9916 /* In the normal case we only need to save those registers
9917 which are call saved and which are used by this function. */
9918 for (reg = 0; reg <= last_reg; reg++)
9919 if (df_regs_ever_live_p (reg) && ! call_used_regs[reg])
9920 save_reg_mask |= (1 << reg);
9922 /* Handle the frame pointer as a special case. */
9923 if (! TARGET_APCS_FRAME
9924 && ! frame_pointer_needed
9925 && df_regs_ever_live_p (HARD_FRAME_POINTER_REGNUM)
9926 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9927 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9928 else if (! TARGET_APCS_FRAME
9929 && ! frame_pointer_needed
9930 && df_regs_ever_live_p (HARD_FRAME_POINTER_REGNUM)
9931 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9932 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9934 /* If we aren't loading the PIC register,
9935 don't stack it even though it may be live. */
9936 if (flag_pic
9937 && !TARGET_SINGLE_PIC_BASE
9938 && arm_pic_register != INVALID_REGNUM
9939 && (df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM)
9940 || current_function_uses_pic_offset_table))
9941 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9943 /* The prologue will copy SP into R0, so save it. */
9944 if (IS_STACKALIGN (func_type))
9945 save_reg_mask |= 1;
9948 /* Save registers so the exception handler can modify them. */
9949 if (current_function_calls_eh_return)
9951 unsigned int i;
9953 for (i = 0; ; i++)
9955 reg = EH_RETURN_DATA_REGNO (i);
9956 if (reg == INVALID_REGNUM)
9957 break;
9958 save_reg_mask |= 1 << reg;
9962 return save_reg_mask;
9966 /* Compute a bit mask of which registers need to be
9967 saved on the stack for the current function. */
9969 static unsigned long
9970 arm_compute_save_reg_mask (void)
9972 unsigned int save_reg_mask = 0;
9973 unsigned long func_type = arm_current_func_type ();
9974 unsigned int reg;
9976 if (IS_NAKED (func_type))
9977 /* This should never really happen. */
9978 return 0;
9980 /* If we are creating a stack frame, then we must save the frame pointer,
9981 IP (which will hold the old stack pointer), LR and the PC. */
9982 if (frame_pointer_needed && TARGET_ARM)
9983 save_reg_mask |=
9984 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9985 | (1 << IP_REGNUM)
9986 | (1 << LR_REGNUM)
9987 | (1 << PC_REGNUM);
9989 /* Volatile functions do not return, so there
9990 is no need to save any other registers. */
9991 if (IS_VOLATILE (func_type))
9992 return save_reg_mask;
9994 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9996 /* Decide if we need to save the link register.
9997 Interrupt routines have their own banked link register,
9998 so they never need to save it.
9999 Otherwise if we do not use the link register we do not need to save
10000 it. If we are pushing other registers onto the stack however, we
10001 can save an instruction in the epilogue by pushing the link register
10002 now and then popping it back into the PC. This incurs extra memory
10003 accesses though, so we only do it when optimizing for size, and only
10004 if we know that we will not need a fancy return sequence. */
10005 if (df_regs_ever_live_p (LR_REGNUM)
10006 || (save_reg_mask
10007 && optimize_size
10008 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
10009 && !current_function_calls_eh_return))
10010 save_reg_mask |= 1 << LR_REGNUM;
10012 if (cfun->machine->lr_save_eliminated)
10013 save_reg_mask &= ~ (1 << LR_REGNUM);
10015 if (TARGET_REALLY_IWMMXT
10016 && ((bit_count (save_reg_mask)
10017 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
10019 /* The total number of registers that are going to be pushed
10020 onto the stack is odd. We need to ensure that the stack
10021 is 64-bit aligned before we start to save iWMMXt registers,
10022 and also before we start to create locals. (A local variable
10023 might be a double or long long which we will load/store using
10024 an iWMMXt instruction). Therefore we need to push another
10025 ARM register, so that the stack will be 64-bit aligned. We
10026 try to avoid using the arg registers (r0 -r3) as they might be
10027 used to pass values in a tail call. */
10028 for (reg = 4; reg <= 12; reg++)
10029 if ((save_reg_mask & (1 << reg)) == 0)
10030 break;
10032 if (reg <= 12)
10033 save_reg_mask |= (1 << reg);
10034 else
10036 cfun->machine->sibcall_blocked = 1;
10037 save_reg_mask |= (1 << 3);
10041 /* We may need to push an additional register for use initializing the
10042 PIC base register. */
10043 if (TARGET_THUMB2 && IS_NESTED (func_type) && flag_pic
10044 && (save_reg_mask & THUMB2_WORK_REGS) == 0)
10046 reg = thumb_find_work_register (1 << 4);
10047 if (!call_used_regs[reg])
10048 save_reg_mask |= (1 << reg);
10051 return save_reg_mask;
10055 /* Compute a bit mask of which registers need to be
10056 saved on the stack for the current function. */
10057 static unsigned long
10058 thumb1_compute_save_reg_mask (void)
10060 unsigned long mask;
10061 unsigned reg;
10063 mask = 0;
10064 for (reg = 0; reg < 12; reg ++)
10065 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10066 mask |= 1 << reg;
10068 if (flag_pic
10069 && !TARGET_SINGLE_PIC_BASE
10070 && arm_pic_register != INVALID_REGNUM
10071 && current_function_uses_pic_offset_table)
10072 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
10074 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
10075 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
10076 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
10078 /* LR will also be pushed if any lo regs are pushed. */
10079 if (mask & 0xff || thumb_force_lr_save ())
10080 mask |= (1 << LR_REGNUM);
10082 /* Make sure we have a low work register if we need one.
10083 We will need one if we are going to push a high register,
10084 but we are not currently intending to push a low register. */
10085 if ((mask & 0xff) == 0
10086 && ((mask & 0x0f00) || TARGET_BACKTRACE))
10088 /* Use thumb_find_work_register to choose which register
10089 we will use. If the register is live then we will
10090 have to push it. Use LAST_LO_REGNUM as our fallback
10091 choice for the register to select. */
10092 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
10094 if (! call_used_regs[reg])
10095 mask |= 1 << reg;
10098 return mask;
10102 /* Return the number of bytes required to save VFP registers. */
10103 static int
10104 arm_get_vfp_saved_size (void)
10106 unsigned int regno;
10107 int count;
10108 int saved;
10110 saved = 0;
10111 /* Space for saved VFP registers. */
10112 if (TARGET_HARD_FLOAT && TARGET_VFP)
10114 count = 0;
10115 for (regno = FIRST_VFP_REGNUM;
10116 regno < LAST_VFP_REGNUM;
10117 regno += 2)
10119 if ((!df_regs_ever_live_p (regno) || call_used_regs[regno])
10120 && (!df_regs_ever_live_p (regno + 1) || call_used_regs[regno + 1]))
10122 if (count > 0)
10124 /* Workaround ARM10 VFPr1 bug. */
10125 if (count == 2 && !arm_arch6)
10126 count++;
10127 saved += count * 8;
10129 count = 0;
10131 else
10132 count++;
10134 if (count > 0)
10136 if (count == 2 && !arm_arch6)
10137 count++;
10138 saved += count * 8;
10141 return saved;
10145 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
10146 everything bar the final return instruction. */
10147 const char *
10148 output_return_instruction (rtx operand, int really_return, int reverse)
10150 char conditional[10];
10151 char instr[100];
10152 unsigned reg;
10153 unsigned long live_regs_mask;
10154 unsigned long func_type;
10155 arm_stack_offsets *offsets;
10157 func_type = arm_current_func_type ();
10159 if (IS_NAKED (func_type))
10160 return "";
10162 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
10164 /* If this function was declared non-returning, and we have
10165 found a tail call, then we have to trust that the called
10166 function won't return. */
10167 if (really_return)
10169 rtx ops[2];
10171 /* Otherwise, trap an attempted return by aborting. */
10172 ops[0] = operand;
10173 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
10174 : "abort");
10175 assemble_external_libcall (ops[1]);
10176 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
10179 return "";
10182 gcc_assert (!current_function_calls_alloca || really_return);
10184 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
10186 return_used_this_function = 1;
10188 live_regs_mask = arm_compute_save_reg_mask ();
10190 if (live_regs_mask)
10192 const char * return_reg;
10194 /* If we do not have any special requirements for function exit
10195 (e.g. interworking) then we can load the return address
10196 directly into the PC. Otherwise we must load it into LR. */
10197 if (really_return
10198 && (IS_INTERRUPT (func_type) || !TARGET_INTERWORK))
10199 return_reg = reg_names[PC_REGNUM];
10200 else
10201 return_reg = reg_names[LR_REGNUM];
10203 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
10205 /* There are three possible reasons for the IP register
10206 being saved. 1) a stack frame was created, in which case
10207 IP contains the old stack pointer, or 2) an ISR routine
10208 corrupted it, or 3) it was saved to align the stack on
10209 iWMMXt. In case 1, restore IP into SP, otherwise just
10210 restore IP. */
10211 if (frame_pointer_needed)
10213 live_regs_mask &= ~ (1 << IP_REGNUM);
10214 live_regs_mask |= (1 << SP_REGNUM);
10216 else
10217 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
10220 /* On some ARM architectures it is faster to use LDR rather than
10221 LDM to load a single register. On other architectures, the
10222 cost is the same. In 26 bit mode, or for exception handlers,
10223 we have to use LDM to load the PC so that the CPSR is also
10224 restored. */
10225 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
10226 if (live_regs_mask == (1U << reg))
10227 break;
10229 if (reg <= LAST_ARM_REGNUM
10230 && (reg != LR_REGNUM
10231 || ! really_return
10232 || ! IS_INTERRUPT (func_type)))
10234 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
10235 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
10237 else
10239 char *p;
10240 int first = 1;
10242 /* Generate the load multiple instruction to restore the
10243 registers. Note we can get here, even if
10244 frame_pointer_needed is true, but only if sp already
10245 points to the base of the saved core registers. */
10246 if (live_regs_mask & (1 << SP_REGNUM))
10248 unsigned HOST_WIDE_INT stack_adjust;
10250 offsets = arm_get_frame_offsets ();
10251 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
10252 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
10254 if (stack_adjust && arm_arch5 && TARGET_ARM)
10255 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
10256 else
10258 /* If we can't use ldmib (SA110 bug),
10259 then try to pop r3 instead. */
10260 if (stack_adjust)
10261 live_regs_mask |= 1 << 3;
10262 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
10265 else
10266 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
10268 p = instr + strlen (instr);
10270 for (reg = 0; reg <= SP_REGNUM; reg++)
10271 if (live_regs_mask & (1 << reg))
10273 int l = strlen (reg_names[reg]);
10275 if (first)
10276 first = 0;
10277 else
10279 memcpy (p, ", ", 2);
10280 p += 2;
10283 memcpy (p, "%|", 2);
10284 memcpy (p + 2, reg_names[reg], l);
10285 p += l + 2;
10288 if (live_regs_mask & (1 << LR_REGNUM))
10290 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
10291 /* If returning from an interrupt, restore the CPSR. */
10292 if (IS_INTERRUPT (func_type))
10293 strcat (p, "^");
10295 else
10296 strcpy (p, "}");
10299 output_asm_insn (instr, & operand);
10301 /* See if we need to generate an extra instruction to
10302 perform the actual function return. */
10303 if (really_return
10304 && func_type != ARM_FT_INTERWORKED
10305 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
10307 /* The return has already been handled
10308 by loading the LR into the PC. */
10309 really_return = 0;
10313 if (really_return)
10315 switch ((int) ARM_FUNC_TYPE (func_type))
10317 case ARM_FT_ISR:
10318 case ARM_FT_FIQ:
10319 /* ??? This is wrong for unified assembly syntax. */
10320 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
10321 break;
10323 case ARM_FT_INTERWORKED:
10324 sprintf (instr, "bx%s\t%%|lr", conditional);
10325 break;
10327 case ARM_FT_EXCEPTION:
10328 /* ??? This is wrong for unified assembly syntax. */
10329 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
10330 break;
10332 default:
10333 /* Use bx if it's available. */
10334 if (arm_arch5 || arm_arch4t)
10335 sprintf (instr, "bx%s\t%%|lr", conditional);
10336 else
10337 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
10338 break;
10341 output_asm_insn (instr, & operand);
10344 return "";
10347 /* Write the function name into the code section, directly preceding
10348 the function prologue.
10350 Code will be output similar to this:
10352 .ascii "arm_poke_function_name", 0
10353 .align
10355 .word 0xff000000 + (t1 - t0)
10356 arm_poke_function_name
10357 mov ip, sp
10358 stmfd sp!, {fp, ip, lr, pc}
10359 sub fp, ip, #4
10361 When performing a stack backtrace, code can inspect the value
10362 of 'pc' stored at 'fp' + 0. If the trace function then looks
10363 at location pc - 12 and the top 8 bits are set, then we know
10364 that there is a function name embedded immediately preceding this
10365 location and has length ((pc[-3]) & 0xff000000).
10367 We assume that pc is declared as a pointer to an unsigned long.
10369 It is of no benefit to output the function name if we are assembling
10370 a leaf function. These function types will not contain a stack
10371 backtrace structure, therefore it is not possible to determine the
10372 function name. */
10373 void
10374 arm_poke_function_name (FILE *stream, const char *name)
10376 unsigned long alignlength;
10377 unsigned long length;
10378 rtx x;
10380 length = strlen (name) + 1;
10381 alignlength = ROUND_UP_WORD (length);
10383 ASM_OUTPUT_ASCII (stream, name, length);
10384 ASM_OUTPUT_ALIGN (stream, 2);
10385 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
10386 assemble_aligned_integer (UNITS_PER_WORD, x);
10389 /* Place some comments into the assembler stream
10390 describing the current function. */
10391 static void
10392 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
10394 unsigned long func_type;
10396 if (TARGET_THUMB1)
10398 thumb1_output_function_prologue (f, frame_size);
10399 return;
10402 /* Sanity check. */
10403 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
10405 func_type = arm_current_func_type ();
10407 switch ((int) ARM_FUNC_TYPE (func_type))
10409 default:
10410 case ARM_FT_NORMAL:
10411 break;
10412 case ARM_FT_INTERWORKED:
10413 asm_fprintf (f, "\t%@ Function supports interworking.\n");
10414 break;
10415 case ARM_FT_ISR:
10416 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
10417 break;
10418 case ARM_FT_FIQ:
10419 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
10420 break;
10421 case ARM_FT_EXCEPTION:
10422 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
10423 break;
10426 if (IS_NAKED (func_type))
10427 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
10429 if (IS_VOLATILE (func_type))
10430 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
10432 if (IS_NESTED (func_type))
10433 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
10434 if (IS_STACKALIGN (func_type))
10435 asm_fprintf (f, "\t%@ Stack Align: May be called with mis-aligned SP.\n");
10437 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
10438 current_function_args_size,
10439 current_function_pretend_args_size, frame_size);
10441 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
10442 frame_pointer_needed,
10443 cfun->machine->uses_anonymous_args);
10445 if (cfun->machine->lr_save_eliminated)
10446 asm_fprintf (f, "\t%@ link register save eliminated.\n");
10448 if (current_function_calls_eh_return)
10449 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
10451 #ifdef AOF_ASSEMBLER
10452 if (flag_pic)
10453 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
10454 #endif
10456 return_used_this_function = 0;
10459 const char *
10460 arm_output_epilogue (rtx sibling)
10462 int reg;
10463 unsigned long saved_regs_mask;
10464 unsigned long func_type;
10465 /* Floats_offset is the offset from the "virtual" frame. In an APCS
10466 frame that is $fp + 4 for a non-variadic function. */
10467 int floats_offset = 0;
10468 rtx operands[3];
10469 FILE * f = asm_out_file;
10470 unsigned int lrm_count = 0;
10471 int really_return = (sibling == NULL);
10472 int start_reg;
10473 arm_stack_offsets *offsets;
10475 /* If we have already generated the return instruction
10476 then it is futile to generate anything else. */
10477 if (use_return_insn (FALSE, sibling) && return_used_this_function)
10478 return "";
10480 func_type = arm_current_func_type ();
10482 if (IS_NAKED (func_type))
10483 /* Naked functions don't have epilogues. */
10484 return "";
10486 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
10488 rtx op;
10490 /* A volatile function should never return. Call abort. */
10491 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
10492 assemble_external_libcall (op);
10493 output_asm_insn ("bl\t%a0", &op);
10495 return "";
10498 /* If we are throwing an exception, then we really must be doing a
10499 return, so we can't tail-call. */
10500 gcc_assert (!current_function_calls_eh_return || really_return);
10502 offsets = arm_get_frame_offsets ();
10503 saved_regs_mask = arm_compute_save_reg_mask ();
10505 if (TARGET_IWMMXT)
10506 lrm_count = bit_count (saved_regs_mask);
10508 floats_offset = offsets->saved_args;
10509 /* Compute how far away the floats will be. */
10510 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
10511 if (saved_regs_mask & (1 << reg))
10512 floats_offset += 4;
10514 if (frame_pointer_needed && TARGET_ARM)
10516 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
10517 int vfp_offset = offsets->frame;
10519 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10521 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10522 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10524 floats_offset += 12;
10525 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
10526 reg, FP_REGNUM, floats_offset - vfp_offset);
10529 else
10531 start_reg = LAST_FPA_REGNUM;
10533 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10535 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10537 floats_offset += 12;
10539 /* We can't unstack more than four registers at once. */
10540 if (start_reg - reg == 3)
10542 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
10543 reg, FP_REGNUM, floats_offset - vfp_offset);
10544 start_reg = reg - 1;
10547 else
10549 if (reg != start_reg)
10550 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
10551 reg + 1, start_reg - reg,
10552 FP_REGNUM, floats_offset - vfp_offset);
10553 start_reg = reg - 1;
10557 /* Just in case the last register checked also needs unstacking. */
10558 if (reg != start_reg)
10559 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
10560 reg + 1, start_reg - reg,
10561 FP_REGNUM, floats_offset - vfp_offset);
10564 if (TARGET_HARD_FLOAT && TARGET_VFP)
10566 int saved_size;
10568 /* The fldmd insns do not have base+offset addressing
10569 modes, so we use IP to hold the address. */
10570 saved_size = arm_get_vfp_saved_size ();
10572 if (saved_size > 0)
10574 floats_offset += saved_size;
10575 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
10576 FP_REGNUM, floats_offset - vfp_offset);
10578 start_reg = FIRST_VFP_REGNUM;
10579 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10581 if ((!df_regs_ever_live_p (reg) || call_used_regs[reg])
10582 && (!df_regs_ever_live_p (reg + 1) || call_used_regs[reg + 1]))
10584 if (start_reg != reg)
10585 vfp_output_fldmd (f, IP_REGNUM,
10586 (start_reg - FIRST_VFP_REGNUM) / 2,
10587 (reg - start_reg) / 2);
10588 start_reg = reg + 2;
10591 if (start_reg != reg)
10592 vfp_output_fldmd (f, IP_REGNUM,
10593 (start_reg - FIRST_VFP_REGNUM) / 2,
10594 (reg - start_reg) / 2);
10597 if (TARGET_IWMMXT)
10599 /* The frame pointer is guaranteed to be non-double-word aligned.
10600 This is because it is set to (old_stack_pointer - 4) and the
10601 old_stack_pointer was double word aligned. Thus the offset to
10602 the iWMMXt registers to be loaded must also be non-double-word
10603 sized, so that the resultant address *is* double-word aligned.
10604 We can ignore floats_offset since that was already included in
10605 the live_regs_mask. */
10606 lrm_count += (lrm_count % 2 ? 2 : 1);
10608 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10609 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10611 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
10612 reg, FP_REGNUM, lrm_count * 4);
10613 lrm_count += 2;
10617 /* saved_regs_mask should contain the IP, which at the time of stack
10618 frame generation actually contains the old stack pointer. So a
10619 quick way to unwind the stack is just pop the IP register directly
10620 into the stack pointer. */
10621 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
10622 saved_regs_mask &= ~ (1 << IP_REGNUM);
10623 saved_regs_mask |= (1 << SP_REGNUM);
10625 /* There are two registers left in saved_regs_mask - LR and PC. We
10626 only need to restore the LR register (the return address), but to
10627 save time we can load it directly into the PC, unless we need a
10628 special function exit sequence, or we are not really returning. */
10629 if (really_return
10630 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
10631 && !current_function_calls_eh_return)
10632 /* Delete the LR from the register mask, so that the LR on
10633 the stack is loaded into the PC in the register mask. */
10634 saved_regs_mask &= ~ (1 << LR_REGNUM);
10635 else
10636 saved_regs_mask &= ~ (1 << PC_REGNUM);
10638 /* We must use SP as the base register, because SP is one of the
10639 registers being restored. If an interrupt or page fault
10640 happens in the ldm instruction, the SP might or might not
10641 have been restored. That would be bad, as then SP will no
10642 longer indicate the safe area of stack, and we can get stack
10643 corruption. Using SP as the base register means that it will
10644 be reset correctly to the original value, should an interrupt
10645 occur. If the stack pointer already points at the right
10646 place, then omit the subtraction. */
10647 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
10648 || current_function_calls_alloca)
10649 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
10650 4 * bit_count (saved_regs_mask));
10651 print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask, 0);
10653 if (IS_INTERRUPT (func_type))
10654 /* Interrupt handlers will have pushed the
10655 IP onto the stack, so restore it now. */
10656 print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, 1 << IP_REGNUM, 0);
10658 else
10660 HOST_WIDE_INT amount;
10661 int rfe;
10662 /* Restore stack pointer if necessary. */
10663 if (frame_pointer_needed)
10665 /* For Thumb-2 restore sp from the frame pointer.
10666 Operand restrictions mean we have to increment FP, then copy
10667 to SP. */
10668 amount = offsets->locals_base - offsets->saved_regs;
10669 operands[0] = hard_frame_pointer_rtx;
10671 else
10673 operands[0] = stack_pointer_rtx;
10674 amount = offsets->outgoing_args - offsets->saved_regs;
10677 if (amount)
10679 operands[1] = operands[0];
10680 operands[2] = GEN_INT (amount);
10681 output_add_immediate (operands);
10683 if (frame_pointer_needed)
10684 asm_fprintf (f, "\tmov\t%r, %r\n",
10685 SP_REGNUM, HARD_FRAME_POINTER_REGNUM);
10687 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10689 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
10690 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10691 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
10692 reg, SP_REGNUM);
10694 else
10696 start_reg = FIRST_FPA_REGNUM;
10698 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
10700 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10702 if (reg - start_reg == 3)
10704 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
10705 start_reg, SP_REGNUM);
10706 start_reg = reg + 1;
10709 else
10711 if (reg != start_reg)
10712 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10713 start_reg, reg - start_reg,
10714 SP_REGNUM);
10716 start_reg = reg + 1;
10720 /* Just in case the last register checked also needs unstacking. */
10721 if (reg != start_reg)
10722 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10723 start_reg, reg - start_reg, SP_REGNUM);
10726 if (TARGET_HARD_FLOAT && TARGET_VFP)
10728 start_reg = FIRST_VFP_REGNUM;
10729 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10731 if ((!df_regs_ever_live_p (reg) || call_used_regs[reg])
10732 && (!df_regs_ever_live_p (reg + 1) || call_used_regs[reg + 1]))
10734 if (start_reg != reg)
10735 vfp_output_fldmd (f, SP_REGNUM,
10736 (start_reg - FIRST_VFP_REGNUM) / 2,
10737 (reg - start_reg) / 2);
10738 start_reg = reg + 2;
10741 if (start_reg != reg)
10742 vfp_output_fldmd (f, SP_REGNUM,
10743 (start_reg - FIRST_VFP_REGNUM) / 2,
10744 (reg - start_reg) / 2);
10746 if (TARGET_IWMMXT)
10747 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10748 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
10749 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
10751 /* If we can, restore the LR into the PC. */
10752 if (ARM_FUNC_TYPE (func_type) != ARM_FT_INTERWORKED
10753 && (TARGET_ARM || ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
10754 && !IS_STACKALIGN (func_type)
10755 && really_return
10756 && current_function_pretend_args_size == 0
10757 && saved_regs_mask & (1 << LR_REGNUM)
10758 && !current_function_calls_eh_return)
10760 saved_regs_mask &= ~ (1 << LR_REGNUM);
10761 saved_regs_mask |= (1 << PC_REGNUM);
10762 rfe = IS_INTERRUPT (func_type);
10764 else
10765 rfe = 0;
10767 /* Load the registers off the stack. If we only have one register
10768 to load use the LDR instruction - it is faster. For Thumb-2
10769 always use pop and the assembler will pick the best instruction.*/
10770 if (TARGET_ARM && saved_regs_mask == (1 << LR_REGNUM)
10771 && !IS_INTERRUPT(func_type))
10773 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
10775 else if (saved_regs_mask)
10777 if (saved_regs_mask & (1 << SP_REGNUM))
10778 /* Note - write back to the stack register is not enabled
10779 (i.e. "ldmfd sp!..."). We know that the stack pointer is
10780 in the list of registers and if we add writeback the
10781 instruction becomes UNPREDICTABLE. */
10782 print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask,
10783 rfe);
10784 else if (TARGET_ARM)
10785 print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, saved_regs_mask,
10786 rfe);
10787 else
10788 print_multi_reg (f, "pop\t", SP_REGNUM, saved_regs_mask, 0);
10791 if (current_function_pretend_args_size)
10793 /* Unwind the pre-pushed regs. */
10794 operands[0] = operands[1] = stack_pointer_rtx;
10795 operands[2] = GEN_INT (current_function_pretend_args_size);
10796 output_add_immediate (operands);
10800 /* We may have already restored PC directly from the stack. */
10801 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
10802 return "";
10804 /* Stack adjustment for exception handler. */
10805 if (current_function_calls_eh_return)
10806 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
10807 ARM_EH_STACKADJ_REGNUM);
10809 /* Generate the return instruction. */
10810 switch ((int) ARM_FUNC_TYPE (func_type))
10812 case ARM_FT_ISR:
10813 case ARM_FT_FIQ:
10814 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
10815 break;
10817 case ARM_FT_EXCEPTION:
10818 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10819 break;
10821 case ARM_FT_INTERWORKED:
10822 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10823 break;
10825 default:
10826 if (IS_STACKALIGN (func_type))
10828 /* See comment in arm_expand_prologue. */
10829 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, 0);
10831 if (arm_arch5 || arm_arch4t)
10832 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10833 else
10834 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10835 break;
10838 return "";
10841 static void
10842 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10843 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
10845 arm_stack_offsets *offsets;
10847 if (TARGET_THUMB1)
10849 int regno;
10851 /* Emit any call-via-reg trampolines that are needed for v4t support
10852 of call_reg and call_value_reg type insns. */
10853 for (regno = 0; regno < LR_REGNUM; regno++)
10855 rtx label = cfun->machine->call_via[regno];
10857 if (label != NULL)
10859 switch_to_section (function_section (current_function_decl));
10860 targetm.asm_out.internal_label (asm_out_file, "L",
10861 CODE_LABEL_NUMBER (label));
10862 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
10866 /* ??? Probably not safe to set this here, since it assumes that a
10867 function will be emitted as assembly immediately after we generate
10868 RTL for it. This does not happen for inline functions. */
10869 return_used_this_function = 0;
10871 else /* TARGET_32BIT */
10873 /* We need to take into account any stack-frame rounding. */
10874 offsets = arm_get_frame_offsets ();
10876 gcc_assert (!use_return_insn (FALSE, NULL)
10877 || !return_used_this_function
10878 || offsets->saved_regs == offsets->outgoing_args
10879 || frame_pointer_needed);
10881 /* Reset the ARM-specific per-function variables. */
10882 after_arm_reorg = 0;
10886 /* Generate and emit an insn that we will recognize as a push_multi.
10887 Unfortunately, since this insn does not reflect very well the actual
10888 semantics of the operation, we need to annotate the insn for the benefit
10889 of DWARF2 frame unwind information. */
10890 static rtx
10891 emit_multi_reg_push (unsigned long mask)
10893 int num_regs = 0;
10894 int num_dwarf_regs;
10895 int i, j;
10896 rtx par;
10897 rtx dwarf;
10898 int dwarf_par_index;
10899 rtx tmp, reg;
10901 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10902 if (mask & (1 << i))
10903 num_regs++;
10905 gcc_assert (num_regs && num_regs <= 16);
10907 /* We don't record the PC in the dwarf frame information. */
10908 num_dwarf_regs = num_regs;
10909 if (mask & (1 << PC_REGNUM))
10910 num_dwarf_regs--;
10912 /* For the body of the insn we are going to generate an UNSPEC in
10913 parallel with several USEs. This allows the insn to be recognized
10914 by the push_multi pattern in the arm.md file. The insn looks
10915 something like this:
10917 (parallel [
10918 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
10919 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
10920 (use (reg:SI 11 fp))
10921 (use (reg:SI 12 ip))
10922 (use (reg:SI 14 lr))
10923 (use (reg:SI 15 pc))
10926 For the frame note however, we try to be more explicit and actually
10927 show each register being stored into the stack frame, plus a (single)
10928 decrement of the stack pointer. We do it this way in order to be
10929 friendly to the stack unwinding code, which only wants to see a single
10930 stack decrement per instruction. The RTL we generate for the note looks
10931 something like this:
10933 (sequence [
10934 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
10935 (set (mem:SI (reg:SI sp)) (reg:SI r4))
10936 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
10937 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
10938 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
10941 This sequence is used both by the code to support stack unwinding for
10942 exceptions handlers and the code to generate dwarf2 frame debugging. */
10944 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10945 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10946 dwarf_par_index = 1;
10948 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10950 if (mask & (1 << i))
10952 reg = gen_rtx_REG (SImode, i);
10954 XVECEXP (par, 0, 0)
10955 = gen_rtx_SET (VOIDmode,
10956 gen_frame_mem (BLKmode,
10957 gen_rtx_PRE_DEC (BLKmode,
10958 stack_pointer_rtx)),
10959 gen_rtx_UNSPEC (BLKmode,
10960 gen_rtvec (1, reg),
10961 UNSPEC_PUSH_MULT));
10963 if (i != PC_REGNUM)
10965 tmp = gen_rtx_SET (VOIDmode,
10966 gen_frame_mem (SImode, stack_pointer_rtx),
10967 reg);
10968 RTX_FRAME_RELATED_P (tmp) = 1;
10969 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10970 dwarf_par_index++;
10973 break;
10977 for (j = 1, i++; j < num_regs; i++)
10979 if (mask & (1 << i))
10981 reg = gen_rtx_REG (SImode, i);
10983 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10985 if (i != PC_REGNUM)
10988 = gen_rtx_SET (VOIDmode,
10989 gen_frame_mem (SImode,
10990 plus_constant (stack_pointer_rtx,
10991 4 * j)),
10992 reg);
10993 RTX_FRAME_RELATED_P (tmp) = 1;
10994 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10997 j++;
11001 par = emit_insn (par);
11003 tmp = gen_rtx_SET (VOIDmode,
11004 stack_pointer_rtx,
11005 plus_constant (stack_pointer_rtx, -4 * num_regs));
11006 RTX_FRAME_RELATED_P (tmp) = 1;
11007 XVECEXP (dwarf, 0, 0) = tmp;
11009 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
11010 REG_NOTES (par));
11011 return par;
11014 /* Calculate the size of the return value that is passed in registers. */
11015 static int
11016 arm_size_return_regs (void)
11018 enum machine_mode mode;
11020 if (current_function_return_rtx != 0)
11021 mode = GET_MODE (current_function_return_rtx);
11022 else
11023 mode = DECL_MODE (DECL_RESULT (current_function_decl));
11025 return GET_MODE_SIZE (mode);
11028 static rtx
11029 emit_sfm (int base_reg, int count)
11031 rtx par;
11032 rtx dwarf;
11033 rtx tmp, reg;
11034 int i;
11036 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
11037 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
11039 reg = gen_rtx_REG (XFmode, base_reg++);
11041 XVECEXP (par, 0, 0)
11042 = gen_rtx_SET (VOIDmode,
11043 gen_frame_mem (BLKmode,
11044 gen_rtx_PRE_DEC (BLKmode,
11045 stack_pointer_rtx)),
11046 gen_rtx_UNSPEC (BLKmode,
11047 gen_rtvec (1, reg),
11048 UNSPEC_PUSH_MULT));
11049 tmp = gen_rtx_SET (VOIDmode,
11050 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
11051 RTX_FRAME_RELATED_P (tmp) = 1;
11052 XVECEXP (dwarf, 0, 1) = tmp;
11054 for (i = 1; i < count; i++)
11056 reg = gen_rtx_REG (XFmode, base_reg++);
11057 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
11059 tmp = gen_rtx_SET (VOIDmode,
11060 gen_frame_mem (XFmode,
11061 plus_constant (stack_pointer_rtx,
11062 i * 12)),
11063 reg);
11064 RTX_FRAME_RELATED_P (tmp) = 1;
11065 XVECEXP (dwarf, 0, i + 1) = tmp;
11068 tmp = gen_rtx_SET (VOIDmode,
11069 stack_pointer_rtx,
11070 plus_constant (stack_pointer_rtx, -12 * count));
11072 RTX_FRAME_RELATED_P (tmp) = 1;
11073 XVECEXP (dwarf, 0, 0) = tmp;
11075 par = emit_insn (par);
11076 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
11077 REG_NOTES (par));
11078 return par;
11082 /* Return true if the current function needs to save/restore LR. */
11084 static bool
11085 thumb_force_lr_save (void)
11087 return !cfun->machine->lr_save_eliminated
11088 && (!leaf_function_p ()
11089 || thumb_far_jump_used_p ()
11090 || df_regs_ever_live_p (LR_REGNUM));
11094 /* Compute the distance from register FROM to register TO.
11095 These can be the arg pointer (26), the soft frame pointer (25),
11096 the stack pointer (13) or the hard frame pointer (11).
11097 In thumb mode r7 is used as the soft frame pointer, if needed.
11098 Typical stack layout looks like this:
11100 old stack pointer -> | |
11101 ----
11102 | | \
11103 | | saved arguments for
11104 | | vararg functions
11105 | | /
11107 hard FP & arg pointer -> | | \
11108 | | stack
11109 | | frame
11110 | | /
11112 | | \
11113 | | call saved
11114 | | registers
11115 soft frame pointer -> | | /
11117 | | \
11118 | | local
11119 | | variables
11120 locals base pointer -> | | /
11122 | | \
11123 | | outgoing
11124 | | arguments
11125 current stack pointer -> | | /
11128 For a given function some or all of these stack components
11129 may not be needed, giving rise to the possibility of
11130 eliminating some of the registers.
11132 The values returned by this function must reflect the behavior
11133 of arm_expand_prologue() and arm_compute_save_reg_mask().
11135 The sign of the number returned reflects the direction of stack
11136 growth, so the values are positive for all eliminations except
11137 from the soft frame pointer to the hard frame pointer.
11139 SFP may point just inside the local variables block to ensure correct
11140 alignment. */
11143 /* Calculate stack offsets. These are used to calculate register elimination
11144 offsets and in prologue/epilogue code. */
11146 static arm_stack_offsets *
11147 arm_get_frame_offsets (void)
11149 struct arm_stack_offsets *offsets;
11150 unsigned long func_type;
11151 int leaf;
11152 int saved;
11153 HOST_WIDE_INT frame_size;
11155 offsets = &cfun->machine->stack_offsets;
11157 /* We need to know if we are a leaf function. Unfortunately, it
11158 is possible to be called after start_sequence has been called,
11159 which causes get_insns to return the insns for the sequence,
11160 not the function, which will cause leaf_function_p to return
11161 the incorrect result.
11163 to know about leaf functions once reload has completed, and the
11164 frame size cannot be changed after that time, so we can safely
11165 use the cached value. */
11167 if (reload_completed)
11168 return offsets;
11170 /* Initially this is the size of the local variables. It will translated
11171 into an offset once we have determined the size of preceding data. */
11172 frame_size = ROUND_UP_WORD (get_frame_size ());
11174 leaf = leaf_function_p ();
11176 /* Space for variadic functions. */
11177 offsets->saved_args = current_function_pretend_args_size;
11179 /* In Thumb mode this is incorrect, but never used. */
11180 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
11182 if (TARGET_32BIT)
11184 unsigned int regno;
11186 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
11188 /* We know that SP will be doubleword aligned on entry, and we must
11189 preserve that condition at any subroutine call. We also require the
11190 soft frame pointer to be doubleword aligned. */
11192 if (TARGET_REALLY_IWMMXT)
11194 /* Check for the call-saved iWMMXt registers. */
11195 for (regno = FIRST_IWMMXT_REGNUM;
11196 regno <= LAST_IWMMXT_REGNUM;
11197 regno++)
11198 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
11199 saved += 8;
11202 func_type = arm_current_func_type ();
11203 if (! IS_VOLATILE (func_type))
11205 /* Space for saved FPA registers. */
11206 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
11207 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
11208 saved += 12;
11210 /* Space for saved VFP registers. */
11211 if (TARGET_HARD_FLOAT && TARGET_VFP)
11212 saved += arm_get_vfp_saved_size ();
11215 else /* TARGET_THUMB1 */
11217 saved = bit_count (thumb1_compute_save_reg_mask ()) * 4;
11218 if (TARGET_BACKTRACE)
11219 saved += 16;
11222 /* Saved registers include the stack frame. */
11223 offsets->saved_regs = offsets->saved_args + saved;
11224 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
11225 /* A leaf function does not need any stack alignment if it has nothing
11226 on the stack. */
11227 if (leaf && frame_size == 0)
11229 offsets->outgoing_args = offsets->soft_frame;
11230 offsets->locals_base = offsets->soft_frame;
11231 return offsets;
11234 /* Ensure SFP has the correct alignment. */
11235 if (ARM_DOUBLEWORD_ALIGN
11236 && (offsets->soft_frame & 7))
11237 offsets->soft_frame += 4;
11239 offsets->locals_base = offsets->soft_frame + frame_size;
11240 offsets->outgoing_args = (offsets->locals_base
11241 + current_function_outgoing_args_size);
11243 if (ARM_DOUBLEWORD_ALIGN)
11245 /* Ensure SP remains doubleword aligned. */
11246 if (offsets->outgoing_args & 7)
11247 offsets->outgoing_args += 4;
11248 gcc_assert (!(offsets->outgoing_args & 7));
11251 return offsets;
11255 /* Calculate the relative offsets for the different stack pointers. Positive
11256 offsets are in the direction of stack growth. */
11258 HOST_WIDE_INT
11259 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
11261 arm_stack_offsets *offsets;
11263 offsets = arm_get_frame_offsets ();
11265 /* OK, now we have enough information to compute the distances.
11266 There must be an entry in these switch tables for each pair
11267 of registers in ELIMINABLE_REGS, even if some of the entries
11268 seem to be redundant or useless. */
11269 switch (from)
11271 case ARG_POINTER_REGNUM:
11272 switch (to)
11274 case THUMB_HARD_FRAME_POINTER_REGNUM:
11275 return 0;
11277 case FRAME_POINTER_REGNUM:
11278 /* This is the reverse of the soft frame pointer
11279 to hard frame pointer elimination below. */
11280 return offsets->soft_frame - offsets->saved_args;
11282 case ARM_HARD_FRAME_POINTER_REGNUM:
11283 /* If there is no stack frame then the hard
11284 frame pointer and the arg pointer coincide. */
11285 if (offsets->frame == offsets->saved_regs)
11286 return 0;
11287 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
11288 return (frame_pointer_needed
11289 && cfun->static_chain_decl != NULL
11290 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
11292 case STACK_POINTER_REGNUM:
11293 /* If nothing has been pushed on the stack at all
11294 then this will return -4. This *is* correct! */
11295 return offsets->outgoing_args - (offsets->saved_args + 4);
11297 default:
11298 gcc_unreachable ();
11300 gcc_unreachable ();
11302 case FRAME_POINTER_REGNUM:
11303 switch (to)
11305 case THUMB_HARD_FRAME_POINTER_REGNUM:
11306 return 0;
11308 case ARM_HARD_FRAME_POINTER_REGNUM:
11309 /* The hard frame pointer points to the top entry in the
11310 stack frame. The soft frame pointer to the bottom entry
11311 in the stack frame. If there is no stack frame at all,
11312 then they are identical. */
11314 return offsets->frame - offsets->soft_frame;
11316 case STACK_POINTER_REGNUM:
11317 return offsets->outgoing_args - offsets->soft_frame;
11319 default:
11320 gcc_unreachable ();
11322 gcc_unreachable ();
11324 default:
11325 /* You cannot eliminate from the stack pointer.
11326 In theory you could eliminate from the hard frame
11327 pointer to the stack pointer, but this will never
11328 happen, since if a stack frame is not needed the
11329 hard frame pointer will never be used. */
11330 gcc_unreachable ();
11335 /* Emit RTL to save coprocessor registers on function entry. Returns the
11336 number of bytes pushed. */
11338 static int
11339 arm_save_coproc_regs(void)
11341 int saved_size = 0;
11342 unsigned reg;
11343 unsigned start_reg;
11344 rtx insn;
11346 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
11347 if (df_regs_ever_live_p (reg) && ! call_used_regs[reg])
11349 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
11350 insn = gen_rtx_MEM (V2SImode, insn);
11351 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
11352 RTX_FRAME_RELATED_P (insn) = 1;
11353 saved_size += 8;
11356 /* Save any floating point call-saved registers used by this
11357 function. */
11358 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
11360 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
11361 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
11363 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
11364 insn = gen_rtx_MEM (XFmode, insn);
11365 insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
11366 RTX_FRAME_RELATED_P (insn) = 1;
11367 saved_size += 12;
11370 else
11372 start_reg = LAST_FPA_REGNUM;
11374 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
11376 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
11378 if (start_reg - reg == 3)
11380 insn = emit_sfm (reg, 4);
11381 RTX_FRAME_RELATED_P (insn) = 1;
11382 saved_size += 48;
11383 start_reg = reg - 1;
11386 else
11388 if (start_reg != reg)
11390 insn = emit_sfm (reg + 1, start_reg - reg);
11391 RTX_FRAME_RELATED_P (insn) = 1;
11392 saved_size += (start_reg - reg) * 12;
11394 start_reg = reg - 1;
11398 if (start_reg != reg)
11400 insn = emit_sfm (reg + 1, start_reg - reg);
11401 saved_size += (start_reg - reg) * 12;
11402 RTX_FRAME_RELATED_P (insn) = 1;
11405 if (TARGET_HARD_FLOAT && TARGET_VFP)
11407 start_reg = FIRST_VFP_REGNUM;
11409 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
11411 if ((!df_regs_ever_live_p (reg) || call_used_regs[reg])
11412 && (!df_regs_ever_live_p (reg + 1) || call_used_regs[reg + 1]))
11414 if (start_reg != reg)
11415 saved_size += vfp_emit_fstmd (start_reg,
11416 (reg - start_reg) / 2);
11417 start_reg = reg + 2;
11420 if (start_reg != reg)
11421 saved_size += vfp_emit_fstmd (start_reg,
11422 (reg - start_reg) / 2);
11424 return saved_size;
11428 /* Set the Thumb frame pointer from the stack pointer. */
11430 static void
11431 thumb_set_frame_pointer (arm_stack_offsets *offsets)
11433 HOST_WIDE_INT amount;
11434 rtx insn, dwarf;
11436 amount = offsets->outgoing_args - offsets->locals_base;
11437 if (amount < 1024)
11438 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
11439 stack_pointer_rtx, GEN_INT (amount)));
11440 else
11442 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
11443 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
11444 hard_frame_pointer_rtx,
11445 stack_pointer_rtx));
11446 dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
11447 plus_constant (stack_pointer_rtx, amount));
11448 RTX_FRAME_RELATED_P (dwarf) = 1;
11449 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
11450 REG_NOTES (insn));
11453 RTX_FRAME_RELATED_P (insn) = 1;
11456 /* Generate the prologue instructions for entry into an ARM or Thumb-2
11457 function. */
11458 void
11459 arm_expand_prologue (void)
11461 rtx amount;
11462 rtx insn;
11463 rtx ip_rtx;
11464 unsigned long live_regs_mask;
11465 unsigned long func_type;
11466 int fp_offset = 0;
11467 int saved_pretend_args = 0;
11468 int saved_regs = 0;
11469 unsigned HOST_WIDE_INT args_to_push;
11470 arm_stack_offsets *offsets;
11472 func_type = arm_current_func_type ();
11474 /* Naked functions don't have prologues. */
11475 if (IS_NAKED (func_type))
11476 return;
11478 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
11479 args_to_push = current_function_pretend_args_size;
11481 /* Compute which register we will have to save onto the stack. */
11482 live_regs_mask = arm_compute_save_reg_mask ();
11484 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
11486 if (IS_STACKALIGN (func_type))
11488 rtx dwarf;
11489 rtx r0;
11490 rtx r1;
11491 /* Handle a word-aligned stack pointer. We generate the following:
11493 mov r0, sp
11494 bic r1, r0, #7
11495 mov sp, r1
11496 <save and restore r0 in normal prologue/epilogue>
11497 mov sp, r0
11498 bx lr
11500 The unwinder doesn't need to know about the stack realignment.
11501 Just tell it we saved SP in r0. */
11502 gcc_assert (TARGET_THUMB2 && !arm_arch_notm && args_to_push == 0);
11504 r0 = gen_rtx_REG (SImode, 0);
11505 r1 = gen_rtx_REG (SImode, 1);
11506 dwarf = gen_rtx_UNSPEC (SImode, NULL_RTVEC, UNSPEC_STACK_ALIGN);
11507 dwarf = gen_rtx_SET (VOIDmode, r0, dwarf);
11508 insn = gen_movsi (r0, stack_pointer_rtx);
11509 RTX_FRAME_RELATED_P (insn) = 1;
11510 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
11511 dwarf, REG_NOTES (insn));
11512 emit_insn (insn);
11513 emit_insn (gen_andsi3 (r1, r0, GEN_INT (~(HOST_WIDE_INT)7)));
11514 emit_insn (gen_movsi (stack_pointer_rtx, r1));
11517 if (frame_pointer_needed && TARGET_ARM)
11519 if (IS_INTERRUPT (func_type))
11521 /* Interrupt functions must not corrupt any registers.
11522 Creating a frame pointer however, corrupts the IP
11523 register, so we must push it first. */
11524 insn = emit_multi_reg_push (1 << IP_REGNUM);
11526 /* Do not set RTX_FRAME_RELATED_P on this insn.
11527 The dwarf stack unwinding code only wants to see one
11528 stack decrement per function, and this is not it. If
11529 this instruction is labeled as being part of the frame
11530 creation sequence then dwarf2out_frame_debug_expr will
11531 die when it encounters the assignment of IP to FP
11532 later on, since the use of SP here establishes SP as
11533 the CFA register and not IP.
11535 Anyway this instruction is not really part of the stack
11536 frame creation although it is part of the prologue. */
11538 else if (IS_NESTED (func_type))
11540 /* The Static chain register is the same as the IP register
11541 used as a scratch register during stack frame creation.
11542 To get around this need to find somewhere to store IP
11543 whilst the frame is being created. We try the following
11544 places in order:
11546 1. The last argument register.
11547 2. A slot on the stack above the frame. (This only
11548 works if the function is not a varargs function).
11549 3. Register r3, after pushing the argument registers
11550 onto the stack.
11552 Note - we only need to tell the dwarf2 backend about the SP
11553 adjustment in the second variant; the static chain register
11554 doesn't need to be unwound, as it doesn't contain a value
11555 inherited from the caller. */
11557 if (df_regs_ever_live_p (3) == false)
11558 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
11559 else if (args_to_push == 0)
11561 rtx dwarf;
11563 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
11564 insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
11565 fp_offset = 4;
11567 /* Just tell the dwarf backend that we adjusted SP. */
11568 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
11569 plus_constant (stack_pointer_rtx,
11570 -fp_offset));
11571 RTX_FRAME_RELATED_P (insn) = 1;
11572 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
11573 dwarf, REG_NOTES (insn));
11575 else
11577 /* Store the args on the stack. */
11578 if (cfun->machine->uses_anonymous_args)
11579 insn = emit_multi_reg_push
11580 ((0xf0 >> (args_to_push / 4)) & 0xf);
11581 else
11582 insn = emit_insn
11583 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
11584 GEN_INT (- args_to_push)));
11586 RTX_FRAME_RELATED_P (insn) = 1;
11588 saved_pretend_args = 1;
11589 fp_offset = args_to_push;
11590 args_to_push = 0;
11592 /* Now reuse r3 to preserve IP. */
11593 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
11597 insn = emit_set_insn (ip_rtx,
11598 plus_constant (stack_pointer_rtx, fp_offset));
11599 RTX_FRAME_RELATED_P (insn) = 1;
11602 if (args_to_push)
11604 /* Push the argument registers, or reserve space for them. */
11605 if (cfun->machine->uses_anonymous_args)
11606 insn = emit_multi_reg_push
11607 ((0xf0 >> (args_to_push / 4)) & 0xf);
11608 else
11609 insn = emit_insn
11610 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
11611 GEN_INT (- args_to_push)));
11612 RTX_FRAME_RELATED_P (insn) = 1;
11615 /* If this is an interrupt service routine, and the link register
11616 is going to be pushed, and we are not creating a stack frame,
11617 (which would involve an extra push of IP and a pop in the epilogue)
11618 subtracting four from LR now will mean that the function return
11619 can be done with a single instruction. */
11620 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
11621 && (live_regs_mask & (1 << LR_REGNUM)) != 0
11622 && ! frame_pointer_needed
11623 && TARGET_ARM)
11625 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
11627 emit_set_insn (lr, plus_constant (lr, -4));
11630 if (live_regs_mask)
11632 insn = emit_multi_reg_push (live_regs_mask);
11633 saved_regs += bit_count (live_regs_mask) * 4;
11634 RTX_FRAME_RELATED_P (insn) = 1;
11637 if (! IS_VOLATILE (func_type))
11638 saved_regs += arm_save_coproc_regs ();
11640 if (frame_pointer_needed && TARGET_ARM)
11642 /* Create the new frame pointer. */
11644 insn = GEN_INT (-(4 + args_to_push + fp_offset));
11645 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
11646 RTX_FRAME_RELATED_P (insn) = 1;
11648 if (IS_NESTED (func_type))
11650 /* Recover the static chain register. */
11651 if (!df_regs_ever_live_p (3)
11652 || saved_pretend_args)
11653 insn = gen_rtx_REG (SImode, 3);
11654 else /* if (current_function_pretend_args_size == 0) */
11656 insn = plus_constant (hard_frame_pointer_rtx, 4);
11657 insn = gen_frame_mem (SImode, insn);
11659 emit_set_insn (ip_rtx, insn);
11660 /* Add a USE to stop propagate_one_insn() from barfing. */
11661 emit_insn (gen_prologue_use (ip_rtx));
11666 offsets = arm_get_frame_offsets ();
11667 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
11669 /* This add can produce multiple insns for a large constant, so we
11670 need to get tricky. */
11671 rtx last = get_last_insn ();
11673 amount = GEN_INT (offsets->saved_args + saved_regs
11674 - offsets->outgoing_args);
11676 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
11677 amount));
11680 last = last ? NEXT_INSN (last) : get_insns ();
11681 RTX_FRAME_RELATED_P (last) = 1;
11683 while (last != insn);
11685 /* If the frame pointer is needed, emit a special barrier that
11686 will prevent the scheduler from moving stores to the frame
11687 before the stack adjustment. */
11688 if (frame_pointer_needed)
11689 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
11690 hard_frame_pointer_rtx));
11694 if (frame_pointer_needed && TARGET_THUMB2)
11695 thumb_set_frame_pointer (offsets);
11697 if (flag_pic && arm_pic_register != INVALID_REGNUM)
11699 unsigned long mask;
11701 mask = live_regs_mask;
11702 mask &= THUMB2_WORK_REGS;
11703 if (!IS_NESTED (func_type))
11704 mask |= (1 << IP_REGNUM);
11705 arm_load_pic_register (mask);
11708 /* If we are profiling, make sure no instructions are scheduled before
11709 the call to mcount. Similarly if the user has requested no
11710 scheduling in the prolog. Similarly if we want non-call exceptions
11711 using the EABI unwinder, to prevent faulting instructions from being
11712 swapped with a stack adjustment. */
11713 if (current_function_profile || !TARGET_SCHED_PROLOG
11714 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
11715 emit_insn (gen_blockage ());
11717 /* If the link register is being kept alive, with the return address in it,
11718 then make sure that it does not get reused by the ce2 pass. */
11719 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
11720 cfun->machine->lr_save_eliminated = 1;
11723 /* Print condition code to STREAM. Helper function for arm_print_operand. */
11724 static void
11725 arm_print_condition (FILE *stream)
11727 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
11729 /* Branch conversion is not implemented for Thumb-2. */
11730 if (TARGET_THUMB)
11732 output_operand_lossage ("predicated Thumb instruction");
11733 return;
11735 if (current_insn_predicate != NULL)
11737 output_operand_lossage
11738 ("predicated instruction in conditional sequence");
11739 return;
11742 fputs (arm_condition_codes[arm_current_cc], stream);
11744 else if (current_insn_predicate)
11746 enum arm_cond_code code;
11748 if (TARGET_THUMB1)
11750 output_operand_lossage ("predicated Thumb instruction");
11751 return;
11754 code = get_arm_condition_code (current_insn_predicate);
11755 fputs (arm_condition_codes[code], stream);
11760 /* If CODE is 'd', then the X is a condition operand and the instruction
11761 should only be executed if the condition is true.
11762 if CODE is 'D', then the X is a condition operand and the instruction
11763 should only be executed if the condition is false: however, if the mode
11764 of the comparison is CCFPEmode, then always execute the instruction -- we
11765 do this because in these circumstances !GE does not necessarily imply LT;
11766 in these cases the instruction pattern will take care to make sure that
11767 an instruction containing %d will follow, thereby undoing the effects of
11768 doing this instruction unconditionally.
11769 If CODE is 'N' then X is a floating point operand that must be negated
11770 before output.
11771 If CODE is 'B' then output a bitwise inverted value of X (a const int).
11772 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
11773 void
11774 arm_print_operand (FILE *stream, rtx x, int code)
11776 switch (code)
11778 case '@':
11779 fputs (ASM_COMMENT_START, stream);
11780 return;
11782 case '_':
11783 fputs (user_label_prefix, stream);
11784 return;
11786 case '|':
11787 fputs (REGISTER_PREFIX, stream);
11788 return;
11790 case '?':
11791 arm_print_condition (stream);
11792 return;
11794 case '(':
11795 /* Nothing in unified syntax, otherwise the current condition code. */
11796 if (!TARGET_UNIFIED_ASM)
11797 arm_print_condition (stream);
11798 break;
11800 case ')':
11801 /* The current condition code in unified syntax, otherwise nothing. */
11802 if (TARGET_UNIFIED_ASM)
11803 arm_print_condition (stream);
11804 break;
11806 case '.':
11807 /* The current condition code for a condition code setting instruction.
11808 Preceded by 's' in unified syntax, otherwise followed by 's'. */
11809 if (TARGET_UNIFIED_ASM)
11811 fputc('s', stream);
11812 arm_print_condition (stream);
11814 else
11816 arm_print_condition (stream);
11817 fputc('s', stream);
11819 return;
11821 case '!':
11822 /* If the instruction is conditionally executed then print
11823 the current condition code, otherwise print 's'. */
11824 gcc_assert (TARGET_THUMB2 && TARGET_UNIFIED_ASM);
11825 if (current_insn_predicate)
11826 arm_print_condition (stream);
11827 else
11828 fputc('s', stream);
11829 break;
11831 case 'N':
11833 REAL_VALUE_TYPE r;
11834 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11835 r = REAL_VALUE_NEGATE (r);
11836 fprintf (stream, "%s", fp_const_from_val (&r));
11838 return;
11840 case 'B':
11841 if (GET_CODE (x) == CONST_INT)
11843 HOST_WIDE_INT val;
11844 val = ARM_SIGN_EXTEND (~INTVAL (x));
11845 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
11847 else
11849 putc ('~', stream);
11850 output_addr_const (stream, x);
11852 return;
11854 case 'L':
11855 /* The low 16 bits of an immediate constant. */
11856 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL(x) & 0xffff);
11857 return;
11859 case 'i':
11860 fprintf (stream, "%s", arithmetic_instr (x, 1));
11861 return;
11863 /* Truncate Cirrus shift counts. */
11864 case 's':
11865 if (GET_CODE (x) == CONST_INT)
11867 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
11868 return;
11870 arm_print_operand (stream, x, 0);
11871 return;
11873 case 'I':
11874 fprintf (stream, "%s", arithmetic_instr (x, 0));
11875 return;
11877 case 'S':
11879 HOST_WIDE_INT val;
11880 const char *shift;
11882 if (!shift_operator (x, SImode))
11884 output_operand_lossage ("invalid shift operand");
11885 break;
11888 shift = shift_op (x, &val);
11890 if (shift)
11892 fprintf (stream, ", %s ", shift);
11893 if (val == -1)
11894 arm_print_operand (stream, XEXP (x, 1), 0);
11895 else
11896 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
11899 return;
11901 /* An explanation of the 'Q', 'R' and 'H' register operands:
11903 In a pair of registers containing a DI or DF value the 'Q'
11904 operand returns the register number of the register containing
11905 the least significant part of the value. The 'R' operand returns
11906 the register number of the register containing the most
11907 significant part of the value.
11909 The 'H' operand returns the higher of the two register numbers.
11910 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
11911 same as the 'Q' operand, since the most significant part of the
11912 value is held in the lower number register. The reverse is true
11913 on systems where WORDS_BIG_ENDIAN is false.
11915 The purpose of these operands is to distinguish between cases
11916 where the endian-ness of the values is important (for example
11917 when they are added together), and cases where the endian-ness
11918 is irrelevant, but the order of register operations is important.
11919 For example when loading a value from memory into a register
11920 pair, the endian-ness does not matter. Provided that the value
11921 from the lower memory address is put into the lower numbered
11922 register, and the value from the higher address is put into the
11923 higher numbered register, the load will work regardless of whether
11924 the value being loaded is big-wordian or little-wordian. The
11925 order of the two register loads can matter however, if the address
11926 of the memory location is actually held in one of the registers
11927 being overwritten by the load. */
11928 case 'Q':
11929 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11931 output_operand_lossage ("invalid operand for code '%c'", code);
11932 return;
11935 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
11936 return;
11938 case 'R':
11939 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11941 output_operand_lossage ("invalid operand for code '%c'", code);
11942 return;
11945 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
11946 return;
11948 case 'H':
11949 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11951 output_operand_lossage ("invalid operand for code '%c'", code);
11952 return;
11955 asm_fprintf (stream, "%r", REGNO (x) + 1);
11956 return;
11958 case 'm':
11959 asm_fprintf (stream, "%r",
11960 GET_CODE (XEXP (x, 0)) == REG
11961 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
11962 return;
11964 case 'M':
11965 asm_fprintf (stream, "{%r-%r}",
11966 REGNO (x),
11967 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
11968 return;
11970 case 'd':
11971 /* CONST_TRUE_RTX means always -- that's the default. */
11972 if (x == const_true_rtx)
11973 return;
11975 if (!COMPARISON_P (x))
11977 output_operand_lossage ("invalid operand for code '%c'", code);
11978 return;
11981 fputs (arm_condition_codes[get_arm_condition_code (x)],
11982 stream);
11983 return;
11985 case 'D':
11986 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11987 want to do that. */
11988 if (x == const_true_rtx)
11990 output_operand_lossage ("instruction never executed");
11991 return;
11993 if (!COMPARISON_P (x))
11995 output_operand_lossage ("invalid operand for code '%c'", code);
11996 return;
11999 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
12000 (get_arm_condition_code (x))],
12001 stream);
12002 return;
12004 /* Cirrus registers can be accessed in a variety of ways:
12005 single floating point (f)
12006 double floating point (d)
12007 32bit integer (fx)
12008 64bit integer (dx). */
12009 case 'W': /* Cirrus register in F mode. */
12010 case 'X': /* Cirrus register in D mode. */
12011 case 'Y': /* Cirrus register in FX mode. */
12012 case 'Z': /* Cirrus register in DX mode. */
12013 gcc_assert (GET_CODE (x) == REG
12014 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
12016 fprintf (stream, "mv%s%s",
12017 code == 'W' ? "f"
12018 : code == 'X' ? "d"
12019 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
12021 return;
12023 /* Print cirrus register in the mode specified by the register's mode. */
12024 case 'V':
12026 int mode = GET_MODE (x);
12028 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
12030 output_operand_lossage ("invalid operand for code '%c'", code);
12031 return;
12034 fprintf (stream, "mv%s%s",
12035 mode == DFmode ? "d"
12036 : mode == SImode ? "fx"
12037 : mode == DImode ? "dx"
12038 : "f", reg_names[REGNO (x)] + 2);
12040 return;
12043 case 'U':
12044 if (GET_CODE (x) != REG
12045 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
12046 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
12047 /* Bad value for wCG register number. */
12049 output_operand_lossage ("invalid operand for code '%c'", code);
12050 return;
12053 else
12054 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
12055 return;
12057 /* Print an iWMMXt control register name. */
12058 case 'w':
12059 if (GET_CODE (x) != CONST_INT
12060 || INTVAL (x) < 0
12061 || INTVAL (x) >= 16)
12062 /* Bad value for wC register number. */
12064 output_operand_lossage ("invalid operand for code '%c'", code);
12065 return;
12068 else
12070 static const char * wc_reg_names [16] =
12072 "wCID", "wCon", "wCSSF", "wCASF",
12073 "wC4", "wC5", "wC6", "wC7",
12074 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
12075 "wC12", "wC13", "wC14", "wC15"
12078 fprintf (stream, wc_reg_names [INTVAL (x)]);
12080 return;
12082 /* Print a VFP double precision register name. */
12083 case 'P':
12085 int mode = GET_MODE (x);
12086 int num;
12088 if (mode != DImode && mode != DFmode)
12090 output_operand_lossage ("invalid operand for code '%c'", code);
12091 return;
12094 if (GET_CODE (x) != REG
12095 || !IS_VFP_REGNUM (REGNO (x)))
12097 output_operand_lossage ("invalid operand for code '%c'", code);
12098 return;
12101 num = REGNO(x) - FIRST_VFP_REGNUM;
12102 if (num & 1)
12104 output_operand_lossage ("invalid operand for code '%c'", code);
12105 return;
12108 fprintf (stream, "d%d", num >> 1);
12110 return;
12112 /* Print a VFPv3 floating-point constant, represented as an integer
12113 index. */
12114 case 'G':
12116 int index = vfp3_const_double_index (x);
12117 gcc_assert (index != -1);
12118 fprintf (stream, "%d", index);
12120 return;
12122 default:
12123 if (x == 0)
12125 output_operand_lossage ("missing operand");
12126 return;
12129 switch (GET_CODE (x))
12131 case REG:
12132 asm_fprintf (stream, "%r", REGNO (x));
12133 break;
12135 case MEM:
12136 output_memory_reference_mode = GET_MODE (x);
12137 output_address (XEXP (x, 0));
12138 break;
12140 case CONST_DOUBLE:
12141 fprintf (stream, "#%s", fp_immediate_constant (x));
12142 break;
12144 default:
12145 gcc_assert (GET_CODE (x) != NEG);
12146 fputc ('#', stream);
12147 output_addr_const (stream, x);
12148 break;
12153 #ifndef AOF_ASSEMBLER
12154 /* Target hook for assembling integer objects. The ARM version needs to
12155 handle word-sized values specially. */
12156 static bool
12157 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
12159 if (size == UNITS_PER_WORD && aligned_p)
12161 fputs ("\t.word\t", asm_out_file);
12162 output_addr_const (asm_out_file, x);
12164 /* Mark symbols as position independent. We only do this in the
12165 .text segment, not in the .data segment. */
12166 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
12167 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
12169 /* See legitimize_pic_address for an explanation of the
12170 TARGET_VXWORKS_RTP check. */
12171 if (TARGET_VXWORKS_RTP
12172 || (GET_CODE (x) == SYMBOL_REF && !SYMBOL_REF_LOCAL_P (x)))
12173 fputs ("(GOT)", asm_out_file);
12174 else
12175 fputs ("(GOTOFF)", asm_out_file);
12177 fputc ('\n', asm_out_file);
12178 return true;
12181 if (arm_vector_mode_supported_p (GET_MODE (x)))
12183 int i, units;
12185 gcc_assert (GET_CODE (x) == CONST_VECTOR);
12187 units = CONST_VECTOR_NUNITS (x);
12189 switch (GET_MODE (x))
12191 case V2SImode: size = 4; break;
12192 case V4HImode: size = 2; break;
12193 case V8QImode: size = 1; break;
12194 default:
12195 gcc_unreachable ();
12198 for (i = 0; i < units; i++)
12200 rtx elt;
12202 elt = CONST_VECTOR_ELT (x, i);
12203 assemble_integer
12204 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
12207 return true;
12210 return default_assemble_integer (x, size, aligned_p);
12213 static void
12214 arm_elf_asm_cdtor (rtx symbol, int priority, bool is_ctor)
12216 section *s;
12218 if (!TARGET_AAPCS_BASED)
12220 (is_ctor ?
12221 default_named_section_asm_out_constructor
12222 : default_named_section_asm_out_destructor) (symbol, priority);
12223 return;
12226 /* Put these in the .init_array section, using a special relocation. */
12227 if (priority != DEFAULT_INIT_PRIORITY)
12229 char buf[18];
12230 sprintf (buf, "%s.%.5u",
12231 is_ctor ? ".init_array" : ".fini_array",
12232 priority);
12233 s = get_section (buf, SECTION_WRITE, NULL_TREE);
12235 else if (is_ctor)
12236 s = ctors_section;
12237 else
12238 s = dtors_section;
12240 switch_to_section (s);
12241 assemble_align (POINTER_SIZE);
12242 fputs ("\t.word\t", asm_out_file);
12243 output_addr_const (asm_out_file, symbol);
12244 fputs ("(target1)\n", asm_out_file);
12247 /* Add a function to the list of static constructors. */
12249 static void
12250 arm_elf_asm_constructor (rtx symbol, int priority)
12252 arm_elf_asm_cdtor (symbol, priority, /*is_ctor=*/true);
12255 /* Add a function to the list of static destructors. */
12257 static void
12258 arm_elf_asm_destructor (rtx symbol, int priority)
12260 arm_elf_asm_cdtor (symbol, priority, /*is_ctor=*/false);
12262 #endif
12264 /* A finite state machine takes care of noticing whether or not instructions
12265 can be conditionally executed, and thus decrease execution time and code
12266 size by deleting branch instructions. The fsm is controlled by
12267 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
12269 /* The state of the fsm controlling condition codes are:
12270 0: normal, do nothing special
12271 1: make ASM_OUTPUT_OPCODE not output this instruction
12272 2: make ASM_OUTPUT_OPCODE not output this instruction
12273 3: make instructions conditional
12274 4: make instructions conditional
12276 State transitions (state->state by whom under condition):
12277 0 -> 1 final_prescan_insn if the `target' is a label
12278 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
12279 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
12280 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
12281 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
12282 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
12283 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
12284 (the target insn is arm_target_insn).
12286 If the jump clobbers the conditions then we use states 2 and 4.
12288 A similar thing can be done with conditional return insns.
12290 XXX In case the `target' is an unconditional branch, this conditionalising
12291 of the instructions always reduces code size, but not always execution
12292 time. But then, I want to reduce the code size to somewhere near what
12293 /bin/cc produces. */
12295 /* In addition to this, state is maintained for Thumb-2 COND_EXEC
12296 instructions. When a COND_EXEC instruction is seen the subsequent
12297 instructions are scanned so that multiple conditional instructions can be
12298 combined into a single IT block. arm_condexec_count and arm_condexec_mask
12299 specify the length and true/false mask for the IT block. These will be
12300 decremented/zeroed by arm_asm_output_opcode as the insns are output. */
12302 /* Returns the index of the ARM condition code string in
12303 `arm_condition_codes'. COMPARISON should be an rtx like
12304 `(eq (...) (...))'. */
12305 static enum arm_cond_code
12306 get_arm_condition_code (rtx comparison)
12308 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
12309 int code;
12310 enum rtx_code comp_code = GET_CODE (comparison);
12312 if (GET_MODE_CLASS (mode) != MODE_CC)
12313 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
12314 XEXP (comparison, 1));
12316 switch (mode)
12318 case CC_DNEmode: code = ARM_NE; goto dominance;
12319 case CC_DEQmode: code = ARM_EQ; goto dominance;
12320 case CC_DGEmode: code = ARM_GE; goto dominance;
12321 case CC_DGTmode: code = ARM_GT; goto dominance;
12322 case CC_DLEmode: code = ARM_LE; goto dominance;
12323 case CC_DLTmode: code = ARM_LT; goto dominance;
12324 case CC_DGEUmode: code = ARM_CS; goto dominance;
12325 case CC_DGTUmode: code = ARM_HI; goto dominance;
12326 case CC_DLEUmode: code = ARM_LS; goto dominance;
12327 case CC_DLTUmode: code = ARM_CC;
12329 dominance:
12330 gcc_assert (comp_code == EQ || comp_code == NE);
12332 if (comp_code == EQ)
12333 return ARM_INVERSE_CONDITION_CODE (code);
12334 return code;
12336 case CC_NOOVmode:
12337 switch (comp_code)
12339 case NE: return ARM_NE;
12340 case EQ: return ARM_EQ;
12341 case GE: return ARM_PL;
12342 case LT: return ARM_MI;
12343 default: gcc_unreachable ();
12346 case CC_Zmode:
12347 switch (comp_code)
12349 case NE: return ARM_NE;
12350 case EQ: return ARM_EQ;
12351 default: gcc_unreachable ();
12354 case CC_Nmode:
12355 switch (comp_code)
12357 case NE: return ARM_MI;
12358 case EQ: return ARM_PL;
12359 default: gcc_unreachable ();
12362 case CCFPEmode:
12363 case CCFPmode:
12364 /* These encodings assume that AC=1 in the FPA system control
12365 byte. This allows us to handle all cases except UNEQ and
12366 LTGT. */
12367 switch (comp_code)
12369 case GE: return ARM_GE;
12370 case GT: return ARM_GT;
12371 case LE: return ARM_LS;
12372 case LT: return ARM_MI;
12373 case NE: return ARM_NE;
12374 case EQ: return ARM_EQ;
12375 case ORDERED: return ARM_VC;
12376 case UNORDERED: return ARM_VS;
12377 case UNLT: return ARM_LT;
12378 case UNLE: return ARM_LE;
12379 case UNGT: return ARM_HI;
12380 case UNGE: return ARM_PL;
12381 /* UNEQ and LTGT do not have a representation. */
12382 case UNEQ: /* Fall through. */
12383 case LTGT: /* Fall through. */
12384 default: gcc_unreachable ();
12387 case CC_SWPmode:
12388 switch (comp_code)
12390 case NE: return ARM_NE;
12391 case EQ: return ARM_EQ;
12392 case GE: return ARM_LE;
12393 case GT: return ARM_LT;
12394 case LE: return ARM_GE;
12395 case LT: return ARM_GT;
12396 case GEU: return ARM_LS;
12397 case GTU: return ARM_CC;
12398 case LEU: return ARM_CS;
12399 case LTU: return ARM_HI;
12400 default: gcc_unreachable ();
12403 case CC_Cmode:
12404 switch (comp_code)
12406 case LTU: return ARM_CS;
12407 case GEU: return ARM_CC;
12408 default: gcc_unreachable ();
12411 case CCmode:
12412 switch (comp_code)
12414 case NE: return ARM_NE;
12415 case EQ: return ARM_EQ;
12416 case GE: return ARM_GE;
12417 case GT: return ARM_GT;
12418 case LE: return ARM_LE;
12419 case LT: return ARM_LT;
12420 case GEU: return ARM_CS;
12421 case GTU: return ARM_HI;
12422 case LEU: return ARM_LS;
12423 case LTU: return ARM_CC;
12424 default: gcc_unreachable ();
12427 default: gcc_unreachable ();
12431 /* Tell arm_asm_output_opcode to output IT blocks for conditionally executed
12432 instructions. */
12433 void
12434 thumb2_final_prescan_insn (rtx insn)
12436 rtx first_insn = insn;
12437 rtx body = PATTERN (insn);
12438 rtx predicate;
12439 enum arm_cond_code code;
12440 int n;
12441 int mask;
12443 /* Remove the previous insn from the count of insns to be output. */
12444 if (arm_condexec_count)
12445 arm_condexec_count--;
12447 /* Nothing to do if we are already inside a conditional block. */
12448 if (arm_condexec_count)
12449 return;
12451 if (GET_CODE (body) != COND_EXEC)
12452 return;
12454 /* Conditional jumps are implemented directly. */
12455 if (GET_CODE (insn) == JUMP_INSN)
12456 return;
12458 predicate = COND_EXEC_TEST (body);
12459 arm_current_cc = get_arm_condition_code (predicate);
12461 n = get_attr_ce_count (insn);
12462 arm_condexec_count = 1;
12463 arm_condexec_mask = (1 << n) - 1;
12464 arm_condexec_masklen = n;
12465 /* See if subsequent instructions can be combined into the same block. */
12466 for (;;)
12468 insn = next_nonnote_insn (insn);
12470 /* Jumping into the middle of an IT block is illegal, so a label or
12471 barrier terminates the block. */
12472 if (GET_CODE (insn) != INSN && GET_CODE(insn) != JUMP_INSN)
12473 break;
12475 body = PATTERN (insn);
12476 /* USE and CLOBBER aren't really insns, so just skip them. */
12477 if (GET_CODE (body) == USE
12478 || GET_CODE (body) == CLOBBER)
12479 continue;
12481 /* ??? Recognize conditional jumps, and combine them with IT blocks. */
12482 if (GET_CODE (body) != COND_EXEC)
12483 break;
12484 /* Allow up to 4 conditionally executed instructions in a block. */
12485 n = get_attr_ce_count (insn);
12486 if (arm_condexec_masklen + n > 4)
12487 break;
12489 predicate = COND_EXEC_TEST (body);
12490 code = get_arm_condition_code (predicate);
12491 mask = (1 << n) - 1;
12492 if (arm_current_cc == code)
12493 arm_condexec_mask |= (mask << arm_condexec_masklen);
12494 else if (arm_current_cc != ARM_INVERSE_CONDITION_CODE(code))
12495 break;
12497 arm_condexec_count++;
12498 arm_condexec_masklen += n;
12500 /* A jump must be the last instruction in a conditional block. */
12501 if (GET_CODE(insn) == JUMP_INSN)
12502 break;
12504 /* Restore recog_data (getting the attributes of other insns can
12505 destroy this array, but final.c assumes that it remains intact
12506 across this call). */
12507 extract_constrain_insn_cached (first_insn);
12510 void
12511 arm_final_prescan_insn (rtx insn)
12513 /* BODY will hold the body of INSN. */
12514 rtx body = PATTERN (insn);
12516 /* This will be 1 if trying to repeat the trick, and things need to be
12517 reversed if it appears to fail. */
12518 int reverse = 0;
12520 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
12521 taken are clobbered, even if the rtl suggests otherwise. It also
12522 means that we have to grub around within the jump expression to find
12523 out what the conditions are when the jump isn't taken. */
12524 int jump_clobbers = 0;
12526 /* If we start with a return insn, we only succeed if we find another one. */
12527 int seeking_return = 0;
12529 /* START_INSN will hold the insn from where we start looking. This is the
12530 first insn after the following code_label if REVERSE is true. */
12531 rtx start_insn = insn;
12533 /* If in state 4, check if the target branch is reached, in order to
12534 change back to state 0. */
12535 if (arm_ccfsm_state == 4)
12537 if (insn == arm_target_insn)
12539 arm_target_insn = NULL;
12540 arm_ccfsm_state = 0;
12542 return;
12545 /* If in state 3, it is possible to repeat the trick, if this insn is an
12546 unconditional branch to a label, and immediately following this branch
12547 is the previous target label which is only used once, and the label this
12548 branch jumps to is not too far off. */
12549 if (arm_ccfsm_state == 3)
12551 if (simplejump_p (insn))
12553 start_insn = next_nonnote_insn (start_insn);
12554 if (GET_CODE (start_insn) == BARRIER)
12556 /* XXX Isn't this always a barrier? */
12557 start_insn = next_nonnote_insn (start_insn);
12559 if (GET_CODE (start_insn) == CODE_LABEL
12560 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
12561 && LABEL_NUSES (start_insn) == 1)
12562 reverse = TRUE;
12563 else
12564 return;
12566 else if (GET_CODE (body) == RETURN)
12568 start_insn = next_nonnote_insn (start_insn);
12569 if (GET_CODE (start_insn) == BARRIER)
12570 start_insn = next_nonnote_insn (start_insn);
12571 if (GET_CODE (start_insn) == CODE_LABEL
12572 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
12573 && LABEL_NUSES (start_insn) == 1)
12575 reverse = TRUE;
12576 seeking_return = 1;
12578 else
12579 return;
12581 else
12582 return;
12585 gcc_assert (!arm_ccfsm_state || reverse);
12586 if (GET_CODE (insn) != JUMP_INSN)
12587 return;
12589 /* This jump might be paralleled with a clobber of the condition codes
12590 the jump should always come first */
12591 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
12592 body = XVECEXP (body, 0, 0);
12594 if (reverse
12595 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
12596 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
12598 int insns_skipped;
12599 int fail = FALSE, succeed = FALSE;
12600 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
12601 int then_not_else = TRUE;
12602 rtx this_insn = start_insn, label = 0;
12604 /* If the jump cannot be done with one instruction, we cannot
12605 conditionally execute the instruction in the inverse case. */
12606 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
12608 jump_clobbers = 1;
12609 return;
12612 /* Register the insn jumped to. */
12613 if (reverse)
12615 if (!seeking_return)
12616 label = XEXP (SET_SRC (body), 0);
12618 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
12619 label = XEXP (XEXP (SET_SRC (body), 1), 0);
12620 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
12622 label = XEXP (XEXP (SET_SRC (body), 2), 0);
12623 then_not_else = FALSE;
12625 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
12626 seeking_return = 1;
12627 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
12629 seeking_return = 1;
12630 then_not_else = FALSE;
12632 else
12633 gcc_unreachable ();
12635 /* See how many insns this branch skips, and what kind of insns. If all
12636 insns are okay, and the label or unconditional branch to the same
12637 label is not too far away, succeed. */
12638 for (insns_skipped = 0;
12639 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
12641 rtx scanbody;
12643 this_insn = next_nonnote_insn (this_insn);
12644 if (!this_insn)
12645 break;
12647 switch (GET_CODE (this_insn))
12649 case CODE_LABEL:
12650 /* Succeed if it is the target label, otherwise fail since
12651 control falls in from somewhere else. */
12652 if (this_insn == label)
12654 if (jump_clobbers)
12656 arm_ccfsm_state = 2;
12657 this_insn = next_nonnote_insn (this_insn);
12659 else
12660 arm_ccfsm_state = 1;
12661 succeed = TRUE;
12663 else
12664 fail = TRUE;
12665 break;
12667 case BARRIER:
12668 /* Succeed if the following insn is the target label.
12669 Otherwise fail.
12670 If return insns are used then the last insn in a function
12671 will be a barrier. */
12672 this_insn = next_nonnote_insn (this_insn);
12673 if (this_insn && this_insn == label)
12675 if (jump_clobbers)
12677 arm_ccfsm_state = 2;
12678 this_insn = next_nonnote_insn (this_insn);
12680 else
12681 arm_ccfsm_state = 1;
12682 succeed = TRUE;
12684 else
12685 fail = TRUE;
12686 break;
12688 case CALL_INSN:
12689 /* The AAPCS says that conditional calls should not be
12690 used since they make interworking inefficient (the
12691 linker can't transform BL<cond> into BLX). That's
12692 only a problem if the machine has BLX. */
12693 if (arm_arch5)
12695 fail = TRUE;
12696 break;
12699 /* Succeed if the following insn is the target label, or
12700 if the following two insns are a barrier and the
12701 target label. */
12702 this_insn = next_nonnote_insn (this_insn);
12703 if (this_insn && GET_CODE (this_insn) == BARRIER)
12704 this_insn = next_nonnote_insn (this_insn);
12706 if (this_insn && this_insn == label
12707 && insns_skipped < max_insns_skipped)
12709 if (jump_clobbers)
12711 arm_ccfsm_state = 2;
12712 this_insn = next_nonnote_insn (this_insn);
12714 else
12715 arm_ccfsm_state = 1;
12716 succeed = TRUE;
12718 else
12719 fail = TRUE;
12720 break;
12722 case JUMP_INSN:
12723 /* If this is an unconditional branch to the same label, succeed.
12724 If it is to another label, do nothing. If it is conditional,
12725 fail. */
12726 /* XXX Probably, the tests for SET and the PC are
12727 unnecessary. */
12729 scanbody = PATTERN (this_insn);
12730 if (GET_CODE (scanbody) == SET
12731 && GET_CODE (SET_DEST (scanbody)) == PC)
12733 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
12734 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
12736 arm_ccfsm_state = 2;
12737 succeed = TRUE;
12739 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
12740 fail = TRUE;
12742 /* Fail if a conditional return is undesirable (e.g. on a
12743 StrongARM), but still allow this if optimizing for size. */
12744 else if (GET_CODE (scanbody) == RETURN
12745 && !use_return_insn (TRUE, NULL)
12746 && !optimize_size)
12747 fail = TRUE;
12748 else if (GET_CODE (scanbody) == RETURN
12749 && seeking_return)
12751 arm_ccfsm_state = 2;
12752 succeed = TRUE;
12754 else if (GET_CODE (scanbody) == PARALLEL)
12756 switch (get_attr_conds (this_insn))
12758 case CONDS_NOCOND:
12759 break;
12760 default:
12761 fail = TRUE;
12762 break;
12765 else
12766 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
12768 break;
12770 case INSN:
12771 /* Instructions using or affecting the condition codes make it
12772 fail. */
12773 scanbody = PATTERN (this_insn);
12774 if (!(GET_CODE (scanbody) == SET
12775 || GET_CODE (scanbody) == PARALLEL)
12776 || get_attr_conds (this_insn) != CONDS_NOCOND)
12777 fail = TRUE;
12779 /* A conditional cirrus instruction must be followed by
12780 a non Cirrus instruction. However, since we
12781 conditionalize instructions in this function and by
12782 the time we get here we can't add instructions
12783 (nops), because shorten_branches() has already been
12784 called, we will disable conditionalizing Cirrus
12785 instructions to be safe. */
12786 if (GET_CODE (scanbody) != USE
12787 && GET_CODE (scanbody) != CLOBBER
12788 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
12789 fail = TRUE;
12790 break;
12792 default:
12793 break;
12796 if (succeed)
12798 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
12799 arm_target_label = CODE_LABEL_NUMBER (label);
12800 else
12802 gcc_assert (seeking_return || arm_ccfsm_state == 2);
12804 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
12806 this_insn = next_nonnote_insn (this_insn);
12807 gcc_assert (!this_insn
12808 || (GET_CODE (this_insn) != BARRIER
12809 && GET_CODE (this_insn) != CODE_LABEL));
12811 if (!this_insn)
12813 /* Oh, dear! we ran off the end.. give up. */
12814 extract_constrain_insn_cached (insn);
12815 arm_ccfsm_state = 0;
12816 arm_target_insn = NULL;
12817 return;
12819 arm_target_insn = this_insn;
12821 if (jump_clobbers)
12823 gcc_assert (!reverse);
12824 arm_current_cc =
12825 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
12826 0), 0), 1));
12827 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
12828 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
12829 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
12830 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
12832 else
12834 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
12835 what it was. */
12836 if (!reverse)
12837 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
12838 0));
12841 if (reverse || then_not_else)
12842 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
12845 /* Restore recog_data (getting the attributes of other insns can
12846 destroy this array, but final.c assumes that it remains intact
12847 across this call. */
12848 extract_constrain_insn_cached (insn);
12852 /* Output IT instructions. */
12853 void
12854 thumb2_asm_output_opcode (FILE * stream)
12856 char buff[5];
12857 int n;
12859 if (arm_condexec_mask)
12861 for (n = 0; n < arm_condexec_masklen; n++)
12862 buff[n] = (arm_condexec_mask & (1 << n)) ? 't' : 'e';
12863 buff[n] = 0;
12864 asm_fprintf(stream, "i%s\t%s\n\t", buff,
12865 arm_condition_codes[arm_current_cc]);
12866 arm_condexec_mask = 0;
12870 /* Returns true if REGNO is a valid register
12871 for holding a quantity of type MODE. */
12873 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
12875 if (GET_MODE_CLASS (mode) == MODE_CC)
12876 return (regno == CC_REGNUM
12877 || (TARGET_HARD_FLOAT && TARGET_VFP
12878 && regno == VFPCC_REGNUM));
12880 if (TARGET_THUMB1)
12881 /* For the Thumb we only allow values bigger than SImode in
12882 registers 0 - 6, so that there is always a second low
12883 register available to hold the upper part of the value.
12884 We probably we ought to ensure that the register is the
12885 start of an even numbered register pair. */
12886 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
12888 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
12889 && IS_CIRRUS_REGNUM (regno))
12890 /* We have outlawed SI values in Cirrus registers because they
12891 reside in the lower 32 bits, but SF values reside in the
12892 upper 32 bits. This causes gcc all sorts of grief. We can't
12893 even split the registers into pairs because Cirrus SI values
12894 get sign extended to 64bits-- aldyh. */
12895 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
12897 if (TARGET_HARD_FLOAT && TARGET_VFP
12898 && IS_VFP_REGNUM (regno))
12900 if (mode == SFmode || mode == SImode)
12901 return VFP_REGNO_OK_FOR_SINGLE (regno);
12903 if (mode == DFmode)
12904 return VFP_REGNO_OK_FOR_DOUBLE (regno);
12905 return FALSE;
12908 if (TARGET_REALLY_IWMMXT)
12910 if (IS_IWMMXT_GR_REGNUM (regno))
12911 return mode == SImode;
12913 if (IS_IWMMXT_REGNUM (regno))
12914 return VALID_IWMMXT_REG_MODE (mode);
12917 /* We allow any value to be stored in the general registers.
12918 Restrict doubleword quantities to even register pairs so that we can
12919 use ldrd. */
12920 if (regno <= LAST_ARM_REGNUM)
12921 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
12923 if (regno == FRAME_POINTER_REGNUM
12924 || regno == ARG_POINTER_REGNUM)
12925 /* We only allow integers in the fake hard registers. */
12926 return GET_MODE_CLASS (mode) == MODE_INT;
12928 /* The only registers left are the FPA registers
12929 which we only allow to hold FP values. */
12930 return (TARGET_HARD_FLOAT && TARGET_FPA
12931 && GET_MODE_CLASS (mode) == MODE_FLOAT
12932 && regno >= FIRST_FPA_REGNUM
12933 && regno <= LAST_FPA_REGNUM);
12936 /* For efficiency and historical reasons LO_REGS, HI_REGS and CC_REGS are
12937 not used in arm mode. */
12939 arm_regno_class (int regno)
12941 if (TARGET_THUMB1)
12943 if (regno == STACK_POINTER_REGNUM)
12944 return STACK_REG;
12945 if (regno == CC_REGNUM)
12946 return CC_REG;
12947 if (regno < 8)
12948 return LO_REGS;
12949 return HI_REGS;
12952 if (TARGET_THUMB2 && regno < 8)
12953 return LO_REGS;
12955 if ( regno <= LAST_ARM_REGNUM
12956 || regno == FRAME_POINTER_REGNUM
12957 || regno == ARG_POINTER_REGNUM)
12958 return TARGET_THUMB2 ? HI_REGS : GENERAL_REGS;
12960 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
12961 return TARGET_THUMB2 ? CC_REG : NO_REGS;
12963 if (IS_CIRRUS_REGNUM (regno))
12964 return CIRRUS_REGS;
12966 if (IS_VFP_REGNUM (regno))
12968 if (regno <= D7_VFP_REGNUM)
12969 return VFP_D0_D7_REGS;
12970 else if (regno <= LAST_LO_VFP_REGNUM)
12971 return VFP_LO_REGS;
12972 else
12973 return VFP_HI_REGS;
12976 if (IS_IWMMXT_REGNUM (regno))
12977 return IWMMXT_REGS;
12979 if (IS_IWMMXT_GR_REGNUM (regno))
12980 return IWMMXT_GR_REGS;
12982 return FPA_REGS;
12985 /* Handle a special case when computing the offset
12986 of an argument from the frame pointer. */
12988 arm_debugger_arg_offset (int value, rtx addr)
12990 rtx insn;
12992 /* We are only interested if dbxout_parms() failed to compute the offset. */
12993 if (value != 0)
12994 return 0;
12996 /* We can only cope with the case where the address is held in a register. */
12997 if (GET_CODE (addr) != REG)
12998 return 0;
13000 /* If we are using the frame pointer to point at the argument, then
13001 an offset of 0 is correct. */
13002 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
13003 return 0;
13005 /* If we are using the stack pointer to point at the
13006 argument, then an offset of 0 is correct. */
13007 /* ??? Check this is consistent with thumb2 frame layout. */
13008 if ((TARGET_THUMB || !frame_pointer_needed)
13009 && REGNO (addr) == SP_REGNUM)
13010 return 0;
13012 /* Oh dear. The argument is pointed to by a register rather
13013 than being held in a register, or being stored at a known
13014 offset from the frame pointer. Since GDB only understands
13015 those two kinds of argument we must translate the address
13016 held in the register into an offset from the frame pointer.
13017 We do this by searching through the insns for the function
13018 looking to see where this register gets its value. If the
13019 register is initialized from the frame pointer plus an offset
13020 then we are in luck and we can continue, otherwise we give up.
13022 This code is exercised by producing debugging information
13023 for a function with arguments like this:
13025 double func (double a, double b, int c, double d) {return d;}
13027 Without this code the stab for parameter 'd' will be set to
13028 an offset of 0 from the frame pointer, rather than 8. */
13030 /* The if() statement says:
13032 If the insn is a normal instruction
13033 and if the insn is setting the value in a register
13034 and if the register being set is the register holding the address of the argument
13035 and if the address is computing by an addition
13036 that involves adding to a register
13037 which is the frame pointer
13038 a constant integer
13040 then... */
13042 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13044 if ( GET_CODE (insn) == INSN
13045 && GET_CODE (PATTERN (insn)) == SET
13046 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
13047 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
13048 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
13049 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
13050 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
13053 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
13055 break;
13059 if (value == 0)
13061 debug_rtx (addr);
13062 warning (0, "unable to compute real location of stacked parameter");
13063 value = 8; /* XXX magic hack */
13066 return value;
13069 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
13070 do \
13072 if ((MASK) & insn_flags) \
13073 add_builtin_function ((NAME), (TYPE), (CODE), \
13074 BUILT_IN_MD, NULL, NULL_TREE); \
13076 while (0)
13078 struct builtin_description
13080 const unsigned int mask;
13081 const enum insn_code icode;
13082 const char * const name;
13083 const enum arm_builtins code;
13084 const enum rtx_code comparison;
13085 const unsigned int flag;
13088 static const struct builtin_description bdesc_2arg[] =
13090 #define IWMMXT_BUILTIN(code, string, builtin) \
13091 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
13092 ARM_BUILTIN_##builtin, 0, 0 },
13094 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
13095 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
13096 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
13097 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
13098 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
13099 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
13100 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
13101 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
13102 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
13103 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
13104 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
13105 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
13106 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
13107 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
13108 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
13109 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
13110 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
13111 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
13112 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
13113 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
13114 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
13115 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
13116 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
13117 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
13118 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
13119 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
13120 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
13121 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
13122 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
13123 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
13124 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
13125 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
13126 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
13127 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
13128 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
13129 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
13130 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
13131 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
13132 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
13133 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
13134 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
13135 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
13136 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
13137 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
13138 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
13139 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
13140 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
13141 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
13142 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
13143 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
13144 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
13145 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
13146 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
13147 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
13148 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
13149 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
13150 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
13151 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
13153 #define IWMMXT_BUILTIN2(code, builtin) \
13154 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
13156 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
13157 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
13158 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
13159 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
13160 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
13161 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
13162 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
13163 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
13164 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
13165 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
13166 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
13167 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
13168 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
13169 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
13170 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
13171 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
13172 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
13173 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
13174 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
13175 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
13176 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
13177 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
13178 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
13179 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
13180 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
13181 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
13182 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
13183 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
13184 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
13185 IWMMXT_BUILTIN2 (rordi3, WRORDI)
13186 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
13187 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
13190 static const struct builtin_description bdesc_1arg[] =
13192 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
13193 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
13194 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
13195 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
13196 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
13197 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
13198 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
13199 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
13200 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
13201 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
13202 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
13203 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
13204 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
13205 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
13206 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
13207 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
13208 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
13209 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
13212 /* Set up all the iWMMXt builtins. This is
13213 not called if TARGET_IWMMXT is zero. */
13215 static void
13216 arm_init_iwmmxt_builtins (void)
13218 const struct builtin_description * d;
13219 size_t i;
13220 tree endlink = void_list_node;
13222 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
13223 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
13224 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
13226 tree int_ftype_int
13227 = build_function_type (integer_type_node,
13228 tree_cons (NULL_TREE, integer_type_node, endlink));
13229 tree v8qi_ftype_v8qi_v8qi_int
13230 = build_function_type (V8QI_type_node,
13231 tree_cons (NULL_TREE, V8QI_type_node,
13232 tree_cons (NULL_TREE, V8QI_type_node,
13233 tree_cons (NULL_TREE,
13234 integer_type_node,
13235 endlink))));
13236 tree v4hi_ftype_v4hi_int
13237 = build_function_type (V4HI_type_node,
13238 tree_cons (NULL_TREE, V4HI_type_node,
13239 tree_cons (NULL_TREE, integer_type_node,
13240 endlink)));
13241 tree v2si_ftype_v2si_int
13242 = build_function_type (V2SI_type_node,
13243 tree_cons (NULL_TREE, V2SI_type_node,
13244 tree_cons (NULL_TREE, integer_type_node,
13245 endlink)));
13246 tree v2si_ftype_di_di
13247 = build_function_type (V2SI_type_node,
13248 tree_cons (NULL_TREE, long_long_integer_type_node,
13249 tree_cons (NULL_TREE, long_long_integer_type_node,
13250 endlink)));
13251 tree di_ftype_di_int
13252 = build_function_type (long_long_integer_type_node,
13253 tree_cons (NULL_TREE, long_long_integer_type_node,
13254 tree_cons (NULL_TREE, integer_type_node,
13255 endlink)));
13256 tree di_ftype_di_int_int
13257 = build_function_type (long_long_integer_type_node,
13258 tree_cons (NULL_TREE, long_long_integer_type_node,
13259 tree_cons (NULL_TREE, integer_type_node,
13260 tree_cons (NULL_TREE,
13261 integer_type_node,
13262 endlink))));
13263 tree int_ftype_v8qi
13264 = build_function_type (integer_type_node,
13265 tree_cons (NULL_TREE, V8QI_type_node,
13266 endlink));
13267 tree int_ftype_v4hi
13268 = build_function_type (integer_type_node,
13269 tree_cons (NULL_TREE, V4HI_type_node,
13270 endlink));
13271 tree int_ftype_v2si
13272 = build_function_type (integer_type_node,
13273 tree_cons (NULL_TREE, V2SI_type_node,
13274 endlink));
13275 tree int_ftype_v8qi_int
13276 = build_function_type (integer_type_node,
13277 tree_cons (NULL_TREE, V8QI_type_node,
13278 tree_cons (NULL_TREE, integer_type_node,
13279 endlink)));
13280 tree int_ftype_v4hi_int
13281 = build_function_type (integer_type_node,
13282 tree_cons (NULL_TREE, V4HI_type_node,
13283 tree_cons (NULL_TREE, integer_type_node,
13284 endlink)));
13285 tree int_ftype_v2si_int
13286 = build_function_type (integer_type_node,
13287 tree_cons (NULL_TREE, V2SI_type_node,
13288 tree_cons (NULL_TREE, integer_type_node,
13289 endlink)));
13290 tree v8qi_ftype_v8qi_int_int
13291 = build_function_type (V8QI_type_node,
13292 tree_cons (NULL_TREE, V8QI_type_node,
13293 tree_cons (NULL_TREE, integer_type_node,
13294 tree_cons (NULL_TREE,
13295 integer_type_node,
13296 endlink))));
13297 tree v4hi_ftype_v4hi_int_int
13298 = build_function_type (V4HI_type_node,
13299 tree_cons (NULL_TREE, V4HI_type_node,
13300 tree_cons (NULL_TREE, integer_type_node,
13301 tree_cons (NULL_TREE,
13302 integer_type_node,
13303 endlink))));
13304 tree v2si_ftype_v2si_int_int
13305 = build_function_type (V2SI_type_node,
13306 tree_cons (NULL_TREE, V2SI_type_node,
13307 tree_cons (NULL_TREE, integer_type_node,
13308 tree_cons (NULL_TREE,
13309 integer_type_node,
13310 endlink))));
13311 /* Miscellaneous. */
13312 tree v8qi_ftype_v4hi_v4hi
13313 = build_function_type (V8QI_type_node,
13314 tree_cons (NULL_TREE, V4HI_type_node,
13315 tree_cons (NULL_TREE, V4HI_type_node,
13316 endlink)));
13317 tree v4hi_ftype_v2si_v2si
13318 = build_function_type (V4HI_type_node,
13319 tree_cons (NULL_TREE, V2SI_type_node,
13320 tree_cons (NULL_TREE, V2SI_type_node,
13321 endlink)));
13322 tree v2si_ftype_v4hi_v4hi
13323 = build_function_type (V2SI_type_node,
13324 tree_cons (NULL_TREE, V4HI_type_node,
13325 tree_cons (NULL_TREE, V4HI_type_node,
13326 endlink)));
13327 tree v2si_ftype_v8qi_v8qi
13328 = build_function_type (V2SI_type_node,
13329 tree_cons (NULL_TREE, V8QI_type_node,
13330 tree_cons (NULL_TREE, V8QI_type_node,
13331 endlink)));
13332 tree v4hi_ftype_v4hi_di
13333 = build_function_type (V4HI_type_node,
13334 tree_cons (NULL_TREE, V4HI_type_node,
13335 tree_cons (NULL_TREE,
13336 long_long_integer_type_node,
13337 endlink)));
13338 tree v2si_ftype_v2si_di
13339 = build_function_type (V2SI_type_node,
13340 tree_cons (NULL_TREE, V2SI_type_node,
13341 tree_cons (NULL_TREE,
13342 long_long_integer_type_node,
13343 endlink)));
13344 tree void_ftype_int_int
13345 = build_function_type (void_type_node,
13346 tree_cons (NULL_TREE, integer_type_node,
13347 tree_cons (NULL_TREE, integer_type_node,
13348 endlink)));
13349 tree di_ftype_void
13350 = build_function_type (long_long_unsigned_type_node, endlink);
13351 tree di_ftype_v8qi
13352 = build_function_type (long_long_integer_type_node,
13353 tree_cons (NULL_TREE, V8QI_type_node,
13354 endlink));
13355 tree di_ftype_v4hi
13356 = build_function_type (long_long_integer_type_node,
13357 tree_cons (NULL_TREE, V4HI_type_node,
13358 endlink));
13359 tree di_ftype_v2si
13360 = build_function_type (long_long_integer_type_node,
13361 tree_cons (NULL_TREE, V2SI_type_node,
13362 endlink));
13363 tree v2si_ftype_v4hi
13364 = build_function_type (V2SI_type_node,
13365 tree_cons (NULL_TREE, V4HI_type_node,
13366 endlink));
13367 tree v4hi_ftype_v8qi
13368 = build_function_type (V4HI_type_node,
13369 tree_cons (NULL_TREE, V8QI_type_node,
13370 endlink));
13372 tree di_ftype_di_v4hi_v4hi
13373 = build_function_type (long_long_unsigned_type_node,
13374 tree_cons (NULL_TREE,
13375 long_long_unsigned_type_node,
13376 tree_cons (NULL_TREE, V4HI_type_node,
13377 tree_cons (NULL_TREE,
13378 V4HI_type_node,
13379 endlink))));
13381 tree di_ftype_v4hi_v4hi
13382 = build_function_type (long_long_unsigned_type_node,
13383 tree_cons (NULL_TREE, V4HI_type_node,
13384 tree_cons (NULL_TREE, V4HI_type_node,
13385 endlink)));
13387 /* Normal vector binops. */
13388 tree v8qi_ftype_v8qi_v8qi
13389 = build_function_type (V8QI_type_node,
13390 tree_cons (NULL_TREE, V8QI_type_node,
13391 tree_cons (NULL_TREE, V8QI_type_node,
13392 endlink)));
13393 tree v4hi_ftype_v4hi_v4hi
13394 = build_function_type (V4HI_type_node,
13395 tree_cons (NULL_TREE, V4HI_type_node,
13396 tree_cons (NULL_TREE, V4HI_type_node,
13397 endlink)));
13398 tree v2si_ftype_v2si_v2si
13399 = build_function_type (V2SI_type_node,
13400 tree_cons (NULL_TREE, V2SI_type_node,
13401 tree_cons (NULL_TREE, V2SI_type_node,
13402 endlink)));
13403 tree di_ftype_di_di
13404 = build_function_type (long_long_unsigned_type_node,
13405 tree_cons (NULL_TREE, long_long_unsigned_type_node,
13406 tree_cons (NULL_TREE,
13407 long_long_unsigned_type_node,
13408 endlink)));
13410 /* Add all builtins that are more or less simple operations on two
13411 operands. */
13412 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13414 /* Use one of the operands; the target can have a different mode for
13415 mask-generating compares. */
13416 enum machine_mode mode;
13417 tree type;
13419 if (d->name == 0)
13420 continue;
13422 mode = insn_data[d->icode].operand[1].mode;
13424 switch (mode)
13426 case V8QImode:
13427 type = v8qi_ftype_v8qi_v8qi;
13428 break;
13429 case V4HImode:
13430 type = v4hi_ftype_v4hi_v4hi;
13431 break;
13432 case V2SImode:
13433 type = v2si_ftype_v2si_v2si;
13434 break;
13435 case DImode:
13436 type = di_ftype_di_di;
13437 break;
13439 default:
13440 gcc_unreachable ();
13443 def_mbuiltin (d->mask, d->name, type, d->code);
13446 /* Add the remaining MMX insns with somewhat more complicated types. */
13447 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
13448 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
13449 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
13451 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
13452 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
13453 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
13454 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
13455 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
13456 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
13458 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
13459 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
13460 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
13461 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
13462 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
13463 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
13465 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
13466 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
13467 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
13468 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
13469 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
13470 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
13472 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
13473 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
13474 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
13475 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
13476 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
13477 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
13479 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
13481 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
13482 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
13483 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
13484 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
13486 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
13487 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
13488 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
13489 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
13490 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
13491 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
13492 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
13493 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
13494 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
13496 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
13497 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
13498 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
13500 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
13501 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
13502 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
13504 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
13505 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
13506 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
13507 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
13508 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
13509 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
13511 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
13512 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
13513 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
13514 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
13515 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
13516 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
13517 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
13518 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
13519 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
13520 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
13521 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
13522 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
13524 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
13525 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
13526 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
13527 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
13529 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
13530 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
13531 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
13532 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
13533 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
13534 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
13535 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
13538 static void
13539 arm_init_tls_builtins (void)
13541 tree ftype;
13542 tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
13543 tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
13545 ftype = build_function_type (ptr_type_node, void_list_node);
13546 add_builtin_function ("__builtin_thread_pointer", ftype,
13547 ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
13548 NULL, const_nothrow);
13551 static void
13552 arm_init_builtins (void)
13554 arm_init_tls_builtins ();
13556 if (TARGET_REALLY_IWMMXT)
13557 arm_init_iwmmxt_builtins ();
13560 /* Errors in the source file can cause expand_expr to return const0_rtx
13561 where we expect a vector. To avoid crashing, use one of the vector
13562 clear instructions. */
13564 static rtx
13565 safe_vector_operand (rtx x, enum machine_mode mode)
13567 if (x != const0_rtx)
13568 return x;
13569 x = gen_reg_rtx (mode);
13571 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
13572 : gen_rtx_SUBREG (DImode, x, 0)));
13573 return x;
13576 /* Subroutine of arm_expand_builtin to take care of binop insns. */
13578 static rtx
13579 arm_expand_binop_builtin (enum insn_code icode,
13580 tree exp, rtx target)
13582 rtx pat;
13583 tree arg0 = CALL_EXPR_ARG (exp, 0);
13584 tree arg1 = CALL_EXPR_ARG (exp, 1);
13585 rtx op0 = expand_normal (arg0);
13586 rtx op1 = expand_normal (arg1);
13587 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13588 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13589 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13591 if (VECTOR_MODE_P (mode0))
13592 op0 = safe_vector_operand (op0, mode0);
13593 if (VECTOR_MODE_P (mode1))
13594 op1 = safe_vector_operand (op1, mode1);
13596 if (! target
13597 || GET_MODE (target) != tmode
13598 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13599 target = gen_reg_rtx (tmode);
13601 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
13603 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13604 op0 = copy_to_mode_reg (mode0, op0);
13605 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13606 op1 = copy_to_mode_reg (mode1, op1);
13608 pat = GEN_FCN (icode) (target, op0, op1);
13609 if (! pat)
13610 return 0;
13611 emit_insn (pat);
13612 return target;
13615 /* Subroutine of arm_expand_builtin to take care of unop insns. */
13617 static rtx
13618 arm_expand_unop_builtin (enum insn_code icode,
13619 tree exp, rtx target, int do_load)
13621 rtx pat;
13622 tree arg0 = CALL_EXPR_ARG (exp, 0);
13623 rtx op0 = expand_normal (arg0);
13624 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13625 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13627 if (! target
13628 || GET_MODE (target) != tmode
13629 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13630 target = gen_reg_rtx (tmode);
13631 if (do_load)
13632 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13633 else
13635 if (VECTOR_MODE_P (mode0))
13636 op0 = safe_vector_operand (op0, mode0);
13638 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13639 op0 = copy_to_mode_reg (mode0, op0);
13642 pat = GEN_FCN (icode) (target, op0);
13643 if (! pat)
13644 return 0;
13645 emit_insn (pat);
13646 return target;
13649 /* Expand an expression EXP that calls a built-in function,
13650 with result going to TARGET if that's convenient
13651 (and in mode MODE if that's convenient).
13652 SUBTARGET may be used as the target for computing one of EXP's operands.
13653 IGNORE is nonzero if the value is to be ignored. */
13655 static rtx
13656 arm_expand_builtin (tree exp,
13657 rtx target,
13658 rtx subtarget ATTRIBUTE_UNUSED,
13659 enum machine_mode mode ATTRIBUTE_UNUSED,
13660 int ignore ATTRIBUTE_UNUSED)
13662 const struct builtin_description * d;
13663 enum insn_code icode;
13664 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13665 tree arg0;
13666 tree arg1;
13667 tree arg2;
13668 rtx op0;
13669 rtx op1;
13670 rtx op2;
13671 rtx pat;
13672 int fcode = DECL_FUNCTION_CODE (fndecl);
13673 size_t i;
13674 enum machine_mode tmode;
13675 enum machine_mode mode0;
13676 enum machine_mode mode1;
13677 enum machine_mode mode2;
13679 switch (fcode)
13681 case ARM_BUILTIN_TEXTRMSB:
13682 case ARM_BUILTIN_TEXTRMUB:
13683 case ARM_BUILTIN_TEXTRMSH:
13684 case ARM_BUILTIN_TEXTRMUH:
13685 case ARM_BUILTIN_TEXTRMSW:
13686 case ARM_BUILTIN_TEXTRMUW:
13687 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
13688 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
13689 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
13690 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
13691 : CODE_FOR_iwmmxt_textrmw);
13693 arg0 = CALL_EXPR_ARG (exp, 0);
13694 arg1 = CALL_EXPR_ARG (exp, 1);
13695 op0 = expand_normal (arg0);
13696 op1 = expand_normal (arg1);
13697 tmode = insn_data[icode].operand[0].mode;
13698 mode0 = insn_data[icode].operand[1].mode;
13699 mode1 = insn_data[icode].operand[2].mode;
13701 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13702 op0 = copy_to_mode_reg (mode0, op0);
13703 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13705 /* @@@ better error message */
13706 error ("selector must be an immediate");
13707 return gen_reg_rtx (tmode);
13709 if (target == 0
13710 || GET_MODE (target) != tmode
13711 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13712 target = gen_reg_rtx (tmode);
13713 pat = GEN_FCN (icode) (target, op0, op1);
13714 if (! pat)
13715 return 0;
13716 emit_insn (pat);
13717 return target;
13719 case ARM_BUILTIN_TINSRB:
13720 case ARM_BUILTIN_TINSRH:
13721 case ARM_BUILTIN_TINSRW:
13722 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
13723 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
13724 : CODE_FOR_iwmmxt_tinsrw);
13725 arg0 = CALL_EXPR_ARG (exp, 0);
13726 arg1 = CALL_EXPR_ARG (exp, 1);
13727 arg2 = CALL_EXPR_ARG (exp, 2);
13728 op0 = expand_normal (arg0);
13729 op1 = expand_normal (arg1);
13730 op2 = expand_normal (arg2);
13731 tmode = insn_data[icode].operand[0].mode;
13732 mode0 = insn_data[icode].operand[1].mode;
13733 mode1 = insn_data[icode].operand[2].mode;
13734 mode2 = insn_data[icode].operand[3].mode;
13736 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13737 op0 = copy_to_mode_reg (mode0, op0);
13738 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13739 op1 = copy_to_mode_reg (mode1, op1);
13740 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13742 /* @@@ better error message */
13743 error ("selector must be an immediate");
13744 return const0_rtx;
13746 if (target == 0
13747 || GET_MODE (target) != tmode
13748 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13749 target = gen_reg_rtx (tmode);
13750 pat = GEN_FCN (icode) (target, op0, op1, op2);
13751 if (! pat)
13752 return 0;
13753 emit_insn (pat);
13754 return target;
13756 case ARM_BUILTIN_SETWCX:
13757 arg0 = CALL_EXPR_ARG (exp, 0);
13758 arg1 = CALL_EXPR_ARG (exp, 1);
13759 op0 = force_reg (SImode, expand_normal (arg0));
13760 op1 = expand_normal (arg1);
13761 emit_insn (gen_iwmmxt_tmcr (op1, op0));
13762 return 0;
13764 case ARM_BUILTIN_GETWCX:
13765 arg0 = CALL_EXPR_ARG (exp, 0);
13766 op0 = expand_normal (arg0);
13767 target = gen_reg_rtx (SImode);
13768 emit_insn (gen_iwmmxt_tmrc (target, op0));
13769 return target;
13771 case ARM_BUILTIN_WSHUFH:
13772 icode = CODE_FOR_iwmmxt_wshufh;
13773 arg0 = CALL_EXPR_ARG (exp, 0);
13774 arg1 = CALL_EXPR_ARG (exp, 1);
13775 op0 = expand_normal (arg0);
13776 op1 = expand_normal (arg1);
13777 tmode = insn_data[icode].operand[0].mode;
13778 mode1 = insn_data[icode].operand[1].mode;
13779 mode2 = insn_data[icode].operand[2].mode;
13781 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
13782 op0 = copy_to_mode_reg (mode1, op0);
13783 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
13785 /* @@@ better error message */
13786 error ("mask must be an immediate");
13787 return const0_rtx;
13789 if (target == 0
13790 || GET_MODE (target) != tmode
13791 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13792 target = gen_reg_rtx (tmode);
13793 pat = GEN_FCN (icode) (target, op0, op1);
13794 if (! pat)
13795 return 0;
13796 emit_insn (pat);
13797 return target;
13799 case ARM_BUILTIN_WSADB:
13800 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, exp, target);
13801 case ARM_BUILTIN_WSADH:
13802 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, exp, target);
13803 case ARM_BUILTIN_WSADBZ:
13804 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, exp, target);
13805 case ARM_BUILTIN_WSADHZ:
13806 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, exp, target);
13808 /* Several three-argument builtins. */
13809 case ARM_BUILTIN_WMACS:
13810 case ARM_BUILTIN_WMACU:
13811 case ARM_BUILTIN_WALIGN:
13812 case ARM_BUILTIN_TMIA:
13813 case ARM_BUILTIN_TMIAPH:
13814 case ARM_BUILTIN_TMIATT:
13815 case ARM_BUILTIN_TMIATB:
13816 case ARM_BUILTIN_TMIABT:
13817 case ARM_BUILTIN_TMIABB:
13818 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
13819 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
13820 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
13821 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
13822 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
13823 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
13824 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
13825 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
13826 : CODE_FOR_iwmmxt_walign);
13827 arg0 = CALL_EXPR_ARG (exp, 0);
13828 arg1 = CALL_EXPR_ARG (exp, 1);
13829 arg2 = CALL_EXPR_ARG (exp, 2);
13830 op0 = expand_normal (arg0);
13831 op1 = expand_normal (arg1);
13832 op2 = expand_normal (arg2);
13833 tmode = insn_data[icode].operand[0].mode;
13834 mode0 = insn_data[icode].operand[1].mode;
13835 mode1 = insn_data[icode].operand[2].mode;
13836 mode2 = insn_data[icode].operand[3].mode;
13838 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13839 op0 = copy_to_mode_reg (mode0, op0);
13840 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13841 op1 = copy_to_mode_reg (mode1, op1);
13842 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13843 op2 = copy_to_mode_reg (mode2, op2);
13844 if (target == 0
13845 || GET_MODE (target) != tmode
13846 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13847 target = gen_reg_rtx (tmode);
13848 pat = GEN_FCN (icode) (target, op0, op1, op2);
13849 if (! pat)
13850 return 0;
13851 emit_insn (pat);
13852 return target;
13854 case ARM_BUILTIN_WZERO:
13855 target = gen_reg_rtx (DImode);
13856 emit_insn (gen_iwmmxt_clrdi (target));
13857 return target;
13859 case ARM_BUILTIN_THREAD_POINTER:
13860 return arm_load_tp (target);
13862 default:
13863 break;
13866 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13867 if (d->code == (const enum arm_builtins) fcode)
13868 return arm_expand_binop_builtin (d->icode, exp, target);
13870 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13871 if (d->code == (const enum arm_builtins) fcode)
13872 return arm_expand_unop_builtin (d->icode, exp, target, 0);
13874 /* @@@ Should really do something sensible here. */
13875 return NULL_RTX;
13878 /* Return the number (counting from 0) of
13879 the least significant set bit in MASK. */
13881 inline static int
13882 number_of_first_bit_set (unsigned mask)
13884 int bit;
13886 for (bit = 0;
13887 (mask & (1 << bit)) == 0;
13888 ++bit)
13889 continue;
13891 return bit;
13894 /* Emit code to push or pop registers to or from the stack. F is the
13895 assembly file. MASK is the registers to push or pop. PUSH is
13896 nonzero if we should push, and zero if we should pop. For debugging
13897 output, if pushing, adjust CFA_OFFSET by the amount of space added
13898 to the stack. REAL_REGS should have the same number of bits set as
13899 MASK, and will be used instead (in the same order) to describe which
13900 registers were saved - this is used to mark the save slots when we
13901 push high registers after moving them to low registers. */
13902 static void
13903 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
13904 unsigned long real_regs)
13906 int regno;
13907 int lo_mask = mask & 0xFF;
13908 int pushed_words = 0;
13910 gcc_assert (mask);
13912 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
13914 /* Special case. Do not generate a POP PC statement here, do it in
13915 thumb_exit() */
13916 thumb_exit (f, -1);
13917 return;
13920 if (ARM_EABI_UNWIND_TABLES && push)
13922 fprintf (f, "\t.save\t{");
13923 for (regno = 0; regno < 15; regno++)
13925 if (real_regs & (1 << regno))
13927 if (real_regs & ((1 << regno) -1))
13928 fprintf (f, ", ");
13929 asm_fprintf (f, "%r", regno);
13932 fprintf (f, "}\n");
13935 fprintf (f, "\t%s\t{", push ? "push" : "pop");
13937 /* Look at the low registers first. */
13938 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
13940 if (lo_mask & 1)
13942 asm_fprintf (f, "%r", regno);
13944 if ((lo_mask & ~1) != 0)
13945 fprintf (f, ", ");
13947 pushed_words++;
13951 if (push && (mask & (1 << LR_REGNUM)))
13953 /* Catch pushing the LR. */
13954 if (mask & 0xFF)
13955 fprintf (f, ", ");
13957 asm_fprintf (f, "%r", LR_REGNUM);
13959 pushed_words++;
13961 else if (!push && (mask & (1 << PC_REGNUM)))
13963 /* Catch popping the PC. */
13964 if (TARGET_INTERWORK || TARGET_BACKTRACE
13965 || current_function_calls_eh_return)
13967 /* The PC is never poped directly, instead
13968 it is popped into r3 and then BX is used. */
13969 fprintf (f, "}\n");
13971 thumb_exit (f, -1);
13973 return;
13975 else
13977 if (mask & 0xFF)
13978 fprintf (f, ", ");
13980 asm_fprintf (f, "%r", PC_REGNUM);
13984 fprintf (f, "}\n");
13986 if (push && pushed_words && dwarf2out_do_frame ())
13988 char *l = dwarf2out_cfi_label ();
13989 int pushed_mask = real_regs;
13991 *cfa_offset += pushed_words * 4;
13992 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
13994 pushed_words = 0;
13995 pushed_mask = real_regs;
13996 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
13998 if (pushed_mask & 1)
13999 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
14004 /* Generate code to return from a thumb function.
14005 If 'reg_containing_return_addr' is -1, then the return address is
14006 actually on the stack, at the stack pointer. */
14007 static void
14008 thumb_exit (FILE *f, int reg_containing_return_addr)
14010 unsigned regs_available_for_popping;
14011 unsigned regs_to_pop;
14012 int pops_needed;
14013 unsigned available;
14014 unsigned required;
14015 int mode;
14016 int size;
14017 int restore_a4 = FALSE;
14019 /* Compute the registers we need to pop. */
14020 regs_to_pop = 0;
14021 pops_needed = 0;
14023 if (reg_containing_return_addr == -1)
14025 regs_to_pop |= 1 << LR_REGNUM;
14026 ++pops_needed;
14029 if (TARGET_BACKTRACE)
14031 /* Restore the (ARM) frame pointer and stack pointer. */
14032 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
14033 pops_needed += 2;
14036 /* If there is nothing to pop then just emit the BX instruction and
14037 return. */
14038 if (pops_needed == 0)
14040 if (current_function_calls_eh_return)
14041 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
14043 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
14044 return;
14046 /* Otherwise if we are not supporting interworking and we have not created
14047 a backtrace structure and the function was not entered in ARM mode then
14048 just pop the return address straight into the PC. */
14049 else if (!TARGET_INTERWORK
14050 && !TARGET_BACKTRACE
14051 && !is_called_in_ARM_mode (current_function_decl)
14052 && !current_function_calls_eh_return)
14054 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
14055 return;
14058 /* Find out how many of the (return) argument registers we can corrupt. */
14059 regs_available_for_popping = 0;
14061 /* If returning via __builtin_eh_return, the bottom three registers
14062 all contain information needed for the return. */
14063 if (current_function_calls_eh_return)
14064 size = 12;
14065 else
14067 /* If we can deduce the registers used from the function's
14068 return value. This is more reliable that examining
14069 df_regs_ever_live_p () because that will be set if the register is
14070 ever used in the function, not just if the register is used
14071 to hold a return value. */
14073 if (current_function_return_rtx != 0)
14074 mode = GET_MODE (current_function_return_rtx);
14075 else
14076 mode = DECL_MODE (DECL_RESULT (current_function_decl));
14078 size = GET_MODE_SIZE (mode);
14080 if (size == 0)
14082 /* In a void function we can use any argument register.
14083 In a function that returns a structure on the stack
14084 we can use the second and third argument registers. */
14085 if (mode == VOIDmode)
14086 regs_available_for_popping =
14087 (1 << ARG_REGISTER (1))
14088 | (1 << ARG_REGISTER (2))
14089 | (1 << ARG_REGISTER (3));
14090 else
14091 regs_available_for_popping =
14092 (1 << ARG_REGISTER (2))
14093 | (1 << ARG_REGISTER (3));
14095 else if (size <= 4)
14096 regs_available_for_popping =
14097 (1 << ARG_REGISTER (2))
14098 | (1 << ARG_REGISTER (3));
14099 else if (size <= 8)
14100 regs_available_for_popping =
14101 (1 << ARG_REGISTER (3));
14104 /* Match registers to be popped with registers into which we pop them. */
14105 for (available = regs_available_for_popping,
14106 required = regs_to_pop;
14107 required != 0 && available != 0;
14108 available &= ~(available & - available),
14109 required &= ~(required & - required))
14110 -- pops_needed;
14112 /* If we have any popping registers left over, remove them. */
14113 if (available > 0)
14114 regs_available_for_popping &= ~available;
14116 /* Otherwise if we need another popping register we can use
14117 the fourth argument register. */
14118 else if (pops_needed)
14120 /* If we have not found any free argument registers and
14121 reg a4 contains the return address, we must move it. */
14122 if (regs_available_for_popping == 0
14123 && reg_containing_return_addr == LAST_ARG_REGNUM)
14125 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
14126 reg_containing_return_addr = LR_REGNUM;
14128 else if (size > 12)
14130 /* Register a4 is being used to hold part of the return value,
14131 but we have dire need of a free, low register. */
14132 restore_a4 = TRUE;
14134 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
14137 if (reg_containing_return_addr != LAST_ARG_REGNUM)
14139 /* The fourth argument register is available. */
14140 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
14142 --pops_needed;
14146 /* Pop as many registers as we can. */
14147 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
14148 regs_available_for_popping);
14150 /* Process the registers we popped. */
14151 if (reg_containing_return_addr == -1)
14153 /* The return address was popped into the lowest numbered register. */
14154 regs_to_pop &= ~(1 << LR_REGNUM);
14156 reg_containing_return_addr =
14157 number_of_first_bit_set (regs_available_for_popping);
14159 /* Remove this register for the mask of available registers, so that
14160 the return address will not be corrupted by further pops. */
14161 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
14164 /* If we popped other registers then handle them here. */
14165 if (regs_available_for_popping)
14167 int frame_pointer;
14169 /* Work out which register currently contains the frame pointer. */
14170 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
14172 /* Move it into the correct place. */
14173 asm_fprintf (f, "\tmov\t%r, %r\n",
14174 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
14176 /* (Temporarily) remove it from the mask of popped registers. */
14177 regs_available_for_popping &= ~(1 << frame_pointer);
14178 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
14180 if (regs_available_for_popping)
14182 int stack_pointer;
14184 /* We popped the stack pointer as well,
14185 find the register that contains it. */
14186 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
14188 /* Move it into the stack register. */
14189 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
14191 /* At this point we have popped all necessary registers, so
14192 do not worry about restoring regs_available_for_popping
14193 to its correct value:
14195 assert (pops_needed == 0)
14196 assert (regs_available_for_popping == (1 << frame_pointer))
14197 assert (regs_to_pop == (1 << STACK_POINTER)) */
14199 else
14201 /* Since we have just move the popped value into the frame
14202 pointer, the popping register is available for reuse, and
14203 we know that we still have the stack pointer left to pop. */
14204 regs_available_for_popping |= (1 << frame_pointer);
14208 /* If we still have registers left on the stack, but we no longer have
14209 any registers into which we can pop them, then we must move the return
14210 address into the link register and make available the register that
14211 contained it. */
14212 if (regs_available_for_popping == 0 && pops_needed > 0)
14214 regs_available_for_popping |= 1 << reg_containing_return_addr;
14216 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
14217 reg_containing_return_addr);
14219 reg_containing_return_addr = LR_REGNUM;
14222 /* If we have registers left on the stack then pop some more.
14223 We know that at most we will want to pop FP and SP. */
14224 if (pops_needed > 0)
14226 int popped_into;
14227 int move_to;
14229 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
14230 regs_available_for_popping);
14232 /* We have popped either FP or SP.
14233 Move whichever one it is into the correct register. */
14234 popped_into = number_of_first_bit_set (regs_available_for_popping);
14235 move_to = number_of_first_bit_set (regs_to_pop);
14237 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
14239 regs_to_pop &= ~(1 << move_to);
14241 --pops_needed;
14244 /* If we still have not popped everything then we must have only
14245 had one register available to us and we are now popping the SP. */
14246 if (pops_needed > 0)
14248 int popped_into;
14250 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
14251 regs_available_for_popping);
14253 popped_into = number_of_first_bit_set (regs_available_for_popping);
14255 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
14257 assert (regs_to_pop == (1 << STACK_POINTER))
14258 assert (pops_needed == 1)
14262 /* If necessary restore the a4 register. */
14263 if (restore_a4)
14265 if (reg_containing_return_addr != LR_REGNUM)
14267 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
14268 reg_containing_return_addr = LR_REGNUM;
14271 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
14274 if (current_function_calls_eh_return)
14275 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
14277 /* Return to caller. */
14278 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
14282 void
14283 thumb1_final_prescan_insn (rtx insn)
14285 if (flag_print_asm_name)
14286 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
14287 INSN_ADDRESSES (INSN_UID (insn)));
14291 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
14293 unsigned HOST_WIDE_INT mask = 0xff;
14294 int i;
14296 if (val == 0) /* XXX */
14297 return 0;
14299 for (i = 0; i < 25; i++)
14300 if ((val & (mask << i)) == val)
14301 return 1;
14303 return 0;
14306 /* Returns nonzero if the current function contains,
14307 or might contain a far jump. */
14308 static int
14309 thumb_far_jump_used_p (void)
14311 rtx insn;
14313 /* This test is only important for leaf functions. */
14314 /* assert (!leaf_function_p ()); */
14316 /* If we have already decided that far jumps may be used,
14317 do not bother checking again, and always return true even if
14318 it turns out that they are not being used. Once we have made
14319 the decision that far jumps are present (and that hence the link
14320 register will be pushed onto the stack) we cannot go back on it. */
14321 if (cfun->machine->far_jump_used)
14322 return 1;
14324 /* If this function is not being called from the prologue/epilogue
14325 generation code then it must be being called from the
14326 INITIAL_ELIMINATION_OFFSET macro. */
14327 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
14329 /* In this case we know that we are being asked about the elimination
14330 of the arg pointer register. If that register is not being used,
14331 then there are no arguments on the stack, and we do not have to
14332 worry that a far jump might force the prologue to push the link
14333 register, changing the stack offsets. In this case we can just
14334 return false, since the presence of far jumps in the function will
14335 not affect stack offsets.
14337 If the arg pointer is live (or if it was live, but has now been
14338 eliminated and so set to dead) then we do have to test to see if
14339 the function might contain a far jump. This test can lead to some
14340 false negatives, since before reload is completed, then length of
14341 branch instructions is not known, so gcc defaults to returning their
14342 longest length, which in turn sets the far jump attribute to true.
14344 A false negative will not result in bad code being generated, but it
14345 will result in a needless push and pop of the link register. We
14346 hope that this does not occur too often.
14348 If we need doubleword stack alignment this could affect the other
14349 elimination offsets so we can't risk getting it wrong. */
14350 if (df_regs_ever_live_p (ARG_POINTER_REGNUM))
14351 cfun->machine->arg_pointer_live = 1;
14352 else if (!cfun->machine->arg_pointer_live)
14353 return 0;
14356 /* Check to see if the function contains a branch
14357 insn with the far jump attribute set. */
14358 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14360 if (GET_CODE (insn) == JUMP_INSN
14361 /* Ignore tablejump patterns. */
14362 && GET_CODE (PATTERN (insn)) != ADDR_VEC
14363 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
14364 && get_attr_far_jump (insn) == FAR_JUMP_YES
14367 /* Record the fact that we have decided that
14368 the function does use far jumps. */
14369 cfun->machine->far_jump_used = 1;
14370 return 1;
14374 return 0;
14377 /* Return nonzero if FUNC must be entered in ARM mode. */
14379 is_called_in_ARM_mode (tree func)
14381 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
14383 /* Ignore the problem about functions whose address is taken. */
14384 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
14385 return TRUE;
14387 #ifdef ARM_PE
14388 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
14389 #else
14390 return FALSE;
14391 #endif
14394 /* The bits which aren't usefully expanded as rtl. */
14395 const char *
14396 thumb_unexpanded_epilogue (void)
14398 int regno;
14399 unsigned long live_regs_mask = 0;
14400 int high_regs_pushed = 0;
14401 int had_to_push_lr;
14402 int size;
14404 if (return_used_this_function)
14405 return "";
14407 if (IS_NAKED (arm_current_func_type ()))
14408 return "";
14410 live_regs_mask = thumb1_compute_save_reg_mask ();
14411 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
14413 /* If we can deduce the registers used from the function's return value.
14414 This is more reliable that examining df_regs_ever_live_p () because that
14415 will be set if the register is ever used in the function, not just if
14416 the register is used to hold a return value. */
14417 size = arm_size_return_regs ();
14419 /* The prolog may have pushed some high registers to use as
14420 work registers. e.g. the testsuite file:
14421 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
14422 compiles to produce:
14423 push {r4, r5, r6, r7, lr}
14424 mov r7, r9
14425 mov r6, r8
14426 push {r6, r7}
14427 as part of the prolog. We have to undo that pushing here. */
14429 if (high_regs_pushed)
14431 unsigned long mask = live_regs_mask & 0xff;
14432 int next_hi_reg;
14434 /* The available low registers depend on the size of the value we are
14435 returning. */
14436 if (size <= 12)
14437 mask |= 1 << 3;
14438 if (size <= 8)
14439 mask |= 1 << 2;
14441 if (mask == 0)
14442 /* Oh dear! We have no low registers into which we can pop
14443 high registers! */
14444 internal_error
14445 ("no low registers available for popping high registers");
14447 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
14448 if (live_regs_mask & (1 << next_hi_reg))
14449 break;
14451 while (high_regs_pushed)
14453 /* Find lo register(s) into which the high register(s) can
14454 be popped. */
14455 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
14457 if (mask & (1 << regno))
14458 high_regs_pushed--;
14459 if (high_regs_pushed == 0)
14460 break;
14463 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
14465 /* Pop the values into the low register(s). */
14466 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
14468 /* Move the value(s) into the high registers. */
14469 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
14471 if (mask & (1 << regno))
14473 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
14474 regno);
14476 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
14477 if (live_regs_mask & (1 << next_hi_reg))
14478 break;
14482 live_regs_mask &= ~0x0f00;
14485 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
14486 live_regs_mask &= 0xff;
14488 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
14490 /* Pop the return address into the PC. */
14491 if (had_to_push_lr)
14492 live_regs_mask |= 1 << PC_REGNUM;
14494 /* Either no argument registers were pushed or a backtrace
14495 structure was created which includes an adjusted stack
14496 pointer, so just pop everything. */
14497 if (live_regs_mask)
14498 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
14499 live_regs_mask);
14501 /* We have either just popped the return address into the
14502 PC or it is was kept in LR for the entire function. */
14503 if (!had_to_push_lr)
14504 thumb_exit (asm_out_file, LR_REGNUM);
14506 else
14508 /* Pop everything but the return address. */
14509 if (live_regs_mask)
14510 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
14511 live_regs_mask);
14513 if (had_to_push_lr)
14515 if (size > 12)
14517 /* We have no free low regs, so save one. */
14518 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
14519 LAST_ARG_REGNUM);
14522 /* Get the return address into a temporary register. */
14523 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
14524 1 << LAST_ARG_REGNUM);
14526 if (size > 12)
14528 /* Move the return address to lr. */
14529 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
14530 LAST_ARG_REGNUM);
14531 /* Restore the low register. */
14532 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
14533 IP_REGNUM);
14534 regno = LR_REGNUM;
14536 else
14537 regno = LAST_ARG_REGNUM;
14539 else
14540 regno = LR_REGNUM;
14542 /* Remove the argument registers that were pushed onto the stack. */
14543 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
14544 SP_REGNUM, SP_REGNUM,
14545 current_function_pretend_args_size);
14547 thumb_exit (asm_out_file, regno);
14550 return "";
14553 /* Functions to save and restore machine-specific function data. */
14554 static struct machine_function *
14555 arm_init_machine_status (void)
14557 struct machine_function *machine;
14558 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
14560 #if ARM_FT_UNKNOWN != 0
14561 machine->func_type = ARM_FT_UNKNOWN;
14562 #endif
14563 return machine;
14566 /* Return an RTX indicating where the return address to the
14567 calling function can be found. */
14569 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
14571 if (count != 0)
14572 return NULL_RTX;
14574 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
14577 /* Do anything needed before RTL is emitted for each function. */
14578 void
14579 arm_init_expanders (void)
14581 /* Arrange to initialize and mark the machine per-function status. */
14582 init_machine_status = arm_init_machine_status;
14584 /* This is to stop the combine pass optimizing away the alignment
14585 adjustment of va_arg. */
14586 /* ??? It is claimed that this should not be necessary. */
14587 if (cfun)
14588 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
14592 /* Like arm_compute_initial_elimination offset. Simpler because there
14593 isn't an ABI specified frame pointer for Thumb. Instead, we set it
14594 to point at the base of the local variables after static stack
14595 space for a function has been allocated. */
14597 HOST_WIDE_INT
14598 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
14600 arm_stack_offsets *offsets;
14602 offsets = arm_get_frame_offsets ();
14604 switch (from)
14606 case ARG_POINTER_REGNUM:
14607 switch (to)
14609 case STACK_POINTER_REGNUM:
14610 return offsets->outgoing_args - offsets->saved_args;
14612 case FRAME_POINTER_REGNUM:
14613 return offsets->soft_frame - offsets->saved_args;
14615 case ARM_HARD_FRAME_POINTER_REGNUM:
14616 return offsets->saved_regs - offsets->saved_args;
14618 case THUMB_HARD_FRAME_POINTER_REGNUM:
14619 return offsets->locals_base - offsets->saved_args;
14621 default:
14622 gcc_unreachable ();
14624 break;
14626 case FRAME_POINTER_REGNUM:
14627 switch (to)
14629 case STACK_POINTER_REGNUM:
14630 return offsets->outgoing_args - offsets->soft_frame;
14632 case ARM_HARD_FRAME_POINTER_REGNUM:
14633 return offsets->saved_regs - offsets->soft_frame;
14635 case THUMB_HARD_FRAME_POINTER_REGNUM:
14636 return offsets->locals_base - offsets->soft_frame;
14638 default:
14639 gcc_unreachable ();
14641 break;
14643 default:
14644 gcc_unreachable ();
14648 /* Generate the rest of a function's prologue. */
14649 void
14650 thumb1_expand_prologue (void)
14652 rtx insn, dwarf;
14654 HOST_WIDE_INT amount;
14655 arm_stack_offsets *offsets;
14656 unsigned long func_type;
14657 int regno;
14658 unsigned long live_regs_mask;
14660 func_type = arm_current_func_type ();
14662 /* Naked functions don't have prologues. */
14663 if (IS_NAKED (func_type))
14664 return;
14666 if (IS_INTERRUPT (func_type))
14668 error ("interrupt Service Routines cannot be coded in Thumb mode");
14669 return;
14672 live_regs_mask = thumb1_compute_save_reg_mask ();
14673 /* Load the pic register before setting the frame pointer,
14674 so we can use r7 as a temporary work register. */
14675 if (flag_pic && arm_pic_register != INVALID_REGNUM)
14676 arm_load_pic_register (live_regs_mask);
14678 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
14679 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
14680 stack_pointer_rtx);
14682 offsets = arm_get_frame_offsets ();
14683 amount = offsets->outgoing_args - offsets->saved_regs;
14684 if (amount)
14686 if (amount < 512)
14688 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
14689 GEN_INT (- amount)));
14690 RTX_FRAME_RELATED_P (insn) = 1;
14692 else
14694 rtx reg;
14696 /* The stack decrement is too big for an immediate value in a single
14697 insn. In theory we could issue multiple subtracts, but after
14698 three of them it becomes more space efficient to place the full
14699 value in the constant pool and load into a register. (Also the
14700 ARM debugger really likes to see only one stack decrement per
14701 function). So instead we look for a scratch register into which
14702 we can load the decrement, and then we subtract this from the
14703 stack pointer. Unfortunately on the thumb the only available
14704 scratch registers are the argument registers, and we cannot use
14705 these as they may hold arguments to the function. Instead we
14706 attempt to locate a call preserved register which is used by this
14707 function. If we can find one, then we know that it will have
14708 been pushed at the start of the prologue and so we can corrupt
14709 it now. */
14710 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
14711 if (live_regs_mask & (1 << regno)
14712 && !(frame_pointer_needed
14713 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
14714 break;
14716 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
14718 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
14720 /* Choose an arbitrary, non-argument low register. */
14721 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
14723 /* Save it by copying it into a high, scratch register. */
14724 emit_insn (gen_movsi (spare, reg));
14725 /* Add a USE to stop propagate_one_insn() from barfing. */
14726 emit_insn (gen_prologue_use (spare));
14728 /* Decrement the stack. */
14729 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
14730 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
14731 stack_pointer_rtx, reg));
14732 RTX_FRAME_RELATED_P (insn) = 1;
14733 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
14734 plus_constant (stack_pointer_rtx,
14735 -amount));
14736 RTX_FRAME_RELATED_P (dwarf) = 1;
14737 REG_NOTES (insn)
14738 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
14739 REG_NOTES (insn));
14741 /* Restore the low register's original value. */
14742 emit_insn (gen_movsi (reg, spare));
14744 /* Emit a USE of the restored scratch register, so that flow
14745 analysis will not consider the restore redundant. The
14746 register won't be used again in this function and isn't
14747 restored by the epilogue. */
14748 emit_insn (gen_prologue_use (reg));
14750 else
14752 reg = gen_rtx_REG (SImode, regno);
14754 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
14756 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
14757 stack_pointer_rtx, reg));
14758 RTX_FRAME_RELATED_P (insn) = 1;
14759 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
14760 plus_constant (stack_pointer_rtx,
14761 -amount));
14762 RTX_FRAME_RELATED_P (dwarf) = 1;
14763 REG_NOTES (insn)
14764 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
14765 REG_NOTES (insn));
14770 if (frame_pointer_needed)
14771 thumb_set_frame_pointer (offsets);
14773 /* If we are profiling, make sure no instructions are scheduled before
14774 the call to mcount. Similarly if the user has requested no
14775 scheduling in the prolog. Similarly if we want non-call exceptions
14776 using the EABI unwinder, to prevent faulting instructions from being
14777 swapped with a stack adjustment. */
14778 if (current_function_profile || !TARGET_SCHED_PROLOG
14779 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
14780 emit_insn (gen_blockage ());
14782 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
14783 if (live_regs_mask & 0xff)
14784 cfun->machine->lr_save_eliminated = 0;
14788 void
14789 thumb1_expand_epilogue (void)
14791 HOST_WIDE_INT amount;
14792 arm_stack_offsets *offsets;
14793 int regno;
14795 /* Naked functions don't have prologues. */
14796 if (IS_NAKED (arm_current_func_type ()))
14797 return;
14799 offsets = arm_get_frame_offsets ();
14800 amount = offsets->outgoing_args - offsets->saved_regs;
14802 if (frame_pointer_needed)
14804 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
14805 amount = offsets->locals_base - offsets->saved_regs;
14808 gcc_assert (amount >= 0);
14809 if (amount)
14811 if (amount < 512)
14812 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
14813 GEN_INT (amount)));
14814 else
14816 /* r3 is always free in the epilogue. */
14817 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
14819 emit_insn (gen_movsi (reg, GEN_INT (amount)));
14820 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
14824 /* Emit a USE (stack_pointer_rtx), so that
14825 the stack adjustment will not be deleted. */
14826 emit_insn (gen_prologue_use (stack_pointer_rtx));
14828 if (current_function_profile || !TARGET_SCHED_PROLOG)
14829 emit_insn (gen_blockage ());
14831 /* Emit a clobber for each insn that will be restored in the epilogue,
14832 so that flow2 will get register lifetimes correct. */
14833 for (regno = 0; regno < 13; regno++)
14834 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
14835 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
14837 if (! df_regs_ever_live_p (LR_REGNUM))
14838 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
14841 static void
14842 thumb1_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
14844 unsigned long live_regs_mask = 0;
14845 unsigned long l_mask;
14846 unsigned high_regs_pushed = 0;
14847 int cfa_offset = 0;
14848 int regno;
14850 if (IS_NAKED (arm_current_func_type ()))
14851 return;
14853 if (is_called_in_ARM_mode (current_function_decl))
14855 const char * name;
14857 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
14858 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
14859 == SYMBOL_REF);
14860 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
14862 /* Generate code sequence to switch us into Thumb mode. */
14863 /* The .code 32 directive has already been emitted by
14864 ASM_DECLARE_FUNCTION_NAME. */
14865 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
14866 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
14868 /* Generate a label, so that the debugger will notice the
14869 change in instruction sets. This label is also used by
14870 the assembler to bypass the ARM code when this function
14871 is called from a Thumb encoded function elsewhere in the
14872 same file. Hence the definition of STUB_NAME here must
14873 agree with the definition in gas/config/tc-arm.c. */
14875 #define STUB_NAME ".real_start_of"
14877 fprintf (f, "\t.code\t16\n");
14878 #ifdef ARM_PE
14879 if (arm_dllexport_name_p (name))
14880 name = arm_strip_name_encoding (name);
14881 #endif
14882 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
14883 fprintf (f, "\t.thumb_func\n");
14884 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
14887 if (current_function_pretend_args_size)
14889 /* Output unwind directive for the stack adjustment. */
14890 if (ARM_EABI_UNWIND_TABLES)
14891 fprintf (f, "\t.pad #%d\n",
14892 current_function_pretend_args_size);
14894 if (cfun->machine->uses_anonymous_args)
14896 int num_pushes;
14898 fprintf (f, "\tpush\t{");
14900 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
14902 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
14903 regno <= LAST_ARG_REGNUM;
14904 regno++)
14905 asm_fprintf (f, "%r%s", regno,
14906 regno == LAST_ARG_REGNUM ? "" : ", ");
14908 fprintf (f, "}\n");
14910 else
14911 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
14912 SP_REGNUM, SP_REGNUM,
14913 current_function_pretend_args_size);
14915 /* We don't need to record the stores for unwinding (would it
14916 help the debugger any if we did?), but record the change in
14917 the stack pointer. */
14918 if (dwarf2out_do_frame ())
14920 char *l = dwarf2out_cfi_label ();
14922 cfa_offset = cfa_offset + current_function_pretend_args_size;
14923 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
14927 /* Get the registers we are going to push. */
14928 live_regs_mask = thumb1_compute_save_reg_mask ();
14929 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
14930 l_mask = live_regs_mask & 0x40ff;
14931 /* Then count how many other high registers will need to be pushed. */
14932 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
14934 if (TARGET_BACKTRACE)
14936 unsigned offset;
14937 unsigned work_register;
14939 /* We have been asked to create a stack backtrace structure.
14940 The code looks like this:
14942 0 .align 2
14943 0 func:
14944 0 sub SP, #16 Reserve space for 4 registers.
14945 2 push {R7} Push low registers.
14946 4 add R7, SP, #20 Get the stack pointer before the push.
14947 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
14948 8 mov R7, PC Get hold of the start of this code plus 12.
14949 10 str R7, [SP, #16] Store it.
14950 12 mov R7, FP Get hold of the current frame pointer.
14951 14 str R7, [SP, #4] Store it.
14952 16 mov R7, LR Get hold of the current return address.
14953 18 str R7, [SP, #12] Store it.
14954 20 add R7, SP, #16 Point at the start of the backtrace structure.
14955 22 mov FP, R7 Put this value into the frame pointer. */
14957 work_register = thumb_find_work_register (live_regs_mask);
14959 if (ARM_EABI_UNWIND_TABLES)
14960 asm_fprintf (f, "\t.pad #16\n");
14962 asm_fprintf
14963 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
14964 SP_REGNUM, SP_REGNUM);
14966 if (dwarf2out_do_frame ())
14968 char *l = dwarf2out_cfi_label ();
14970 cfa_offset = cfa_offset + 16;
14971 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
14974 if (l_mask)
14976 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14977 offset = bit_count (l_mask) * UNITS_PER_WORD;
14979 else
14980 offset = 0;
14982 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14983 offset + 16 + current_function_pretend_args_size);
14985 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14986 offset + 4);
14988 /* Make sure that the instruction fetching the PC is in the right place
14989 to calculate "start of backtrace creation code + 12". */
14990 if (l_mask)
14992 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14993 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14994 offset + 12);
14995 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14996 ARM_HARD_FRAME_POINTER_REGNUM);
14997 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14998 offset);
15000 else
15002 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
15003 ARM_HARD_FRAME_POINTER_REGNUM);
15004 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
15005 offset);
15006 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
15007 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
15008 offset + 12);
15011 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
15012 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
15013 offset + 8);
15014 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
15015 offset + 12);
15016 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
15017 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
15019 /* Optimization: If we are not pushing any low registers but we are going
15020 to push some high registers then delay our first push. This will just
15021 be a push of LR and we can combine it with the push of the first high
15022 register. */
15023 else if ((l_mask & 0xff) != 0
15024 || (high_regs_pushed == 0 && l_mask))
15025 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
15027 if (high_regs_pushed)
15029 unsigned pushable_regs;
15030 unsigned next_hi_reg;
15032 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
15033 if (live_regs_mask & (1 << next_hi_reg))
15034 break;
15036 pushable_regs = l_mask & 0xff;
15038 if (pushable_regs == 0)
15039 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
15041 while (high_regs_pushed > 0)
15043 unsigned long real_regs_mask = 0;
15045 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
15047 if (pushable_regs & (1 << regno))
15049 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
15051 high_regs_pushed --;
15052 real_regs_mask |= (1 << next_hi_reg);
15054 if (high_regs_pushed)
15056 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
15057 next_hi_reg --)
15058 if (live_regs_mask & (1 << next_hi_reg))
15059 break;
15061 else
15063 pushable_regs &= ~((1 << regno) - 1);
15064 break;
15069 /* If we had to find a work register and we have not yet
15070 saved the LR then add it to the list of regs to push. */
15071 if (l_mask == (1 << LR_REGNUM))
15073 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
15074 1, &cfa_offset,
15075 real_regs_mask | (1 << LR_REGNUM));
15076 l_mask = 0;
15078 else
15079 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
15084 /* Handle the case of a double word load into a low register from
15085 a computed memory address. The computed address may involve a
15086 register which is overwritten by the load. */
15087 const char *
15088 thumb_load_double_from_address (rtx *operands)
15090 rtx addr;
15091 rtx base;
15092 rtx offset;
15093 rtx arg1;
15094 rtx arg2;
15096 gcc_assert (GET_CODE (operands[0]) == REG);
15097 gcc_assert (GET_CODE (operands[1]) == MEM);
15099 /* Get the memory address. */
15100 addr = XEXP (operands[1], 0);
15102 /* Work out how the memory address is computed. */
15103 switch (GET_CODE (addr))
15105 case REG:
15106 operands[2] = adjust_address (operands[1], SImode, 4);
15108 if (REGNO (operands[0]) == REGNO (addr))
15110 output_asm_insn ("ldr\t%H0, %2", operands);
15111 output_asm_insn ("ldr\t%0, %1", operands);
15113 else
15115 output_asm_insn ("ldr\t%0, %1", operands);
15116 output_asm_insn ("ldr\t%H0, %2", operands);
15118 break;
15120 case CONST:
15121 /* Compute <address> + 4 for the high order load. */
15122 operands[2] = adjust_address (operands[1], SImode, 4);
15124 output_asm_insn ("ldr\t%0, %1", operands);
15125 output_asm_insn ("ldr\t%H0, %2", operands);
15126 break;
15128 case PLUS:
15129 arg1 = XEXP (addr, 0);
15130 arg2 = XEXP (addr, 1);
15132 if (CONSTANT_P (arg1))
15133 base = arg2, offset = arg1;
15134 else
15135 base = arg1, offset = arg2;
15137 gcc_assert (GET_CODE (base) == REG);
15139 /* Catch the case of <address> = <reg> + <reg> */
15140 if (GET_CODE (offset) == REG)
15142 int reg_offset = REGNO (offset);
15143 int reg_base = REGNO (base);
15144 int reg_dest = REGNO (operands[0]);
15146 /* Add the base and offset registers together into the
15147 higher destination register. */
15148 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
15149 reg_dest + 1, reg_base, reg_offset);
15151 /* Load the lower destination register from the address in
15152 the higher destination register. */
15153 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
15154 reg_dest, reg_dest + 1);
15156 /* Load the higher destination register from its own address
15157 plus 4. */
15158 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
15159 reg_dest + 1, reg_dest + 1);
15161 else
15163 /* Compute <address> + 4 for the high order load. */
15164 operands[2] = adjust_address (operands[1], SImode, 4);
15166 /* If the computed address is held in the low order register
15167 then load the high order register first, otherwise always
15168 load the low order register first. */
15169 if (REGNO (operands[0]) == REGNO (base))
15171 output_asm_insn ("ldr\t%H0, %2", operands);
15172 output_asm_insn ("ldr\t%0, %1", operands);
15174 else
15176 output_asm_insn ("ldr\t%0, %1", operands);
15177 output_asm_insn ("ldr\t%H0, %2", operands);
15180 break;
15182 case LABEL_REF:
15183 /* With no registers to worry about we can just load the value
15184 directly. */
15185 operands[2] = adjust_address (operands[1], SImode, 4);
15187 output_asm_insn ("ldr\t%H0, %2", operands);
15188 output_asm_insn ("ldr\t%0, %1", operands);
15189 break;
15191 default:
15192 gcc_unreachable ();
15195 return "";
15198 const char *
15199 thumb_output_move_mem_multiple (int n, rtx *operands)
15201 rtx tmp;
15203 switch (n)
15205 case 2:
15206 if (REGNO (operands[4]) > REGNO (operands[5]))
15208 tmp = operands[4];
15209 operands[4] = operands[5];
15210 operands[5] = tmp;
15212 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
15213 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
15214 break;
15216 case 3:
15217 if (REGNO (operands[4]) > REGNO (operands[5]))
15219 tmp = operands[4];
15220 operands[4] = operands[5];
15221 operands[5] = tmp;
15223 if (REGNO (operands[5]) > REGNO (operands[6]))
15225 tmp = operands[5];
15226 operands[5] = operands[6];
15227 operands[6] = tmp;
15229 if (REGNO (operands[4]) > REGNO (operands[5]))
15231 tmp = operands[4];
15232 operands[4] = operands[5];
15233 operands[5] = tmp;
15236 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
15237 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
15238 break;
15240 default:
15241 gcc_unreachable ();
15244 return "";
15247 /* Output a call-via instruction for thumb state. */
15248 const char *
15249 thumb_call_via_reg (rtx reg)
15251 int regno = REGNO (reg);
15252 rtx *labelp;
15254 gcc_assert (regno < LR_REGNUM);
15256 /* If we are in the normal text section we can use a single instance
15257 per compilation unit. If we are doing function sections, then we need
15258 an entry per section, since we can't rely on reachability. */
15259 if (in_section == text_section)
15261 thumb_call_reg_needed = 1;
15263 if (thumb_call_via_label[regno] == NULL)
15264 thumb_call_via_label[regno] = gen_label_rtx ();
15265 labelp = thumb_call_via_label + regno;
15267 else
15269 if (cfun->machine->call_via[regno] == NULL)
15270 cfun->machine->call_via[regno] = gen_label_rtx ();
15271 labelp = cfun->machine->call_via + regno;
15274 output_asm_insn ("bl\t%a0", labelp);
15275 return "";
15278 /* Routines for generating rtl. */
15279 void
15280 thumb_expand_movmemqi (rtx *operands)
15282 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
15283 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
15284 HOST_WIDE_INT len = INTVAL (operands[2]);
15285 HOST_WIDE_INT offset = 0;
15287 while (len >= 12)
15289 emit_insn (gen_movmem12b (out, in, out, in));
15290 len -= 12;
15293 if (len >= 8)
15295 emit_insn (gen_movmem8b (out, in, out, in));
15296 len -= 8;
15299 if (len >= 4)
15301 rtx reg = gen_reg_rtx (SImode);
15302 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
15303 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
15304 len -= 4;
15305 offset += 4;
15308 if (len >= 2)
15310 rtx reg = gen_reg_rtx (HImode);
15311 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
15312 plus_constant (in, offset))));
15313 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
15314 reg));
15315 len -= 2;
15316 offset += 2;
15319 if (len)
15321 rtx reg = gen_reg_rtx (QImode);
15322 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
15323 plus_constant (in, offset))));
15324 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
15325 reg));
15329 void
15330 thumb_reload_out_hi (rtx *operands)
15332 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
15335 /* Handle reading a half-word from memory during reload. */
15336 void
15337 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
15339 gcc_unreachable ();
15342 /* Return the length of a function name prefix
15343 that starts with the character 'c'. */
15344 static int
15345 arm_get_strip_length (int c)
15347 switch (c)
15349 ARM_NAME_ENCODING_LENGTHS
15350 default: return 0;
15354 /* Return a pointer to a function's name with any
15355 and all prefix encodings stripped from it. */
15356 const char *
15357 arm_strip_name_encoding (const char *name)
15359 int skip;
15361 while ((skip = arm_get_strip_length (* name)))
15362 name += skip;
15364 return name;
15367 /* If there is a '*' anywhere in the name's prefix, then
15368 emit the stripped name verbatim, otherwise prepend an
15369 underscore if leading underscores are being used. */
15370 void
15371 arm_asm_output_labelref (FILE *stream, const char *name)
15373 int skip;
15374 int verbatim = 0;
15376 while ((skip = arm_get_strip_length (* name)))
15378 verbatim |= (*name == '*');
15379 name += skip;
15382 if (verbatim)
15383 fputs (name, stream);
15384 else
15385 asm_fprintf (stream, "%U%s", name);
15388 static void
15389 arm_file_start (void)
15391 int val;
15393 if (TARGET_UNIFIED_ASM)
15394 asm_fprintf (asm_out_file, "\t.syntax unified\n");
15396 if (TARGET_BPABI)
15398 const char *fpu_name;
15399 if (arm_select[0].string)
15400 asm_fprintf (asm_out_file, "\t.cpu %s\n", arm_select[0].string);
15401 else if (arm_select[1].string)
15402 asm_fprintf (asm_out_file, "\t.arch %s\n", arm_select[1].string);
15403 else
15404 asm_fprintf (asm_out_file, "\t.cpu %s\n",
15405 all_cores[arm_default_cpu].name);
15407 if (TARGET_SOFT_FLOAT)
15409 if (TARGET_VFP)
15410 fpu_name = "softvfp";
15411 else
15412 fpu_name = "softfpa";
15414 else
15416 int set_float_abi_attributes = 0;
15417 switch (arm_fpu_arch)
15419 case FPUTYPE_FPA:
15420 fpu_name = "fpa";
15421 break;
15422 case FPUTYPE_FPA_EMU2:
15423 fpu_name = "fpe2";
15424 break;
15425 case FPUTYPE_FPA_EMU3:
15426 fpu_name = "fpe3";
15427 break;
15428 case FPUTYPE_MAVERICK:
15429 fpu_name = "maverick";
15430 break;
15431 case FPUTYPE_VFP:
15432 fpu_name = "vfp";
15433 set_float_abi_attributes = 1;
15434 break;
15435 case FPUTYPE_VFP3:
15436 fpu_name = "vfp3";
15437 set_float_abi_attributes = 1;
15438 break;
15439 default:
15440 abort();
15442 if (set_float_abi_attributes)
15444 if (TARGET_HARD_FLOAT)
15445 asm_fprintf (asm_out_file, "\t.eabi_attribute 27, 3\n");
15446 if (TARGET_HARD_FLOAT_ABI)
15447 asm_fprintf (asm_out_file, "\t.eabi_attribute 28, 1\n");
15450 asm_fprintf (asm_out_file, "\t.fpu %s\n", fpu_name);
15452 /* Some of these attributes only apply when the corresponding features
15453 are used. However we don't have any easy way of figuring this out.
15454 Conservatively record the setting that would have been used. */
15456 /* Tag_ABI_PCS_wchar_t. */
15457 asm_fprintf (asm_out_file, "\t.eabi_attribute 18, %d\n",
15458 (int)WCHAR_TYPE_SIZE / BITS_PER_UNIT);
15460 /* Tag_ABI_FP_rounding. */
15461 if (flag_rounding_math)
15462 asm_fprintf (asm_out_file, "\t.eabi_attribute 19, 1\n");
15463 if (!flag_unsafe_math_optimizations)
15465 /* Tag_ABI_FP_denomal. */
15466 asm_fprintf (asm_out_file, "\t.eabi_attribute 20, 1\n");
15467 /* Tag_ABI_FP_exceptions. */
15468 asm_fprintf (asm_out_file, "\t.eabi_attribute 21, 1\n");
15470 /* Tag_ABI_FP_user_exceptions. */
15471 if (flag_signaling_nans)
15472 asm_fprintf (asm_out_file, "\t.eabi_attribute 22, 1\n");
15473 /* Tag_ABI_FP_number_model. */
15474 asm_fprintf (asm_out_file, "\t.eabi_attribute 23, %d\n",
15475 flag_finite_math_only ? 1 : 3);
15477 /* Tag_ABI_align8_needed. */
15478 asm_fprintf (asm_out_file, "\t.eabi_attribute 24, 1\n");
15479 /* Tag_ABI_align8_preserved. */
15480 asm_fprintf (asm_out_file, "\t.eabi_attribute 25, 1\n");
15481 /* Tag_ABI_enum_size. */
15482 asm_fprintf (asm_out_file, "\t.eabi_attribute 26, %d\n",
15483 flag_short_enums ? 1 : 2);
15485 /* Tag_ABI_optimization_goals. */
15486 if (optimize_size)
15487 val = 4;
15488 else if (optimize >= 2)
15489 val = 2;
15490 else if (optimize)
15491 val = 1;
15492 else
15493 val = 6;
15494 asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
15496 default_file_start();
15499 static void
15500 arm_file_end (void)
15502 int regno;
15504 if (NEED_INDICATE_EXEC_STACK)
15505 /* Add .note.GNU-stack. */
15506 file_end_indicate_exec_stack ();
15508 if (! thumb_call_reg_needed)
15509 return;
15511 switch_to_section (text_section);
15512 asm_fprintf (asm_out_file, "\t.code 16\n");
15513 ASM_OUTPUT_ALIGN (asm_out_file, 1);
15515 for (regno = 0; regno < LR_REGNUM; regno++)
15517 rtx label = thumb_call_via_label[regno];
15519 if (label != 0)
15521 targetm.asm_out.internal_label (asm_out_file, "L",
15522 CODE_LABEL_NUMBER (label));
15523 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
15528 rtx aof_pic_label;
15530 #ifdef AOF_ASSEMBLER
15531 /* Special functions only needed when producing AOF syntax assembler. */
15533 struct pic_chain
15535 struct pic_chain * next;
15536 const char * symname;
15539 static struct pic_chain * aof_pic_chain = NULL;
15542 aof_pic_entry (rtx x)
15544 struct pic_chain ** chainp;
15545 int offset;
15547 if (aof_pic_label == NULL_RTX)
15549 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
15552 for (offset = 0, chainp = &aof_pic_chain; *chainp;
15553 offset += 4, chainp = &(*chainp)->next)
15554 if ((*chainp)->symname == XSTR (x, 0))
15555 return plus_constant (aof_pic_label, offset);
15557 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
15558 (*chainp)->next = NULL;
15559 (*chainp)->symname = XSTR (x, 0);
15560 return plus_constant (aof_pic_label, offset);
15563 void
15564 aof_dump_pic_table (FILE *f)
15566 struct pic_chain * chain;
15568 if (aof_pic_chain == NULL)
15569 return;
15571 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
15572 PIC_OFFSET_TABLE_REGNUM,
15573 PIC_OFFSET_TABLE_REGNUM);
15574 fputs ("|x$adcons|\n", f);
15576 for (chain = aof_pic_chain; chain; chain = chain->next)
15578 fputs ("\tDCD\t", f);
15579 assemble_name (f, chain->symname);
15580 fputs ("\n", f);
15584 int arm_text_section_count = 1;
15586 /* A get_unnamed_section callback for switching to the text section. */
15588 static void
15589 aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
15591 fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
15592 arm_text_section_count++);
15593 if (flag_pic)
15594 fprintf (asm_out_file, ", PIC, REENTRANT");
15595 fprintf (asm_out_file, "\n");
15598 static int arm_data_section_count = 1;
15600 /* A get_unnamed_section callback for switching to the data section. */
15602 static void
15603 aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
15605 fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
15606 arm_data_section_count++);
15609 /* Implement TARGET_ASM_INIT_SECTIONS.
15611 AOF Assembler syntax is a nightmare when it comes to areas, since once
15612 we change from one area to another, we can't go back again. Instead,
15613 we must create a new area with the same attributes and add the new output
15614 to that. Unfortunately, there is nothing we can do here to guarantee that
15615 two areas with the same attributes will be linked adjacently in the
15616 resulting executable, so we have to be careful not to do pc-relative
15617 addressing across such boundaries. */
15619 static void
15620 aof_asm_init_sections (void)
15622 text_section = get_unnamed_section (SECTION_CODE,
15623 aof_output_text_section_asm_op, NULL);
15624 data_section = get_unnamed_section (SECTION_WRITE,
15625 aof_output_data_section_asm_op, NULL);
15626 readonly_data_section = text_section;
15629 void
15630 zero_init_section (void)
15632 static int zero_init_count = 1;
15634 fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
15635 in_section = NULL;
15638 /* The AOF assembler is religiously strict about declarations of
15639 imported and exported symbols, so that it is impossible to declare
15640 a function as imported near the beginning of the file, and then to
15641 export it later on. It is, however, possible to delay the decision
15642 until all the functions in the file have been compiled. To get
15643 around this, we maintain a list of the imports and exports, and
15644 delete from it any that are subsequently defined. At the end of
15645 compilation we spit the remainder of the list out before the END
15646 directive. */
15648 struct import
15650 struct import * next;
15651 const char * name;
15654 static struct import * imports_list = NULL;
15656 void
15657 aof_add_import (const char *name)
15659 struct import * new;
15661 for (new = imports_list; new; new = new->next)
15662 if (new->name == name)
15663 return;
15665 new = (struct import *) xmalloc (sizeof (struct import));
15666 new->next = imports_list;
15667 imports_list = new;
15668 new->name = name;
15671 void
15672 aof_delete_import (const char *name)
15674 struct import ** old;
15676 for (old = &imports_list; *old; old = & (*old)->next)
15678 if ((*old)->name == name)
15680 *old = (*old)->next;
15681 return;
15686 int arm_main_function = 0;
15688 static void
15689 aof_dump_imports (FILE *f)
15691 /* The AOF assembler needs this to cause the startup code to be extracted
15692 from the library. Brining in __main causes the whole thing to work
15693 automagically. */
15694 if (arm_main_function)
15696 switch_to_section (text_section);
15697 fputs ("\tIMPORT __main\n", f);
15698 fputs ("\tDCD __main\n", f);
15701 /* Now dump the remaining imports. */
15702 while (imports_list)
15704 fprintf (f, "\tIMPORT\t");
15705 assemble_name (f, imports_list->name);
15706 fputc ('\n', f);
15707 imports_list = imports_list->next;
15711 static void
15712 aof_globalize_label (FILE *stream, const char *name)
15714 default_globalize_label (stream, name);
15715 if (! strcmp (name, "main"))
15716 arm_main_function = 1;
15719 static void
15720 aof_file_start (void)
15722 fputs ("__r0\tRN\t0\n", asm_out_file);
15723 fputs ("__a1\tRN\t0\n", asm_out_file);
15724 fputs ("__a2\tRN\t1\n", asm_out_file);
15725 fputs ("__a3\tRN\t2\n", asm_out_file);
15726 fputs ("__a4\tRN\t3\n", asm_out_file);
15727 fputs ("__v1\tRN\t4\n", asm_out_file);
15728 fputs ("__v2\tRN\t5\n", asm_out_file);
15729 fputs ("__v3\tRN\t6\n", asm_out_file);
15730 fputs ("__v4\tRN\t7\n", asm_out_file);
15731 fputs ("__v5\tRN\t8\n", asm_out_file);
15732 fputs ("__v6\tRN\t9\n", asm_out_file);
15733 fputs ("__sl\tRN\t10\n", asm_out_file);
15734 fputs ("__fp\tRN\t11\n", asm_out_file);
15735 fputs ("__ip\tRN\t12\n", asm_out_file);
15736 fputs ("__sp\tRN\t13\n", asm_out_file);
15737 fputs ("__lr\tRN\t14\n", asm_out_file);
15738 fputs ("__pc\tRN\t15\n", asm_out_file);
15739 fputs ("__f0\tFN\t0\n", asm_out_file);
15740 fputs ("__f1\tFN\t1\n", asm_out_file);
15741 fputs ("__f2\tFN\t2\n", asm_out_file);
15742 fputs ("__f3\tFN\t3\n", asm_out_file);
15743 fputs ("__f4\tFN\t4\n", asm_out_file);
15744 fputs ("__f5\tFN\t5\n", asm_out_file);
15745 fputs ("__f6\tFN\t6\n", asm_out_file);
15746 fputs ("__f7\tFN\t7\n", asm_out_file);
15747 switch_to_section (text_section);
15750 static void
15751 aof_file_end (void)
15753 if (flag_pic)
15754 aof_dump_pic_table (asm_out_file);
15755 arm_file_end ();
15756 aof_dump_imports (asm_out_file);
15757 fputs ("\tEND\n", asm_out_file);
15759 #endif /* AOF_ASSEMBLER */
15761 #ifndef ARM_PE
15762 /* Symbols in the text segment can be accessed without indirecting via the
15763 constant pool; it may take an extra binary operation, but this is still
15764 faster than indirecting via memory. Don't do this when not optimizing,
15765 since we won't be calculating al of the offsets necessary to do this
15766 simplification. */
15768 static void
15769 arm_encode_section_info (tree decl, rtx rtl, int first)
15771 /* This doesn't work with AOF syntax, since the string table may be in
15772 a different AREA. */
15773 #ifndef AOF_ASSEMBLER
15774 if (optimize > 0 && TREE_CONSTANT (decl))
15775 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
15776 #endif
15778 default_encode_section_info (decl, rtl, first);
15780 #endif /* !ARM_PE */
15782 static void
15783 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
15785 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
15786 && !strcmp (prefix, "L"))
15788 arm_ccfsm_state = 0;
15789 arm_target_insn = NULL;
15791 default_internal_label (stream, prefix, labelno);
15794 /* Output code to add DELTA to the first argument, and then jump
15795 to FUNCTION. Used for C++ multiple inheritance. */
15796 static void
15797 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
15798 HOST_WIDE_INT delta,
15799 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
15800 tree function)
15802 static int thunk_label = 0;
15803 char label[256];
15804 char labelpc[256];
15805 int mi_delta = delta;
15806 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
15807 int shift = 0;
15808 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
15809 ? 1 : 0);
15810 if (mi_delta < 0)
15811 mi_delta = - mi_delta;
15812 /* When generating 16-bit thumb code, thunks are entered in arm mode. */
15813 if (TARGET_THUMB1)
15815 int labelno = thunk_label++;
15816 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
15817 fputs ("\tldr\tr12, ", file);
15818 assemble_name (file, label);
15819 fputc ('\n', file);
15820 if (flag_pic)
15822 /* If we are generating PIC, the ldr instruction below loads
15823 "(target - 7) - .LTHUNKPCn" into r12. The pc reads as
15824 the address of the add + 8, so we have:
15826 r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
15827 = target + 1.
15829 Note that we have "+ 1" because some versions of GNU ld
15830 don't set the low bit of the result for R_ARM_REL32
15831 relocations against thumb function symbols. */
15832 ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
15833 assemble_name (file, labelpc);
15834 fputs (":\n", file);
15835 fputs ("\tadd\tr12, pc, r12\n", file);
15838 /* TODO: Use movw/movt for large constants when available. */
15839 while (mi_delta != 0)
15841 if ((mi_delta & (3 << shift)) == 0)
15842 shift += 2;
15843 else
15845 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
15846 mi_op, this_regno, this_regno,
15847 mi_delta & (0xff << shift));
15848 mi_delta &= ~(0xff << shift);
15849 shift += 8;
15852 if (TARGET_THUMB1)
15854 fprintf (file, "\tbx\tr12\n");
15855 ASM_OUTPUT_ALIGN (file, 2);
15856 assemble_name (file, label);
15857 fputs (":\n", file);
15858 if (flag_pic)
15860 /* Output ".word .LTHUNKn-7-.LTHUNKPCn". */
15861 rtx tem = XEXP (DECL_RTL (function), 0);
15862 tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (-7));
15863 tem = gen_rtx_MINUS (GET_MODE (tem),
15864 tem,
15865 gen_rtx_SYMBOL_REF (Pmode,
15866 ggc_strdup (labelpc)));
15867 assemble_integer (tem, 4, BITS_PER_WORD, 1);
15869 else
15870 /* Output ".word .LTHUNKn". */
15871 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
15873 else
15875 fputs ("\tb\t", file);
15876 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
15877 if (NEED_PLT_RELOC)
15878 fputs ("(PLT)", file);
15879 fputc ('\n', file);
15884 arm_emit_vector_const (FILE *file, rtx x)
15886 int i;
15887 const char * pattern;
15889 gcc_assert (GET_CODE (x) == CONST_VECTOR);
15891 switch (GET_MODE (x))
15893 case V2SImode: pattern = "%08x"; break;
15894 case V4HImode: pattern = "%04x"; break;
15895 case V8QImode: pattern = "%02x"; break;
15896 default: gcc_unreachable ();
15899 fprintf (file, "0x");
15900 for (i = CONST_VECTOR_NUNITS (x); i--;)
15902 rtx element;
15904 element = CONST_VECTOR_ELT (x, i);
15905 fprintf (file, pattern, INTVAL (element));
15908 return 1;
15911 const char *
15912 arm_output_load_gr (rtx *operands)
15914 rtx reg;
15915 rtx offset;
15916 rtx wcgr;
15917 rtx sum;
15919 if (GET_CODE (operands [1]) != MEM
15920 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
15921 || GET_CODE (reg = XEXP (sum, 0)) != REG
15922 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
15923 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
15924 return "wldrw%?\t%0, %1";
15926 /* Fix up an out-of-range load of a GR register. */
15927 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
15928 wcgr = operands[0];
15929 operands[0] = reg;
15930 output_asm_insn ("ldr%?\t%0, %1", operands);
15932 operands[0] = wcgr;
15933 operands[1] = reg;
15934 output_asm_insn ("tmcr%?\t%0, %1", operands);
15935 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
15937 return "";
15940 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
15942 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
15943 named arg and all anonymous args onto the stack.
15944 XXX I know the prologue shouldn't be pushing registers, but it is faster
15945 that way. */
15947 static void
15948 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
15949 enum machine_mode mode ATTRIBUTE_UNUSED,
15950 tree type ATTRIBUTE_UNUSED,
15951 int *pretend_size,
15952 int second_time ATTRIBUTE_UNUSED)
15954 cfun->machine->uses_anonymous_args = 1;
15955 if (cum->nregs < NUM_ARG_REGS)
15956 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
15959 /* Return nonzero if the CONSUMER instruction (a store) does not need
15960 PRODUCER's value to calculate the address. */
15963 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
15965 rtx value = PATTERN (producer);
15966 rtx addr = PATTERN (consumer);
15968 if (GET_CODE (value) == COND_EXEC)
15969 value = COND_EXEC_CODE (value);
15970 if (GET_CODE (value) == PARALLEL)
15971 value = XVECEXP (value, 0, 0);
15972 value = XEXP (value, 0);
15973 if (GET_CODE (addr) == COND_EXEC)
15974 addr = COND_EXEC_CODE (addr);
15975 if (GET_CODE (addr) == PARALLEL)
15976 addr = XVECEXP (addr, 0, 0);
15977 addr = XEXP (addr, 0);
15979 return !reg_overlap_mentioned_p (value, addr);
15982 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
15983 have an early register shift value or amount dependency on the
15984 result of PRODUCER. */
15987 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
15989 rtx value = PATTERN (producer);
15990 rtx op = PATTERN (consumer);
15991 rtx early_op;
15993 if (GET_CODE (value) == COND_EXEC)
15994 value = COND_EXEC_CODE (value);
15995 if (GET_CODE (value) == PARALLEL)
15996 value = XVECEXP (value, 0, 0);
15997 value = XEXP (value, 0);
15998 if (GET_CODE (op) == COND_EXEC)
15999 op = COND_EXEC_CODE (op);
16000 if (GET_CODE (op) == PARALLEL)
16001 op = XVECEXP (op, 0, 0);
16002 op = XEXP (op, 1);
16004 early_op = XEXP (op, 0);
16005 /* This is either an actual independent shift, or a shift applied to
16006 the first operand of another operation. We want the whole shift
16007 operation. */
16008 if (GET_CODE (early_op) == REG)
16009 early_op = op;
16011 return !reg_overlap_mentioned_p (value, early_op);
16014 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
16015 have an early register shift value dependency on the result of
16016 PRODUCER. */
16019 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
16021 rtx value = PATTERN (producer);
16022 rtx op = PATTERN (consumer);
16023 rtx early_op;
16025 if (GET_CODE (value) == COND_EXEC)
16026 value = COND_EXEC_CODE (value);
16027 if (GET_CODE (value) == PARALLEL)
16028 value = XVECEXP (value, 0, 0);
16029 value = XEXP (value, 0);
16030 if (GET_CODE (op) == COND_EXEC)
16031 op = COND_EXEC_CODE (op);
16032 if (GET_CODE (op) == PARALLEL)
16033 op = XVECEXP (op, 0, 0);
16034 op = XEXP (op, 1);
16036 early_op = XEXP (op, 0);
16038 /* This is either an actual independent shift, or a shift applied to
16039 the first operand of another operation. We want the value being
16040 shifted, in either case. */
16041 if (GET_CODE (early_op) != REG)
16042 early_op = XEXP (early_op, 0);
16044 return !reg_overlap_mentioned_p (value, early_op);
16047 /* Return nonzero if the CONSUMER (a mul or mac op) does not
16048 have an early register mult dependency on the result of
16049 PRODUCER. */
16052 arm_no_early_mul_dep (rtx producer, rtx consumer)
16054 rtx value = PATTERN (producer);
16055 rtx op = PATTERN (consumer);
16057 if (GET_CODE (value) == COND_EXEC)
16058 value = COND_EXEC_CODE (value);
16059 if (GET_CODE (value) == PARALLEL)
16060 value = XVECEXP (value, 0, 0);
16061 value = XEXP (value, 0);
16062 if (GET_CODE (op) == COND_EXEC)
16063 op = COND_EXEC_CODE (op);
16064 if (GET_CODE (op) == PARALLEL)
16065 op = XVECEXP (op, 0, 0);
16066 op = XEXP (op, 1);
16068 return (GET_CODE (op) == PLUS
16069 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
16073 /* We can't rely on the caller doing the proper promotion when
16074 using APCS or ATPCS. */
16076 static bool
16077 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
16079 return !TARGET_AAPCS_BASED;
16083 /* AAPCS based ABIs use short enums by default. */
16085 static bool
16086 arm_default_short_enums (void)
16088 return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
16092 /* AAPCS requires that anonymous bitfields affect structure alignment. */
16094 static bool
16095 arm_align_anon_bitfield (void)
16097 return TARGET_AAPCS_BASED;
16101 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
16103 static tree
16104 arm_cxx_guard_type (void)
16106 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
16110 /* The EABI says test the least significant bit of a guard variable. */
16112 static bool
16113 arm_cxx_guard_mask_bit (void)
16115 return TARGET_AAPCS_BASED;
16119 /* The EABI specifies that all array cookies are 8 bytes long. */
16121 static tree
16122 arm_get_cookie_size (tree type)
16124 tree size;
16126 if (!TARGET_AAPCS_BASED)
16127 return default_cxx_get_cookie_size (type);
16129 size = build_int_cst (sizetype, 8);
16130 return size;
16134 /* The EABI says that array cookies should also contain the element size. */
16136 static bool
16137 arm_cookie_has_size (void)
16139 return TARGET_AAPCS_BASED;
16143 /* The EABI says constructors and destructors should return a pointer to
16144 the object constructed/destroyed. */
16146 static bool
16147 arm_cxx_cdtor_returns_this (void)
16149 return TARGET_AAPCS_BASED;
16152 /* The EABI says that an inline function may never be the key
16153 method. */
16155 static bool
16156 arm_cxx_key_method_may_be_inline (void)
16158 return !TARGET_AAPCS_BASED;
16161 static void
16162 arm_cxx_determine_class_data_visibility (tree decl)
16164 if (!TARGET_AAPCS_BASED)
16165 return;
16167 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
16168 is exported. However, on systems without dynamic vague linkage,
16169 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
16170 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
16171 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
16172 else
16173 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
16174 DECL_VISIBILITY_SPECIFIED (decl) = 1;
16177 static bool
16178 arm_cxx_class_data_always_comdat (void)
16180 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
16181 vague linkage if the class has no key function. */
16182 return !TARGET_AAPCS_BASED;
16186 /* The EABI says __aeabi_atexit should be used to register static
16187 destructors. */
16189 static bool
16190 arm_cxx_use_aeabi_atexit (void)
16192 return TARGET_AAPCS_BASED;
16196 void
16197 arm_set_return_address (rtx source, rtx scratch)
16199 arm_stack_offsets *offsets;
16200 HOST_WIDE_INT delta;
16201 rtx addr;
16202 unsigned long saved_regs;
16204 saved_regs = arm_compute_save_reg_mask ();
16206 if ((saved_regs & (1 << LR_REGNUM)) == 0)
16207 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
16208 else
16210 if (frame_pointer_needed)
16211 addr = plus_constant(hard_frame_pointer_rtx, -4);
16212 else
16214 /* LR will be the first saved register. */
16215 offsets = arm_get_frame_offsets ();
16216 delta = offsets->outgoing_args - (offsets->frame + 4);
16219 if (delta >= 4096)
16221 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
16222 GEN_INT (delta & ~4095)));
16223 addr = scratch;
16224 delta &= 4095;
16226 else
16227 addr = stack_pointer_rtx;
16229 addr = plus_constant (addr, delta);
16231 emit_move_insn (gen_frame_mem (Pmode, addr), source);
16236 void
16237 thumb_set_return_address (rtx source, rtx scratch)
16239 arm_stack_offsets *offsets;
16240 HOST_WIDE_INT delta;
16241 HOST_WIDE_INT limit;
16242 int reg;
16243 rtx addr;
16244 unsigned long mask;
16246 emit_insn (gen_rtx_USE (VOIDmode, source));
16248 mask = thumb1_compute_save_reg_mask ();
16249 if (mask & (1 << LR_REGNUM))
16251 offsets = arm_get_frame_offsets ();
16253 limit = 1024;
16254 /* Find the saved regs. */
16255 if (frame_pointer_needed)
16257 delta = offsets->soft_frame - offsets->saved_args;
16258 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
16259 if (TARGET_THUMB1)
16260 limit = 128;
16262 else
16264 delta = offsets->outgoing_args - offsets->saved_args;
16265 reg = SP_REGNUM;
16267 /* Allow for the stack frame. */
16268 if (TARGET_THUMB1 && TARGET_BACKTRACE)
16269 delta -= 16;
16270 /* The link register is always the first saved register. */
16271 delta -= 4;
16273 /* Construct the address. */
16274 addr = gen_rtx_REG (SImode, reg);
16275 if (delta > limit)
16277 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
16278 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
16279 addr = scratch;
16281 else
16282 addr = plus_constant (addr, delta);
16284 emit_move_insn (gen_frame_mem (Pmode, addr), source);
16286 else
16287 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
16290 /* Implements target hook vector_mode_supported_p. */
16291 bool
16292 arm_vector_mode_supported_p (enum machine_mode mode)
16294 if ((mode == V2SImode)
16295 || (mode == V4HImode)
16296 || (mode == V8QImode))
16297 return true;
16299 return false;
16302 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
16303 ARM insns and therefore guarantee that the shift count is modulo 256.
16304 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
16305 guarantee no particular behavior for out-of-range counts. */
16307 static unsigned HOST_WIDE_INT
16308 arm_shift_truncation_mask (enum machine_mode mode)
16310 return mode == SImode ? 255 : 0;
16314 /* Map internal gcc register numbers to DWARF2 register numbers. */
16316 unsigned int
16317 arm_dbx_register_number (unsigned int regno)
16319 if (regno < 16)
16320 return regno;
16322 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
16323 compatibility. The EABI defines them as registers 96-103. */
16324 if (IS_FPA_REGNUM (regno))
16325 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
16327 /* FIXME: VFPv3 register numbering. */
16328 if (IS_VFP_REGNUM (regno))
16329 return 64 + regno - FIRST_VFP_REGNUM;
16331 if (IS_IWMMXT_GR_REGNUM (regno))
16332 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
16334 if (IS_IWMMXT_REGNUM (regno))
16335 return 112 + regno - FIRST_IWMMXT_REGNUM;
16337 gcc_unreachable ();
16341 #ifdef TARGET_UNWIND_INFO
16342 /* Emit unwind directives for a store-multiple instruction or stack pointer
16343 push during alignment.
16344 These should only ever be generated by the function prologue code, so
16345 expect them to have a particular form. */
16347 static void
16348 arm_unwind_emit_sequence (FILE * asm_out_file, rtx p)
16350 int i;
16351 HOST_WIDE_INT offset;
16352 HOST_WIDE_INT nregs;
16353 int reg_size;
16354 unsigned reg;
16355 unsigned lastreg;
16356 rtx e;
16358 e = XVECEXP (p, 0, 0);
16359 if (GET_CODE (e) != SET)
16360 abort ();
16362 /* First insn will adjust the stack pointer. */
16363 if (GET_CODE (e) != SET
16364 || GET_CODE (XEXP (e, 0)) != REG
16365 || REGNO (XEXP (e, 0)) != SP_REGNUM
16366 || GET_CODE (XEXP (e, 1)) != PLUS)
16367 abort ();
16369 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
16370 nregs = XVECLEN (p, 0) - 1;
16372 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
16373 if (reg < 16)
16375 /* The function prologue may also push pc, but not annotate it as it is
16376 never restored. We turn this into a stack pointer adjustment. */
16377 if (nregs * 4 == offset - 4)
16379 fprintf (asm_out_file, "\t.pad #4\n");
16380 offset -= 4;
16382 reg_size = 4;
16383 fprintf (asm_out_file, "\t.save {");
16385 else if (IS_VFP_REGNUM (reg))
16387 reg_size = 8;
16388 fprintf (asm_out_file, "\t.vsave {");
16390 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
16392 /* FPA registers are done differently. */
16393 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
16394 return;
16396 else
16397 /* Unknown register type. */
16398 abort ();
16400 /* If the stack increment doesn't match the size of the saved registers,
16401 something has gone horribly wrong. */
16402 if (offset != nregs * reg_size)
16403 abort ();
16405 offset = 0;
16406 lastreg = 0;
16407 /* The remaining insns will describe the stores. */
16408 for (i = 1; i <= nregs; i++)
16410 /* Expect (set (mem <addr>) (reg)).
16411 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
16412 e = XVECEXP (p, 0, i);
16413 if (GET_CODE (e) != SET
16414 || GET_CODE (XEXP (e, 0)) != MEM
16415 || GET_CODE (XEXP (e, 1)) != REG)
16416 abort ();
16418 reg = REGNO (XEXP (e, 1));
16419 if (reg < lastreg)
16420 abort ();
16422 if (i != 1)
16423 fprintf (asm_out_file, ", ");
16424 /* We can't use %r for vfp because we need to use the
16425 double precision register names. */
16426 if (IS_VFP_REGNUM (reg))
16427 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
16428 else
16429 asm_fprintf (asm_out_file, "%r", reg);
16431 #ifdef ENABLE_CHECKING
16432 /* Check that the addresses are consecutive. */
16433 e = XEXP (XEXP (e, 0), 0);
16434 if (GET_CODE (e) == PLUS)
16436 offset += reg_size;
16437 if (GET_CODE (XEXP (e, 0)) != REG
16438 || REGNO (XEXP (e, 0)) != SP_REGNUM
16439 || GET_CODE (XEXP (e, 1)) != CONST_INT
16440 || offset != INTVAL (XEXP (e, 1)))
16441 abort ();
16443 else if (i != 1
16444 || GET_CODE (e) != REG
16445 || REGNO (e) != SP_REGNUM)
16446 abort ();
16447 #endif
16449 fprintf (asm_out_file, "}\n");
16452 /* Emit unwind directives for a SET. */
16454 static void
16455 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
16457 rtx e0;
16458 rtx e1;
16459 unsigned reg;
16461 e0 = XEXP (p, 0);
16462 e1 = XEXP (p, 1);
16463 switch (GET_CODE (e0))
16465 case MEM:
16466 /* Pushing a single register. */
16467 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
16468 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
16469 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
16470 abort ();
16472 asm_fprintf (asm_out_file, "\t.save ");
16473 if (IS_VFP_REGNUM (REGNO (e1)))
16474 asm_fprintf(asm_out_file, "{d%d}\n",
16475 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
16476 else
16477 asm_fprintf(asm_out_file, "{%r}\n", REGNO (e1));
16478 break;
16480 case REG:
16481 if (REGNO (e0) == SP_REGNUM)
16483 /* A stack increment. */
16484 if (GET_CODE (e1) != PLUS
16485 || GET_CODE (XEXP (e1, 0)) != REG
16486 || REGNO (XEXP (e1, 0)) != SP_REGNUM
16487 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
16488 abort ();
16490 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
16491 -INTVAL (XEXP (e1, 1)));
16493 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
16495 HOST_WIDE_INT offset;
16497 if (GET_CODE (e1) == PLUS)
16499 if (GET_CODE (XEXP (e1, 0)) != REG
16500 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
16501 abort ();
16502 reg = REGNO (XEXP (e1, 0));
16503 offset = INTVAL (XEXP (e1, 1));
16504 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
16505 HARD_FRAME_POINTER_REGNUM, reg,
16506 INTVAL (XEXP (e1, 1)));
16508 else if (GET_CODE (e1) == REG)
16510 reg = REGNO (e1);
16511 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
16512 HARD_FRAME_POINTER_REGNUM, reg);
16514 else
16515 abort ();
16517 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
16519 /* Move from sp to reg. */
16520 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
16522 else if (GET_CODE (e1) == PLUS
16523 && GET_CODE (XEXP (e1, 0)) == REG
16524 && REGNO (XEXP (e1, 0)) == SP_REGNUM
16525 && GET_CODE (XEXP (e1, 1)) == CONST_INT)
16527 /* Set reg to offset from sp. */
16528 asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
16529 REGNO (e0), (int)INTVAL(XEXP (e1, 1)));
16531 else if (GET_CODE (e1) == UNSPEC && XINT (e1, 1) == UNSPEC_STACK_ALIGN)
16533 /* Stack pointer save before alignment. */
16534 reg = REGNO (e0);
16535 asm_fprintf (asm_out_file, "\t.unwind_raw 0, 0x%x @ vsp = r%d\n",
16536 reg + 0x90, reg);
16538 else
16539 abort ();
16540 break;
16542 default:
16543 abort ();
16548 /* Emit unwind directives for the given insn. */
16550 static void
16551 arm_unwind_emit (FILE * asm_out_file, rtx insn)
16553 rtx pat;
16555 if (!ARM_EABI_UNWIND_TABLES)
16556 return;
16558 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
16559 return;
16561 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
16562 if (pat)
16563 pat = XEXP (pat, 0);
16564 else
16565 pat = PATTERN (insn);
16567 switch (GET_CODE (pat))
16569 case SET:
16570 arm_unwind_emit_set (asm_out_file, pat);
16571 break;
16573 case SEQUENCE:
16574 /* Store multiple. */
16575 arm_unwind_emit_sequence (asm_out_file, pat);
16576 break;
16578 default:
16579 abort();
16584 /* Output a reference from a function exception table to the type_info
16585 object X. The EABI specifies that the symbol should be relocated by
16586 an R_ARM_TARGET2 relocation. */
16588 static bool
16589 arm_output_ttype (rtx x)
16591 fputs ("\t.word\t", asm_out_file);
16592 output_addr_const (asm_out_file, x);
16593 /* Use special relocations for symbol references. */
16594 if (GET_CODE (x) != CONST_INT)
16595 fputs ("(TARGET2)", asm_out_file);
16596 fputc ('\n', asm_out_file);
16598 return TRUE;
16600 #endif /* TARGET_UNWIND_INFO */
16603 /* Handle UNSPEC DWARF call frame instructions. These are needed for dynamic
16604 stack alignment. */
16606 static void
16607 arm_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
16609 rtx unspec = SET_SRC (pattern);
16610 gcc_assert (GET_CODE (unspec) == UNSPEC);
16612 switch (index)
16614 case UNSPEC_STACK_ALIGN:
16615 /* ??? We should set the CFA = (SP & ~7). At this point we haven't
16616 put anything on the stack, so hopefully it won't matter.
16617 CFA = SP will be correct after alignment. */
16618 dwarf2out_reg_save_reg (label, stack_pointer_rtx,
16619 SET_DEST (pattern));
16620 break;
16621 default:
16622 gcc_unreachable ();
16627 /* Output unwind directives for the start/end of a function. */
16629 void
16630 arm_output_fn_unwind (FILE * f, bool prologue)
16632 if (!ARM_EABI_UNWIND_TABLES)
16633 return;
16635 if (prologue)
16636 fputs ("\t.fnstart\n", f);
16637 else
16638 fputs ("\t.fnend\n", f);
16641 static bool
16642 arm_emit_tls_decoration (FILE *fp, rtx x)
16644 enum tls_reloc reloc;
16645 rtx val;
16647 val = XVECEXP (x, 0, 0);
16648 reloc = INTVAL (XVECEXP (x, 0, 1));
16650 output_addr_const (fp, val);
16652 switch (reloc)
16654 case TLS_GD32:
16655 fputs ("(tlsgd)", fp);
16656 break;
16657 case TLS_LDM32:
16658 fputs ("(tlsldm)", fp);
16659 break;
16660 case TLS_LDO32:
16661 fputs ("(tlsldo)", fp);
16662 break;
16663 case TLS_IE32:
16664 fputs ("(gottpoff)", fp);
16665 break;
16666 case TLS_LE32:
16667 fputs ("(tpoff)", fp);
16668 break;
16669 default:
16670 gcc_unreachable ();
16673 switch (reloc)
16675 case TLS_GD32:
16676 case TLS_LDM32:
16677 case TLS_IE32:
16678 fputs (" + (. - ", fp);
16679 output_addr_const (fp, XVECEXP (x, 0, 2));
16680 fputs (" - ", fp);
16681 output_addr_const (fp, XVECEXP (x, 0, 3));
16682 fputc (')', fp);
16683 break;
16684 default:
16685 break;
16688 return TRUE;
16691 /* ARM implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
16693 static void
16694 arm_output_dwarf_dtprel (FILE *file, int size, rtx x)
16696 gcc_assert (size == 4);
16697 fputs ("\t.word\t", file);
16698 output_addr_const (file, x);
16699 fputs ("(tlsldo)", file);
16702 bool
16703 arm_output_addr_const_extra (FILE *fp, rtx x)
16705 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
16706 return arm_emit_tls_decoration (fp, x);
16707 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
16709 char label[256];
16710 int labelno = INTVAL (XVECEXP (x, 0, 0));
16712 ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
16713 assemble_name_raw (fp, label);
16715 return TRUE;
16717 else if (GET_CODE (x) == CONST_VECTOR)
16718 return arm_emit_vector_const (fp, x);
16720 return FALSE;
16723 /* Output assembly for a shift instruction.
16724 SET_FLAGS determines how the instruction modifies the condition codes.
16725 0 - Do not set condition codes.
16726 1 - Set condition codes.
16727 2 - Use smallest instruction. */
16728 const char *
16729 arm_output_shift(rtx * operands, int set_flags)
16731 char pattern[100];
16732 static const char flag_chars[3] = {'?', '.', '!'};
16733 const char *shift;
16734 HOST_WIDE_INT val;
16735 char c;
16737 c = flag_chars[set_flags];
16738 if (TARGET_UNIFIED_ASM)
16740 shift = shift_op(operands[3], &val);
16741 if (shift)
16743 if (val != -1)
16744 operands[2] = GEN_INT(val);
16745 sprintf (pattern, "%s%%%c\t%%0, %%1, %%2", shift, c);
16747 else
16748 sprintf (pattern, "mov%%%c\t%%0, %%1", c);
16750 else
16751 sprintf (pattern, "mov%%%c\t%%0, %%1%%S3", c);
16752 output_asm_insn (pattern, operands);
16753 return "";
16756 /* Output a Thumb-2 casesi instruction. */
16757 const char *
16758 thumb2_output_casesi (rtx *operands)
16760 rtx diff_vec = PATTERN (next_real_insn (operands[2]));
16762 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
16764 output_asm_insn ("cmp\t%0, %1", operands);
16765 output_asm_insn ("bhi\t%l3", operands);
16766 switch (GET_MODE(diff_vec))
16768 case QImode:
16769 return "tbb\t[%|pc, %0]";
16770 case HImode:
16771 return "tbh\t[%|pc, %0, lsl #1]";
16772 case SImode:
16773 if (flag_pic)
16775 output_asm_insn ("adr\t%4, %l2", operands);
16776 output_asm_insn ("ldr\t%5, [%4, %0, lsl #2]", operands);
16777 output_asm_insn ("add\t%4, %4, %5", operands);
16778 return "bx\t%4";
16780 else
16782 output_asm_insn ("adr\t%4, %l2", operands);
16783 return "ldr\t%|pc, [%4, %0, lsl #2]";
16785 default:
16786 gcc_unreachable ();
16790 #include "gt-arm.h"