* config/arm/arm.c (all_fpus): Fix comment typo.
/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
			     HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
					   int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static rtx is_jump_table (rtx);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
			       rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
				 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static rtx emit_set_insn (rtx, rtx);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
static void aof_asm_init_sections (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
				   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
static bool arm_cannot_copy_insn_p (rtx);
static bool arm_tls_symbol_p (rtx x);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef  TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO  arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO  arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS  arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef  TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef  TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef  TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef  TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef  TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef  TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

#undef  TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p

#ifdef HAVE_AS_TLS
#undef  TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef  TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Which thread pointer model to use.  */
enum arm_tp_type target_thread_pointer = TP_AUTO;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
					 media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
					 Note: ARM6 & 7 derivatives only.  */
#define FL_ARCH6K     (1 << 15)       /* Architecture rel 6 K extensions.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2	0
#define FL_FOR_ARCH3	FL_MODE32
#define FL_FOR_ARCH3M	(FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4	(FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T	(FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5	(FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T	(FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E	(FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE	(FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ	FL_FOR_ARCH5TE
#define FL_FOR_ARCH6	(FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J	FL_FOR_ARCH6
#define FL_FOR_ARCH6K	(FL_FOR_ARCH6 | FL_ARCH6K)
#define FL_FOR_ARCH6Z	FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK	FL_FOR_ARCH6K

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip supports the ARM 6K extensions.  */
int arm_arch6k = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)
/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string	name		processors  */
  { NULL,	"-mcpu=",	all_cores  },
  { NULL,	"-march=",	all_architectures },
  { NULL,	"-mtune=",	all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2
/* The name of the preprocessor macro to define for this architecture.  */
char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};
/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",	FPUTYPE_FPA},
  {"fpe2",	FPUTYPE_FPA_EMU2},
647 {"fpe3", FPUTYPE_FPA_EMU2},
648 {"maverick", FPUTYPE_MAVERICK},
649 {"vfp", FPUTYPE_VFP}
653 /* Floating point models used by the different hardware.
654 See fputype in arm.h. */
656 static const enum fputype fp_model_for_fpu[] =
658 /* No FP hardware. */
659 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
660 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
661 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
662 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
663 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
664 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
668 struct float_abi
670 const char * name;
671 enum float_abi_type abi_type;
675 /* Available values for -mfloat-abi=. */
677 static const struct float_abi all_float_abis[] =
679 {"soft", ARM_FLOAT_ABI_SOFT},
680 {"softfp", ARM_FLOAT_ABI_SOFTFP},
681 {"hard", ARM_FLOAT_ABI_HARD}
685 struct abi_name
687 const char *name;
688 enum arm_abi_type abi_type;
692 /* Available values for -mabi=. */
694 static const struct abi_name arm_all_abis[] =
696 {"apcs-gnu", ARM_ABI_APCS},
697 {"atpcs", ARM_ABI_ATPCS},
698 {"aapcs", ARM_ABI_AAPCS},
699 {"iwmmxt", ARM_ABI_IWMMXT},
700 {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
703 /* Supported TLS relocations. */
705 enum tls_reloc {
706 TLS_GD32,
707 TLS_LDM32,
708 TLS_LDO32,
709 TLS_IE32,
710 TLS_LE32
/* Emit an insn that's a simple single-set.  Both the operands must be known
   to be valid.  */
inline static rtx
emit_set_insn (rtx x, rtx y)
{
  return emit_insn (gen_rtx_SET (VOIDmode, x, y));
}

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");

  /* For SImode division the ABI provides div-without-mod routines,
     which are faster.  */
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
	{
	  const struct processors * sel;

	  for (sel = ptr->processors; sel->name != NULL; sel++)
	    if (streq (ptr->string, sel->name))
	      {
		/* Set the architecture define.  */
		if (i != ARM_OPT_SET_TUNE)
		  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

		/* Determine the processor core for which we should
		   tune code-generation.  */
		if (/* -mcpu= is a sensible default.  */
		    i == ARM_OPT_SET_CPU
		    /* -mtune= overrides -mcpu= and -march=.  */
		    || i == ARM_OPT_SET_TUNE)
		  arm_tune = (enum processor_type) (sel - ptr->processors);

		/* Remember the CPU associated with this architecture.
		   If no other option is used to set the CPU type,
		   we'll use this to guess the most suitable tuning
		   options.  */
		if (i == ARM_OPT_SET_ARCH)
		  target_arch_cpu = sel->core;

		if (i != ARM_OPT_SET_TUNE)
		  {
		    /* If we have been given an architecture and a processor
		       make sure that they are compatible.  We only generate
		       a warning though, and we prefer the CPU over the
		       architecture.  */
		    if (insn_flags != 0 && (insn_flags ^ sel->flags))
		      warning (0, "switch -mcpu=%s conflicts with -march= switch",
			       ptr->string);

		    insn_flags = sel->flags;
		  }

		break;
	      }

	  if (sel->name == NULL)
	    error ("bad value (%s) for %s switch", ptr->string, ptr->name);
	}
    }
  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
	{
#ifdef SUBTARGET_CPU_DEFAULT
	  /* Use the subtarget default CPU if none was specified by
	     configure.  */
	  cpu = SUBTARGET_CPU_DEFAULT;
#endif
	  /* Default to ARM6.  */
	  if (cpu == arm_none)
	    cpu = arm6;
	}
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
	 switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
	{
	  sought |= (FL_THUMB | FL_MODE32);

	  /* There are no ARM processors that support both APCS-26 and
	     interworking.  Therefore we force FL_MODE26 to be removed
	     from insn_flags here (if it was set), so that the search
	     below will always be able to find a compatible processor.  */
	  insn_flags &= ~FL_MODE26;
	}

      if (sought != 0 && ((sought & insn_flags) != sought))
	{
	  /* Try to locate a CPU type that supports all of the abilities
	     of the default CPU, plus the extra abilities requested by
	     the user.  */
	  for (sel = all_cores; sel->name != NULL; sel++)
	    if ((sel->flags & sought) == (sought | insn_flags))
	      break;

	  if (sel->name == NULL)
	    {
	      unsigned current_bit_count = 0;
	      const struct processors * best_fit = NULL;

	      /* Ideally we would like to issue an error message here
		 saying that it was not possible to find a CPU compatible
		 with the default CPU, but which also supports the command
		 line options specified by the programmer, and so they
		 ought to use the -mcpu=<name> command line option to
		 override the default CPU type.

		 If we cannot find a cpu that has both the
		 characteristics of the default cpu and the given
		 command line options we scan the array again looking
		 for a best match.  */
	      for (sel = all_cores; sel->name != NULL; sel++)
		if ((sel->flags & sought) == sought)
		  {
		    unsigned count;

		    count = bit_count (sel->flags & insn_flags);

		    if (count >= current_bit_count)
		      {
			best_fit = sel;
			current_bit_count = count;
		      }
		  }

	      gcc_assert (best_fit);
	      sel = best_fit;
	    }

	  insn_flags = sel->flags;
	}
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
	arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);
  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking");
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;
  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
	{
	  if (streq (arm_all_abis[i].name, target_abi_name))
	    {
	      arm_abi = arm_all_abis[i].abi_type;
	      break;
	    }
	}
      if (i == ARRAY_SIZE (arm_all_abis))
	error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
	target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
	target_fpu_name = "fpe3";
      else
	error ("invalid floating point emulation option: -mfpe=%s",
	       target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
	{
	  if (streq (all_fpus[i].name, target_fpu_name))
	    {
	      arm_fpu_arch = all_fpus[i].fpu;
	      arm_fpu_tune = arm_fpu_arch;
	      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
	      break;
	    }
	}
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
	error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
	arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
	arm_fpu_arch = FPUTYPE_MAVERICK;
      else
	arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
	arm_fpu_tune = FPUTYPE_FPA;
      else
	arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }
  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
	{
	  if (streq (all_float_abis[i].name, target_float_abi_name))
	    {
	      arm_float_abi = all_float_abis[i].abi_type;
	      break;
	    }
	}
      if (i == ARRAY_SIZE (all_float_abis))
	error ("invalid floating point abi: -mfloat-abi=%s",
	       target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  if (target_thread_switch)
    {
      if (strcmp (target_thread_switch, "soft") == 0)
	target_thread_pointer = TP_SOFT;
      else if (strcmp (target_thread_switch, "auto") == 0)
	target_thread_pointer = TP_AUTO;
      else if (strcmp (target_thread_switch, "cp15") == 0)
	target_thread_pointer = TP_CP15;
      else
	error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
    }

  /* Use the cp15 method if it is available.  */
  if (target_thread_pointer == TP_AUTO)
    {
      if (arm_arch6k && !TARGET_THUMB)
	target_thread_pointer = TP_CP15;
      else
	target_thread_pointer = TP_SOFT;
    }

  if (TARGET_HARD_TP && TARGET_THUMB)
    error ("can not use -mtp=cp15 with -mthumb");

  /* Override the default structure alignment for AAPCS ABI.  */
  if (TARGET_AAPCS_BASED)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
	  || (ARM_DOUBLEWORD_ALIGN && size == 64))
	arm_structure_size_boundary = size;
      else
	warning (0, "structure size boundary can only be set to %s",
		 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
	warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
	       || pic_register == HARD_FRAME_POINTER_REGNUM
	       || pic_register == STACK_POINTER_REGNUM
	       || pic_register >= PC_REGNUM)
	error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
	arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
	 are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
	 2 cycles to load a constant, and the load scheduler may well
	 reduce that to 1.  */
      if (arm_ld_sched)
	arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
	 to achieve a good schedule, so it's faster to synthesize
	 constants that can be done in two insns.  */
      if (arm_tune_xscale)
	arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
	 that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
	max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}
static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
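
/* Note: the table above is scanned linearly and the first match is
   returned, so the repeated "ABORT"/"abort" entries are redundant but
   harmless.  */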
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}
/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
	 is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
	 the default abi) ... */
      if (!call_used_regs[3])
	return 0;

      /* ... that it isn't being used for a return value ... */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
	return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
	{
	  gcc_assert (GET_CODE (sibling) == CALL_INSN);

	  if (find_regno_fusage (sibling, USE, 3))
	    return 0;
	}

      /* ... and that there are no call-saved registers in r0-r2
	 (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
	return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
	 conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
	return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
	return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
	return 0;

  return 1;
}
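
/* An ARM data-processing immediate is an 8-bit value rotated right by an
   even amount (0-30).  For example, 0x000000ff, 0x0000ff00 and 0xc000003f
   (0xff rotated right by 2) are all representable, while 0x00000101 is
   not.  The tests below look for a byte's worth of set bits at an even
   rotation, including the three rotations that wrap around the top of
   the word.  */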
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
	  != ((~(unsigned HOST_WIDE_INT) 0)
	      & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
	   && ((i & ~0xc000003f) == 0
	       || (i & ~0xf000000f) == 0
	       || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}
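
/* Some codes can accept a wider range of constants by rewriting the
   instruction: a PLUS of -I can be emitted as a SUB, and an AND of ~I
   as a BIC, so those cases below also accept constants whose negation
   or inversion is a valid ARM immediate.  */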
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:		/* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
1642 /* Emit a sequence of insns to handle a large constant.
1643 CODE is the code of the operation required, it can be any of SET, PLUS,
1644 IOR, AND, XOR, MINUS;
1645 MODE is the mode in which the operation is being performed;
1646 VAL is the integer to operate on;
1647 SOURCE is the other operand (a register, or a null-pointer for SET);
1648 SUBTARGETS means it is safe to create scratch registers if that will
1649 either produce a simpler sequence, or we will want to cse the values.
1650 Return value is the number of insns emitted. */
1653 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1654 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1656 rtx cond;
1658 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1659 cond = COND_EXEC_TEST (PATTERN (insn));
1660 else
1661 cond = NULL_RTX;
1663 if (subtargets || code == SET
1664 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1665 && REGNO (target) != REGNO (source)))
1667 /* After arm_reorg has been called, we can't fix up expensive
1668 constants by pushing them into memory so we must synthesize
1669 them in-line, regardless of the cost. This is only likely to
1670 be more costly on chips that have load delay slots and we are
1671 compiling without running the scheduler (so no splitting
1672 occurred before the final instruction emission).
1674 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1676 if (!after_arm_reorg
1677 && !cond
1678 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1679 1, 0)
1680 > arm_constant_limit + (code != SET)))
1682 if (code == SET)
1684 /* Currently SET is the only monadic value for CODE, all
1685 the rest are diadic. */
1686 emit_set_insn (target, GEN_INT (val));
1687 return 1;
1689 else
1691 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1693 emit_set_insn (temp, GEN_INT (val));
1694 /* For MINUS, the value is the minuend, since we never
1695 have subtraction of a constant. */
1696 if (code == MINUS)
1697 emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
1698 else
1699 emit_set_insn (target,
1700 gen_rtx_fmt_ee (code, mode, source, temp));
1701 return 2;
1706 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1707 1);
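/* For instance (an illustrative sequence; the exact chunk order is
   chosen by arm_gen_constant), a SET of 0x12345678 cannot be done in
   one instruction, so it is split into four 8-bit rotated immediates:

	mov	rd, #0x12000000
	orr	rd, rd, #0x00340000
	orr	rd, rd, #0x00005600
	orr	rd, rd, #0x00000078  */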
1710 static int
1711 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1713 HOST_WIDE_INT temp1;
1714 int num_insns = 0;
1715 do
1717 int end;
1719 if (i <= 0)
1720 i += 32;
1721 if (remainder & (3 << (i - 2)))
1723 end = i - 8;
1724 if (end < 0)
1725 end += 32;
1726 temp1 = remainder & ((0x0ff << end)
1727 | ((i < end) ? (0xff >> (32 - end)) : 0));
1728 remainder &= ~temp1;
1729 num_insns++;
1730 i -= 6;
1732 i -= 2;
1733 } while (remainder);
1734 return num_insns;
1737 /* Emit an instruction with the indicated PATTERN. If COND is
1738 non-NULL, conditionalize the execution of the instruction on COND
1739 being true. */
1741 static void
1742 emit_constant_insn (rtx cond, rtx pattern)
1744 if (cond)
1745 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1746 emit_insn (pattern);
1749 /* As above, but extra parameter GENERATE which, if clear, suppresses
1750 RTL generation. */
1752 static int
1753 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1754 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1755 int generate)
1757 int can_invert = 0;
1758 int can_negate = 0;
1759 int can_negate_initial = 0;
1760 int can_shift = 0;
1761 int i;
1762 int num_bits_set = 0;
1763 int set_sign_bit_copies = 0;
1764 int clear_sign_bit_copies = 0;
1765 int clear_zero_bit_copies = 0;
1766 int set_zero_bit_copies = 0;
1767 int insns = 0;
1768 unsigned HOST_WIDE_INT temp1, temp2;
1769 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1771 /* Find out which operations are safe for a given CODE. Also do a quick
1772 check for degenerate cases; these can occur when DImode operations
1773 are split. */
1774 switch (code)
1776 case SET:
1777 can_invert = 1;
1778 can_shift = 1;
1779 can_negate = 1;
1780 break;
1782 case PLUS:
1783 can_negate = 1;
1784 can_negate_initial = 1;
1785 break;
1787 case IOR:
1788 if (remainder == 0xffffffff)
1790 if (generate)
1791 emit_constant_insn (cond,
1792 gen_rtx_SET (VOIDmode, target,
1793 GEN_INT (ARM_SIGN_EXTEND (val))));
1794 return 1;
1796 if (remainder == 0)
1798 if (reload_completed && rtx_equal_p (target, source))
1799 return 0;
1800 if (generate)
1801 emit_constant_insn (cond,
1802 gen_rtx_SET (VOIDmode, target, source));
1803 return 1;
1805 break;
1807 case AND:
1808 if (remainder == 0)
1810 if (generate)
1811 emit_constant_insn (cond,
1812 gen_rtx_SET (VOIDmode, target, const0_rtx));
1813 return 1;
1815 if (remainder == 0xffffffff)
1817 if (reload_completed && rtx_equal_p (target, source))
1818 return 0;
1819 if (generate)
1820 emit_constant_insn (cond,
1821 gen_rtx_SET (VOIDmode, target, source));
1822 return 1;
1824 can_invert = 1;
1825 break;
1827 case XOR:
1828 if (remainder == 0)
1830 if (reload_completed && rtx_equal_p (target, source))
1831 return 0;
1832 if (generate)
1833 emit_constant_insn (cond,
1834 gen_rtx_SET (VOIDmode, target, source));
1835 return 1;
1838 /* We don't know how to handle other cases yet. */
1839 gcc_assert (remainder == 0xffffffff);
1841 if (generate)
1842 emit_constant_insn (cond,
1843 gen_rtx_SET (VOIDmode, target,
1844 gen_rtx_NOT (mode, source)));
1845 return 1;
1847 case MINUS:
1848 /* We treat MINUS as (val - source), since (source - val) is always
1849 passed as (source + (-val)). */
1850 if (remainder == 0)
1852 if (generate)
1853 emit_constant_insn (cond,
1854 gen_rtx_SET (VOIDmode, target,
1855 gen_rtx_NEG (mode, source)));
1856 return 1;
1858 if (const_ok_for_arm (val))
1860 if (generate)
1861 emit_constant_insn (cond,
1862 gen_rtx_SET (VOIDmode, target,
1863 gen_rtx_MINUS (mode, GEN_INT (val),
1864 source)));
1865 return 1;
1867 can_negate = 1;
1869 break;
1871 default:
1872 gcc_unreachable ();
1875 /* If we can do it in one insn, get out quickly. */
1876 if (const_ok_for_arm (val)
1877 || (can_negate_initial && const_ok_for_arm (-val))
1878 || (can_invert && const_ok_for_arm (~val)))
1880 if (generate)
1881 emit_constant_insn (cond,
1882 gen_rtx_SET (VOIDmode, target,
1883 (source
1884 ? gen_rtx_fmt_ee (code, mode, source,
1885 GEN_INT (val))
1886 : GEN_INT (val))));
1887 return 1;
1890 /* Calculate a few attributes that may be useful for specific
1891 optimizations. */
1892 for (i = 31; i >= 0; i--)
1894 if ((remainder & (1 << i)) == 0)
1895 clear_sign_bit_copies++;
1896 else
1897 break;
1900 for (i = 31; i >= 0; i--)
1902 if ((remainder & (1 << i)) != 0)
1903 set_sign_bit_copies++;
1904 else
1905 break;
1908 for (i = 0; i <= 31; i++)
1910 if ((remainder & (1 << i)) == 0)
1911 clear_zero_bit_copies++;
1912 else
1913 break;
1916 for (i = 0; i <= 31; i++)
1918 if ((remainder & (1 << i)) != 0)
1919 set_zero_bit_copies++;
1920 else
1921 break;
1924 switch (code)
1926 case SET:
1927 /* See if we can do this by sign_extending a constant that is known
1928 to be negative. This is a good way of doing it, since the shift
1929 may well merge into a subsequent insn. */
1930 if (set_sign_bit_copies > 1)
1932 if (const_ok_for_arm
1933 (temp1 = ARM_SIGN_EXTEND (remainder
1934 << (set_sign_bit_copies - 1))))
1936 if (generate)
1938 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1939 emit_constant_insn (cond,
1940 gen_rtx_SET (VOIDmode, new_src,
1941 GEN_INT (temp1)));
1942 emit_constant_insn (cond,
1943 gen_ashrsi3 (target, new_src,
1944 GEN_INT (set_sign_bit_copies - 1)));
1946 return 2;
1948 /* For an inverted constant, we will need to set the low bits;
1949 these will be shifted out of harm's way. */
1950 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1951 if (const_ok_for_arm (~temp1))
1953 if (generate)
1955 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1956 emit_constant_insn (cond,
1957 gen_rtx_SET (VOIDmode, new_src,
1958 GEN_INT (temp1)));
1959 emit_constant_insn (cond,
1960 gen_ashrsi3 (target, new_src,
1961 GEN_INT (set_sign_bit_copies - 1)));
1963 return 2;
1967 /* See if we can calculate the value as the difference between two
1968 valid immediates. */
1969 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1971 int topshift = clear_sign_bit_copies & ~1;
1973 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1974 & (0xff000000 >> topshift));
1976 /* If temp1 is zero, then that means the 9 most significant
1977 bits of remainder were 1 and we've caused it to overflow.
1978 When topshift is 0 we don't need to do anything since we
1979 can borrow from 'bit 32'. */
1980 if (temp1 == 0 && topshift != 0)
1981 temp1 = 0x80000000 >> (topshift - 1);
1983 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
1985 if (const_ok_for_arm (temp2))
1987 if (generate)
1989 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1990 emit_constant_insn (cond,
1991 gen_rtx_SET (VOIDmode, new_src,
1992 GEN_INT (temp1)));
1993 emit_constant_insn (cond,
1994 gen_addsi3 (target, new_src,
1995 GEN_INT (-temp2)));
1998 return 2;
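/* Worked example (illustrative): remainder == 0x00fffff0 gives
   clear_sign_bit_copies == 8 and clear_zero_bit_copies == 4, so
   topshift == 8.  The rounding above overflows to zero, temp1 becomes
   0x01000000, and temp2 == 0x01000000 - 0x00fffff0 == 0x10.  Both are
   valid immediates, so two instructions suffice:

	mov	rd, #0x01000000
	sub	rd, rd, #16  */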
2002 /* See if we can generate this by setting the bottom (or the top)
2003 16 bits, and then shifting these into the other half of the
2004 word. We only look for the simplest cases; to do more would cost
2005 too much. Be careful, however, not to generate this when the
2006 alternative would take fewer insns. */
2007 if (val & 0xffff0000)
2009 temp1 = remainder & 0xffff0000;
2010 temp2 = remainder & 0x0000ffff;
2012 /* Overlaps outside this range are best done using other methods. */
2013 for (i = 9; i < 24; i++)
2015 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2016 && !const_ok_for_arm (temp2))
2018 rtx new_src = (subtargets
2019 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2020 : target);
2021 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2022 source, subtargets, generate);
2023 source = new_src;
2024 if (generate)
2025 emit_constant_insn
2026 (cond,
2027 gen_rtx_SET
2028 (VOIDmode, target,
2029 gen_rtx_IOR (mode,
2030 gen_rtx_ASHIFT (mode, source,
2031 GEN_INT (i)),
2032 source)));
2033 return insns + 1;
2037 /* Don't duplicate cases already considered. */
2038 for (i = 17; i < 24; i++)
2040 if (((temp1 | (temp1 >> i)) == remainder)
2041 && !const_ok_for_arm (temp1))
2043 rtx new_src = (subtargets
2044 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2045 : target);
2046 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
2047 source, subtargets, generate);
2048 source = new_src;
2049 if (generate)
2050 emit_constant_insn
2051 (cond,
2052 gen_rtx_SET (VOIDmode, target,
2053 gen_rtx_IOR
2054 (mode,
2055 gen_rtx_LSHIFTRT (mode, source,
2056 GEN_INT (i)),
2057 source)));
2058 return insns + 1;
2062 break;
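/* Worked example (illustrative): 0x01010101 is its bottom half 0x0101
   replicated at i == 16.  0x0101 is not a valid immediate, so it is
   synthesized recursively and then ORed with a shifted copy of itself,
   giving three instructions instead of four:

	mov	rd, #0x01
	orr	rd, rd, #0x0100
	orr	rd, rd, rd, lsl #16  */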
2064 case IOR:
2065 case XOR:
2066 /* If we have IOR or XOR, and the constant can be loaded in a
2067 single instruction, and we can find a temporary to put it in,
2068 then this can be done in two instructions instead of 3-4. */
2069 if (subtargets
2070 /* TARGET can't be NULL if SUBTARGETS is 0. */
2071 || (reload_completed && !reg_mentioned_p (target, source)))
2073 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2075 if (generate)
2077 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2079 emit_constant_insn (cond,
2080 gen_rtx_SET (VOIDmode, sub,
2081 GEN_INT (val)));
2082 emit_constant_insn (cond,
2083 gen_rtx_SET (VOIDmode, target,
2084 gen_rtx_fmt_ee (code, mode,
2085 source, sub)));
2087 return 2;
2091 if (code == XOR)
2092 break;
2094 if (set_sign_bit_copies > 8
2095 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2097 if (generate)
2099 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2100 rtx shift = GEN_INT (set_sign_bit_copies);
2102 emit_constant_insn
2103 (cond,
2104 gen_rtx_SET (VOIDmode, sub,
2105 gen_rtx_NOT (mode,
2106 gen_rtx_ASHIFT (mode,
2107 source,
2108 shift))));
2109 emit_constant_insn
2110 (cond,
2111 gen_rtx_SET (VOIDmode, target,
2112 gen_rtx_NOT (mode,
2113 gen_rtx_LSHIFTRT (mode, sub,
2114 shift))));
2116 return 2;
2119 if (set_zero_bit_copies > 8
2120 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2122 if (generate)
2124 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2125 rtx shift = GEN_INT (set_zero_bit_copies);
2127 emit_constant_insn
2128 (cond,
2129 gen_rtx_SET (VOIDmode, sub,
2130 gen_rtx_NOT (mode,
2131 gen_rtx_LSHIFTRT (mode,
2132 source,
2133 shift))));
2134 emit_constant_insn
2135 (cond,
2136 gen_rtx_SET (VOIDmode, target,
2137 gen_rtx_NOT (mode,
2138 gen_rtx_ASHIFT (mode, sub,
2139 shift))));
2141 return 2;
2144 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2146 if (generate)
2148 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2149 emit_constant_insn (cond,
2150 gen_rtx_SET (VOIDmode, sub,
2151 gen_rtx_NOT (mode, source)));
2152 source = sub;
2153 if (subtargets)
2154 sub = gen_reg_rtx (mode);
2155 emit_constant_insn (cond,
2156 gen_rtx_SET (VOIDmode, sub,
2157 gen_rtx_AND (mode, source,
2158 GEN_INT (temp1))));
2159 emit_constant_insn (cond,
2160 gen_rtx_SET (VOIDmode, target,
2161 gen_rtx_NOT (mode, sub)));
2163 return 3;
2165 break;
2167 case AND:
2168 /* See if two shifts will do two or more insns' worth of work. */
2169 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2171 HOST_WIDE_INT shift_mask = ((0xffffffff
2172 << (32 - clear_sign_bit_copies))
2173 & 0xffffffff);
2175 if ((remainder | shift_mask) != 0xffffffff)
2177 if (generate)
2179 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2180 insns = arm_gen_constant (AND, mode, cond,
2181 remainder | shift_mask,
2182 new_src, source, subtargets, 1);
2183 source = new_src;
2185 else
2187 rtx targ = subtargets ? NULL_RTX : target;
2188 insns = arm_gen_constant (AND, mode, cond,
2189 remainder | shift_mask,
2190 targ, source, subtargets, 0);
2194 if (generate)
2196 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2197 rtx shift = GEN_INT (clear_sign_bit_copies);
2199 emit_insn (gen_ashlsi3 (new_src, source, shift));
2200 emit_insn (gen_lshrsi3 (target, new_src, shift));
2203 return insns + 2;
2206 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2208 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2210 if ((remainder | shift_mask) != 0xffffffff)
2212 if (generate)
2214 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2216 insns = arm_gen_constant (AND, mode, cond,
2217 remainder | shift_mask,
2218 new_src, source, subtargets, 1);
2219 source = new_src;
2221 else
2223 rtx targ = subtargets ? NULL_RTX : target;
2225 insns = arm_gen_constant (AND, mode, cond,
2226 remainder | shift_mask,
2227 targ, source, subtargets, 0);
2231 if (generate)
2233 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2234 rtx shift = GEN_INT (clear_zero_bit_copies);
2236 emit_insn (gen_lshrsi3 (new_src, source, shift));
2237 emit_insn (gen_ashlsi3 (target, new_src, shift));
2240 return insns + 2;
2243 break;
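/* Worked example (illustrative): x & 0x0000ffff.  Neither 0x0000ffff
   nor its inverse 0xffff0000 is a valid immediate, but
   clear_sign_bit_copies == 16, so the mask can be applied with two
   shifts and no constant at all:

	mov	rd, rn, lsl #16
	mov	rd, rd, lsr #16  */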
2245 default:
2246 break;
2249 for (i = 0; i < 32; i++)
2250 if (remainder & (1 << i))
2251 num_bits_set++;
2253 if (code == AND || (can_invert && num_bits_set > 16))
2254 remainder = (~remainder) & 0xffffffff;
2255 else if (code == PLUS && num_bits_set > 16)
2256 remainder = (-remainder) & 0xffffffff;
2257 else
2259 can_invert = 0;
2260 can_negate = 0;
2263 /* Now try to find a way of doing the job in either two or three
2264 instructions.
2265 We start by looking for the largest block of zeros that is aligned on
2266 a 2-bit boundary; we then fill up the temps, wrapping around to the
2267 top of the word when we drop off the bottom.
2268 In the worst case this code should produce no more than four insns. */
2270 int best_start = 0;
2271 int best_consecutive_zeros = 0;
2273 for (i = 0; i < 32; i += 2)
2275 int consecutive_zeros = 0;
2277 if (!(remainder & (3 << i)))
2279 while ((i < 32) && !(remainder & (3 << i)))
2281 consecutive_zeros += 2;
2282 i += 2;
2284 if (consecutive_zeros > best_consecutive_zeros)
2286 best_consecutive_zeros = consecutive_zeros;
2287 best_start = i - consecutive_zeros;
2289 i -= 2;
2293 /* So long as it won't require any more insns to do so, it's
2294 desirable to emit a small constant (in bits 0...9) in the last
2295 insn. This way there is more chance that it can be combined with
2296 a later addressing insn to form a pre-indexed load or store
2297 operation. Consider:
2299 *((volatile int *)0xe0000100) = 1;
2300 *((volatile int *)0xe0000110) = 2;
2302 We want this to wind up as:
2304 mov rA, #0xe0000000
2305 mov rB, #1
2306 str rB, [rA, #0x100]
2307 mov rB, #2
2308 str rB, [rA, #0x110]
2310 rather than having to synthesize both large constants from scratch.
2312 Therefore, we calculate how many insns would be required to emit
2313 the constant starting from `best_start', and also starting from
2314 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2315 yield a shorter sequence, we may as well use zero. */
2316 if (best_start != 0
2317 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2318 && (count_insns_for_constant (remainder, 0) <=
2319 count_insns_for_constant (remainder, best_start)))
2320 best_start = 0;
2322 /* Now start emitting the insns. */
2323 i = best_start;
2324 do
2326 int end;
2328 if (i <= 0)
2329 i += 32;
2330 if (remainder & (3 << (i - 2)))
2332 end = i - 8;
2333 if (end < 0)
2334 end += 32;
2335 temp1 = remainder & ((0x0ff << end)
2336 | ((i < end) ? (0xff >> (32 - end)) : 0));
2337 remainder &= ~temp1;
2339 if (generate)
2341 rtx new_src, temp1_rtx;
2343 if (code == SET || code == MINUS)
2345 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2346 if (can_invert && code != MINUS)
2347 temp1 = ~temp1;
2349 else
2351 if (remainder && subtargets)
2352 new_src = gen_reg_rtx (mode);
2353 else
2354 new_src = target;
2355 if (can_invert)
2356 temp1 = ~temp1;
2357 else if (can_negate)
2358 temp1 = -temp1;
2361 temp1 = trunc_int_for_mode (temp1, mode);
2362 temp1_rtx = GEN_INT (temp1);
2364 if (code == SET)
2365 ;
2366 else if (code == MINUS)
2367 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2368 else
2369 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2371 emit_constant_insn (cond,
2372 gen_rtx_SET (VOIDmode, new_src,
2373 temp1_rtx));
2374 source = new_src;
2377 if (code == SET)
2379 can_invert = 0;
2380 code = PLUS;
2382 else if (code == MINUS)
2383 code = PLUS;
2385 insns++;
2386 i -= 6;
2388 i -= 2;
2390 while (remainder);
2393 return insns;
2396 /* Canonicalize a comparison so that we are more likely to recognize it.
2397 This can be done for a few constant compares, where we can make the
2398 immediate value easier to load. */
2400 enum rtx_code
2401 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2402 rtx * op1)
2404 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2405 unsigned HOST_WIDE_INT maxval;
2406 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2408 switch (code)
2410 case EQ:
2411 case NE:
2412 return code;
2414 case GT:
2415 case LE:
2416 if (i != maxval
2417 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2419 *op1 = GEN_INT (i + 1);
2420 return code == GT ? GE : LT;
2422 break;
2424 case GE:
2425 case LT:
2426 if (i != ~maxval
2427 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2429 *op1 = GEN_INT (i - 1);
2430 return code == GE ? GT : LE;
2432 break;
2434 case GTU:
2435 case LEU:
2436 if (i != ~((unsigned HOST_WIDE_INT) 0)
2437 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2439 *op1 = GEN_INT (i + 1);
2440 return code == GTU ? GEU : LTU;
2442 break;
2444 case GEU:
2445 case LTU:
2446 if (i != 0
2447 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2449 *op1 = GEN_INT (i - 1);
2450 return code == GEU ? GTU : LEU;
2452 break;
2454 default:
2455 gcc_unreachable ();
2458 return code;
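/* Worked example (illustrative): for "x > 0xfff" the immediate 0xfff
   is not encodable, but 0xfff + 1 == 0x1000 is, so the comparison is
   rewritten as "x >= 0x1000":

	cmp	rn, #4096	@ then branch on GE instead of GT  */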
2462 /* Define how to find the value returned by a function. */
2464 rtx
2465 arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2467 enum machine_mode mode;
2468 int unsignedp ATTRIBUTE_UNUSED;
2469 rtx r ATTRIBUTE_UNUSED;
2471 mode = TYPE_MODE (type);
2472 /* Promote integer types. */
2473 if (INTEGRAL_TYPE_P (type))
2474 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2476 /* Promote small structs returned in a register to full-word size
2477 for big-endian AAPCS. */
2478 if (arm_return_in_msb (type))
2480 HOST_WIDE_INT size = int_size_in_bytes (type);
2481 if (size % UNITS_PER_WORD != 0)
2483 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2484 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2488 return LIBCALL_VALUE(mode);
2491 /* Determine the amount of memory needed to store the possible return
2492 registers of an untyped call. */
2493 int
2494 arm_apply_result_size (void)
2496 int size = 16;
2498 if (TARGET_ARM)
2500 if (TARGET_HARD_FLOAT_ABI)
2502 if (TARGET_FPA)
2503 size += 12;
2504 if (TARGET_MAVERICK)
2505 size += 8;
2507 if (TARGET_IWMMXT_ABI)
2508 size += 8;
2511 return size;
2514 /* Decide whether a type should be returned in memory (true)
2515 or in a register (false). This is called by the macro
2516 RETURN_IN_MEMORY. */
2517 int
2518 arm_return_in_memory (tree type)
2520 HOST_WIDE_INT size;
2522 if (!AGGREGATE_TYPE_P (type)
2523 && (TREE_CODE (type) != VECTOR_TYPE)
2524 && !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2525 /* All simple types are returned in registers.
2526 For AAPCS, complex types are treated the same as aggregates. */
2527 return 0;
2529 size = int_size_in_bytes (type);
2531 if (arm_abi != ARM_ABI_APCS)
2533 /* ATPCS and later return aggregate types in memory only if they are
2534 larger than a word (or are variable size). */
2535 return (size < 0 || size > UNITS_PER_WORD);
2538 /* To maximize backwards compatibility with previous versions of gcc,
2539 return vectors up to 4 words in registers. */
2540 if (TREE_CODE (type) == VECTOR_TYPE)
2541 return (size < 0 || size > (4 * UNITS_PER_WORD));
2543 /* For the arm-wince targets we choose to be compatible with Microsoft's
2544 ARM and Thumb compilers, which always return aggregates in memory. */
2545 #ifndef ARM_WINCE
2546 /* All structures/unions bigger than one word are returned in memory.
2547 Also catch the case where int_size_in_bytes returns -1. In this case
2548 the aggregate is either huge or of variable size, and in either case
2549 we will want to return it via memory and not in a register. */
2550 if (size < 0 || size > UNITS_PER_WORD)
2551 return 1;
2553 if (TREE_CODE (type) == RECORD_TYPE)
2555 tree field;
2557 /* For a struct the APCS says that we only return in a register
2558 if the type is 'integer like' and every addressable element
2559 has an offset of zero. For practical purposes this means
2560 that the structure can have at most one non bit-field element
2561 and that this element must be the first one in the structure. */
2563 /* Find the first field, ignoring non FIELD_DECL things which will
2564 have been created by C++. */
2565 for (field = TYPE_FIELDS (type);
2566 field && TREE_CODE (field) != FIELD_DECL;
2567 field = TREE_CHAIN (field))
2568 continue;
2570 if (field == NULL)
2571 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2573 /* Check that the first field is valid for returning in a register. */
2575 /* ... Floats are not allowed */
2576 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2577 return 1;
2579 /* ... Aggregates that are not themselves valid for returning in
2580 a register are not allowed. */
2581 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2582 return 1;
2584 /* Now check the remaining fields, if any. Only bitfields are allowed,
2585 since they are not addressable. */
2586 for (field = TREE_CHAIN (field);
2587 field;
2588 field = TREE_CHAIN (field))
2590 if (TREE_CODE (field) != FIELD_DECL)
2591 continue;
2593 if (!DECL_BIT_FIELD_TYPE (field))
2594 return 1;
2597 return 0;
2600 if (TREE_CODE (type) == UNION_TYPE)
2602 tree field;
2604 /* Unions can be returned in registers if every element is
2605 integral, or can be returned in an integer register. */
2606 for (field = TYPE_FIELDS (type);
2607 field;
2608 field = TREE_CHAIN (field))
2610 if (TREE_CODE (field) != FIELD_DECL)
2611 continue;
2613 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2614 return 1;
2616 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2617 return 1;
2620 return 0;
2622 #endif /* not ARM_WINCE */
2624 /* Return all other types in memory. */
2625 return 1;
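/* Illustrative examples of the APCS rules above (assuming 4-byte
   words):

     struct a { int i; };			in a register (integer like)
     struct b { int i : 8; int j : 24; };	in a register (rest are bit-fields)
     struct c { char x; char y; };		in memory (Y is addressable)
     struct d { float f; };			in memory (float member)  */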
2628 /* Indicate whether or not words of a double are in big-endian order. */
2631 arm_float_words_big_endian (void)
2633 if (TARGET_MAVERICK)
2634 return 0;
2636 /* For FPA, float words are always big-endian. For VFP, float words
2637 follow the memory system mode. */
2639 if (TARGET_FPA)
2641 return 1;
2644 if (TARGET_VFP)
2645 return (TARGET_BIG_END ? 1 : 0);
2647 return 1;
2650 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2651 for a call to a function whose data type is FNTYPE.
2652 For a library call, FNTYPE is NULL. */
2653 void
2654 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2655 rtx libname ATTRIBUTE_UNUSED,
2656 tree fndecl ATTRIBUTE_UNUSED)
2658 /* On the ARM, the offset starts at 0. */
2659 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2660 pcum->iwmmxt_nregs = 0;
2661 pcum->can_split = true;
2663 pcum->call_cookie = CALL_NORMAL;
2665 if (TARGET_LONG_CALLS)
2666 pcum->call_cookie = CALL_LONG;
2668 /* Check for long call/short call attributes. The attributes
2669 override any command line option. */
2670 if (fntype)
2672 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2673 pcum->call_cookie = CALL_SHORT;
2674 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2675 pcum->call_cookie = CALL_LONG;
2678 /* Varargs vectors are treated the same as long long.
2679 named_count avoids having to change the way arm handles 'named'. */
2680 pcum->named_count = 0;
2681 pcum->nargs = 0;
2683 if (TARGET_REALLY_IWMMXT && fntype)
2685 tree fn_arg;
2687 for (fn_arg = TYPE_ARG_TYPES (fntype);
2688 fn_arg;
2689 fn_arg = TREE_CHAIN (fn_arg))
2690 pcum->named_count += 1;
2692 if (! pcum->named_count)
2693 pcum->named_count = INT_MAX;
2698 /* Return true if mode/type need doubleword alignment. */
2699 bool
2700 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2702 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2703 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2707 /* Determine where to put an argument to a function.
2708 Value is zero to push the argument on the stack,
2709 or a hard register in which to store the argument.
2711 MODE is the argument's machine mode.
2712 TYPE is the data type of the argument (as a tree).
2713 This is null for libcalls where that information may
2714 not be available.
2715 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2716 the preceding args and about the function being called.
2717 NAMED is nonzero if this argument is a named parameter
2718 (otherwise it is an extra parameter matching an ellipsis). */
2720 rtx
2721 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2722 tree type, int named)
2724 int nregs;
2726 /* Varargs vectors are treated the same as long long.
2727 named_count avoids having to change the way arm handles 'named'. */
2728 if (TARGET_IWMMXT_ABI
2729 && arm_vector_mode_supported_p (mode)
2730 && pcum->named_count > pcum->nargs + 1)
2732 if (pcum->iwmmxt_nregs <= 9)
2733 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2734 else
2736 pcum->can_split = false;
2737 return NULL_RTX;
2741 /* Put doubleword aligned quantities in even register pairs. */
2742 if (pcum->nregs & 1
2743 && ARM_DOUBLEWORD_ALIGN
2744 && arm_needs_doubleword_align (mode, type))
2745 pcum->nregs++;
2747 if (mode == VOIDmode)
2748 /* Compute operand 2 of the call insn. */
2749 return GEN_INT (pcum->call_cookie);
2751 /* Only allow splitting an arg between regs and memory if all preceding
2752 args were allocated to regs. For args passed by reference we only count
2753 the reference pointer. */
2754 if (pcum->can_split)
2755 nregs = 1;
2756 else
2757 nregs = ARM_NUM_REGS2 (mode, type);
2759 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2760 return NULL_RTX;
2762 return gen_rtx_REG (mode, pcum->nregs);
2765 static int
2766 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2767 tree type, bool named ATTRIBUTE_UNUSED)
2769 int nregs = pcum->nregs;
2771 if (arm_vector_mode_supported_p (mode))
2772 return 0;
2774 if (NUM_ARG_REGS > nregs
2775 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2776 && pcum->can_split)
2777 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2779 return 0;
2782 /* Variable sized types are passed by reference. This is a GCC
2783 extension to the ARM ABI. */
2785 static bool
2786 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2787 enum machine_mode mode ATTRIBUTE_UNUSED,
2788 tree type, bool named ATTRIBUTE_UNUSED)
2790 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2793 /* Encode the current state of the #pragma [no_]long_calls. */
2794 typedef enum
2796 OFF, /* No #pragma [no_]long_calls is in effect. */
2797 LONG, /* #pragma long_calls is in effect. */
2798 SHORT /* #pragma no_long_calls is in effect. */
2799 } arm_pragma_enum;
2801 static arm_pragma_enum arm_pragma_long_calls = OFF;
2803 void
2804 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2806 arm_pragma_long_calls = LONG;
2809 void
2810 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2812 arm_pragma_long_calls = SHORT;
2815 void
2816 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2818 arm_pragma_long_calls = OFF;
2821 /* Table of machine attributes. */
2822 const struct attribute_spec arm_attribute_table[] =
2824 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2825 /* Function calls made to this symbol must be done indirectly, because
2826 it may lie outside of the 26 bit addressing range of a normal function
2827 call. */
2828 { "long_call", 0, 0, false, true, true, NULL },
2829 /* Whereas these functions are always known to reside within the 26 bit
2830 addressing range. */
2831 { "short_call", 0, 0, false, true, true, NULL },
2832 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2833 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2834 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2835 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2836 #ifdef ARM_PE
2837 /* ARM/PE has three new attributes:
2838 interfacearm - ?
2839 dllexport - for exporting a function/variable that will live in a dll
2840 dllimport - for importing a function/variable from a dll
2842 Microsoft allows multiple declspecs in one __declspec, separating
2843 them with spaces. We do NOT support this. Instead, use __declspec
2844 multiple times. */
2846 { "dllimport", 0, 0, true, false, false, NULL },
2847 { "dllexport", 0, 0, true, false, false, NULL },
2848 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2849 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2850 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2851 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2852 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2853 #endif
2854 { NULL, 0, 0, false, false, false, NULL }
2857 /* Handle an attribute requiring a FUNCTION_DECL;
2858 arguments as in struct attribute_spec.handler. */
2859 static tree
2860 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2861 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2863 if (TREE_CODE (*node) != FUNCTION_DECL)
2865 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2866 IDENTIFIER_POINTER (name));
2867 *no_add_attrs = true;
2870 return NULL_TREE;
2873 /* Handle an "interrupt" or "isr" attribute;
2874 arguments as in struct attribute_spec.handler. */
2875 static tree
2876 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2877 bool *no_add_attrs)
2879 if (DECL_P (*node))
2881 if (TREE_CODE (*node) != FUNCTION_DECL)
2883 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2884 IDENTIFIER_POINTER (name));
2885 *no_add_attrs = true;
2887 /* FIXME: the argument, if any, is checked for type attributes;
2888 should it be checked for decl ones? */
2890 else
2892 if (TREE_CODE (*node) == FUNCTION_TYPE
2893 || TREE_CODE (*node) == METHOD_TYPE)
2895 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2897 warning (OPT_Wattributes, "%qs attribute ignored",
2898 IDENTIFIER_POINTER (name));
2899 *no_add_attrs = true;
2902 else if (TREE_CODE (*node) == POINTER_TYPE
2903 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2904 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2905 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2907 *node = build_variant_type_copy (*node);
2908 TREE_TYPE (*node) = build_type_attribute_variant
2909 (TREE_TYPE (*node),
2910 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2911 *no_add_attrs = true;
2913 else
2915 /* Possibly pass this attribute on from the type to a decl. */
2916 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2917 | (int) ATTR_FLAG_FUNCTION_NEXT
2918 | (int) ATTR_FLAG_ARRAY_NEXT))
2920 *no_add_attrs = true;
2921 return tree_cons (name, args, NULL_TREE);
2923 else
2925 warning (OPT_Wattributes, "%qs attribute ignored",
2926 IDENTIFIER_POINTER (name));
2931 return NULL_TREE;
2934 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2935 /* Handle the "notshared" attribute. This attribute is another way of
2936 requesting hidden visibility. ARM's compiler supports
2937 "__declspec(notshared)"; we support the same thing via an
2938 attribute. */
2940 static tree
2941 arm_handle_notshared_attribute (tree *node,
2942 tree name ATTRIBUTE_UNUSED,
2943 tree args ATTRIBUTE_UNUSED,
2944 int flags ATTRIBUTE_UNUSED,
2945 bool *no_add_attrs)
2947 tree decl = TYPE_NAME (*node);
2949 if (decl)
2951 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2952 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2953 *no_add_attrs = false;
2955 return NULL_TREE;
2957 #endif
2959 /* Return 0 if the attributes for two types are incompatible, 1 if they
2960 are compatible, and 2 if they are nearly compatible (which causes a
2961 warning to be generated). */
2962 static int
2963 arm_comp_type_attributes (tree type1, tree type2)
2965 int l1, l2, s1, s2;
2967 /* Check for mismatch of non-default calling convention. */
2968 if (TREE_CODE (type1) != FUNCTION_TYPE)
2969 return 1;
2971 /* Check for mismatched call attributes. */
2972 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2973 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2974 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2975 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2977 /* Only bother to check if an attribute is defined. */
2978 if (l1 | l2 | s1 | s2)
2980 /* If one type has an attribute, the other must have the same attribute. */
2981 if ((l1 != l2) || (s1 != s2))
2982 return 0;
2984 /* Disallow mixed attributes. */
2985 if ((l1 & s2) || (l2 & s1))
2986 return 0;
2989 /* Check for mismatched ISR attribute. */
2990 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2991 if (! l1)
2992 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2993 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2994 if (! l2)
2995 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2996 if (l1 != l2)
2997 return 0;
2999 return 1;
3002 /* Encode long_call or short_call attribute by prefixing
3003 symbol name in DECL with a special character FLAG. */
3004 void
3005 arm_encode_call_attribute (tree decl, int flag)
3007 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
3008 int len = strlen (str);
3009 char * newstr;
3011 /* Do not allow weak functions to be treated as short call. */
3012 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
3013 return;
3015 newstr = alloca (len + 2);
3016 newstr[0] = flag;
3017 strcpy (newstr + 1, str);
3019 newstr = (char *) ggc_alloc_string (newstr, len + 1);
3020 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
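/* For illustration (the actual flag characters are defined elsewhere
   in the port): encoding the long_call attribute on a function "foo"
   rewrites its assembler symbol to "<FLAG>foo".  The
   ENCODED_LONG_CALL_ATTR_P and ENCODED_SHORT_CALL_ATTR_P predicates
   then only have to inspect the first character of the name, so the
   property travels with the SYMBOL_REF.  */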
3023 /* Assign default attributes to a newly defined type. This is used to
3024 set short_call/long_call attributes for function types of
3025 functions defined inside corresponding #pragma scopes. */
3026 static void
3027 arm_set_default_type_attributes (tree type)
3029 /* Add __attribute__ ((long_call)) to all functions, when
3030 inside #pragma long_calls or __attribute__ ((short_call)),
3031 when inside #pragma no_long_calls. */
3032 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
3034 tree type_attr_list, attr_name;
3035 type_attr_list = TYPE_ATTRIBUTES (type);
3037 if (arm_pragma_long_calls == LONG)
3038 attr_name = get_identifier ("long_call");
3039 else if (arm_pragma_long_calls == SHORT)
3040 attr_name = get_identifier ("short_call");
3041 else
3042 return;
3044 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
3045 TYPE_ATTRIBUTES (type) = type_attr_list;
3049 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
3050 defined within the current compilation unit. If this cannot be
3051 determined, then 0 is returned. */
3052 static int
3053 current_file_function_operand (rtx sym_ref)
3055 /* This is a bit of a fib. A function will have a short call flag
3056 applied to its name if it has the short call attribute, or it has
3057 already been defined within the current compilation unit. */
3058 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
3059 return 1;
3061 /* The current function is always defined within the current compilation
3062 unit. If it is a weak definition, however, then this may not be the real
3063 definition of the function, and so we have to say no. */
3064 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
3065 && !DECL_WEAK (current_function_decl))
3066 return 1;
3068 /* We cannot make the determination - default to returning 0. */
3069 return 0;
3072 /* Return nonzero if a 32 bit "long_call" should be generated for
3073 this call. We generate a long_call if the function:
3075 a. has an __attribute__ ((long_call))
3076 or b. is within the scope of a #pragma long_calls
3077 or c. the -mlong-calls command line switch has been specified,
3078 and either:
3079 1. -ffunction-sections is in effect
3080 or 2. the current function has __attribute__ ((section))
3081 or 3. the target function has __attribute__ ((section))
3083 However we do not generate a long call if the function:
3085 d. has an __attribute__ ((short_call))
3086 or e. is inside the scope of a #pragma no_long_calls
3087 or f. is defined within the current compilation unit.
3089 This function will be called by C fragments contained in the machine
3090 description file. SYM_REF and CALL_COOKIE correspond to the matched
3091 rtl operands. CALL_SYMBOL is used to distinguish between
3092 two different callers of the function. It is set to 1 in the
3093 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3094 and "call_value" patterns. This is because of the difference in the
3095 SYM_REFs passed by these patterns. */
3096 int
3097 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3099 if (!call_symbol)
3101 if (GET_CODE (sym_ref) != MEM)
3102 return 0;
3104 sym_ref = XEXP (sym_ref, 0);
3107 if (GET_CODE (sym_ref) != SYMBOL_REF)
3108 return 0;
3110 if (call_cookie & CALL_SHORT)
3111 return 0;
3113 if (TARGET_LONG_CALLS)
3115 if (flag_function_sections
3116 || DECL_SECTION_NAME (current_function_decl))
3117 /* c.3 is handled by the definition of the
3118 ARM_DECLARE_FUNCTION_SIZE macro. */
3119 return 1;
3122 if (current_file_function_operand (sym_ref))
3123 return 0;
3125 return (call_cookie & CALL_LONG)
3126 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3127 || TARGET_LONG_CALLS;
3130 /* Return nonzero if it is ok to make a tail-call to DECL. */
3131 static bool
3132 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3134 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3136 if (cfun->machine->sibcall_blocked)
3137 return false;
3139 /* Never tailcall something for which we have no decl, or if we
3140 are in Thumb mode. */
3141 if (decl == NULL || TARGET_THUMB)
3142 return false;
3144 /* Get the calling method. */
3145 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3146 call_type = CALL_SHORT;
3147 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3148 call_type = CALL_LONG;
3150 /* Cannot tail-call to long calls, since these are out of range of
3151 a branch instruction. However, if not compiling PIC, we know
3152 we can reach the symbol if it is in this compilation unit. */
3153 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3154 return false;
3156 /* If we are interworking and the function is not declared static
3157 then we can't tail-call it unless we know that it exists in this
3158 compilation unit (since it might be a Thumb routine). */
3159 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3160 return false;
3162 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3163 if (IS_INTERRUPT (arm_current_func_type ()))
3164 return false;
3166 /* Everything else is ok. */
3167 return true;
3171 /* Addressing mode support functions. */
3173 /* Return nonzero if X is a legitimate immediate operand when compiling
3174 for PIC. */
3175 int
3176 legitimate_pic_operand_p (rtx x)
3178 if (CONSTANT_P (x)
3179 && flag_pic
3180 && (GET_CODE (x) == SYMBOL_REF
3181 || (GET_CODE (x) == CONST
3182 && GET_CODE (XEXP (x, 0)) == PLUS
3183 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
3184 return 0;
3186 return 1;
3189 rtx
3190 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3192 if (GET_CODE (orig) == SYMBOL_REF
3193 || GET_CODE (orig) == LABEL_REF)
3195 #ifndef AOF_ASSEMBLER
3196 rtx pic_ref, address;
3197 #endif
3198 rtx insn;
3199 int subregs = 0;
3201 if (reg == 0)
3203 gcc_assert (!no_new_pseudos);
3204 reg = gen_reg_rtx (Pmode);
3206 subregs = 1;
3209 #ifdef AOF_ASSEMBLER
3210 /* The AOF assembler can generate relocations for these directly, and
3211 understands that the PIC register has to be added into the offset. */
3212 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3213 #else
3214 if (subregs)
3215 address = gen_reg_rtx (Pmode);
3216 else
3217 address = reg;
3219 if (TARGET_ARM)
3220 emit_insn (gen_pic_load_addr_arm (address, orig));
3221 else
3222 emit_insn (gen_pic_load_addr_thumb (address, orig));
3224 if ((GET_CODE (orig) == LABEL_REF
3225 || (GET_CODE (orig) == SYMBOL_REF
3226 && SYMBOL_REF_LOCAL_P (orig)))
3227 && NEED_GOT_RELOC)
3228 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3229 else
3231 pic_ref = gen_const_mem (Pmode,
3232 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3233 address));
3236 insn = emit_move_insn (reg, pic_ref);
3237 #endif
3238 current_function_uses_pic_offset_table = 1;
3239 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3240 by loop. */
3241 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3242 REG_NOTES (insn));
3243 return reg;
3245 else if (GET_CODE (orig) == CONST)
3247 rtx base, offset;
3249 if (GET_CODE (XEXP (orig, 0)) == PLUS
3250 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3251 return orig;
3253 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
3254 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
3255 return orig;
3257 if (reg == 0)
3259 gcc_assert (!no_new_pseudos);
3260 reg = gen_reg_rtx (Pmode);
3263 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3265 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3266 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3267 base == reg ? 0 : reg);
3269 if (GET_CODE (offset) == CONST_INT)
3271 /* The base register doesn't really matter; we only want to
3272 test the index for the appropriate mode. */
3273 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3275 gcc_assert (!no_new_pseudos);
3276 offset = force_reg (Pmode, offset);
3279 if (GET_CODE (offset) == CONST_INT)
3280 return plus_constant (base, INTVAL (offset));
3283 if (GET_MODE_SIZE (mode) > 4
3284 && (GET_MODE_CLASS (mode) == MODE_INT
3285 || TARGET_SOFT_FLOAT))
3287 emit_insn (gen_addsi3 (reg, base, offset));
3288 return reg;
3291 return gen_rtx_PLUS (Pmode, base, offset);
3294 return orig;
3298 /* Find a spare low register to use during the prologue of a function. */
3300 static int
3301 thumb_find_work_register (unsigned long pushed_regs_mask)
3303 int reg;
3305 /* Check the argument registers first as these are call-used. The
3306 register allocation order means that sometimes r3 might be used
3307 but earlier argument registers might not, so check them all. */
3308 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3309 if (!regs_ever_live[reg])
3310 return reg;
3312 /* Before going on to check the call-saved registers we can try a couple
3313 more ways of deducing that r3 is available. The first is when we are
3314 pushing anonymous arguments onto the stack and we have fewer than 4
3315 registers' worth of fixed arguments (*). In this case r3 will be part of
3316 the variable argument list and so we can be sure that it will be
3317 pushed right at the start of the function. Hence it will be available
3318 for the rest of the prologue.
3319 (*): i.e. current_function_pretend_args_size is greater than 0. */
3320 if (cfun->machine->uses_anonymous_args
3321 && current_function_pretend_args_size > 0)
3322 return LAST_ARG_REGNUM;
3324 /* The other case is when we have fixed arguments but fewer than 4
3325 registers' worth. In this case r3 might be used in the body of the function, but
3326 it is not being used to convey an argument into the function. In theory
3327 we could just check current_function_args_size to see how many bytes are
3328 being passed in argument registers, but it seems that it is unreliable.
3329 Sometimes it will have the value 0 when in fact arguments are being
3330 passed. (See testcase execute/20021111-1.c for an example). So we also
3331 check the args_info.nregs field as well. The problem with this field is
3332 that it makes no allowances for arguments that are passed to the
3333 function but which are not used. Hence we could miss an opportunity
3334 when a function has an unused argument in r3. But it is better to be
3335 safe than to be sorry. */
3336 if (! cfun->machine->uses_anonymous_args
3337 && current_function_args_size >= 0
3338 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3339 && cfun->args_info.nregs < 4)
3340 return LAST_ARG_REGNUM;
3342 /* Otherwise look for a call-saved register that is going to be pushed. */
3343 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3344 if (pushed_regs_mask & (1 << reg))
3345 return reg;
3347 /* Something went wrong - thumb_compute_save_reg_mask()
3348 should have arranged for a suitable register to be pushed. */
3349 gcc_unreachable ();
3352 static GTY(()) int pic_labelno;
3354 /* Generate code to load the PIC register. In Thumb mode SAVED_REGS
3355 gives the registers pushed by the prologue, so a spare low register can be found. */
3357 void
3358 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3360 #ifndef AOF_ASSEMBLER
3361 rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx;
3362 rtx global_offset_table;
3364 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3365 return;
3367 gcc_assert (flag_pic);
3369 /* We use an UNSPEC rather than a LABEL_REF because this label never appears
3370 in the code stream. */
3372 labelno = GEN_INT (pic_labelno++);
3373 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3374 l1 = gen_rtx_CONST (VOIDmode, l1);
3376 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3377 /* On the ARM the PC register contains 'dot + 8' at the time of the
3378 addition; on the Thumb it is 'dot + 4'. */
3379 pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
3380 if (GOT_PCREL)
3381 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3382 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3383 else
3384 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3386 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3388 if (TARGET_ARM)
3390 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3391 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx,
3392 pic_offset_table_rtx, labelno));
3394 else
3396 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3398 /* We will have pushed the pic register, so we should always be
3399 able to find a work register. */
3400 pic_tmp = gen_rtx_REG (SImode,
3401 thumb_find_work_register (saved_regs));
3402 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3403 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3405 else
3406 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3407 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx,
3408 pic_offset_table_rtx, labelno));
3411 /* Need to emit this whether or not we obey regdecls,
3412 since setjmp/longjmp can cause life info to screw up. */
3413 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3414 #endif /* AOF_ASSEMBLER */
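/* The net effect in ARM state is the classic PC-relative GOT pointer
   computation (an illustrative sketch; the label names are invented,
   and sl stands in for whichever register is the PIC register):

	ldr	sl, .LCn	@ .LCn: .word _GLOBAL_OFFSET_TABLE_ - (.LPICm + 8)
.LPICm:	add	sl, pc, sl

   Since the PC reads as '.LPICm + 8' when the add executes, sl ends up
   holding the address of the GOT.  */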
3418 /* Return nonzero if X is valid as an ARM state addressing register. */
3419 static int
3420 arm_address_register_rtx_p (rtx x, int strict_p)
3422 int regno;
3424 if (GET_CODE (x) != REG)
3425 return 0;
3427 regno = REGNO (x);
3429 if (strict_p)
3430 return ARM_REGNO_OK_FOR_BASE_P (regno);
3432 return (regno <= LAST_ARM_REGNUM
3433 || regno >= FIRST_PSEUDO_REGISTER
3434 || regno == FRAME_POINTER_REGNUM
3435 || regno == ARG_POINTER_REGNUM);
3438 /* Return TRUE if this rtx is the difference of a symbol and a label,
3439 and will reduce to a PC-relative relocation in the object file.
3440 Expressions like this can be left alone when generating PIC, rather
3441 than forced through the GOT. */
3442 static int
3443 pcrel_constant_p (rtx x)
3445 if (GET_CODE (x) == MINUS)
3446 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
3448 return FALSE;
3451 /* Return nonzero if X is a valid ARM state address operand. */
3452 int
3453 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3454 int strict_p)
3456 bool use_ldrd;
3457 enum rtx_code code = GET_CODE (x);
3459 if (arm_address_register_rtx_p (x, strict_p))
3460 return 1;
3462 use_ldrd = (TARGET_LDRD
3463 && (mode == DImode
3464 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3466 if (code == POST_INC || code == PRE_DEC
3467 || ((code == PRE_INC || code == POST_DEC)
3468 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3469 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3471 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3472 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3473 && GET_CODE (XEXP (x, 1)) == PLUS
3474 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3476 rtx addend = XEXP (XEXP (x, 1), 1);
3478 /* Don't allow ldrd post-increment by register, because it's hard
3479 to fix up invalid register choices. */
3480 if (use_ldrd
3481 && GET_CODE (x) == POST_MODIFY
3482 && GET_CODE (addend) == REG)
3483 return 0;
3485 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3486 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3489 /* After reload, constants split into minipools will have addresses
3490 from a LABEL_REF. */
3491 else if (reload_completed
3492 && (code == LABEL_REF
3493 || (code == CONST
3494 && GET_CODE (XEXP (x, 0)) == PLUS
3495 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3496 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3497 return 1;
3499 else if (mode == TImode)
3500 return 0;
3502 else if (code == PLUS)
3504 rtx xop0 = XEXP (x, 0);
3505 rtx xop1 = XEXP (x, 1);
3507 return ((arm_address_register_rtx_p (xop0, strict_p)
3508 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3509 || (arm_address_register_rtx_p (xop1, strict_p)
3510 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3513 #if 0
3514 /* Reload currently can't handle MINUS, so disable this for now */
3515 else if (GET_CODE (x) == MINUS)
3517 rtx xop0 = XEXP (x, 0);
3518 rtx xop1 = XEXP (x, 1);
3520 return (arm_address_register_rtx_p (xop0, strict_p)
3521 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3523 #endif
3525 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3526 && code == SYMBOL_REF
3527 && CONSTANT_POOL_ADDRESS_P (x)
3528 && ! (flag_pic
3529 && symbol_mentioned_p (get_pool_constant (x))
3530 && ! pcrel_constant_p (get_pool_constant (x))))
3531 return 1;
3533 return 0;
3536 /* Return nonzero if INDEX is valid for an address index operand in
3537 ARM state. */
3538 static int
3539 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3540 int strict_p)
3542 HOST_WIDE_INT range;
3543 enum rtx_code code = GET_CODE (index);
3545 /* Standard coprocessor addressing modes. */
3546 if (TARGET_HARD_FLOAT
3547 && (TARGET_FPA || TARGET_MAVERICK)
3548 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3549 || (TARGET_MAVERICK && mode == DImode)))
3550 return (code == CONST_INT && INTVAL (index) < 1024
3551 && INTVAL (index) > -1024
3552 && (INTVAL (index) & 3) == 0);
3554 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3555 return (code == CONST_INT
3556 && INTVAL (index) < 1024
3557 && INTVAL (index) > -1024
3558 && (INTVAL (index) & 3) == 0);
3560 if (arm_address_register_rtx_p (index, strict_p)
3561 && (GET_MODE_SIZE (mode) <= 4))
3562 return 1;
3564 if (mode == DImode || mode == DFmode)
3566 if (code == CONST_INT)
3568 HOST_WIDE_INT val = INTVAL (index);
3570 if (TARGET_LDRD)
3571 return val > -256 && val < 256;
3572 else
3573 return val > -4096 && val < 4092;
3576 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3579 if (GET_MODE_SIZE (mode) <= 4
3580 && ! (arm_arch4
3581 && (mode == HImode
3582 || (mode == QImode && outer == SIGN_EXTEND))))
3584 if (code == MULT)
3586 rtx xiop0 = XEXP (index, 0);
3587 rtx xiop1 = XEXP (index, 1);
3589 return ((arm_address_register_rtx_p (xiop0, strict_p)
3590 && power_of_two_operand (xiop1, SImode))
3591 || (arm_address_register_rtx_p (xiop1, strict_p)
3592 && power_of_two_operand (xiop0, SImode)));
3594 else if (code == LSHIFTRT || code == ASHIFTRT
3595 || code == ASHIFT || code == ROTATERT)
3597 rtx op = XEXP (index, 1);
3599 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3600 && GET_CODE (op) == CONST_INT
3601 && INTVAL (op) > 0
3602 && INTVAL (op) <= 31);
3606 /* For ARM v4 we may be doing a sign-extend operation during the
3607 load. */
3608 if (arm_arch4)
3610 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3611 range = 256;
3612 else
3613 range = 4096;
3615 else
3616 range = (mode == HImode) ? 4095 : 4096;
3618 return (code == CONST_INT
3619 && INTVAL (index) < range
3620 && INTVAL (index) > -range);
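/* Examples of index operands accepted above in ARM state
   (illustrative):

	ldr	r0, [r1, r2]		@ register index
	ldr	r0, [r1, r2, lsl #2]	@ index scaled by a power of two
	ldr	r0, [r1, #4095]		@ SImode immediate, within (-4096, 4096)
	ldrh	r0, [r1, #255]		@ HImode on ARMv4, within (-256, 256)  */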
3623 /* Return nonzero if X is valid as a Thumb state base register. */
3624 static int
3625 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3627 int regno;
3629 if (GET_CODE (x) != REG)
3630 return 0;
3632 regno = REGNO (x);
3634 if (strict_p)
3635 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3637 return (regno <= LAST_LO_REGNUM
3638 || regno > LAST_VIRTUAL_REGISTER
3639 || regno == FRAME_POINTER_REGNUM
3640 || (GET_MODE_SIZE (mode) >= 4
3641 && (regno == STACK_POINTER_REGNUM
3642 || regno >= FIRST_PSEUDO_REGISTER
3643 || x == hard_frame_pointer_rtx
3644 || x == arg_pointer_rtx)));
3647 /* Return nonzero if X is a legitimate index register. This is the case
3648 for any base register that can access a QImode object. */
3649 inline static int
3650 thumb_index_register_rtx_p (rtx x, int strict_p)
3652 return thumb_base_register_rtx_p (x, QImode, strict_p);
3655 /* Return nonzero if X is a legitimate Thumb-state address.
3657 The AP may be eliminated to either the SP or the FP, so we use the
3658 least common denominator, e.g. SImode, and offsets from 0 to 64.
3660 ??? Verify whether the above is the right approach.
3662 ??? Also, the FP may be eliminated to the SP, so perhaps that
3663 needs special handling also.
3665 ??? Look at how the mips16 port solves this problem. It probably uses
3666 better ways to solve some of these problems.
3668 Although it is not incorrect, we don't accept QImode and HImode
3669 addresses based on the frame pointer or arg pointer until the
3670 reload pass starts. This is so that eliminating such addresses
3671 into stack based ones won't produce impossible code. */
3672 int
3673 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3675 /* ??? Not clear if this is right. Experiment. */
3676 if (GET_MODE_SIZE (mode) < 4
3677 && !(reload_in_progress || reload_completed)
3678 && (reg_mentioned_p (frame_pointer_rtx, x)
3679 || reg_mentioned_p (arg_pointer_rtx, x)
3680 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3681 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3682 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3683 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3684 return 0;
3686 /* Accept any base register. SP only in SImode or larger. */
3687 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3688 return 1;
3690 /* This is PC relative data before arm_reorg runs. */
3691 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3692 && GET_CODE (x) == SYMBOL_REF
3693 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3694 return 1;
3696 /* This is PC relative data after arm_reorg runs. */
3697 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3698 && (GET_CODE (x) == LABEL_REF
3699 || (GET_CODE (x) == CONST
3700 && GET_CODE (XEXP (x, 0)) == PLUS
3701 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3702 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3703 return 1;
3705 /* Post-inc indexing only supported for SImode and larger. */
3706 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3707 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3708 return 1;
3710 else if (GET_CODE (x) == PLUS)
3712 /* REG+REG address can be any two index registers. */
3713 /* We disallow FRAME+REG addressing since we know that FRAME
3714 will be replaced with STACK, and SP relative addressing only
3715 permits SP+OFFSET. */
3716 if (GET_MODE_SIZE (mode) <= 4
3717 && XEXP (x, 0) != frame_pointer_rtx
3718 && XEXP (x, 1) != frame_pointer_rtx
3719 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3720 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3721 return 1;
3723 /* REG+const has 5-7 bit offset for non-SP registers. */
3724 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3725 || XEXP (x, 0) == arg_pointer_rtx)
3726 && GET_CODE (XEXP (x, 1)) == CONST_INT
3727 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3728 return 1;
3730 /* REG+const has a 10 bit offset for SP, but only SImode and
3731 larger are supported. */
3732 /* ??? Should probably check for DI/DFmode overflow here
3733 just like GO_IF_LEGITIMATE_OFFSET does. */
3734 else if (GET_CODE (XEXP (x, 0)) == REG
3735 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3736 && GET_MODE_SIZE (mode) >= 4
3737 && GET_CODE (XEXP (x, 1)) == CONST_INT
3738 && INTVAL (XEXP (x, 1)) >= 0
3739 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3740 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3741 return 1;
3743 else if (GET_CODE (XEXP (x, 0)) == REG
3744 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3745 && GET_MODE_SIZE (mode) >= 4
3746 && GET_CODE (XEXP (x, 1)) == CONST_INT
3747 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3748 return 1;
3751 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3752 && GET_MODE_SIZE (mode) == 4
3753 && GET_CODE (x) == SYMBOL_REF
3754 && CONSTANT_POOL_ADDRESS_P (x)
3755 && ! (flag_pic
3756 && symbol_mentioned_p (get_pool_constant (x))
3757 && ! pcrel_constant_p (get_pool_constant (x))))
3758 return 1;
3760 return 0;
3763 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3764 instruction of mode MODE. */
3766 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3768 switch (GET_MODE_SIZE (mode))
3770 case 1:
3771 return val >= 0 && val < 32;
3773 case 2:
3774 return val >= 0 && val < 64 && (val & 1) == 0;
3776 default:
3777 return (val >= 0
3778 && (val + GET_MODE_SIZE (mode)) <= 128
3779 && (val & 3) == 0);
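/* [Editorial sketch, not part of the build.]  A self-contained
   illustration of the ranges accepted above: the Thumb-1 immediate
   offset is a 5-bit field scaled by the access size, so bytes reach
   0-31, halfwords 0-62 (even), and words 0-124 (multiples of 4).
   The helper names are hypothetical.  */
#if 0
#include <assert.h>

static int
example_thumb_offset_ok (int size, long val)
{
  switch (size)
    {
    case 1:
      return val >= 0 && val < 32;                   /* ldrb/strb */
    case 2:
      return val >= 0 && val < 64 && (val & 1) == 0; /* ldrh/strh */
    default:
      return val >= 0 && val + size <= 128 && (val & 3) == 0;
    }
}

static void
example_thumb_offset_demo (void)
{
  assert (example_thumb_offset_ok (1, 31));    /* last byte slot */
  assert (!example_thumb_offset_ok (2, 63));   /* odd, so rejected */
  assert (example_thumb_offset_ok (4, 124));   /* last word slot */
  assert (!example_thumb_offset_ok (4, 126));  /* misaligned */
}
#endif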
3783 /* Build the SYMBOL_REF for __tls_get_addr. */
3785 static GTY(()) rtx tls_get_addr_libfunc;
3787 static rtx
3788 get_tls_get_addr (void)
3790 if (!tls_get_addr_libfunc)
3791 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
3792 return tls_get_addr_libfunc;
3795 static rtx
3796 arm_load_tp (rtx target)
3798 if (!target)
3799 target = gen_reg_rtx (SImode);
3801 if (TARGET_HARD_TP)
3803 /* Can return in any reg. */
3804 emit_insn (gen_load_tp_hard (target));
3806 else
3808 /* Always returned in r0. Immediately copy the result into a pseudo,
3809 otherwise other uses of r0 (e.g. setting up function arguments) may
3810 clobber the value. */
3812 rtx tmp;
3814 emit_insn (gen_load_tp_soft ());
3816 tmp = gen_rtx_REG (SImode, 0);
3817 emit_move_insn (target, tmp);
3819 return target;
3822 static rtx
3823 load_tls_operand (rtx x, rtx reg)
3825 rtx tmp;
3827 if (reg == NULL_RTX)
3828 reg = gen_reg_rtx (SImode);
3830 tmp = gen_rtx_CONST (SImode, x);
3832 emit_move_insn (reg, tmp);
3834 return reg;
3837 static rtx
3838 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
3840 rtx insns, label, labelno, sum;
3842 start_sequence ();
3844 labelno = GEN_INT (pic_labelno++);
3845 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3846 label = gen_rtx_CONST (VOIDmode, label);
3848 sum = gen_rtx_UNSPEC (Pmode,
3849 gen_rtvec (4, x, GEN_INT (reloc), label,
3850 GEN_INT (TARGET_ARM ? 8 : 4)),
3851 UNSPEC_TLS);
3852 reg = load_tls_operand (sum, reg);
3854 if (TARGET_ARM)
3855 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
3856 else
3857 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3859 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
3860 Pmode, 1, reg, Pmode);
3862 insns = get_insns ();
3863 end_sequence ();
3865 return insns;
3869 legitimize_tls_address (rtx x, rtx reg)
3871 rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
3872 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
3874 switch (model)
3876 case TLS_MODEL_GLOBAL_DYNAMIC:
3877 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
3878 dest = gen_reg_rtx (Pmode);
3879 emit_libcall_block (insns, dest, ret, x);
3880 return dest;
3882 case TLS_MODEL_LOCAL_DYNAMIC:
3883 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
3885 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3886 share the LDM result with other LD model accesses. */
3887 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
3888 UNSPEC_TLS);
3889 dest = gen_reg_rtx (Pmode);
3890 emit_libcall_block (insns, dest, ret, eqv);
3892 /* Load the addend. */
3893 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
3894 UNSPEC_TLS);
3895 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
3896 return gen_rtx_PLUS (Pmode, dest, addend);
3898 case TLS_MODEL_INITIAL_EXEC:
3899 labelno = GEN_INT (pic_labelno++);
3900 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3901 label = gen_rtx_CONST (VOIDmode, label);
3902 sum = gen_rtx_UNSPEC (Pmode,
3903 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
3904 GEN_INT (TARGET_ARM ? 8 : 4)),
3905 UNSPEC_TLS);
3906 reg = load_tls_operand (sum, reg);
3908 if (TARGET_ARM)
3909 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
3910 else
3912 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3913 emit_move_insn (reg, gen_const_mem (SImode, reg));
3916 tp = arm_load_tp (NULL_RTX);
3918 return gen_rtx_PLUS (Pmode, tp, reg);
3920 case TLS_MODEL_LOCAL_EXEC:
3921 tp = arm_load_tp (NULL_RTX);
3923 reg = gen_rtx_UNSPEC (Pmode,
3924 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
3925 UNSPEC_TLS);
3926 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
3928 return gen_rtx_PLUS (Pmode, tp, reg);
3930 default:
3931 abort ();
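/* [Editorial sketch, not part of the build.]  For orientation, the four
   cases handled above map onto the usual TLS access models; which one
   the compiler picks depends on where the variable is defined and
   whether the code is position independent, so the pairings below are
   typical rather than guaranteed.  */
#if 0
__thread int tls_gd;         /* shared object, interposable definition:
                                typically TLS_MODEL_GLOBAL_DYNAMIC, a full
                                __tls_get_addr call per access */
static __thread int tls_ld;  /* shared object, module-local: typically
                                TLS_MODEL_LOCAL_DYNAMIC, one __tls_get_addr
                                call then constant addends (TLS_LDO32) */
extern __thread int tls_ie;  /* executable referencing a startup-loaded
                                module: typically TLS_MODEL_INITIAL_EXEC,
                                a GOT-held offset added to the thread
                                pointer (TLS_IE32) */
static __thread int tls_le;  /* defined in the executable itself:
                                TLS_MODEL_LOCAL_EXEC, a link-time offset
                                from the thread pointer (TLS_LE32) */
#endif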
3935 /* Try machine-dependent ways of modifying an illegitimate address
3936 to be legitimate. If we find one, return the new, valid address. */
3938 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3940 if (arm_tls_symbol_p (x))
3941 return legitimize_tls_address (x, NULL_RTX);
3943 if (GET_CODE (x) == PLUS)
3945 rtx xop0 = XEXP (x, 0);
3946 rtx xop1 = XEXP (x, 1);
3948 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3949 xop0 = force_reg (SImode, xop0);
3951 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3952 xop1 = force_reg (SImode, xop1);
3954 if (ARM_BASE_REGISTER_RTX_P (xop0)
3955 && GET_CODE (xop1) == CONST_INT)
3957 HOST_WIDE_INT n, low_n;
3958 rtx base_reg, val;
3959 n = INTVAL (xop1);
3961 /* VFP addressing modes actually allow greater offsets, but for
3962 now we just stick with the lowest common denominator. */
3963 if (mode == DImode
3964 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3966 low_n = n & 0x0f;
3967 n &= ~0x0f;
3968 if (low_n > 4)
3970 n += 16;
3971 low_n -= 16;
3974 else
3976 low_n = ((mode) == TImode ? 0
3977 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3978 n -= low_n;
3981 base_reg = gen_reg_rtx (SImode);
3982 val = force_operand (plus_constant (xop0, n), NULL_RTX);
3983 emit_move_insn (base_reg, val);
3984 x = plus_constant (base_reg, low_n);
3986 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3987 x = gen_rtx_PLUS (SImode, xop0, xop1);
3990 /* XXX We don't allow MINUS any more -- see comment in
3991 arm_legitimate_address_p (). */
3992 else if (GET_CODE (x) == MINUS)
3994 rtx xop0 = XEXP (x, 0);
3995 rtx xop1 = XEXP (x, 1);
3997 if (CONSTANT_P (xop0))
3998 xop0 = force_reg (SImode, xop0);
4000 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
4001 xop1 = force_reg (SImode, xop1);
4003 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4004 x = gen_rtx_MINUS (SImode, xop0, xop1);
4007 /* Make sure to take full advantage of the pre-indexed addressing mode
4008 with absolute addresses, which often allows the base register to be
4009 factored out across multiple adjacent memory references and might
4010 even allow the minipool to be avoided entirely. */
4011 else if (GET_CODE (x) == CONST_INT && optimize > 0)
4013 unsigned int bits;
4014 HOST_WIDE_INT mask, base, index;
4015 rtx base_reg;
4017 /* ldr and ldrb can use a 12 bit index, ldrsb and the rest can only
4018 use an 8 bit index. So let's use a 12 bit index for SImode only and
4019 hope that arm_gen_constant will enable ldrb to use more bits. */
4020 bits = (mode == SImode) ? 12 : 8;
4021 mask = (1 << bits) - 1;
4022 base = INTVAL (x) & ~mask;
4023 index = INTVAL (x) & mask;
4024 if (bit_count (base & 0xffffffff) > (32 - bits) / 2)
4026 /* It'll most probably be more efficient to generate the base
4027 with more bits set and use a negative index instead. */
4028 base |= mask;
4029 index -= mask;
4031 base_reg = force_reg (SImode, GEN_INT (base));
4032 x = plus_constant (base_reg, index);
4035 if (flag_pic)
4037 /* We need to find and carefully transform any SYMBOL and LABEL
4038 references, so go back to the original address expression. */
4039 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4041 if (new_x != orig_x)
4042 x = new_x;
4045 return x;
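/* [Editorial sketch, not part of the build.]  The CONST_INT case above
   splits an absolute address into an expensive-to-build base plus a
   cheap load offset, so several nearby references can share one base
   register.  The arithmetic in isolation (the hypothetical helper uses
   __builtin_popcountl where the code above uses bit_count):  */
#if 0
static void
example_split_address (unsigned long addr, int is_simode,
                       long *base, long *index)
{
  /* ldr/str take a 12-bit offset; ldrsb and friends only 8 bits.  */
  int bits = is_simode ? 12 : 8;
  long mask = (1L << bits) - 1;

  *base = addr & ~mask;
  *index = addr & mask;

  /* If building the base needs many set bits, a denser base with a
     negative index is usually cheaper to materialize.  */
  if (__builtin_popcountl (*base & 0xffffffff) > (32 - bits) / 2)
    {
      *base |= mask;
      *index -= mask;
    }

  /* In both branches *base + *index == addr.  */
}
#endif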
4049 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4050 to be legitimate. If we find one, return the new, valid address. */
4052 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4054 if (arm_tls_symbol_p (x))
4055 return legitimize_tls_address (x, NULL_RTX);
4057 if (GET_CODE (x) == PLUS
4058 && GET_CODE (XEXP (x, 1)) == CONST_INT
4059 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
4060 || INTVAL (XEXP (x, 1)) < 0))
4062 rtx xop0 = XEXP (x, 0);
4063 rtx xop1 = XEXP (x, 1);
4064 HOST_WIDE_INT offset = INTVAL (xop1);
4066 /* Try to fold the offset into a biasing of the base register and
4067 then offsetting that. Don't do this when optimizing for space
4068 since it can cause too many CSEs. */
4069 if (optimize_size && offset >= 0
4070 && offset < 256 + 31 * GET_MODE_SIZE (mode))
4072 HOST_WIDE_INT delta;
4074 if (offset >= 256)
4075 delta = offset - (256 - GET_MODE_SIZE (mode));
4076 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
4077 delta = 31 * GET_MODE_SIZE (mode);
4078 else
4079 delta = offset & (~31 * GET_MODE_SIZE (mode));
4081 xop0 = force_operand (plus_constant (xop0, offset - delta),
4082 NULL_RTX);
4083 x = plus_constant (xop0, delta);
4085 else if (offset < 0 && offset > -256)
4086 /* Small negative offsets are best done with a subtract before the
4087 dereference, forcing these into a register normally takes two
4088 instructions. */
4089 x = force_operand (x, NULL_RTX);
4090 else
4092 /* For the remaining cases, force the constant into a register. */
4093 xop1 = force_reg (SImode, xop1);
4094 x = gen_rtx_PLUS (SImode, xop0, xop1);
4097 else if (GET_CODE (x) == PLUS
4098 && s_register_operand (XEXP (x, 1), SImode)
4099 && !s_register_operand (XEXP (x, 0), SImode))
4101 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
4103 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
4106 if (flag_pic)
4108 /* We need to find and carefully transform any SYMBOL and LABEL
4109 references, so go back to the original address expression. */
4110 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4112 if (new_x != orig_x)
4113 x = new_x;
4116 return x;
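/* [Editorial sketch, not part of the build.]  The offset folding above,
   in isolation (hypothetical helper).  For an SImode access at reg+300
   (size 4, so the direct range is only 0-124): offset >= 256 gives
   delta = 300 - (256 - 4) = 48, and reg+300 is rewritten as
   (reg + 252) + 48, where 252 is reachable with a cheap add and 48
   fits the scaled 5-bit load offset.  */
#if 0
static long
example_thumb_fold_delta (long offset, int size)
{
  long delta;

  if (offset >= 256)
    delta = offset - (256 - size);
  else if (offset < 32 * size + 8)
    delta = 31 * size;
  else
    delta = offset & (~31 * size);

  return delta;   /* the caller biases the base by offset - delta */
}
#endif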
4120 thumb_legitimize_reload_address (rtx *x_p,
4121 enum machine_mode mode,
4122 int opnum, int type,
4123 int ind_levels ATTRIBUTE_UNUSED)
4125 rtx x = *x_p;
4127 if (GET_CODE (x) == PLUS
4128 && GET_MODE_SIZE (mode) < 4
4129 && REG_P (XEXP (x, 0))
4130 && XEXP (x, 0) == stack_pointer_rtx
4131 && GET_CODE (XEXP (x, 1)) == CONST_INT
4132 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4134 rtx orig_x = x;
4136 x = copy_rtx (x);
4137 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4138 Pmode, VOIDmode, 0, 0, opnum, type);
4139 return x;
4142 /* If both registers are hi-regs, then it's better to reload the
4143 entire expression rather than each register individually. That
4144 only requires one reload register rather than two. */
4145 if (GET_CODE (x) == PLUS
4146 && REG_P (XEXP (x, 0))
4147 && REG_P (XEXP (x, 1))
4148 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
4149 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
4151 rtx orig_x = x;
4153 x = copy_rtx (x);
4154 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4155 Pmode, VOIDmode, 0, 0, opnum, type);
4156 return x;
4159 return NULL;
4162 /* Test for various thread-local symbols. */
4164 /* Return TRUE if X is a thread-local symbol. */
4166 static bool
4167 arm_tls_symbol_p (rtx x)
4169 if (! TARGET_HAVE_TLS)
4170 return false;
4172 if (GET_CODE (x) != SYMBOL_REF)
4173 return false;
4175 return SYMBOL_REF_TLS_MODEL (x) != 0;
4178 /* Helper for arm_tls_referenced_p. */
4180 static int
4181 arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4183 if (GET_CODE (*x) == SYMBOL_REF)
4184 return SYMBOL_REF_TLS_MODEL (*x) != 0;
4186 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
4187 TLS offsets, not real symbol references. */
4188 if (GET_CODE (*x) == UNSPEC
4189 && XINT (*x, 1) == UNSPEC_TLS)
4190 return -1;
4192 return 0;
4195 /* Return TRUE if X contains any TLS symbol references. */
4197 bool
4198 arm_tls_referenced_p (rtx x)
4200 if (! TARGET_HAVE_TLS)
4201 return false;
4203 return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
4206 #define REG_OR_SUBREG_REG(X) \
4207 (GET_CODE (X) == REG \
4208 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
4210 #define REG_OR_SUBREG_RTX(X) \
4211 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
4213 #ifndef COSTS_N_INSNS
4214 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
4215 #endif
4216 static inline int
4217 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
4219 enum machine_mode mode = GET_MODE (x);
4221 switch (code)
4223 case ASHIFT:
4224 case ASHIFTRT:
4225 case LSHIFTRT:
4226 case ROTATERT:
4227 case PLUS:
4228 case MINUS:
4229 case COMPARE:
4230 case NEG:
4231 case NOT:
4232 return COSTS_N_INSNS (1);
4234 case MULT:
4235 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4237 int cycles = 0;
4238 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
4240 while (i)
4242 i >>= 2;
4243 cycles++;
4245 return COSTS_N_INSNS (2) + cycles;
4247 return COSTS_N_INSNS (1) + 16;
4249 case SET:
4250 return (COSTS_N_INSNS (1)
4251 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
4252 + (GET_CODE (SET_DEST (x)) == MEM)));
4254 case CONST_INT:
4255 if (outer == SET)
4257 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
4258 return 0;
4259 if (thumb_shiftable_const (INTVAL (x)))
4260 return COSTS_N_INSNS (2);
4261 return COSTS_N_INSNS (3);
4263 else if ((outer == PLUS || outer == COMPARE)
4264 && INTVAL (x) < 256 && INTVAL (x) > -256)
4265 return 0;
4266 else if (outer == AND
4267 && INTVAL (x) < 256 && INTVAL (x) >= -256)
4268 return COSTS_N_INSNS (1);
4269 else if (outer == ASHIFT || outer == ASHIFTRT
4270 || outer == LSHIFTRT)
4271 return 0;
4272 return COSTS_N_INSNS (2);
4274 case CONST:
4275 case CONST_DOUBLE:
4276 case LABEL_REF:
4277 case SYMBOL_REF:
4278 return COSTS_N_INSNS (3);
4280 case UDIV:
4281 case UMOD:
4282 case DIV:
4283 case MOD:
4284 return 100;
4286 case TRUNCATE:
4287 return 99;
4289 case AND:
4290 case XOR:
4291 case IOR:
4292 /* XXX guess. */
4293 return 8;
4295 case MEM:
4296 /* XXX another guess. */
4297 /* Memory costs quite a lot for the first word, but subsequent words
4298 load at the equivalent of a single insn each. */
4299 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4300 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4301 ? 4 : 0));
4303 case IF_THEN_ELSE:
4304 /* XXX a guess. */
4305 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4306 return 14;
4307 return 2;
4309 case ZERO_EXTEND:
4310 /* XXX still guessing. */
4311 switch (GET_MODE (XEXP (x, 0)))
4313 case QImode:
4314 return (1 + (mode == DImode ? 4 : 0)
4315 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4317 case HImode:
4318 return (4 + (mode == DImode ? 4 : 0)
4319 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4321 case SImode:
4322 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4324 default:
4325 return 99;
4328 default:
4329 return 99;
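/* [Editorial sketch, not part of the build.]  The MULT case above
   models an early-terminating multiplier that retires two bits of the
   constant operand per cycle, so the estimate is the significant-bit
   count divided by two, rounded up (hypothetical helper):  */
#if 0
static int
example_thumb_mul_cycles (unsigned long i)
{
  int cycles = 0;

  while (i)      /* e.g. i = 100, seven significant bits */
    {
      i >>= 2;   /* two bits per cycle */
      cycles++;  /* -> 4 cycles for i = 100 */
    }
  return cycles;
}
#endif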
4334 /* Worker routine for arm_rtx_costs. */
4335 static inline int
4336 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4338 enum machine_mode mode = GET_MODE (x);
4339 enum rtx_code subcode;
4340 int extra_cost;
4342 switch (code)
4344 case MEM:
4345 /* Memory costs quite a lot for the first word, but subsequent words
4346 load at the equivalent of a single insn each. */
4347 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4348 + (GET_CODE (x) == SYMBOL_REF
4349 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4351 case DIV:
4352 case MOD:
4353 case UDIV:
4354 case UMOD:
4355 return optimize_size ? COSTS_N_INSNS (2) : 100;
4357 case ROTATE:
4358 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4359 return 4;
4360 /* Fall through */
4361 case ROTATERT:
4362 if (mode != SImode)
4363 return 8;
4364 /* Fall through */
4365 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4366 if (mode == DImode)
4367 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4368 + ((GET_CODE (XEXP (x, 0)) == REG
4369 || (GET_CODE (XEXP (x, 0)) == SUBREG
4370 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4371 ? 0 : 8));
4372 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4373 || (GET_CODE (XEXP (x, 0)) == SUBREG
4374 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4375 ? 0 : 4)
4376 + ((GET_CODE (XEXP (x, 1)) == REG
4377 || (GET_CODE (XEXP (x, 1)) == SUBREG
4378 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4379 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4380 ? 0 : 4));
4382 case MINUS:
4383 if (mode == DImode)
4384 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4385 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4386 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4387 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4388 ? 0 : 8));
4390 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4391 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4392 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4393 && arm_const_double_rtx (XEXP (x, 1))))
4394 ? 0 : 8)
4395 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4396 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4397 && arm_const_double_rtx (XEXP (x, 0))))
4398 ? 0 : 8));
4400 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4401 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4402 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4403 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4404 || subcode == ASHIFTRT || subcode == LSHIFTRT
4405 || subcode == ROTATE || subcode == ROTATERT
4406 || (subcode == MULT
4407 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4408 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4409 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4410 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4411 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4412 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4413 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4414 return 1;
4415 /* Fall through */
4417 case PLUS:
4418 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4419 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4420 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4421 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4422 && arm_const_double_rtx (XEXP (x, 1))))
4423 ? 0 : 8));
4425 /* Fall through */
4426 case AND: case XOR: case IOR:
4427 extra_cost = 0;
4429 /* Normally the frame registers will be spilt into reg+const during
4430 reload, so it is a bad idea to combine them with other instructions,
4431 since then they might not be moved outside of loops. As a compromise
4432 we allow integration with ops that have a constant as their second
4433 operand. */
4434 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4435 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4436 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4437 || (REG_OR_SUBREG_REG (XEXP (x, 0))
4438 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
4439 extra_cost = 4;
4441 if (mode == DImode)
4442 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4443 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4444 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4445 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4446 ? 0 : 8));
4448 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4449 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4450 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4451 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4452 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4453 ? 0 : 4));
4455 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4456 return (1 + extra_cost
4457 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4458 || subcode == LSHIFTRT || subcode == ASHIFTRT
4459 || subcode == ROTATE || subcode == ROTATERT
4460 || (subcode == MULT
4461 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4462 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4463 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4464 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4465 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4466 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4467 ? 0 : 4));
4469 return 8;
4471 case MULT:
4472 /* This should have been handled by the CPU specific routines. */
4473 gcc_unreachable ();
4475 case TRUNCATE:
4476 if (arm_arch3m && mode == SImode
4477 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4478 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4479 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4480 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4481 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4482 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4483 return 8;
4484 return 99;
4486 case NEG:
4487 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4488 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4489 /* Fall through */
4490 case NOT:
4491 if (mode == DImode)
4492 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4494 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4496 case IF_THEN_ELSE:
4497 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4498 return 14;
4499 return 2;
4501 case COMPARE:
4502 return 1;
4504 case ABS:
4505 return 4 + (mode == DImode ? 4 : 0);
4507 case SIGN_EXTEND:
4508 if (GET_MODE (XEXP (x, 0)) == QImode)
4509 return (4 + (mode == DImode ? 4 : 0)
4510 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4511 /* Fall through */
4512 case ZERO_EXTEND:
4513 switch (GET_MODE (XEXP (x, 0)))
4515 case QImode:
4516 return (1 + (mode == DImode ? 4 : 0)
4517 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4519 case HImode:
4520 return (4 + (mode == DImode ? 4 : 0)
4521 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4523 case SImode:
4524 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4526 case V8QImode:
4527 case V4HImode:
4528 case V2SImode:
4529 case V4QImode:
4530 case V2HImode:
4531 return 1;
4533 default:
4534 gcc_unreachable ();
4536 gcc_unreachable ();
4538 case CONST_INT:
4539 if (const_ok_for_arm (INTVAL (x)))
4540 return outer == SET ? 2 : -1;
4541 else if (outer == AND
4542 && const_ok_for_arm (~INTVAL (x)))
4543 return -1;
4544 else if ((outer == COMPARE
4545 || outer == PLUS || outer == MINUS)
4546 && const_ok_for_arm (-INTVAL (x)))
4547 return -1;
4548 else
4549 return 5;
4551 case CONST:
4552 case LABEL_REF:
4553 case SYMBOL_REF:
4554 return 6;
4556 case CONST_DOUBLE:
4557 if (arm_const_double_rtx (x))
4558 return outer == SET ? 2 : -1;
4559 else if ((outer == COMPARE || outer == PLUS)
4560 && neg_const_double_rtx_ok_for_fpa (x))
4561 return -1;
4562 return 7;
4564 default:
4565 return 99;
4569 /* RTX costs when optimizing for size. */
4570 static bool
4571 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4573 enum machine_mode mode = GET_MODE (x);
4575 if (TARGET_THUMB)
4577 /* XXX TBD. For now, use the standard costs. */
4578 *total = thumb_rtx_costs (x, code, outer_code);
4579 return true;
4582 switch (code)
4584 case MEM:
4585 /* A memory access costs 1 insn if the mode is small or the address is
4586 a single register; otherwise it costs one insn per word. */
4587 if (REG_P (XEXP (x, 0)))
4588 *total = COSTS_N_INSNS (1);
4589 else
4590 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4591 return true;
4593 case DIV:
4594 case MOD:
4595 case UDIV:
4596 case UMOD:
4597 /* Needs a libcall, so it costs about this. */
4598 *total = COSTS_N_INSNS (2);
4599 return false;
4601 case ROTATE:
4602 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4604 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4605 return true;
4607 /* Fall through */
4608 case ROTATERT:
4609 case ASHIFT:
4610 case LSHIFTRT:
4611 case ASHIFTRT:
4612 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4614 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4615 return true;
4617 else if (mode == SImode)
4619 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4620 /* Slightly disparage register shifts, but not by much. */
4621 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4622 *total += 1 + rtx_cost (XEXP (x, 1), code);
4623 return true;
4626 /* Needs a libcall. */
4627 *total = COSTS_N_INSNS (2);
4628 return false;
4630 case MINUS:
4631 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4633 *total = COSTS_N_INSNS (1);
4634 return false;
4637 if (mode == SImode)
4639 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4640 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4642 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4643 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4644 || subcode1 == ROTATE || subcode1 == ROTATERT
4645 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4646 || subcode1 == ASHIFTRT)
4648 /* It's just the cost of the two operands. */
4649 *total = 0;
4650 return false;
4653 *total = COSTS_N_INSNS (1);
4654 return false;
4657 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4658 return false;
4660 case PLUS:
4661 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4663 *total = COSTS_N_INSNS (1);
4664 return false;
4667 /* Fall through */
4668 case AND: case XOR: case IOR:
4669 if (mode == SImode)
4671 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4673 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4674 || subcode == LSHIFTRT || subcode == ASHIFTRT
4675 || (code == AND && subcode == NOT))
4677 /* It's just the cost of the two operands. */
4678 *total = 0;
4679 return false;
4683 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4684 return false;
4686 case MULT:
4687 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4688 return false;
4690 case NEG:
4691 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4692 *total = COSTS_N_INSNS (1);
4693 /* Fall through */
4694 case NOT:
4695 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4697 return false;
4699 case IF_THEN_ELSE:
4700 *total = 0;
4701 return false;
4703 case COMPARE:
4704 if (cc_register (XEXP (x, 0), VOIDmode))
4705 *total = 0;
4706 else
4707 *total = COSTS_N_INSNS (1);
4708 return false;
4710 case ABS:
4711 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4712 *total = COSTS_N_INSNS (1);
4713 else
4714 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4715 return false;
4717 case SIGN_EXTEND:
4718 *total = 0;
4719 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4721 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4722 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4724 if (mode == DImode)
4725 *total += COSTS_N_INSNS (1);
4726 return false;
4728 case ZERO_EXTEND:
4729 *total = 0;
4730 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4732 switch (GET_MODE (XEXP (x, 0)))
4734 case QImode:
4735 *total += COSTS_N_INSNS (1);
4736 break;
4738 case HImode:
4739 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4741 case SImode:
4742 break;
4744 default:
4745 *total += COSTS_N_INSNS (2);
4749 if (mode == DImode)
4750 *total += COSTS_N_INSNS (1);
4752 return false;
4754 case CONST_INT:
4755 if (const_ok_for_arm (INTVAL (x)))
4756 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4757 else if (const_ok_for_arm (~INTVAL (x)))
4758 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4759 else if (const_ok_for_arm (-INTVAL (x)))
4761 if (outer_code == COMPARE || outer_code == PLUS
4762 || outer_code == MINUS)
4763 *total = 0;
4764 else
4765 *total = COSTS_N_INSNS (1);
4767 else
4768 *total = COSTS_N_INSNS (2);
4769 return true;
4771 case CONST:
4772 case LABEL_REF:
4773 case SYMBOL_REF:
4774 *total = COSTS_N_INSNS (2);
4775 return true;
4777 case CONST_DOUBLE:
4778 *total = COSTS_N_INSNS (4);
4779 return true;
4781 default:
4782 if (mode != VOIDmode)
4783 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4784 else
4785 *total = COSTS_N_INSNS (4); /* Who knows? */
4786 return false;
4790 /* RTX costs for cores with a slow MUL implementation. */
4792 static bool
4793 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4795 enum machine_mode mode = GET_MODE (x);
4797 if (TARGET_THUMB)
4799 *total = thumb_rtx_costs (x, code, outer_code);
4800 return true;
4803 switch (code)
4805 case MULT:
4806 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4807 || mode == DImode)
4809 *total = 30;
4810 return true;
4813 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4815 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4816 & (unsigned HOST_WIDE_INT) 0xffffffff);
4817 int cost, const_ok = const_ok_for_arm (i);
4818 int j, booth_unit_size;
4820 /* Tune as appropriate. */
4821 cost = const_ok ? 4 : 8;
4822 booth_unit_size = 2;
4823 for (j = 0; i && j < 32; j += booth_unit_size)
4825 i >>= booth_unit_size;
4826 cost += 2;
4829 *total = cost;
4830 return true;
4833 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4834 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4835 return true;
4837 default:
4838 *total = arm_rtx_costs_1 (x, code, outer_code);
4839 return true;
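/* [Editorial sketch, not part of the build.]  The constant-multiply
   cost above is really a Booth-recoding model: a slow multiplier
   retires 2 bits of the constant per step, the fast M-variant
   multiplier 8 bits (see arm_fastmul_rtx_costs below), and every step
   adds 2 on top of the 4-or-8 charge for materializing the constant.
   Hypothetical helper, parameterized over the unit size:  */
#if 0
static int
example_booth_cost (unsigned long i, int const_ok, int booth_unit_size)
{
  int j, cost = const_ok ? 4 : 8;   /* MOV versus a pool load */

  for (j = 0; i && j < 32; j += booth_unit_size)
    {
      i >>= booth_unit_size;
      cost += 2;
    }

  /* e.g. i = 0xffffffff: 16 steps at unit 2, 4 steps at unit 8.  */
  return cost;
}
#endif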
4844 /* RTX cost for cores with a fast multiply unit (M variants). */
4846 static bool
4847 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4849 enum machine_mode mode = GET_MODE (x);
4851 if (TARGET_THUMB)
4853 *total = thumb_rtx_costs (x, code, outer_code);
4854 return true;
4857 switch (code)
4859 case MULT:
4860 /* There is no point basing this on the tuning, since it is always the
4861 fast variant if it exists at all. */
4862 if (mode == DImode
4863 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4864 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4865 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4867 *total = 8;
4868 return true;
4872 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4873 || mode == DImode)
4875 *total = 30;
4876 return true;
4879 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4881 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4882 & (unsigned HOST_WIDE_INT) 0xffffffff);
4883 int cost, const_ok = const_ok_for_arm (i);
4884 int j, booth_unit_size;
4886 /* Tune as appropriate. */
4887 cost = const_ok ? 4 : 8;
4888 booth_unit_size = 8;
4889 for (j = 0; i && j < 32; j += booth_unit_size)
4891 i >>= booth_unit_size;
4892 cost += 2;
4895 *total = cost;
4896 return true;
4899 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4900 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4901 return true;
4903 default:
4904 *total = arm_rtx_costs_1 (x, code, outer_code);
4905 return true;
4910 /* RTX cost for XScale CPUs. */
4912 static bool
4913 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4915 enum machine_mode mode = GET_MODE (x);
4917 if (TARGET_THUMB)
4919 *total = thumb_rtx_costs (x, code, outer_code);
4920 return true;
4923 switch (code)
4925 case MULT:
4926 /* There is no point basing this on the tuning, since it is always the
4927 fast variant if it exists at all. */
4928 if (mode == DImode
4929 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4930 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4931 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4933 *total = 8;
4934 return true;
4938 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4939 || mode == DImode)
4941 *total = 30;
4942 return true;
4945 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4947 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4948 & (unsigned HOST_WIDE_INT) 0xffffffff);
4949 int cost, const_ok = const_ok_for_arm (i);
4950 unsigned HOST_WIDE_INT masked_const;
4952 /* The cost will be related to two insns.
4953 First a load of the constant (MOV or LDR), then a multiply. */
4954 cost = 2;
4955 if (! const_ok)
4956 cost += 1; /* LDR is probably more expensive because
4957 of longer result latency. */
4958 masked_const = i & 0xffff8000;
4959 if (masked_const != 0 && masked_const != 0xffff8000)
4961 masked_const = i & 0xf8000000;
4962 if (masked_const == 0 || masked_const == 0xf8000000)
4963 cost += 1;
4964 else
4965 cost += 2;
4967 *total = cost;
4968 return true;
4971 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4972 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4973 return true;
4975 case COMPARE:
4976 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4977 will stall until the multiplication is complete. */
4978 if (GET_CODE (XEXP (x, 0)) == MULT)
4979 *total = 4 + rtx_cost (XEXP (x, 0), code);
4980 else
4981 *total = arm_rtx_costs_1 (x, code, outer_code);
4982 return true;
4984 default:
4985 *total = arm_rtx_costs_1 (x, code, outer_code);
4986 return true;
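/* [Editorial sketch, not part of the build.]  The masked_const test
   above detects how early the XScale multiplier can terminate: if
   bits 31..15 are all-zero or all-one the operand acts as a 16-bit
   multiplier (no penalty); if only bits 31..27 are uniform it acts as
   a 28-bit one (one extra cycle); anything wider costs two.
   Hypothetical helper returning just the extra cycles:  */
#if 0
static int
example_xscale_mul_extra (unsigned long i)
{
  unsigned long top17 = i & 0xffff8000;   /* bits 31..15 */

  if (top17 == 0 || top17 == 0xffff8000)
    return 0;                             /* sign-fits in 16 bits */

  if ((i & 0xf8000000) == 0 || (i & 0xf8000000) == 0xf8000000)
    return 1;                             /* sign-fits in 28 bits */

  return 2;
}
#endif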
4991 /* RTX costs for 9e (and later) cores. */
4993 static bool
4994 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4996 enum machine_mode mode = GET_MODE (x);
4997 int nonreg_cost;
4998 int cost;
5000 if (TARGET_THUMB)
5002 switch (code)
5004 case MULT:
5005 *total = COSTS_N_INSNS (3);
5006 return true;
5008 default:
5009 *total = thumb_rtx_costs (x, code, outer_code);
5010 return true;
5014 switch (code)
5016 case MULT:
5017 /* There is no point basing this on the tuning, since it is always the
5018 fast variant if it exists at all. */
5019 if (mode == DImode
5020 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5021 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5022 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5024 *total = 3;
5025 return true;
5029 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5031 *total = 30;
5032 return true;
5034 if (mode == DImode)
5036 cost = 7;
5037 nonreg_cost = 8;
5039 else
5041 cost = 2;
5042 nonreg_cost = 4;
5046 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
5047 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
5048 return true;
5050 default:
5051 *total = arm_rtx_costs_1 (x, code, outer_code);
5052 return true;
5055 /* All address computations that can be done are free, but rtx_cost returns
5056 practically the same value for all of them. So we weight the different
5057 types of address here in the order (most preferred first):
5058 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
5059 static inline int
5060 arm_arm_address_cost (rtx x)
5062 enum rtx_code c = GET_CODE (x);
5064 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
5065 return 0;
5066 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
5067 return 10;
5069 if (c == PLUS || c == MINUS)
5071 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5072 return 2;
5074 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
5075 return 3;
5077 return 4;
5080 return 6;
5083 static inline int
5084 arm_thumb_address_cost (rtx x)
5086 enum rtx_code c = GET_CODE (x);
5088 if (c == REG)
5089 return 1;
5090 if (c == PLUS
5091 && GET_CODE (XEXP (x, 0)) == REG
5092 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5093 return 1;
5095 return 2;
5098 static int
5099 arm_address_cost (rtx x)
5101 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
5104 static int
5105 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
5107 rtx i_pat, d_pat;
5109 /* Some true dependencies can have a higher cost depending
5110 on precisely how certain input operands are used. */
5111 if (arm_tune_xscale
5112 && REG_NOTE_KIND (link) == 0
5113 && recog_memoized (insn) >= 0
5114 && recog_memoized (dep) >= 0)
5116 int shift_opnum = get_attr_shift (insn);
5117 enum attr_type attr_type = get_attr_type (dep);
5119 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5120 operand for INSN. If we have a shifted input operand and the
5121 instruction we depend on is another ALU instruction, then we may
5122 have to account for an additional stall. */
5123 if (shift_opnum != 0
5124 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
5126 rtx shifted_operand;
5127 int opno;
5129 /* Get the shifted operand. */
5130 extract_insn (insn);
5131 shifted_operand = recog_data.operand[shift_opnum];
5133 /* Iterate over all the operands in DEP. If we write an operand
5134 that overlaps with SHIFTED_OPERAND, then we have to increase the
5135 cost of this dependency. */
5136 extract_insn (dep);
5137 preprocess_constraints ();
5138 for (opno = 0; opno < recog_data.n_operands; opno++)
5140 /* We can ignore strict inputs. */
5141 if (recog_data.operand_type[opno] == OP_IN)
5142 continue;
5144 if (reg_overlap_mentioned_p (recog_data.operand[opno],
5145 shifted_operand))
5146 return 2;
5151 /* XXX This is not strictly true for the FPA. */
5152 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
5153 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
5154 return 0;
5156 /* Call insns don't incur a stall, even if they follow a load. */
5157 if (REG_NOTE_KIND (link) == 0
5158 && GET_CODE (insn) == CALL_INSN)
5159 return 1;
5161 if ((i_pat = single_set (insn)) != NULL
5162 && GET_CODE (SET_SRC (i_pat)) == MEM
5163 && (d_pat = single_set (dep)) != NULL
5164 && GET_CODE (SET_DEST (d_pat)) == MEM)
5166 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
5167 /* This is a load after a store; there is no conflict if the load reads
5168 from a cached area. Assume that loads from the stack and from the
5169 constant pool are cached, and that others will miss. This is a
5170 hack. */
5172 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
5173 || reg_mentioned_p (stack_pointer_rtx, src_mem)
5174 || reg_mentioned_p (frame_pointer_rtx, src_mem)
5175 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
5176 return 1;
5179 return cost;
5182 static int fp_consts_inited = 0;
5184 /* Only zero is valid for VFP. Other values are also valid for FPA. */
5185 static const char * const strings_fp[8] =
5187 "0", "1", "2", "3",
5188 "4", "5", "0.5", "10"
5191 static REAL_VALUE_TYPE values_fp[8];
5193 static void
5194 init_fp_table (void)
5196 int i;
5197 REAL_VALUE_TYPE r;
5199 if (TARGET_VFP)
5200 fp_consts_inited = 1;
5201 else
5202 fp_consts_inited = 8;
5204 for (i = 0; i < fp_consts_inited; i++)
5206 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
5207 values_fp[i] = r;
5211 /* Return TRUE if rtx X is a valid immediate FP constant. */
5213 arm_const_double_rtx (rtx x)
5215 REAL_VALUE_TYPE r;
5216 int i;
5218 if (!fp_consts_inited)
5219 init_fp_table ();
5221 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5222 if (REAL_VALUE_MINUS_ZERO (r))
5223 return 0;
5225 for (i = 0; i < fp_consts_inited; i++)
5226 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5227 return 1;
5229 return 0;
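/* [Editorial sketch, not part of the build.]  The same lookup over
   plain doubles: the eight FPA immediates are 0, 1, 2, 3, 4, 5, 0.5
   and 10, while for VFP only the first table entry (0.0) is live, and
   -0.0 is rejected outright in both cases.  Hypothetical helper:  */
#if 0
static int
example_fp_const_ok (double d, int have_vfp)
{
  static const double table[8] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 0.5, 10.0 };
  int i, n = have_vfp ? 1 : 8;

  if (d == 0.0 && __builtin_signbit (d))
    return 0;                     /* -0.0 is never a valid immediate */

  for (i = 0; i < n; i++)
    if (d == table[i])
      return 1;

  return 0;
}
#endif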
5232 /* Return TRUE if rtx X is a valid immediate FPA constant. */
5234 neg_const_double_rtx_ok_for_fpa (rtx x)
5236 REAL_VALUE_TYPE r;
5237 int i;
5239 if (!fp_consts_inited)
5240 init_fp_table ();
5242 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5243 r = REAL_VALUE_NEGATE (r);
5244 if (REAL_VALUE_MINUS_ZERO (r))
5245 return 0;
5247 for (i = 0; i < 8; i++)
5248 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5249 return 1;
5251 return 0;
5254 /* Predicates for `match_operand' and `match_operator'. */
5256 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5258 cirrus_memory_offset (rtx op)
5260 /* Reject eliminable registers. */
5261 if (! (reload_in_progress || reload_completed)
5262 && ( reg_mentioned_p (frame_pointer_rtx, op)
5263 || reg_mentioned_p (arg_pointer_rtx, op)
5264 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5265 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5266 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5267 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5268 return 0;
5270 if (GET_CODE (op) == MEM)
5272 rtx ind;
5274 ind = XEXP (op, 0);
5276 /* Match: (mem (reg)). */
5277 if (GET_CODE (ind) == REG)
5278 return 1;
5280 /* Match:
5281 (mem (plus (reg)
5282 (const))). */
5283 if (GET_CODE (ind) == PLUS
5284 && GET_CODE (XEXP (ind, 0)) == REG
5285 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5286 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
5287 return 1;
5290 return 0;
5293 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5294 WB is true if writeback address modes are allowed. */
5297 arm_coproc_mem_operand (rtx op, bool wb)
5299 rtx ind;
5301 /* Reject eliminable registers. */
5302 if (! (reload_in_progress || reload_completed)
5303 && ( reg_mentioned_p (frame_pointer_rtx, op)
5304 || reg_mentioned_p (arg_pointer_rtx, op)
5305 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5306 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5307 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5308 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5309 return FALSE;
5311 /* Constants are converted into offsets from labels. */
5312 if (GET_CODE (op) != MEM)
5313 return FALSE;
5315 ind = XEXP (op, 0);
5317 if (reload_completed
5318 && (GET_CODE (ind) == LABEL_REF
5319 || (GET_CODE (ind) == CONST
5320 && GET_CODE (XEXP (ind, 0)) == PLUS
5321 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5322 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5323 return TRUE;
5325 /* Match: (mem (reg)). */
5326 if (GET_CODE (ind) == REG)
5327 return arm_address_register_rtx_p (ind, 0);
5329 /* Autoincrement addressing modes. */
5330 if (wb
5331 && (GET_CODE (ind) == PRE_INC
5332 || GET_CODE (ind) == POST_INC
5333 || GET_CODE (ind) == PRE_DEC
5334 || GET_CODE (ind) == POST_DEC))
5335 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5337 if (wb
5338 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5339 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5340 && GET_CODE (XEXP (ind, 1)) == PLUS
5341 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5342 ind = XEXP (ind, 1);
5344 /* Match:
5345 (plus (reg)
5346 (const)). */
5347 if (GET_CODE (ind) == PLUS
5348 && GET_CODE (XEXP (ind, 0)) == REG
5349 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5350 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5351 && INTVAL (XEXP (ind, 1)) > -1024
5352 && INTVAL (XEXP (ind, 1)) < 1024
5353 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5354 return TRUE;
5356 return FALSE;
5359 /* Return true if X is a register that will be eliminated later on. */
5361 arm_eliminable_register (rtx x)
5363 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5364 || REGNO (x) == ARG_POINTER_REGNUM
5365 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5366 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5369 /* Return GENERAL_REGS if a scratch register required to reload x to/from
5370 VFP registers. Otherwise return NO_REGS. */
5372 enum reg_class
5373 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5375 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5376 return NO_REGS;
5378 return GENERAL_REGS;
5381 /* Values which must be returned in the most-significant end of the return
5382 register. */
5384 static bool
5385 arm_return_in_msb (tree valtype)
5387 return (TARGET_AAPCS_BASED
5388 && BYTES_BIG_ENDIAN
5389 && (AGGREGATE_TYPE_P (valtype)
5390 || TREE_CODE (valtype) == COMPLEX_TYPE));
5393 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5394 Used by the Cirrus Maverick code, which has to work around
5395 a hardware bug triggered by such instructions. */
5396 static bool
5397 arm_memory_load_p (rtx insn)
5399 rtx body, lhs, rhs;
5401 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5402 return false;
5404 body = PATTERN (insn);
5406 if (GET_CODE (body) != SET)
5407 return false;
5409 lhs = XEXP (body, 0);
5410 rhs = XEXP (body, 1);
5412 lhs = REG_OR_SUBREG_RTX (lhs);
5414 /* If the destination is not a general purpose
5415 register we do not have to worry. */
5416 if (GET_CODE (lhs) != REG
5417 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5418 return false;
5420 /* As well as loads from memory we also have to react
5421 to loads of invalid constants which will be turned
5422 into loads from the minipool. */
5423 return (GET_CODE (rhs) == MEM
5424 || GET_CODE (rhs) == SYMBOL_REF
5425 || note_invalid_constants (insn, -1, false));
5428 /* Return TRUE if INSN is a Cirrus instruction. */
5429 static bool
5430 arm_cirrus_insn_p (rtx insn)
5432 enum attr_cirrus attr;
5434 /* get_attr cannot accept USE or CLOBBER. */
5435 if (!insn
5436 || GET_CODE (insn) != INSN
5437 || GET_CODE (PATTERN (insn)) == USE
5438 || GET_CODE (PATTERN (insn)) == CLOBBER)
5439 return 0;
5441 attr = get_attr_cirrus (insn);
5443 return attr != CIRRUS_NOT;
5446 /* Cirrus reorg for invalid instruction combinations. */
5447 static void
5448 cirrus_reorg (rtx first)
5450 enum attr_cirrus attr;
5451 rtx body = PATTERN (first);
5452 rtx t;
5453 int nops;
5455 /* Any branch must be followed by 2 non-Cirrus instructions. */
5456 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5458 nops = 0;
5459 t = next_nonnote_insn (first);
5461 if (arm_cirrus_insn_p (t))
5462 ++ nops;
5464 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5465 ++ nops;
5467 while (nops --)
5468 emit_insn_after (gen_nop (), first);
5470 return;
5473 /* (float (blah)) is in parallel with a clobber. */
5474 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5475 body = XVECEXP (body, 0, 0);
5477 if (GET_CODE (body) == SET)
5479 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5481 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5482 be followed by a non-Cirrus insn. */
5483 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5485 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5486 emit_insn_after (gen_nop (), first);
5488 return;
5490 else if (arm_memory_load_p (first))
5492 unsigned int arm_regno;
5494 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5495 ldr/cfmv64hr combination where the Rd field is the same
5496 in both instructions must be split with a non-Cirrus
5497 insn. Example:
5499 ldr r0, blah
5501 cfmvsr mvf0, r0. */
5503 /* Get Arm register number for ldr insn. */
5504 if (GET_CODE (lhs) == REG)
5505 arm_regno = REGNO (lhs);
5506 else
5508 gcc_assert (GET_CODE (rhs) == REG);
5509 arm_regno = REGNO (rhs);
5512 /* Next insn. */
5513 first = next_nonnote_insn (first);
5515 if (! arm_cirrus_insn_p (first))
5516 return;
5518 body = PATTERN (first);
5520 /* (float (blah)) is in parallel with a clobber. */
5521 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5522 body = XVECEXP (body, 0, 0);
5524 if (GET_CODE (body) == FLOAT)
5525 body = XEXP (body, 0);
5527 if (get_attr_cirrus (first) == CIRRUS_MOVE
5528 && GET_CODE (XEXP (body, 1)) == REG
5529 && arm_regno == REGNO (XEXP (body, 1)))
5530 emit_insn_after (gen_nop (), first);
5532 return;
5536 /* get_attr cannot accept USE or CLOBBER. */
5537 if (!first
5538 || GET_CODE (first) != INSN
5539 || GET_CODE (PATTERN (first)) == USE
5540 || GET_CODE (PATTERN (first)) == CLOBBER)
5541 return;
5543 attr = get_attr_cirrus (first);
5545 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5546 must be followed by a non-coprocessor instruction. */
5547 if (attr == CIRRUS_COMPARE)
5549 nops = 0;
5551 t = next_nonnote_insn (first);
5553 if (arm_cirrus_insn_p (t))
5554 ++ nops;
5556 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5557 ++ nops;
5559 while (nops --)
5560 emit_insn_after (gen_nop (), first);
5562 return;
5566 /* Return TRUE if X references a SYMBOL_REF. */
5568 symbol_mentioned_p (rtx x)
5570 const char * fmt;
5571 int i;
5573 if (GET_CODE (x) == SYMBOL_REF)
5574 return 1;
5576 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
5577 are constant offsets, not symbols. */
5578 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5579 return 0;
5581 fmt = GET_RTX_FORMAT (GET_CODE (x));
5583 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5585 if (fmt[i] == 'E')
5587 int j;
5589 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5590 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5591 return 1;
5593 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5594 return 1;
5597 return 0;
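/* [Editorial sketch, not part of the build.]  Shorn of rtl details,
   the 'e'/'E' format walk above is a plain recursive search with an
   early out; 'e' names one sub-expression, 'E' a vector of them, and
   both just become children here.  Hypothetical types:  */
#if 0
struct expr
{
  int is_symbol;            /* stands in for GET_CODE (x) == SYMBOL_REF */
  int n_subexprs;           /* children from both 'e' and 'E' slots */
  struct expr **sub;
};

static int
example_symbol_mentioned (const struct expr *x)
{
  int i;

  if (x->is_symbol)
    return 1;

  for (i = x->n_subexprs - 1; i >= 0; i--)
    if (example_symbol_mentioned (x->sub[i]))
      return 1;

  return 0;
}
#endif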
5600 /* Return TRUE if X references a LABEL_REF. */
5602 label_mentioned_p (rtx x)
5604 const char * fmt;
5605 int i;
5607 if (GET_CODE (x) == LABEL_REF)
5608 return 1;
5610 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
5611 instruction, but they are constant offsets, not symbols. */
5612 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5613 return 0;
5615 fmt = GET_RTX_FORMAT (GET_CODE (x));
5616 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5618 if (fmt[i] == 'E')
5620 int j;
5622 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5623 if (label_mentioned_p (XVECEXP (x, i, j)))
5624 return 1;
5626 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5627 return 1;
5630 return 0;
5634 tls_mentioned_p (rtx x)
5636 switch (GET_CODE (x))
5638 case CONST:
5639 return tls_mentioned_p (XEXP (x, 0));
5641 case UNSPEC:
5642 if (XINT (x, 1) == UNSPEC_TLS)
5643 return 1;
5645 default:
5646 return 0;
5650 /* Must not copy a SET whose source operand is PC-relative. */
5652 static bool
5653 arm_cannot_copy_insn_p (rtx insn)
5655 rtx pat = PATTERN (insn);
5657 if (GET_CODE (pat) == PARALLEL
5658 && GET_CODE (XVECEXP (pat, 0, 0)) == SET)
5660 rtx rhs = SET_SRC (XVECEXP (pat, 0, 0));
5662 if (GET_CODE (rhs) == UNSPEC
5663 && XINT (rhs, 1) == UNSPEC_PIC_BASE)
5664 return TRUE;
5666 if (GET_CODE (rhs) == MEM
5667 && GET_CODE (XEXP (rhs, 0)) == UNSPEC
5668 && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
5669 return TRUE;
5672 return FALSE;
5675 enum rtx_code
5676 minmax_code (rtx x)
5678 enum rtx_code code = GET_CODE (x);
5680 switch (code)
5682 case SMAX:
5683 return GE;
5684 case SMIN:
5685 return LE;
5686 case UMIN:
5687 return LEU;
5688 case UMAX:
5689 return GEU;
5690 default:
5691 gcc_unreachable ();
5695 /* Return 1 if memory locations are adjacent. */
5697 adjacent_mem_locations (rtx a, rtx b)
5699 /* We don't guarantee to preserve the order of these memory refs. */
5700 if (volatile_refs_p (a) || volatile_refs_p (b))
5701 return 0;
5703 if ((GET_CODE (XEXP (a, 0)) == REG
5704 || (GET_CODE (XEXP (a, 0)) == PLUS
5705 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5706 && (GET_CODE (XEXP (b, 0)) == REG
5707 || (GET_CODE (XEXP (b, 0)) == PLUS
5708 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5710 HOST_WIDE_INT val0 = 0, val1 = 0;
5711 rtx reg0, reg1;
5712 int val_diff;
5714 if (GET_CODE (XEXP (a, 0)) == PLUS)
5716 reg0 = XEXP (XEXP (a, 0), 0);
5717 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5719 else
5720 reg0 = XEXP (a, 0);
5722 if (GET_CODE (XEXP (b, 0)) == PLUS)
5724 reg1 = XEXP (XEXP (b, 0), 0);
5725 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5727 else
5728 reg1 = XEXP (b, 0);
5730 /* Don't accept any offset that will require multiple
5731 instructions to handle, since this would cause the
5732 arith_adjacentmem pattern to output an overlong sequence. */
5733 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5734 return 0;
5736 /* Don't allow an eliminable register: register elimination can make
5737 the offset too large. */
5738 if (arm_eliminable_register (reg0))
5739 return 0;
5741 val_diff = val1 - val0;
5743 if (arm_ld_sched)
5745 /* If the target has load delay slots, then there's no benefit
5746 to using an ldm instruction unless the offset is zero and
5747 we are optimizing for size. */
5748 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5749 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5750 && (val_diff == 4 || val_diff == -4));
5753 return ((REGNO (reg0) == REGNO (reg1))
5754 && (val_diff == 4 || val_diff == -4));
5757 return 0;
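/* [Editorial sketch, not part of the build.]  Reduced to (base
   register, byte offset) pairs, the adjacency test above becomes the
   hypothetical predicate below: same base, offsets one word apart,
   and on load-delay-slot cores only the cheap start-of-frame cases
   are worth an ldm, and only when optimizing for size.  */
#if 0
static int
example_adjacent (int reg0, long val0, int reg1, long val1,
                  int ld_sched, int opt_size)
{
  long diff = val1 - val0;

  if (reg0 != reg1 || (diff != 4 && diff != -4))
    return 0;

  if (ld_sched)
    return (opt_size
            && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4));

  return 1;
}
#endif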
5761 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5762 HOST_WIDE_INT *load_offset)
5764 int unsorted_regs[4];
5765 HOST_WIDE_INT unsorted_offsets[4];
5766 int order[4];
5767 int base_reg = -1;
5768 int i;
5770 /* Can only handle 2, 3, or 4 insns at present,
5771 though could be easily extended if required. */
5772 gcc_assert (nops >= 2 && nops <= 4);
5774 /* Loop over the operands and check that the memory references are
5775 suitable (i.e. immediate offsets from the same base register). At
5776 the same time, extract the target register, and the memory
5777 offsets. */
5778 for (i = 0; i < nops; i++)
5780 rtx reg;
5781 rtx offset;
5783 /* Convert a subreg of a mem into the mem itself. */
5784 if (GET_CODE (operands[nops + i]) == SUBREG)
5785 operands[nops + i] = alter_subreg (operands + (nops + i));
5787 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5789 /* Don't reorder volatile memory references; it doesn't seem worth
5790 looking for the case where the order is ok anyway. */
5791 if (MEM_VOLATILE_P (operands[nops + i]))
5792 return 0;
5794 offset = const0_rtx;
5796 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5797 || (GET_CODE (reg) == SUBREG
5798 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5799 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5800 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5801 == REG)
5802 || (GET_CODE (reg) == SUBREG
5803 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5804 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5805 == CONST_INT)))
5807 if (i == 0)
5809 base_reg = REGNO (reg);
5810 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5811 ? REGNO (operands[i])
5812 : REGNO (SUBREG_REG (operands[i])));
5813 order[0] = 0;
5815 else
5817 if (base_reg != (int) REGNO (reg))
5818 /* Not addressed from the same base register. */
5819 return 0;
5821 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5822 ? REGNO (operands[i])
5823 : REGNO (SUBREG_REG (operands[i])));
5824 if (unsorted_regs[i] < unsorted_regs[order[0]])
5825 order[0] = i;
5828 /* If it isn't an integer register, or if it overwrites the
5829 base register but isn't the last insn in the list, then
5830 we can't do this. */
5831 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5832 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5833 return 0;
5835 unsorted_offsets[i] = INTVAL (offset);
5837 else
5838 /* Not a suitable memory address. */
5839 return 0;
5842 /* All the useful information has now been extracted from the
5843 operands into unsorted_regs and unsorted_offsets; additionally,
5844 order[0] has been set to the lowest numbered register in the
5845 list. Sort the registers into order, and check that the memory
5846 offsets are ascending and adjacent. */
5848 for (i = 1; i < nops; i++)
5850 int j;
5852 order[i] = order[i - 1];
5853 for (j = 0; j < nops; j++)
5854 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5855 && (order[i] == order[i - 1]
5856 || unsorted_regs[j] < unsorted_regs[order[i]]))
5857 order[i] = j;
5859 /* Have we found a suitable register? If not, one must be used more
5860 than once. */
5861 if (order[i] == order[i - 1])
5862 return 0;
5864 /* Is the memory address adjacent and ascending? */
5865 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5866 return 0;
5869 if (base)
5871 *base = base_reg;
5873 for (i = 0; i < nops; i++)
5874 regs[i] = unsorted_regs[order[i]];
5876 *load_offset = unsorted_offsets[order[0]];
5879 if (unsorted_offsets[order[0]] == 0)
5880 return 1; /* ldmia */
5882 if (unsorted_offsets[order[0]] == 4)
5883 return 2; /* ldmib */
5885 if (unsorted_offsets[order[nops - 1]] == 0)
5886 return 3; /* ldmda */
5888 if (unsorted_offsets[order[nops - 1]] == -4)
5889 return 4; /* ldmdb */
5891 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5892 if the offset isn't small enough. The reason 2 ldrs are faster
5893 is that these ARMs are able to do more than one cache access
5894 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5895 whilst the ARM8 has a double bandwidth cache. This means that
5896 these cores can do both an instruction fetch and a data fetch in
5897 a single cycle, so the trick of calculating the address into a
5898 scratch register (one of the result regs) and then doing a load
5899 multiple actually becomes slower (and no smaller in code size).
5900 That is the transformation
5902 ldr rd1, [rbase + offset]
5903 ldr rd2, [rbase + offset + 4]
5905 to
5907 add rd1, rbase, offset
5908 ldmia rd1, {rd1, rd2}
5910 produces worse code -- '3 cycles + any stalls on rd2' instead of
5911 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5912 access per cycle, the first sequence could never complete in less
5913 than 6 cycles, whereas the ldm sequence would only take 5 and
5914 would make better use of sequential accesses if not hitting the
5915 cache.
5917 We cheat here and test 'arm_ld_sched' which we currently know to
5918 only be true for the ARM8, ARM9 and StrongARM. If this ever
5919 changes, then the test below needs to be reworked. */
5920 if (nops == 2 && arm_ld_sched)
5921 return 0;
5923 /* Can't do it without setting up the offset, only do this if it takes
5924 no more than one insn. */
5925 return (const_ok_for_arm (unsorted_offsets[order[0]])
5926 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
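/* [Editorial sketch, not part of the build.]  Once the registers are
   sorted, classification comes down to the first and last offsets;
   the hypothetical helper below mirrors the return codes used above
   (1 = ldmia, 2 = ldmib, 3 = ldmda, 4 = ldmdb, 5 = set up the base
   first, 0 = no ldm possible).  E.g. offsets {4, 8} give 2, so
   "ldmib rbase, {rlo, rhi}".  */
#if 0
static int
example_classify_ldm (const long *offsets, int nops)
{
  long first = offsets[0], last = offsets[nops - 1];
  int i;

  /* Offsets must ascend in steps of one word.  */
  for (i = 1; i < nops; i++)
    if (offsets[i] != offsets[i - 1] + 4)
      return 0;

  if (first == 0)
    return 1;   /* ldmia */
  if (first == 4)
    return 2;   /* ldmib */
  if (last == 0)
    return 3;   /* ldmda */
  if (last == -4)
    return 4;   /* ldmdb */

  return 5;     /* add/sub the base into a scratch, then ldmia */
}
#endif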
5929 const char *
5930 emit_ldm_seq (rtx *operands, int nops)
5932 int regs[4];
5933 int base_reg;
5934 HOST_WIDE_INT offset;
5935 char buf[100];
5936 int i;
5938 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5940 case 1:
5941 strcpy (buf, "ldm%?ia\t");
5942 break;
5944 case 2:
5945 strcpy (buf, "ldm%?ib\t");
5946 break;
5948 case 3:
5949 strcpy (buf, "ldm%?da\t");
5950 break;
5952 case 4:
5953 strcpy (buf, "ldm%?db\t");
5954 break;
5956 case 5:
5957 if (offset >= 0)
5958 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5959 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5960 (long) offset);
5961 else
5962 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5963 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5964 (long) -offset);
5965 output_asm_insn (buf, operands);
5966 base_reg = regs[0];
5967 strcpy (buf, "ldm%?ia\t");
5968 break;
5970 default:
5971 gcc_unreachable ();
5974 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5975 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5977 for (i = 1; i < nops; i++)
5978 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5979 reg_names[regs[i]]);
5981 strcat (buf, "}\t%@ phole ldm");
5983 output_asm_insn (buf, operands);
5984 return "";
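/* A sketch of the output template built above (assuming the common
   case of an empty REGISTER_PREFIX): for case 1, base r4 and
   registers r1/r2, the buffer handed to output_asm_insn reads

	ldm%?ia	r4, {r1, r2}	%@ phole ldm

   where %? expands to the insn's condition code and %@ to the
   assembler comment character.  */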
5988 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5989 HOST_WIDE_INT * load_offset)
5991 int unsorted_regs[4];
5992 HOST_WIDE_INT unsorted_offsets[4];
5993 int order[4];
5994 int base_reg = -1;
5995 int i;
5997 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5998 extended if required. */
5999 gcc_assert (nops >= 2 && nops <= 4);
6001 /* Loop over the operands and check that the memory references are
6002 suitable (i.e. immediate offsets from the same base register). At
6003 the same time, extract the target register, and the memory
6004 offsets. */
6005 for (i = 0; i < nops; i++)
6007 rtx reg;
6008 rtx offset;
6010 /* Convert a subreg of a mem into the mem itself. */
6011 if (GET_CODE (operands[nops + i]) == SUBREG)
6012 operands[nops + i] = alter_subreg (operands + (nops + i));
6014 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6016 /* Don't reorder volatile memory references; it doesn't seem worth
6017 looking for the case where the order is ok anyway. */
6018 if (MEM_VOLATILE_P (operands[nops + i]))
6019 return 0;
6021 offset = const0_rtx;
6023 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6024 || (GET_CODE (reg) == SUBREG
6025 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6026 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6027 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6028 == REG)
6029 || (GET_CODE (reg) == SUBREG
6030 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6031 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6032 == CONST_INT)))
6034 if (i == 0)
6036 base_reg = REGNO (reg);
6037 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6038 ? REGNO (operands[i])
6039 : REGNO (SUBREG_REG (operands[i])));
6040 order[0] = 0;
6042 else
6044 if (base_reg != (int) REGNO (reg))
6045 /* Not addressed from the same base register. */
6046 return 0;
6048 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6049 ? REGNO (operands[i])
6050 : REGNO (SUBREG_REG (operands[i])));
6051 if (unsorted_regs[i] < unsorted_regs[order[0]])
6052 order[0] = i;
6055 /* If it isn't an integer register, then we can't do this. */
6056 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
6057 return 0;
6059 unsorted_offsets[i] = INTVAL (offset);
6061 else
6062 /* Not a suitable memory address. */
6063 return 0;
6066 /* All the useful information has now been extracted from the
6067 operands into unsorted_regs and unsorted_offsets; additionally,
6068 order[0] has been set to the lowest numbered register in the
6069 list. Sort the registers into order, and check that the memory
6070 offsets are ascending and adjacent. */
6072 for (i = 1; i < nops; i++)
6074 int j;
6076 order[i] = order[i - 1];
6077 for (j = 0; j < nops; j++)
6078 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6079 && (order[i] == order[i - 1]
6080 || unsorted_regs[j] < unsorted_regs[order[i]]))
6081 order[i] = j;
6083 /* Have we found a suitable register? If not, one must be used more
6084 than once. */
6085 if (order[i] == order[i - 1])
6086 return 0;
6088 /* Are the memory addresses adjacent and ascending? */
6089 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6090 return 0;
6093 if (base)
6095 *base = base_reg;
6097 for (i = 0; i < nops; i++)
6098 regs[i] = unsorted_regs[order[i]];
6100 *load_offset = unsorted_offsets[order[0]];
6103 if (unsorted_offsets[order[0]] == 0)
6104 return 1; /* stmia */
6106 if (unsorted_offsets[order[0]] == 4)
6107 return 2; /* stmib */
6109 if (unsorted_offsets[order[nops - 1]] == 0)
6110 return 3; /* stmda */
6112 if (unsorted_offsets[order[nops - 1]] == -4)
6113 return 4; /* stmdb */
6115 return 0;
6118 const char *
6119 emit_stm_seq (rtx *operands, int nops)
6121 int regs[4];
6122 int base_reg;
6123 HOST_WIDE_INT offset;
6124 char buf[100];
6125 int i;
6127 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6129 case 1:
6130 strcpy (buf, "stm%?ia\t");
6131 break;
6133 case 2:
6134 strcpy (buf, "stm%?ib\t");
6135 break;
6137 case 3:
6138 strcpy (buf, "stm%?da\t");
6139 break;
6141 case 4:
6142 strcpy (buf, "stm%?db\t");
6143 break;
6145 default:
6146 gcc_unreachable ();
6149 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6150 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6152 for (i = 1; i < nops; i++)
6153 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6154 reg_names[regs[i]]);
6156 strcat (buf, "}\t%@ phole stm");
6158 output_asm_insn (buf, operands);
6159 return "";
6162 /* Routines for use in generating RTL. */
6165 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
6166 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6168 HOST_WIDE_INT offset = *offsetp;
6169 int i = 0, j;
6170 rtx result;
6171 int sign = up ? 1 : -1;
6172 rtx mem, addr;
6174 /* XScale has load-store double instructions, but they have stricter
6175 alignment requirements than load-store multiple, so we cannot
6176 use them.
6178 For XScale ldm requires 2 + NREGS cycles to complete and blocks
6179 the pipeline until completion.
6181 NREGS CYCLES
6182 1 3
6183 2 4
6184 3 5
6185 4 6
6187 An ldr instruction takes 1-3 cycles, but does not block the
6188 pipeline.
6190 NREGS CYCLES
6191 1 1-3
6192 2 2-6
6193 3 3-9
6194 4 4-12
6196 Best case ldr will always win. However, the more ldr instructions
6197 we issue, the less likely we are to be able to schedule them well.
6198 Using ldr instructions also increases code size.
6200 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6201 for counts of 3 or 4 regs. */
6202 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6204 rtx seq;
6206 start_sequence ();
6208 for (i = 0; i < count; i++)
6210 addr = plus_constant (from, i * 4 * sign);
6211 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6212 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
6213 offset += 4 * sign;
6216 if (write_back)
6218 emit_move_insn (from, plus_constant (from, count * 4 * sign));
6219 *offsetp = offset;
6222 seq = get_insns ();
6223 end_sequence ();
6225 return seq;
6228 result = gen_rtx_PARALLEL (VOIDmode,
6229 rtvec_alloc (count + (write_back ? 1 : 0)));
6230 if (write_back)
6232 XVECEXP (result, 0, 0)
6233 = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
6234 i = 1;
6235 count++;
6238 for (j = 0; i < count; i++, j++)
6240 addr = plus_constant (from, j * 4 * sign);
6241 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6242 XVECEXP (result, 0, i)
6243 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
6244 offset += 4 * sign;
6247 if (write_back)
6248 *offsetp = offset;
6250 return result;
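/* A sketch of the RTL built on the fall-through path (illustrative
   values: base_regno == 0, count == 2, up and write_back set; FROM
   stands for the base address register passed in):

     (parallel
       [(set FROM (plus FROM (const_int 8)))
	(set (reg:SI 0) (mem:SI FROM))
	(set (reg:SI 1) (mem:SI (plus FROM (const_int 4))))])  */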
6254 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
6255 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6257 HOST_WIDE_INT offset = *offsetp;
6258 int i = 0, j;
6259 rtx result;
6260 int sign = up ? 1 : -1;
6261 rtx mem, addr;
6263 /* See arm_gen_load_multiple for discussion of
6264 the pros/cons of ldm/stm usage for XScale. */
6265 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6267 rtx seq;
6269 start_sequence ();
6271 for (i = 0; i < count; i++)
6273 addr = plus_constant (to, i * 4 * sign);
6274 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6275 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6276 offset += 4 * sign;
6279 if (write_back)
6281 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6282 *offsetp = offset;
6285 seq = get_insns ();
6286 end_sequence ();
6288 return seq;
6291 result = gen_rtx_PARALLEL (VOIDmode,
6292 rtvec_alloc (count + (write_back ? 1 : 0)));
6293 if (write_back)
6295 XVECEXP (result, 0, 0)
6296 = gen_rtx_SET (VOIDmode, to,
6297 plus_constant (to, count * 4 * sign));
6298 i = 1;
6299 count++;
6302 for (j = 0; i < count; i++, j++)
6304 addr = plus_constant (to, j * 4 * sign);
6305 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6306 XVECEXP (result, 0, i)
6307 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6308 offset += 4 * sign;
6311 if (write_back)
6312 *offsetp = offset;
6314 return result;
6318 arm_gen_movmemqi (rtx *operands)
6320 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6321 HOST_WIDE_INT srcoffset, dstoffset;
6322 int i;
6323 rtx src, dst, srcbase, dstbase;
6324 rtx part_bytes_reg = NULL;
6325 rtx mem;
6327 if (GET_CODE (operands[2]) != CONST_INT
6328 || GET_CODE (operands[3]) != CONST_INT
6329 || INTVAL (operands[2]) > 64
6330 || INTVAL (operands[3]) & 3)
6331 return 0;
6333 dstbase = operands[0];
6334 srcbase = operands[1];
6336 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
6337 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
6339 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6340 out_words_to_go = INTVAL (operands[2]) / 4;
6341 last_bytes = INTVAL (operands[2]) & 3;
6342 dstoffset = srcoffset = 0;
6344 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6345 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6347 for (i = 0; in_words_to_go >= 2; i+=4)
6349 if (in_words_to_go > 4)
6350 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6351 srcbase, &srcoffset));
6352 else
6353 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6354 FALSE, srcbase, &srcoffset));
6356 if (out_words_to_go)
6358 if (out_words_to_go > 4)
6359 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6360 dstbase, &dstoffset));
6361 else if (out_words_to_go != 1)
6362 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6363 dst, TRUE,
6364 (last_bytes == 0
6365 ? FALSE : TRUE),
6366 dstbase, &dstoffset));
6367 else
6369 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6370 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6371 if (last_bytes != 0)
6373 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6374 dstoffset += 4;
6379 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6380 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6383 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6384 if (out_words_to_go)
6386 rtx sreg;
6388 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6389 sreg = copy_to_reg (mem);
6391 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6392 emit_move_insn (mem, sreg);
6393 in_words_to_go--;
6395 gcc_assert (!in_words_to_go); /* Sanity check. */
6398 if (in_words_to_go)
6400 gcc_assert (in_words_to_go > 0);
6402 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6403 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6406 gcc_assert (!last_bytes || part_bytes_reg);
6408 if (BYTES_BIG_ENDIAN && last_bytes)
6410 rtx tmp = gen_reg_rtx (SImode);
6412 /* The bytes we want are in the top end of the word. */
6413 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6414 GEN_INT (8 * (4 - last_bytes))));
6415 part_bytes_reg = tmp;
6417 while (last_bytes)
6419 mem = adjust_automodify_address (dstbase, QImode,
6420 plus_constant (dst, last_bytes - 1),
6421 dstoffset + last_bytes - 1);
6422 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6424 if (--last_bytes)
6426 tmp = gen_reg_rtx (SImode);
6427 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6428 part_bytes_reg = tmp;
6433 else
6435 if (last_bytes > 1)
6437 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6438 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6439 last_bytes -= 2;
6440 if (last_bytes)
6442 rtx tmp = gen_reg_rtx (SImode);
6443 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6444 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6445 part_bytes_reg = tmp;
6446 dstoffset += 2;
6450 if (last_bytes)
6452 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6453 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6457 return 1;
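/* A worked example (a sketch, assuming the ldm path rather than the
   XScale ldr path): for a 6-byte copy, in_words_to_go is 2,
   out_words_to_go is 1 and last_bytes is 2, so we load two words
   into r0/r1 with a load multiple, store r0 back with a single word
   store, bump dst by 4, and finish on a little-endian target with an
   HImode store of the low half of r1.  */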
6460 /* Select a dominance comparison mode if possible for a test of the general
6461 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6462 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6463 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6464 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6465 In all cases OP will be either EQ or NE, but we don't need to know which
6466 here. If we are unable to support a dominance comparison we return
6467 CC mode. This will then fail to match for the RTL expressions that
6468 generate this call. */
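/* An illustrative case (a sketch): for a test such as
   (x == y || x <= y) with COND_OR == DOM_CC_X_OR_Y, cond1 is EQ and
   cond2 is LE; EQ dominates LE, so the code below returns
   CC_DLEmode.  */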
6469 enum machine_mode
6470 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6472 enum rtx_code cond1, cond2;
6473 int swapped = 0;
6475 /* Currently we will probably get the wrong result if the individual
6476 comparisons are not simple. This also ensures that it is safe to
6477 reverse a comparison if necessary. */
6478 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6479 != CCmode)
6480 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6481 != CCmode))
6482 return CCmode;
6484 /* The if_then_else variant of this tests the second condition if the
6485 first passes, but is true if the first fails. Reverse the first
6486 condition to get a true "inclusive-or" expression. */
6487 if (cond_or == DOM_CC_NX_OR_Y)
6488 cond1 = reverse_condition (cond1);
6490 /* If the comparisons are not equal, and one doesn't dominate the other,
6491 then we can't do this. */
6492 if (cond1 != cond2
6493 && !comparison_dominates_p (cond1, cond2)
6494 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6495 return CCmode;
6497 if (swapped)
6499 enum rtx_code temp = cond1;
6500 cond1 = cond2;
6501 cond2 = temp;
6504 switch (cond1)
6506 case EQ:
6507 if (cond_or == DOM_CC_X_AND_Y)
6508 return CC_DEQmode;
6510 switch (cond2)
6512 case EQ: return CC_DEQmode;
6513 case LE: return CC_DLEmode;
6514 case LEU: return CC_DLEUmode;
6515 case GE: return CC_DGEmode;
6516 case GEU: return CC_DGEUmode;
6517 default: gcc_unreachable ();
6520 case LT:
6521 if (cond_or == DOM_CC_X_AND_Y)
6522 return CC_DLTmode;
6524 switch (cond2)
6526 case LT:
6527 return CC_DLTmode;
6528 case LE:
6529 return CC_DLEmode;
6530 case NE:
6531 return CC_DNEmode;
6532 default:
6533 gcc_unreachable ();
6536 case GT:
6537 if (cond_or == DOM_CC_X_AND_Y)
6538 return CC_DGTmode;
6540 switch (cond2)
6542 case GT:
6543 return CC_DGTmode;
6544 case GE:
6545 return CC_DGEmode;
6546 case NE:
6547 return CC_DNEmode;
6548 default:
6549 gcc_unreachable ();
6552 case LTU:
6553 if (cond_or == DOM_CC_X_AND_Y)
6554 return CC_DLTUmode;
6556 switch (cond2)
6558 case LTU:
6559 return CC_DLTUmode;
6560 case LEU:
6561 return CC_DLEUmode;
6562 case NE:
6563 return CC_DNEmode;
6564 default:
6565 gcc_unreachable ();
6568 case GTU:
6569 if (cond_or == DOM_CC_X_AND_Y)
6570 return CC_DGTUmode;
6572 switch (cond2)
6574 case GTU:
6575 return CC_DGTUmode;
6576 case GEU:
6577 return CC_DGEUmode;
6578 case NE:
6579 return CC_DNEmode;
6580 default:
6581 gcc_unreachable ();
6584 /* The remaining cases only occur when both comparisons are the
6585 same. */
6586 case NE:
6587 gcc_assert (cond1 == cond2);
6588 return CC_DNEmode;
6590 case LE:
6591 gcc_assert (cond1 == cond2);
6592 return CC_DLEmode;
6594 case GE:
6595 gcc_assert (cond1 == cond2);
6596 return CC_DGEmode;
6598 case LEU:
6599 gcc_assert (cond1 == cond2);
6600 return CC_DLEUmode;
6602 case GEU:
6603 gcc_assert (cond1 == cond2);
6604 return CC_DGEUmode;
6606 default:
6607 gcc_unreachable ();
6611 enum machine_mode
6612 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6614 /* All floating point compares return CCFP if it is an equality
6615 comparison, and CCFPE otherwise. */
6616 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6618 switch (op)
6620 case EQ:
6621 case NE:
6622 case UNORDERED:
6623 case ORDERED:
6624 case UNLT:
6625 case UNLE:
6626 case UNGT:
6627 case UNGE:
6628 case UNEQ:
6629 case LTGT:
6630 return CCFPmode;
6632 case LT:
6633 case LE:
6634 case GT:
6635 case GE:
6636 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6637 return CCFPmode;
6638 return CCFPEmode;
6640 default:
6641 gcc_unreachable ();
6645 /* A compare with a shifted operand. Because of canonicalization, the
6646 comparison will have to be swapped when we emit the assembler. */
6647 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6648 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6649 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6650 || GET_CODE (x) == ROTATERT))
6651 return CC_SWPmode;
6653 /* This operation is performed swapped, but since we only rely on the Z
6654 flag we don't need an additional mode. */
6655 if (GET_MODE (y) == SImode && REG_P (y)
6656 && GET_CODE (x) == NEG
6657 && (op == EQ || op == NE))
6658 return CC_Zmode;
6660 /* This is a special case that is used by combine to allow a
6661 comparison of a shifted byte load to be split into a zero-extend
6662 followed by a comparison of the shifted integer (only valid for
6663 equalities and unsigned inequalities). */
6664 if (GET_MODE (x) == SImode
6665 && GET_CODE (x) == ASHIFT
6666 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6667 && GET_CODE (XEXP (x, 0)) == SUBREG
6668 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6669 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6670 && (op == EQ || op == NE
6671 || op == GEU || op == GTU || op == LTU || op == LEU)
6672 && GET_CODE (y) == CONST_INT)
6673 return CC_Zmode;
6675 /* A construct for a conditional compare: if the false arm contains
6676 0, then both conditions must be true; otherwise either condition
6677 must be true. Not all conditions are possible, so CCmode is
6678 returned if it can't be done. */
6679 if (GET_CODE (x) == IF_THEN_ELSE
6680 && (XEXP (x, 2) == const0_rtx
6681 || XEXP (x, 2) == const1_rtx)
6682 && COMPARISON_P (XEXP (x, 0))
6683 && COMPARISON_P (XEXP (x, 1)))
6684 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6685 INTVAL (XEXP (x, 2)));
6687 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6688 if (GET_CODE (x) == AND
6689 && COMPARISON_P (XEXP (x, 0))
6690 && COMPARISON_P (XEXP (x, 1)))
6691 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6692 DOM_CC_X_AND_Y);
6694 if (GET_CODE (x) == IOR
6695 && COMPARISON_P (XEXP (x, 0))
6696 && COMPARISON_P (XEXP (x, 1)))
6697 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6698 DOM_CC_X_OR_Y);
6700 /* An operation (on Thumb) where we want to test for a single bit.
6701 This is done by shifting that bit up into the top bit of a
6702 scratch register; we can then branch on the sign bit. */
6703 if (TARGET_THUMB
6704 && GET_MODE (x) == SImode
6705 && (op == EQ || op == NE)
6706 && (GET_CODE (x) == ZERO_EXTRACT))
6707 return CC_Nmode;
6709 /* For an operation that sets the condition codes as a side-effect,
6710 the V flag is not set correctly, so we can only use comparisons
6711 where this doesn't matter. (For LT and GE we can use "mi" and
6712 "pl" instead.) */
6713 if (GET_MODE (x) == SImode
6714 && y == const0_rtx
6715 && (op == EQ || op == NE || op == LT || op == GE)
6716 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6717 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6718 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6719 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6720 || GET_CODE (x) == LSHIFTRT
6721 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6722 || GET_CODE (x) == ROTATERT
6723 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6724 return CC_NOOVmode;
6726 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6727 return CC_Zmode;
6729 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6730 && GET_CODE (x) == PLUS
6731 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6732 return CC_Cmode;
6734 return CCmode;
6737 /* X and Y are two things to compare using CODE. Emit the compare insn and
6738 return the rtx for register 0 in the proper mode. FP means this is a
6739 floating point compare: I don't think that it is needed on the ARM. */
6741 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6743 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6744 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6746 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
6748 return cc_reg;
6751 /* Generate a sequence of insns that will generate the correct return
6752 address mask depending on the physical architecture that the program
6753 is running on. */
6755 arm_gen_return_addr_mask (void)
6757 rtx reg = gen_reg_rtx (Pmode);
6759 emit_insn (gen_return_addr_mask (reg));
6760 return reg;
6763 void
6764 arm_reload_in_hi (rtx *operands)
6766 rtx ref = operands[1];
6767 rtx base, scratch;
6768 HOST_WIDE_INT offset = 0;
6770 if (GET_CODE (ref) == SUBREG)
6772 offset = SUBREG_BYTE (ref);
6773 ref = SUBREG_REG (ref);
6776 if (GET_CODE (ref) == REG)
6778 /* We have a pseudo which has been spilt onto the stack; there
6779 are two cases here: the first where there is a simple
6780 stack-slot replacement and a second where the stack-slot is
6781 out of range, or is used as a subreg. */
6782 if (reg_equiv_mem[REGNO (ref)])
6784 ref = reg_equiv_mem[REGNO (ref)];
6785 base = find_replacement (&XEXP (ref, 0));
6787 else
6788 /* The slot is out of range, or was dressed up in a SUBREG. */
6789 base = reg_equiv_address[REGNO (ref)];
6791 else
6792 base = find_replacement (&XEXP (ref, 0));
6794 /* Handle the case where the address is too complex to be offset by 1. */
6795 if (GET_CODE (base) == MINUS
6796 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6798 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6800 emit_set_insn (base_plus, base);
6801 base = base_plus;
6803 else if (GET_CODE (base) == PLUS)
6805 /* The addend must be CONST_INT, or we would have dealt with it above. */
6806 HOST_WIDE_INT hi, lo;
6808 offset += INTVAL (XEXP (base, 1));
6809 base = XEXP (base, 0);
6811 /* Rework the address into a legal sequence of insns. */
6812 /* Valid range for lo is -4095 -> 4095. */
6813 lo = (offset >= 0
6814 ? (offset & 0xfff)
6815 : -((-offset) & 0xfff));
6817 /* Corner case: if lo is the max offset, then we would be out of range
6818 once we have added the additional 1 below, so bump the msb into the
6819 pre-loading insn(s). */
6820 if (lo == 4095)
6821 lo &= 0x7ff;
6823 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6824 ^ (HOST_WIDE_INT) 0x80000000)
6825 - (HOST_WIDE_INT) 0x80000000);
6827 gcc_assert (hi + lo == offset);
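/* Illustrative numbers: an offset of 4100 (0x1004) splits into
   lo == 4 and hi == 4096; the hi part is folded into base_plus below
   so that the remaining byte accesses stay inside the +/-4095
   addressing range.  */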
6829 if (hi != 0)
6831 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6833 /* Get the base address; addsi3 knows how to handle constants
6834 that require more than one insn. */
6835 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6836 base = base_plus;
6837 offset = lo;
6841 /* Operands[2] may overlap operands[0] (though it won't overlap
6842 operands[1]); that's why we asked for a DImode reg -- so we can
6843 use the half that does not overlap. */
6844 if (REGNO (operands[2]) == REGNO (operands[0]))
6845 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6846 else
6847 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6849 emit_insn (gen_zero_extendqisi2 (scratch,
6850 gen_rtx_MEM (QImode,
6851 plus_constant (base,
6852 offset))));
6853 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6854 gen_rtx_MEM (QImode,
6855 plus_constant (base,
6856 offset + 1))));
6857 if (!BYTES_BIG_ENDIAN)
6858 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6859 gen_rtx_IOR (SImode,
6860 gen_rtx_ASHIFT
6861 (SImode,
6862 gen_rtx_SUBREG (SImode, operands[0], 0),
6863 GEN_INT (8)),
6864 scratch));
6865 else
6866 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6867 gen_rtx_IOR (SImode,
6868 gen_rtx_ASHIFT (SImode, scratch,
6869 GEN_INT (8)),
6870 gen_rtx_SUBREG (SImode, operands[0], 0)));
6873 /* Handle storing a half-word to memory during reload by synthesizing as two
6874 byte stores. Take care not to clobber the input values until after we
6875 have moved them somewhere safe. This code assumes that if the DImode
6876 scratch in operands[2] overlaps either the input value or output address
6877 in some way, then that value must die in this insn (we absolutely need
6878 two scratch registers for some corner cases). */
6879 void
6880 arm_reload_out_hi (rtx *operands)
6882 rtx ref = operands[0];
6883 rtx outval = operands[1];
6884 rtx base, scratch;
6885 HOST_WIDE_INT offset = 0;
6887 if (GET_CODE (ref) == SUBREG)
6889 offset = SUBREG_BYTE (ref);
6890 ref = SUBREG_REG (ref);
6893 if (GET_CODE (ref) == REG)
6895 /* We have a pseudo which has been spilt onto the stack; there
6896 are two cases here: the first where there is a simple
6897 stack-slot replacement and a second where the stack-slot is
6898 out of range, or is used as a subreg. */
6899 if (reg_equiv_mem[REGNO (ref)])
6901 ref = reg_equiv_mem[REGNO (ref)];
6902 base = find_replacement (&XEXP (ref, 0));
6904 else
6905 /* The slot is out of range, or was dressed up in a SUBREG. */
6906 base = reg_equiv_address[REGNO (ref)];
6908 else
6909 base = find_replacement (&XEXP (ref, 0));
6911 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6913 /* Handle the case where the address is too complex to be offset by 1. */
6914 if (GET_CODE (base) == MINUS
6915 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6917 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6919 /* Be careful not to destroy OUTVAL. */
6920 if (reg_overlap_mentioned_p (base_plus, outval))
6922 /* Updating base_plus might destroy outval; see if we can
6923 swap the scratch and base_plus. */
6924 if (!reg_overlap_mentioned_p (scratch, outval))
6926 rtx tmp = scratch;
6927 scratch = base_plus;
6928 base_plus = tmp;
6930 else
6932 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6934 /* Be conservative and copy OUTVAL into the scratch now;
6935 this should only be necessary if outval is a subreg
6936 of something larger than a word. */
6937 /* XXX Might this clobber base? I can't see how it can,
6938 since scratch is known to overlap with OUTVAL, and
6939 must be wider than a word. */
6940 emit_insn (gen_movhi (scratch_hi, outval));
6941 outval = scratch_hi;
6945 emit_set_insn (base_plus, base);
6946 base = base_plus;
6948 else if (GET_CODE (base) == PLUS)
6950 /* The addend must be CONST_INT, or we would have dealt with it above. */
6951 HOST_WIDE_INT hi, lo;
6953 offset += INTVAL (XEXP (base, 1));
6954 base = XEXP (base, 0);
6956 /* Rework the address into a legal sequence of insns. */
6957 /* Valid range for lo is -4095 -> 4095. */
6958 lo = (offset >= 0
6959 ? (offset & 0xfff)
6960 : -((-offset) & 0xfff));
6962 /* Corner case: if lo is the max offset, then we would be out of range
6963 once we have added the additional 1 below, so bump the msb into the
6964 pre-loading insn(s). */
6965 if (lo == 4095)
6966 lo &= 0x7ff;
6968 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6969 ^ (HOST_WIDE_INT) 0x80000000)
6970 - (HOST_WIDE_INT) 0x80000000);
6972 gcc_assert (hi + lo == offset);
6974 if (hi != 0)
6976 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6978 /* Be careful not to destroy OUTVAL. */
6979 if (reg_overlap_mentioned_p (base_plus, outval))
6981 /* Updating base_plus might destroy outval; see if we
6982 can swap the scratch and base_plus. */
6983 if (!reg_overlap_mentioned_p (scratch, outval))
6985 rtx tmp = scratch;
6986 scratch = base_plus;
6987 base_plus = tmp;
6989 else
6991 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6993 /* Be conservative and copy outval into scratch now;
6994 this should only be necessary if outval is a
6995 subreg of something larger than a word. */
6996 /* XXX Might this clobber base? I can't see how it
6997 can, since scratch is known to overlap with
6998 outval. */
6999 emit_insn (gen_movhi (scratch_hi, outval));
7000 outval = scratch_hi;
7004 /* Get the base address; addsi3 knows how to handle constants
7005 that require more than one insn. */
7006 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7007 base = base_plus;
7008 offset = lo;
7012 if (BYTES_BIG_ENDIAN)
7014 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7015 plus_constant (base, offset + 1)),
7016 gen_lowpart (QImode, outval)));
7017 emit_insn (gen_lshrsi3 (scratch,
7018 gen_rtx_SUBREG (SImode, outval, 0),
7019 GEN_INT (8)));
7020 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7021 gen_lowpart (QImode, scratch)));
7023 else
7025 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7026 gen_lowpart (QImode, outval)));
7027 emit_insn (gen_lshrsi3 (scratch,
7028 gen_rtx_SUBREG (SImode, outval, 0),
7029 GEN_INT (8)));
7030 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7031 plus_constant (base, offset + 1)),
7032 gen_lowpart (QImode, scratch)));
7036 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
7037 (padded to the size of a word) should be passed in a register. */
7039 static bool
7040 arm_must_pass_in_stack (enum machine_mode mode, tree type)
7042 if (TARGET_AAPCS_BASED)
7043 return must_pass_in_stack_var_size (mode, type);
7044 else
7045 return must_pass_in_stack_var_size_or_pad (mode, type);
7049 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
7050 Return true if an argument passed on the stack should be padded upwards,
7051 i.e. if the least-significant byte has useful data.
7052 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
7053 aggregate types are placed in the lowest memory address. */
7055 bool
7056 arm_pad_arg_upward (enum machine_mode mode, tree type)
7058 if (!TARGET_AAPCS_BASED)
7059 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
7061 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
7062 return false;
7064 return true;
7068 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
7069 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
7070 byte of the register has useful data, and return the opposite if the
7071 most significant byte does.
7072 For AAPCS, small aggregates and small complex types are always padded
7073 upwards. */
7075 bool
7076 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
7077 tree type, int first ATTRIBUTE_UNUSED)
7079 if (TARGET_AAPCS_BASED
7080 && BYTES_BIG_ENDIAN
7081 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
7082 && int_size_in_bytes (type) <= 4)
7083 return true;
7085 /* Otherwise, use default padding. */
7086 return !BYTES_BIG_ENDIAN;
7090 /* Print a symbolic form of X to the debug file, F. */
7091 static void
7092 arm_print_value (FILE *f, rtx x)
7094 switch (GET_CODE (x))
7096 case CONST_INT:
7097 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
7098 return;
7100 case CONST_DOUBLE:
7101 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
7102 return;
7104 case CONST_VECTOR:
7106 int i;
7108 fprintf (f, "<");
7109 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
7111 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
7112 if (i < (CONST_VECTOR_NUNITS (x) - 1))
7113 fputc (',', f);
7115 fprintf (f, ">");
7117 return;
7119 case CONST_STRING:
7120 fprintf (f, "\"%s\"", XSTR (x, 0));
7121 return;
7123 case SYMBOL_REF:
7124 fprintf (f, "`%s'", XSTR (x, 0));
7125 return;
7127 case LABEL_REF:
7128 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
7129 return;
7131 case CONST:
7132 arm_print_value (f, XEXP (x, 0));
7133 return;
7135 case PLUS:
7136 arm_print_value (f, XEXP (x, 0));
7137 fprintf (f, "+");
7138 arm_print_value (f, XEXP (x, 1));
7139 return;
7141 case PC:
7142 fprintf (f, "pc");
7143 return;
7145 default:
7146 fprintf (f, "????");
7147 return;
7151 /* Routines for manipulation of the constant pool. */
7153 /* ARM instructions cannot load a large constant directly into a
7154 register; they have to come from a pc-relative load. The constant
7155 must therefore be placed in the addressable range of the
7156 pc-relative load. Depending on the precise pc-relative load
7157 instruction, the range is somewhere between 256 bytes and 4k. This
7158 means that we often have to dump a constant inside a function, and
7159 generate code to branch around it.
7161 It is important to minimize this, since the branches will slow
7162 things down and make the code larger.
7164 Normally we can hide the table after an existing unconditional
7165 branch so that there is no interruption of the flow, but in the
7166 worst case the code looks like this:
7168 ldr rn, L1
7170 b L2
7171 align
7172 L1: .long value
7176 ldr rn, L3
7178 b L4
7179 align
7180 L3: .long value
7184 We fix this by performing a scan after scheduling, which notices
7185 which instructions need to have their operands fetched from the
7186 constant table and builds the table.
7188 The algorithm starts by building a table of all the constants that
7189 need fixing up and all the natural barriers in the function (places
7190 where a constant table can be dropped without breaking the flow).
7191 For each fixup we note how far the pc-relative replacement will be
7192 able to reach and the offset of the instruction into the function.
7194 Having built the table we then group the fixes together to form
7195 tables that are as large as possible (subject to addressing
7196 constraints) and emit each table of constants after the last
7197 barrier that is within range of all the instructions in the group.
7198 If a group does not contain a barrier, then we forcibly create one
7199 by inserting a jump instruction into the flow. Once the table has
7200 been inserted, the insns are then modified to reference the
7201 relevant entry in the pool.
7203 Possible enhancements to the algorithm (not implemented) are:
7205 1) For some processors and object formats, there may be benefit in
7206 aligning the pools to the start of cache lines; this alignment
7207 would need to be taken into account when calculating addressability
7208 of a pool. */
7210 /* These typedefs are located at the start of this file, so that
7211 they can be used in the prototypes there. This comment is to
7212 remind readers of that fact so that the following structures
7213 can be understood more easily.
7215 typedef struct minipool_node Mnode;
7216 typedef struct minipool_fixup Mfix; */
7218 struct minipool_node
7220 /* Doubly linked chain of entries. */
7221 Mnode * next;
7222 Mnode * prev;
7223 /* The maximum offset into the code at which this entry can be placed. While
7224 pushing fixes for forward references, all entries are sorted in order
7225 of increasing max_address. */
7226 HOST_WIDE_INT max_address;
7227 /* Similarly for an entry inserted for a backwards ref. */
7228 HOST_WIDE_INT min_address;
7229 /* The number of fixes referencing this entry. This can become zero
7230 if we "unpush" an entry. In this case we ignore the entry when we
7231 come to emit the code. */
7232 int refcount;
7233 /* The offset from the start of the minipool. */
7234 HOST_WIDE_INT offset;
7235 /* The value in the table. */
7236 rtx value;
7237 /* The mode of value. */
7238 enum machine_mode mode;
7239 /* The size of the value. With iWMMXt enabled,
7240 sizes > 4 also imply an alignment of 8 bytes. */
7241 int fix_size;
7244 struct minipool_fixup
7246 Mfix * next;
7247 rtx insn;
7248 HOST_WIDE_INT address;
7249 rtx * loc;
7250 enum machine_mode mode;
7251 int fix_size;
7252 rtx value;
7253 Mnode * minipool;
7254 HOST_WIDE_INT forwards;
7255 HOST_WIDE_INT backwards;
7258 /* Fixes less than a word need padding out to a word boundary. */
7259 #define MINIPOOL_FIX_SIZE(mode) \
7260 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
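/* For example, MINIPOOL_FIX_SIZE (HImode) is 4 (padded up to a word)
   while MINIPOOL_FIX_SIZE (DImode) is 8.  */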
7262 static Mnode * minipool_vector_head;
7263 static Mnode * minipool_vector_tail;
7264 static rtx minipool_vector_label;
7266 /* The linked list of all minipool fixes required for this function. */
7267 Mfix * minipool_fix_head;
7268 Mfix * minipool_fix_tail;
7269 /* The fix entry for the current minipool, once it has been placed. */
7270 Mfix * minipool_barrier;
7272 /* Determines if INSN is the start of a jump table. Returns the end
7273 of the TABLE or NULL_RTX. */
7274 static rtx
7275 is_jump_table (rtx insn)
7277 rtx table;
7279 if (GET_CODE (insn) == JUMP_INSN
7280 && JUMP_LABEL (insn) != NULL
7281 && ((table = next_real_insn (JUMP_LABEL (insn)))
7282 == next_real_insn (insn))
7283 && table != NULL
7284 && GET_CODE (table) == JUMP_INSN
7285 && (GET_CODE (PATTERN (table)) == ADDR_VEC
7286 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
7287 return table;
7289 return NULL_RTX;
7292 #ifndef JUMP_TABLES_IN_TEXT_SECTION
7293 #define JUMP_TABLES_IN_TEXT_SECTION 0
7294 #endif
7296 static HOST_WIDE_INT
7297 get_jump_table_size (rtx insn)
7299 /* ADDR_VECs only take room if read-only data goes into the text
7300 section. */
7301 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
7303 rtx body = PATTERN (insn);
7304 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7306 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
7309 return 0;
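/* As an illustration: an ADDR_DIFF_VEC holding four SImode offsets
   accounts for 4 * 4 == 16 bytes when jump tables live in the text
   section, and for nothing otherwise.  */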
7312 /* Move a minipool fix MP from its current location to before MAX_MP.
7313 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7314 constraints may need updating. */
7315 static Mnode *
7316 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7317 HOST_WIDE_INT max_address)
7319 /* The code below assumes these are different. */
7320 gcc_assert (mp != max_mp);
7322 if (max_mp == NULL)
7324 if (max_address < mp->max_address)
7325 mp->max_address = max_address;
7327 else
7329 if (max_address > max_mp->max_address - mp->fix_size)
7330 mp->max_address = max_mp->max_address - mp->fix_size;
7331 else
7332 mp->max_address = max_address;
7334 /* Unlink MP from its current position. Since max_mp is non-null,
7335 mp->prev must be non-null. */
7336 mp->prev->next = mp->next;
7337 if (mp->next != NULL)
7338 mp->next->prev = mp->prev;
7339 else
7340 minipool_vector_tail = mp->prev;
7342 /* Re-insert it before MAX_MP. */
7343 mp->next = max_mp;
7344 mp->prev = max_mp->prev;
7345 max_mp->prev = mp;
7347 if (mp->prev != NULL)
7348 mp->prev->next = mp;
7349 else
7350 minipool_vector_head = mp;
7353 /* Save the new entry. */
7354 max_mp = mp;
7356 /* Scan over the preceding entries and adjust their addresses as
7357 required. */
7358 while (mp->prev != NULL
7359 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7361 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7362 mp = mp->prev;
7365 return max_mp;
7368 /* Add a constant to the minipool for a forward reference. Returns the
7369 node added or NULL if the constant will not fit in this pool. */
7370 static Mnode *
7371 add_minipool_forward_ref (Mfix *fix)
7373 /* If set, max_mp is the first pool_entry that has a lower
7374 constraint than the one we are trying to add. */
7375 Mnode * max_mp = NULL;
7376 HOST_WIDE_INT max_address = fix->address + fix->forwards;
7377 Mnode * mp;
7379 /* If this fix's address is greater than the address of the first
7380 entry, then we can't put the fix in this pool. We subtract the
7381 size of the current fix to ensure that if the table is fully
7382 packed we still have enough room to insert this value by shuffling
7383 the other fixes forwards. */
7384 if (minipool_vector_head &&
7385 fix->address >= minipool_vector_head->max_address - fix->fix_size)
7386 return NULL;
7388 /* Scan the pool to see if a constant with the same value has
7389 already been added. While we are doing this, also note the
7390 location where we must insert the constant if it doesn't already
7391 exist. */
7392 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7394 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7395 && fix->mode == mp->mode
7396 && (GET_CODE (fix->value) != CODE_LABEL
7397 || (CODE_LABEL_NUMBER (fix->value)
7398 == CODE_LABEL_NUMBER (mp->value)))
7399 && rtx_equal_p (fix->value, mp->value))
7401 /* More than one fix references this entry. */
7402 mp->refcount++;
7403 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7406 /* Note the insertion point if necessary. */
7407 if (max_mp == NULL
7408 && mp->max_address > max_address)
7409 max_mp = mp;
7411 /* If we are inserting an 8-byte aligned quantity and
7412 we have not already found an insertion point, then
7413 make sure that all such 8-byte aligned quantities are
7414 placed at the start of the pool. */
7415 if (ARM_DOUBLEWORD_ALIGN
7416 && max_mp == NULL
7417 && fix->fix_size == 8
7418 && mp->fix_size != 8)
7420 max_mp = mp;
7421 max_address = mp->max_address;
7425 /* The value is not currently in the minipool, so we need to create
7426 a new entry for it. If MAX_MP is NULL, the entry will be put on
7427 the end of the list since the placement is less constrained than
7428 any existing entry. Otherwise, we insert the new fix before
7429 MAX_MP and, if necessary, adjust the constraints on the other
7430 entries. */
7431 mp = xmalloc (sizeof (* mp));
7432 mp->fix_size = fix->fix_size;
7433 mp->mode = fix->mode;
7434 mp->value = fix->value;
7435 mp->refcount = 1;
7436 /* Not yet required for a backwards ref. */
7437 mp->min_address = -65536;
7439 if (max_mp == NULL)
7441 mp->max_address = max_address;
7442 mp->next = NULL;
7443 mp->prev = minipool_vector_tail;
7445 if (mp->prev == NULL)
7447 minipool_vector_head = mp;
7448 minipool_vector_label = gen_label_rtx ();
7450 else
7451 mp->prev->next = mp;
7453 minipool_vector_tail = mp;
7455 else
7457 if (max_address > max_mp->max_address - mp->fix_size)
7458 mp->max_address = max_mp->max_address - mp->fix_size;
7459 else
7460 mp->max_address = max_address;
7462 mp->next = max_mp;
7463 mp->prev = max_mp->prev;
7464 max_mp->prev = mp;
7465 if (mp->prev != NULL)
7466 mp->prev->next = mp;
7467 else
7468 minipool_vector_head = mp;
7471 /* Save the new entry. */
7472 max_mp = mp;
7474 /* Scan over the preceding entries and adjust their addresses as
7475 required. */
7476 while (mp->prev != NULL
7477 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7479 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7480 mp = mp->prev;
7483 return max_mp;
7486 static Mnode *
7487 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7488 HOST_WIDE_INT min_address)
7490 HOST_WIDE_INT offset;
7492 /* The code below assumes these are different. */
7493 gcc_assert (mp != min_mp);
7495 if (min_mp == NULL)
7497 if (min_address > mp->min_address)
7498 mp->min_address = min_address;
7500 else
7502 /* We will adjust this below if it is too loose. */
7503 mp->min_address = min_address;
7505 /* Unlink MP from its current position. Since min_mp is non-null,
7506 mp->next must be non-null. */
7507 mp->next->prev = mp->prev;
7508 if (mp->prev != NULL)
7509 mp->prev->next = mp->next;
7510 else
7511 minipool_vector_head = mp->next;
7513 /* Reinsert it after MIN_MP. */
7514 mp->prev = min_mp;
7515 mp->next = min_mp->next;
7516 min_mp->next = mp;
7517 if (mp->next != NULL)
7518 mp->next->prev = mp;
7519 else
7520 minipool_vector_tail = mp;
7523 min_mp = mp;
7525 offset = 0;
7526 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7528 mp->offset = offset;
7529 if (mp->refcount > 0)
7530 offset += mp->fix_size;
7532 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7533 mp->next->min_address = mp->min_address + mp->fix_size;
7536 return min_mp;
7539 /* Add a constant to the minipool for a backward reference. Returns the
7540 node added or NULL if the constant will not fit in this pool.
7542 Note that the code for insertion for a backwards reference can be
7543 somewhat confusing because the calculated offsets for each fix do
7544 not take into account the size of the pool (which is still under
7545 construction). */
7546 static Mnode *
7547 add_minipool_backward_ref (Mfix *fix)
7549 /* If set, min_mp is the last pool_entry that has a lower constraint
7550 than the one we are trying to add. */
7551 Mnode *min_mp = NULL;
7552 /* This can be negative, since it is only a constraint. */
7553 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7554 Mnode *mp;
7556 /* If we can't reach the current pool from this insn, or if we can't
7557 insert this entry at the end of the pool without pushing other
7558 fixes out of range, then we don't try. This ensures that we
7559 can't fail later on. */
7560 if (min_address >= minipool_barrier->address
7561 || (minipool_vector_tail->min_address + fix->fix_size
7562 >= minipool_barrier->address))
7563 return NULL;
7565 /* Scan the pool to see if a constant with the same value has
7566 already been added. While we are doing this, also note the
7567 location where we must insert the constant if it doesn't already
7568 exist. */
7569 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7571 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7572 && fix->mode == mp->mode
7573 && (GET_CODE (fix->value) != CODE_LABEL
7574 || (CODE_LABEL_NUMBER (fix->value)
7575 == CODE_LABEL_NUMBER (mp->value)))
7576 && rtx_equal_p (fix->value, mp->value)
7577 /* Check that there is enough slack to move this entry to the
7578 end of the table (this is conservative). */
7579 && (mp->max_address
7580 > (minipool_barrier->address
7581 + minipool_vector_tail->offset
7582 + minipool_vector_tail->fix_size)))
7584 mp->refcount++;
7585 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7588 if (min_mp != NULL)
7589 mp->min_address += fix->fix_size;
7590 else
7592 /* Note the insertion point if necessary. */
7593 if (mp->min_address < min_address)
7595 /* For now, we do not allow the insertion of 8-byte alignment
7596 requiring nodes anywhere but at the start of the pool. */
7597 if (ARM_DOUBLEWORD_ALIGN
7598 && fix->fix_size == 8 && mp->fix_size != 8)
7599 return NULL;
7600 else
7601 min_mp = mp;
7603 else if (mp->max_address
7604 < minipool_barrier->address + mp->offset + fix->fix_size)
7606 /* Inserting before this entry would push the fix beyond
7607 its maximum address (which can happen if we have
7608 re-located a forwards fix); force the new fix to come
7609 after it. */
7610 min_mp = mp;
7611 min_address = mp->min_address + fix->fix_size;
7613 /* If we are inserting an 8-byte aligned quantity and
7614 we have not already found an insertion point, then
7615 make sure that all such 8-byte aligned quantities are
7616 placed at the start of the pool. */
7617 else if (ARM_DOUBLEWORD_ALIGN
7618 && min_mp == NULL
7619 && fix->fix_size == 8
7620 && mp->fix_size < 8)
7622 min_mp = mp;
7623 min_address = mp->min_address + fix->fix_size;
7628 /* We need to create a new entry. */
7629 mp = xmalloc (sizeof (* mp));
7630 mp->fix_size = fix->fix_size;
7631 mp->mode = fix->mode;
7632 mp->value = fix->value;
7633 mp->refcount = 1;
7634 mp->max_address = minipool_barrier->address + 65536;
7636 mp->min_address = min_address;
7638 if (min_mp == NULL)
7640 mp->prev = NULL;
7641 mp->next = minipool_vector_head;
7643 if (mp->next == NULL)
7645 minipool_vector_tail = mp;
7646 minipool_vector_label = gen_label_rtx ();
7648 else
7649 mp->next->prev = mp;
7651 minipool_vector_head = mp;
7653 else
7655 mp->next = min_mp->next;
7656 mp->prev = min_mp;
7657 min_mp->next = mp;
7659 if (mp->next != NULL)
7660 mp->next->prev = mp;
7661 else
7662 minipool_vector_tail = mp;
7665 /* Save the new entry. */
7666 min_mp = mp;
7668 if (mp->prev)
7669 mp = mp->prev;
7670 else
7671 mp->offset = 0;
7673 /* Scan over the following entries and adjust their offsets. */
7674 while (mp->next != NULL)
7676 if (mp->next->min_address < mp->min_address + mp->fix_size)
7677 mp->next->min_address = mp->min_address + mp->fix_size;
7679 if (mp->refcount)
7680 mp->next->offset = mp->offset + mp->fix_size;
7681 else
7682 mp->next->offset = mp->offset;
7684 mp = mp->next;
7687 return min_mp;
7690 static void
7691 assign_minipool_offsets (Mfix *barrier)
7693 HOST_WIDE_INT offset = 0;
7694 Mnode *mp;
7696 minipool_barrier = barrier;
7698 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7700 mp->offset = offset;
7702 if (mp->refcount > 0)
7703 offset += mp->fix_size;
7707 /* Output the literal table. */
7708 static void
7709 dump_minipool (rtx scan)
7711 Mnode * mp;
7712 Mnode * nmp;
7713 int align64 = 0;
7715 if (ARM_DOUBLEWORD_ALIGN)
7716 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7717 if (mp->refcount > 0 && mp->fix_size == 8)
7719 align64 = 1;
7720 break;
7723 if (dump_file)
7724 fprintf (dump_file,
7725 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7726 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7728 scan = emit_label_after (gen_label_rtx (), scan);
7729 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7730 scan = emit_label_after (minipool_vector_label, scan);
7732 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7734 if (mp->refcount > 0)
7736 if (dump_file)
7738 fprintf (dump_file,
7739 ";; Offset %u, min %ld, max %ld ",
7740 (unsigned) mp->offset, (unsigned long) mp->min_address,
7741 (unsigned long) mp->max_address);
7742 arm_print_value (dump_file, mp->value);
7743 fputc ('\n', dump_file);
7746 switch (mp->fix_size)
7748 #ifdef HAVE_consttable_1
7749 case 1:
7750 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7751 break;
7753 #endif
7754 #ifdef HAVE_consttable_2
7755 case 2:
7756 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7757 break;
7759 #endif
7760 #ifdef HAVE_consttable_4
7761 case 4:
7762 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7763 break;
7765 #endif
7766 #ifdef HAVE_consttable_8
7767 case 8:
7768 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7769 break;
7771 #endif
7772 default:
7773 gcc_unreachable ();
7777 nmp = mp->next;
7778 free (mp);
7781 minipool_vector_head = minipool_vector_tail = NULL;
7782 scan = emit_insn_after (gen_consttable_end (), scan);
7783 scan = emit_barrier_after (scan);
7786 /* Return the cost of forcibly inserting a barrier after INSN. */
7787 static int
7788 arm_barrier_cost (rtx insn)
7790 /* Basing the location of the pool on the loop depth is preferable,
7791 but at the moment, the basic block information seems to be
7792 corrupted by this stage of the compilation. */
7793 int base_cost = 50;
7794 rtx next = next_nonnote_insn (insn);
7796 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7797 base_cost -= 20;
7799 switch (GET_CODE (insn))
7801 case CODE_LABEL:
7802 /* It will always be better to place the table before the label, rather
7803 than after it. */
7804 return 50;
7806 case INSN:
7807 case CALL_INSN:
7808 return base_cost;
7810 case JUMP_INSN:
7811 return base_cost - 10;
7813 default:
7814 return base_cost + 10;
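/* An illustrative trace: a JUMP_INSN whose next non-note insn is a
   CODE_LABEL costs 50 - 20 - 10 == 20, making it one of the cheapest
   places for create_fix_barrier below to force a barrier.  */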
7818 /* Find the best place in the insn stream in the range
7819 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7820 Create the barrier by inserting a jump and add a new fix entry for
7821 it. */
7822 static Mfix *
7823 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7825 HOST_WIDE_INT count = 0;
7826 rtx barrier;
7827 rtx from = fix->insn;
7828 rtx selected = from;
7829 int selected_cost;
7830 HOST_WIDE_INT selected_address;
7831 Mfix * new_fix;
7832 HOST_WIDE_INT max_count = max_address - fix->address;
7833 rtx label = gen_label_rtx ();
7835 selected_cost = arm_barrier_cost (from);
7836 selected_address = fix->address;
7838 while (from && count < max_count)
7840 rtx tmp;
7841 int new_cost;
7843 /* This code shouldn't have been called if there was a natural barrier
7844 within range. */
7845 gcc_assert (GET_CODE (from) != BARRIER);
7847 /* Count the length of this insn. */
7848 count += get_attr_length (from);
7850 /* If there is a jump table, add its length. */
7851 tmp = is_jump_table (from);
7852 if (tmp != NULL)
7854 count += get_jump_table_size (tmp);
7856 /* Jump tables aren't in a basic block, so base the cost on
7857 the dispatch insn. If we select this location, we will
7858 still put the pool after the table. */
7859 new_cost = arm_barrier_cost (from);
7861 if (count < max_count && new_cost <= selected_cost)
7863 selected = tmp;
7864 selected_cost = new_cost;
7865 selected_address = fix->address + count;
7868 /* Continue after the dispatch table. */
7869 from = NEXT_INSN (tmp);
7870 continue;
7873 new_cost = arm_barrier_cost (from);
7875 if (count < max_count && new_cost <= selected_cost)
7877 selected = from;
7878 selected_cost = new_cost;
7879 selected_address = fix->address + count;
7882 from = NEXT_INSN (from);
7885 /* Create a new JUMP_INSN that branches around a barrier. */
7886 from = emit_jump_insn_after (gen_jump (label), selected);
7887 JUMP_LABEL (from) = label;
7888 barrier = emit_barrier_after (from);
7889 emit_label_after (label, barrier);
7891 /* Create a minipool barrier entry for the new barrier. */
7892 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7893 new_fix->insn = barrier;
7894 new_fix->address = selected_address;
7895 new_fix->next = fix->next;
7896 fix->next = new_fix;
7898 return new_fix;
7901 /* Record that there is a natural barrier in the insn stream at
7902 ADDRESS. */
7903 static void
7904 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7906 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7908 fix->insn = insn;
7909 fix->address = address;
7911 fix->next = NULL;
7912 if (minipool_fix_head != NULL)
7913 minipool_fix_tail->next = fix;
7914 else
7915 minipool_fix_head = fix;
7917 minipool_fix_tail = fix;
7920 /* Record INSN, which will need fixing up to load a value from the
7921 minipool. ADDRESS is the offset of the insn since the start of the
7922 function; LOC is a pointer to the part of the insn which requires
7923 fixing; VALUE is the constant that must be loaded, which is of type
7924 MODE. */
7925 static void
7926 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7927 enum machine_mode mode, rtx value)
7929 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7931 #ifdef AOF_ASSEMBLER
7932 /* PIC symbol references need to be converted into offsets into the
7933 based area. */
7934 /* XXX This shouldn't be done here. */
7935 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7936 value = aof_pic_entry (value);
7937 #endif /* AOF_ASSEMBLER */
7939 fix->insn = insn;
7940 fix->address = address;
7941 fix->loc = loc;
7942 fix->mode = mode;
7943 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7944 fix->value = value;
7945 fix->forwards = get_attr_pool_range (insn);
7946 fix->backwards = get_attr_neg_pool_range (insn);
7947 fix->minipool = NULL;
7949 /* If an insn doesn't have a range defined for it, then it isn't
7950 expecting to be reworked by this code. Better to stop now than
7951 to generate duff assembly code. */
7952 gcc_assert (fix->forwards || fix->backwards);
7954 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7955 So there might be an empty word before the start of the pool.
7956 Hence we reduce the forward range by 4 to allow for this
7957 possibility. */
7958 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7959 fix->forwards -= 4;
7961 if (dump_file)
7963 fprintf (dump_file,
7964 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7965 GET_MODE_NAME (mode),
7966 INSN_UID (insn), (unsigned long) address,
7967 -1 * (long)fix->backwards, (long)fix->forwards);
7968 arm_print_value (dump_file, fix->value);
7969 fprintf (dump_file, "\n");
7972 /* Add it to the chain of fixes. */
7973 fix->next = NULL;
7975 if (minipool_fix_head != NULL)
7976 minipool_fix_tail->next = fix;
7977 else
7978 minipool_fix_head = fix;
7980 minipool_fix_tail = fix;
7983 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7984 Returns the number of insns needed, or 99 if we don't know how to
7985 do it. */
7987 arm_const_double_inline_cost (rtx val)
7989 rtx lowpart, highpart;
7990 enum machine_mode mode;
7992 mode = GET_MODE (val);
7994 if (mode == VOIDmode)
7995 mode = DImode;
7997 gcc_assert (GET_MODE_SIZE (mode) == 8);
7999 lowpart = gen_lowpart (SImode, val);
8000 highpart = gen_highpart_mode (SImode, mode, val);
8002 gcc_assert (GET_CODE (lowpart) == CONST_INT);
8003 gcc_assert (GET_CODE (highpart) == CONST_INT);
8005 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
8006 NULL_RTX, NULL_RTX, 0, 0)
8007 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
8008 NULL_RTX, NULL_RTX, 0, 0));
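/* A worked example (a sketch, not generated output): for the DImode
   constant 0x0000001200000034, the low word 0x34 and the high word
   0x12 are each valid 8-bit ARM immediates, so each half costs one
   insn and the cost computed above is 1 + 1 = 2. */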
8011 /* Return true if it is worthwhile to split a 64-bit constant into two
8012 32-bit operations. This is the case if optimizing for size, or
8013 if we have load delay slots, or if one 32-bit part can be done with
8014 a single data operation. */
8015 bool
8016 arm_const_double_by_parts (rtx val)
8018 enum machine_mode mode = GET_MODE (val);
8019 rtx part;
8021 if (optimize_size || arm_ld_sched)
8022 return true;
8024 if (mode == VOIDmode)
8025 mode = DImode;
8027 part = gen_highpart_mode (SImode, mode, val);
8029 gcc_assert (GET_CODE (part) == CONST_INT);
8031 if (const_ok_for_arm (INTVAL (part))
8032 || const_ok_for_arm (~INTVAL (part)))
8033 return true;
8035 part = gen_lowpart (SImode, val);
8037 gcc_assert (GET_CODE (part) == CONST_INT);
8039 if (const_ok_for_arm (INTVAL (part))
8040 || const_ok_for_arm (~INTVAL (part)))
8041 return true;
8043 return false;
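/* Example: a constant whose high word is 0xff000000 passes the test
   above, since const_ok_for_arm accepts any 8-bit value rotated by an
   even amount, so such a constant is worth splitting into two SImode
   operations even without load delay slots. */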
8046 /* Scan INSN and note any of its operands that need fixing.
8047 If DO_PUSHES is false we do not actually push any of the fixups
8048 needed. The function returns TRUE if any fixups were needed/pushed.
8049 This is used by arm_memory_load_p() which needs to know about loads
8050 of constants that will be converted into minipool loads. */
8051 static bool
8052 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
8054 bool result = false;
8055 int opno;
8057 extract_insn (insn);
8059 if (!constrain_operands (1))
8060 fatal_insn_not_found (insn);
8062 if (recog_data.n_alternatives == 0)
8063 return false;
8065 /* Fill in recog_op_alt with information about the constraints of
8066 this insn. */
8067 preprocess_constraints ();
8069 for (opno = 0; opno < recog_data.n_operands; opno++)
8071 /* Things we need to fix can only occur in inputs. */
8072 if (recog_data.operand_type[opno] != OP_IN)
8073 continue;
8075 /* If this alternative is a memory reference, then any mention
8076 of constants in this alternative is really to fool reload
8077 into allowing us to accept one there. We need to fix them up
8078 now so that we output the right code. */
8079 if (recog_op_alt[opno][which_alternative].memory_ok)
8081 rtx op = recog_data.operand[opno];
8083 if (CONSTANT_P (op))
8085 if (do_pushes)
8086 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
8087 recog_data.operand_mode[opno], op);
8088 result = true;
8090 else if (GET_CODE (op) == MEM
8091 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
8092 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
8094 if (do_pushes)
8096 rtx cop = avoid_constant_pool_reference (op);
8098 /* Casting the address of something to a mode narrower
8099 than a word can cause avoid_constant_pool_reference()
8100 to return the pool reference itself. That's no good to
8101 us here. Let's just hope that we can use the
8102 constant pool value directly. */
8103 if (op == cop)
8104 cop = get_pool_constant (XEXP (op, 0));
8106 push_minipool_fix (insn, address,
8107 recog_data.operand_loc[opno],
8108 recog_data.operand_mode[opno], cop);
8111 result = true;
8116 return result;
8119 /* GCC puts the pool in the wrong place for ARM, since we can only
8120 load addresses a limited distance around the pc. We do some
8121 special munging to move the constant pool values to the correct
8122 point in the code. */
8123 static void
8124 arm_reorg (void)
8126 rtx insn;
8127 HOST_WIDE_INT address = 0;
8128 Mfix * fix;
8130 minipool_fix_head = minipool_fix_tail = NULL;
8132 /* The first insn must always be a note, or the code below won't
8133 scan it properly. */
8134 insn = get_insns ();
8135 gcc_assert (GET_CODE (insn) == NOTE);
8137 /* Scan all the insns and record the operands that will need fixing. */
8138 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
8140 if (TARGET_CIRRUS_FIX_INVALID_INSNS
8141 && (arm_cirrus_insn_p (insn)
8142 || GET_CODE (insn) == JUMP_INSN
8143 || arm_memory_load_p (insn)))
8144 cirrus_reorg (insn);
8146 if (GET_CODE (insn) == BARRIER)
8147 push_minipool_barrier (insn, address);
8148 else if (INSN_P (insn))
8150 rtx table;
8152 note_invalid_constants (insn, address, true);
8153 address += get_attr_length (insn);
8155 /* If the insn is a vector jump, add the size of the table
8156 and skip the table. */
8157 if ((table = is_jump_table (insn)) != NULL)
8159 address += get_jump_table_size (table);
8160 insn = table;
8165 fix = minipool_fix_head;
8167 /* Now scan the fixups and perform the required changes. */
8168 while (fix)
8170 Mfix * ftmp;
8171 Mfix * fdel;
8172 Mfix * last_added_fix;
8173 Mfix * last_barrier = NULL;
8174 Mfix * this_fix;
8176 /* Skip any further barriers before the next fix. */
8177 while (fix && GET_CODE (fix->insn) == BARRIER)
8178 fix = fix->next;
8180 /* No more fixes. */
8181 if (fix == NULL)
8182 break;
8184 last_added_fix = NULL;
8186 for (ftmp = fix; ftmp; ftmp = ftmp->next)
8188 if (GET_CODE (ftmp->insn) == BARRIER)
8190 if (ftmp->address >= minipool_vector_head->max_address)
8191 break;
8193 last_barrier = ftmp;
8195 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
8196 break;
8198 last_added_fix = ftmp; /* Keep track of the last fix added. */
8201 /* If we found a barrier, drop back to that; any fixes that we
8202 could have reached but come after the barrier will now go in
8203 the next mini-pool. */
8204 if (last_barrier != NULL)
8206 /* Reduce the refcount for those fixes that won't go into this
8207 pool after all. */
8208 for (fdel = last_barrier->next;
8209 fdel && fdel != ftmp;
8210 fdel = fdel->next)
8212 fdel->minipool->refcount--;
8213 fdel->minipool = NULL;
8216 ftmp = last_barrier;
8218 else
8220 /* ftmp is the first fix that we can't fit into this pool, and
8221 there are no natural barriers that we could use. Insert a
8222 new barrier in the code somewhere between the previous
8223 fix and this one, and arrange to jump around it. */
8224 HOST_WIDE_INT max_address;
8226 /* The last item on the list of fixes must be a barrier, so
8227 we can never run off the end of the list of fixes without
8228 last_barrier being set. */
8229 gcc_assert (ftmp);
8231 max_address = minipool_vector_head->max_address;
8232 /* Check that there isn't another fix that is in range that
8233 we couldn't fit into this pool because the pool was
8234 already too large: we need to put the pool before such an
8235 instruction. */
8236 if (ftmp->address < max_address)
8237 max_address = ftmp->address;
8239 last_barrier = create_fix_barrier (last_added_fix, max_address);
8242 assign_minipool_offsets (last_barrier);
8244 while (ftmp)
8246 if (GET_CODE (ftmp->insn) != BARRIER
8247 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
8248 == NULL))
8249 break;
8251 ftmp = ftmp->next;
8254 /* Scan over the fixes we have identified for this pool, fixing them
8255 up and adding the constants to the pool itself. */
8256 for (this_fix = fix; this_fix && ftmp != this_fix;
8257 this_fix = this_fix->next)
8258 if (GET_CODE (this_fix->insn) != BARRIER)
8260 rtx addr
8261 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
8262 minipool_vector_label),
8263 this_fix->minipool->offset);
8264 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
8267 dump_minipool (last_barrier->insn);
8268 fix = ftmp;
8271 /* From now on we must synthesize any constants that we can't handle
8272 directly. This can happen if the RTL gets split during final
8273 instruction generation. */
8274 after_arm_reorg = 1;
8276 /* Free the minipool memory. */
8277 obstack_free (&minipool_obstack, minipool_startobj);
8280 /* Routines to output assembly language. */
8282 /* If the rtx is the correct value then return the string of the number.
8283 In this way we can ensure that valid double constants are generated even
8284 when cross compiling. */
8285 const char *
8286 fp_immediate_constant (rtx x)
8288 REAL_VALUE_TYPE r;
8289 int i;
8291 if (!fp_consts_inited)
8292 init_fp_table ();
8294 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8295 for (i = 0; i < 8; i++)
8296 if (REAL_VALUES_EQUAL (r, values_fp[i]))
8297 return strings_fp[i];
8299 gcc_unreachable ();
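/* Illustration, assuming the usual FPA immediate table set up by
   init_fp_table (0, 1, 2, 3, 4, 5, 0.5 and 10): a CONST_DOUBLE
   holding 2.0 matches one of the eight entries and comes back as the
   string "2", ready to be printed as an FPA immediate. */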
8302 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
8303 static const char *
8304 fp_const_from_val (REAL_VALUE_TYPE *r)
8306 int i;
8308 if (!fp_consts_inited)
8309 init_fp_table ();
8311 for (i = 0; i < 8; i++)
8312 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
8313 return strings_fp[i];
8315 gcc_unreachable ();
8318 /* Output the operands of an LDM/STM instruction to STREAM.
8319 MASK is the ARM register set mask of which only bits 0-15 are important.
8320 REG is the base register, either the frame pointer or the stack pointer,
8321 INSTR is the possibly suffixed load or store instruction. */
8323 static void
8324 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
8325 unsigned long mask)
8327 unsigned i;
8328 bool not_first = FALSE;
8330 fputc ('\t', stream);
8331 asm_fprintf (stream, instr, reg);
8332 fputs (", {", stream);
8334 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8335 if (mask & (1 << i))
8337 if (not_first)
8338 fprintf (stream, ", ");
8340 asm_fprintf (stream, "%r", i);
8341 not_first = TRUE;
8344 fprintf (stream, "}\n");
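/* Example (a sketch): called with INSTR "ldmfd\t%r!", REG = SP and a
   MASK covering r4, r5 and lr, the routine above prints

	ldmfd	sp!, {r4, r5, lr}
*/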
8348 /* Output a FLDMX instruction to STREAM.
8349 BASE is the register containing the address.
8350 REG and COUNT specify the register range.
8351 Extra registers may be added to avoid hardware bugs. */
8353 static void
8354 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
8356 int i;
8358 /* Work around the ARM10 VFPr1 bug. */
8359 if (count == 2 && !arm_arch6)
8361 if (reg == 15)
8362 reg--;
8363 count++;
8366 fputc ('\t', stream);
8367 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
8369 for (i = reg; i < reg + count; i++)
8371 if (i > reg)
8372 fputs (", ", stream);
8373 asm_fprintf (stream, "d%d", i);
8375 fputs ("}\n", stream);
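/* Example: arm_output_fldmx (f, IP_REGNUM, 8, 2) on a pre-ARMv6 core
   hits the VFPr1 workaround above (COUNT is bumped to 3) and prints

	fldmfdx	ip!, {d8, d9, d10}

   while an ARMv6 core would get just {d8, d9}. */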
8380 /* Output the assembly for a VFP store multiple (fstmfdx). */
8382 const char *
8383 vfp_output_fstmx (rtx * operands)
8385 char pattern[100];
8386 int p;
8387 int base;
8388 int i;
8390 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8391 p = strlen (pattern);
8393 gcc_assert (GET_CODE (operands[1]) == REG);
8395 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8396 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8398 p += sprintf (&pattern[p], ", d%d", base + i);
8400 strcpy (&pattern[p], "}");
8402 output_asm_insn (pattern, operands);
8403 return "";
8407 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
8408 number of bytes pushed. */
8410 static int
8411 vfp_emit_fstmx (int base_reg, int count)
8413 rtx par;
8414 rtx dwarf;
8415 rtx tmp, reg;
8416 int i;
8418 /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
8419 register pairs are stored by a store multiple insn. We avoid this
8420 by pushing an extra pair. */
8421 if (count == 2 && !arm_arch6)
8423 if (base_reg == LAST_VFP_REGNUM - 3)
8424 base_reg -= 2;
8425 count++;
8428 /* ??? The frame layout is implementation defined. We describe
8429 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8430 We really need some way of representing the whole block so that the
8431 unwinder can figure it out at runtime. */
8432 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8433 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8435 reg = gen_rtx_REG (DFmode, base_reg);
8436 base_reg += 2;
8438 XVECEXP (par, 0, 0)
8439 = gen_rtx_SET (VOIDmode,
8440 gen_frame_mem (BLKmode,
8441 gen_rtx_PRE_DEC (BLKmode,
8442 stack_pointer_rtx)),
8443 gen_rtx_UNSPEC (BLKmode,
8444 gen_rtvec (1, reg),
8445 UNSPEC_PUSH_MULT));
8447 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8448 plus_constant (stack_pointer_rtx, -(count * 8 + 4)));
8449 RTX_FRAME_RELATED_P (tmp) = 1;
8450 XVECEXP (dwarf, 0, 0) = tmp;
8452 tmp = gen_rtx_SET (VOIDmode,
8453 gen_frame_mem (DFmode, stack_pointer_rtx),
8454 reg);
8455 RTX_FRAME_RELATED_P (tmp) = 1;
8456 XVECEXP (dwarf, 0, 1) = tmp;
8458 for (i = 1; i < count; i++)
8460 reg = gen_rtx_REG (DFmode, base_reg);
8461 base_reg += 2;
8462 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8464 tmp = gen_rtx_SET (VOIDmode,
8465 gen_frame_mem (DFmode,
8466 plus_constant (stack_pointer_rtx,
8467 i * 8)),
8468 reg);
8469 RTX_FRAME_RELATED_P (tmp) = 1;
8470 XVECEXP (dwarf, 0, i + 1) = tmp;
8473 par = emit_insn (par);
8474 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8475 REG_NOTES (par));
8476 RTX_FRAME_RELATED_P (par) = 1;
8478 return count * 8 + 4;
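/* The size arithmetic above: each of COUNT double registers occupies
   8 bytes and FSTMX format 1 appends one pad word, so pushing three
   D registers reserves 3 * 8 + 4 = 28 bytes of stack. */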
8482 /* Output a 'call' insn. */
8483 const char *
8484 output_call (rtx *operands)
8486 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8488 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8489 if (REGNO (operands[0]) == LR_REGNUM)
8491 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8492 output_asm_insn ("mov%?\t%0, %|lr", operands);
8495 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8497 if (TARGET_INTERWORK || arm_arch4t)
8498 output_asm_insn ("bx%?\t%0", operands);
8499 else
8500 output_asm_insn ("mov%?\t%|pc, %0", operands);
8502 return "";
8505 /* Output a 'call' insn that is a reference in memory. */
8506 const char *
8507 output_call_mem (rtx *operands)
8509 if (TARGET_INTERWORK && !arm_arch5)
8511 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8512 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8513 output_asm_insn ("bx%?\t%|ip", operands);
8515 else if (regno_use_in (LR_REGNUM, operands[0]))
8517 /* LR is used in the memory address. We load the address in the
8518 first instruction. It's safe to use IP as the target of the
8519 load since the call will kill it anyway. */
8520 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8521 if (arm_arch5)
8522 output_asm_insn ("blx%?\t%|ip", operands);
8523 else
8525 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8526 if (arm_arch4t)
8527 output_asm_insn ("bx%?\t%|ip", operands);
8528 else
8529 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8532 else
8534 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8535 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8538 return "";
8542 /* Output a move from arm registers to an fpa register.
8543 OPERANDS[0] is an fpa register.
8544 OPERANDS[1] is the first register of an arm register pair. */
8545 const char *
8546 output_mov_long_double_fpa_from_arm (rtx *operands)
8548 int arm_reg0 = REGNO (operands[1]);
8549 rtx ops[3];
8551 gcc_assert (arm_reg0 != IP_REGNUM);
8553 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8554 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8555 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8557 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8558 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8560 return "";
8563 /* Output a move from an fpa register to arm registers.
8564 OPERANDS[0] is the first register of an arm register pair.
8565 OPERANDS[1] is an fpa register. */
8566 const char *
8567 output_mov_long_double_arm_from_fpa (rtx *operands)
8569 int arm_reg0 = REGNO (operands[0]);
8570 rtx ops[3];
8572 gcc_assert (arm_reg0 != IP_REGNUM);
8574 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8575 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8576 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8578 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8579 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8580 return "";
8583 /* Output a move from arm registers to arm registers of a long double.
8584 OPERANDS[0] is the destination.
8585 OPERANDS[1] is the source. */
8586 const char *
8587 output_mov_long_double_arm_from_arm (rtx *operands)
8589 /* We have to be careful here because the two might overlap. */
8590 int dest_start = REGNO (operands[0]);
8591 int src_start = REGNO (operands[1]);
8592 rtx ops[2];
8593 int i;
8595 if (dest_start < src_start)
8597 for (i = 0; i < 3; i++)
8599 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8600 ops[1] = gen_rtx_REG (SImode, src_start + i);
8601 output_asm_insn ("mov%?\t%0, %1", ops);
8604 else
8606 for (i = 2; i >= 0; i--)
8608 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8609 ops[1] = gen_rtx_REG (SImode, src_start + i);
8610 output_asm_insn ("mov%?\t%0, %1", ops);
8614 return "";
8618 /* Output a move from arm registers to an fpa register.
8619 OPERANDS[0] is an fpa register.
8620 OPERANDS[1] is the first register of an arm register pair. */
8621 const char *
8622 output_mov_double_fpa_from_arm (rtx *operands)
8624 int arm_reg0 = REGNO (operands[1]);
8625 rtx ops[2];
8627 gcc_assert (arm_reg0 != IP_REGNUM);
8629 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8630 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8631 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8632 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8633 return "";
8636 /* Output a move from an fpa register to arm registers.
8637 OPERANDS[0] is the first register of an arm register pair.
8638 OPERANDS[1] is an fpa register. */
8639 const char *
8640 output_mov_double_arm_from_fpa (rtx *operands)
8642 int arm_reg0 = REGNO (operands[0]);
8643 rtx ops[2];
8645 gcc_assert (arm_reg0 != IP_REGNUM);
8647 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8648 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8649 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8650 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8651 return "";
8654 /* Output a move between double words.
8655 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8656 or MEM<-REG and all MEMs must be offsettable addresses. */
8657 const char *
8658 output_move_double (rtx *operands)
8660 enum rtx_code code0 = GET_CODE (operands[0]);
8661 enum rtx_code code1 = GET_CODE (operands[1]);
8662 rtx otherops[3];
8664 if (code0 == REG)
8666 int reg0 = REGNO (operands[0]);
8668 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8670 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8672 switch (GET_CODE (XEXP (operands[1], 0)))
8674 case REG:
8675 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8676 break;
8678 case PRE_INC:
8679 gcc_assert (TARGET_LDRD);
8680 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8681 break;
8683 case PRE_DEC:
8684 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8685 break;
8687 case POST_INC:
8688 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8689 break;
8691 case POST_DEC:
8692 gcc_assert (TARGET_LDRD);
8693 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8694 break;
8696 case PRE_MODIFY:
8697 case POST_MODIFY:
8698 otherops[0] = operands[0];
8699 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8700 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8702 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8704 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8706 /* Registers overlap so split out the increment. */
8707 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8708 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8710 else
8711 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8713 else
8715 /* We only allow constant increments, so this is safe. */
8716 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8718 break;
8720 case LABEL_REF:
8721 case CONST:
8722 output_asm_insn ("adr%?\t%0, %1", operands);
8723 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8724 break;
8726 default:
8727 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8728 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8730 otherops[0] = operands[0];
8731 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8732 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8734 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8736 if (GET_CODE (otherops[2]) == CONST_INT)
8738 switch ((int) INTVAL (otherops[2]))
8740 case -8:
8741 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8742 return "";
8743 case -4:
8744 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8745 return "";
8746 case 4:
8747 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8748 return "";
8751 if (TARGET_LDRD
8752 && (GET_CODE (otherops[2]) == REG
8753 || (GET_CODE (otherops[2]) == CONST_INT
8754 && INTVAL (otherops[2]) > -256
8755 && INTVAL (otherops[2]) < 256)))
8757 if (reg_overlap_mentioned_p (otherops[0],
8758 otherops[2]))
8760 /* Swap base and index registers over to
8761 avoid a conflict. */
8762 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8763 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8765 /* If both registers conflict, it will usually
8766 have been fixed by a splitter. */
8767 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8769 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8770 output_asm_insn ("ldr%?d\t%0, [%1]",
8771 otherops);
8773 else
8774 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8775 return "";
8778 if (GET_CODE (otherops[2]) == CONST_INT)
8780 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8781 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8782 else
8783 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8785 else
8786 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8788 else
8789 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8791 return "ldm%?ia\t%0, %M0";
8793 else
8795 otherops[1] = adjust_address (operands[1], SImode, 4);
8796 /* Take care of overlapping base/data reg. */
8797 if (reg_mentioned_p (operands[0], operands[1]))
8799 output_asm_insn ("ldr%?\t%0, %1", otherops);
8800 output_asm_insn ("ldr%?\t%0, %1", operands);
8802 else
8804 output_asm_insn ("ldr%?\t%0, %1", operands);
8805 output_asm_insn ("ldr%?\t%0, %1", otherops);
8810 else
8812 /* Constraints should ensure this. */
8813 gcc_assert (code0 == MEM && code1 == REG);
8814 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8816 switch (GET_CODE (XEXP (operands[0], 0)))
8818 case REG:
8819 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8820 break;
8822 case PRE_INC:
8823 gcc_assert (TARGET_LDRD);
8824 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8825 break;
8827 case PRE_DEC:
8828 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8829 break;
8831 case POST_INC:
8832 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8833 break;
8835 case POST_DEC:
8836 gcc_assert (TARGET_LDRD);
8837 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8838 break;
8840 case PRE_MODIFY:
8841 case POST_MODIFY:
8842 otherops[0] = operands[1];
8843 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8844 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8846 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8847 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8848 else
8849 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8850 break;
8852 case PLUS:
8853 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8854 if (GET_CODE (otherops[2]) == CONST_INT)
8856 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8858 case -8:
8859 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8860 return "";
8862 case -4:
8863 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8864 return "";
8866 case 4:
8867 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8868 return "";
8871 if (TARGET_LDRD
8872 && (GET_CODE (otherops[2]) == REG
8873 || (GET_CODE (otherops[2]) == CONST_INT
8874 && INTVAL (otherops[2]) > -256
8875 && INTVAL (otherops[2]) < 256)))
8877 otherops[0] = operands[1];
8878 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8879 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8880 return "";
8882 /* Fall through */
8884 default:
8885 otherops[0] = adjust_address (operands[0], SImode, 4);
8886 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8887 output_asm_insn ("str%?\t%1, %0", operands);
8888 output_asm_insn ("str%?\t%1, %0", otherops);
8892 return "";
8895 /* Output an ADD r, s, #n where n may be too big for one instruction.
8896 If n is zero and the destination register equals the source, output nothing.
8897 const char *
8898 output_add_immediate (rtx *operands)
8900 HOST_WIDE_INT n = INTVAL (operands[2]);
8902 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8904 if (n < 0)
8905 output_multi_immediate (operands,
8906 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8907 -n);
8908 else
8909 output_multi_immediate (operands,
8910 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8914 return "";
8917 /* Output a multiple immediate operation.
8918 OPERANDS is the vector of operands referred to in the output patterns.
8919 INSTR1 is the output pattern to use for the first constant.
8920 INSTR2 is the output pattern to use for subsequent constants.
8921 IMMED_OP is the index of the constant slot in OPERANDS.
8922 N is the constant value. */
8923 static const char *
8924 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8925 int immed_op, HOST_WIDE_INT n)
8927 #if HOST_BITS_PER_WIDE_INT > 32
8928 n &= 0xffffffff;
8929 #endif
8931 if (n == 0)
8933 /* Quick and easy output. */
8934 operands[immed_op] = const0_rtx;
8935 output_asm_insn (instr1, operands);
8937 else
8939 int i;
8940 const char * instr = instr1;
8942 /* Note that n is never zero here (which would give no output). */
8943 for (i = 0; i < 32; i += 2)
8945 if (n & (3 << i))
8947 operands[immed_op] = GEN_INT (n & (255 << i));
8948 output_asm_insn (instr, operands);
8949 instr = instr2;
8950 i += 6;
8955 return "";
8958 /* Return the appropriate ARM instruction for the operation code.
8959 The returned result should not be overwritten. OP is the rtx of the
8960 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8961 was shifted. */
8962 const char *
8963 arithmetic_instr (rtx op, int shift_first_arg)
8965 switch (GET_CODE (op))
8967 case PLUS:
8968 return "add";
8970 case MINUS:
8971 return shift_first_arg ? "rsb" : "sub";
8973 case IOR:
8974 return "orr";
8976 case XOR:
8977 return "eor";
8979 case AND:
8980 return "and";
8982 default:
8983 gcc_unreachable ();
8987 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8988 for the operation code. The returned result should not be overwritten.
8989 OP is the rtx code of the shift.
8990 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8991 constant shift amount otherwise. */
8992 static const char *
8993 shift_op (rtx op, HOST_WIDE_INT *amountp)
8995 const char * mnem;
8996 enum rtx_code code = GET_CODE (op);
8998 switch (GET_CODE (XEXP (op, 1)))
9000 case REG:
9001 case SUBREG:
9002 *amountp = -1;
9003 break;
9005 case CONST_INT:
9006 *amountp = INTVAL (XEXP (op, 1));
9007 break;
9009 default:
9010 gcc_unreachable ();
9013 switch (code)
9015 case ASHIFT:
9016 mnem = "asl";
9017 break;
9019 case ASHIFTRT:
9020 mnem = "asr";
9021 break;
9023 case LSHIFTRT:
9024 mnem = "lsr";
9025 break;
9027 case ROTATE:
9028 gcc_assert (*amountp != -1);
9029 *amountp = 32 - *amountp;
9031 /* Fall through. */
9033 case ROTATERT:
9034 mnem = "ror";
9035 break;
9037 case MULT:
9038 /* We never have to worry about the amount being other than a
9039 power of 2, since this case can never be reloaded from a reg. */
9040 gcc_assert (*amountp != -1);
9041 *amountp = int_log2 (*amountp);
9042 return "asl";
9044 default:
9045 gcc_unreachable ();
9048 if (*amountp != -1)
9050 /* This is not 100% correct, but follows from the desire to merge
9051 multiplication by a power of 2 with the recognizer for a
9052 shift. >=32 is not a valid shift for "asl", so we must try to
9053 output a shift that produces the correct arithmetical result.
9054 Using lsr #32 is identical except for the fact that the carry bit
9055 is not set correctly if we set the flags; but we never use the
9056 carry bit from such an operation, so we can ignore that. */
9057 if (code == ROTATERT)
9058 /* Rotate is just modulo 32. */
9059 *amountp &= 31;
9060 else if (*amountp != (*amountp & 31))
9062 if (code == ASHIFT)
9063 mnem = "lsr";
9064 *amountp = 32;
9067 /* Shifts of 0 are no-ops. */
9068 if (*amountp == 0)
9069 return NULL;
9072 return mnem;
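/* Examples: (ashift x (const_int 3)) yields "asl" with *AMOUNTP = 3;
   (mult x (const_int 8)) is reduced to the same "asl"/3 pair via
   int_log2; and (rotate x (const_int 8)) becomes "ror" with
   *AMOUNTP = 24, since the hardware only has rotate-right. */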
9075 /* Obtain the shift count for the power of two POWER. */
9077 static HOST_WIDE_INT
9078 int_log2 (HOST_WIDE_INT power)
9080 HOST_WIDE_INT shift = 0;
9082 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
9084 gcc_assert (shift <= 31);
9085 shift++;
9088 return shift;
9091 /* Output a .ascii pseudo-op, keeping track of lengths. This is
9092 because /bin/as is horribly restrictive. The judgement about
9093 whether or not each character is 'printable' (and can be output as
9094 is) or not (and must be printed with an octal escape) must be made
9095 with reference to the *host* character set -- the situation is
9096 similar to that discussed in the comments above pp_c_char in
9097 c-pretty-print.c. */
9099 #define MAX_ASCII_LEN 51
9101 void
9102 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
9104 int i;
9105 int len_so_far = 0;
9107 fputs ("\t.ascii\t\"", stream);
9109 for (i = 0; i < len; i++)
9111 int c = p[i];
9113 if (len_so_far >= MAX_ASCII_LEN)
9115 fputs ("\"\n\t.ascii\t\"", stream);
9116 len_so_far = 0;
9119 if (ISPRINT (c))
9121 if (c == '\\' || c == '\"')
9123 putc ('\\', stream);
9124 len_so_far++;
9126 putc (c, stream);
9127 len_so_far++;
9129 else
9131 fprintf (stream, "\\%03o", c);
9132 len_so_far += 4;
9136 fputs ("\"\n", stream);
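/* Example: for the three bytes 'O', 'K' and '\n' the routine above
   prints

	.ascii	"OK\012"

   the newline being octal-escaped, and a fresh ".ascii" directive is
   started whenever a line reaches MAX_ASCII_LEN characters. */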
9139 /* Compute the register save mask for registers 0 through 12
9140 inclusive. This code is used by arm_compute_save_reg_mask. */
9142 static unsigned long
9143 arm_compute_save_reg0_reg12_mask (void)
9145 unsigned long func_type = arm_current_func_type ();
9146 unsigned long save_reg_mask = 0;
9147 unsigned int reg;
9149 if (IS_INTERRUPT (func_type))
9151 unsigned int max_reg;
9152 /* Interrupt functions must not corrupt any registers,
9153 even call clobbered ones. If this is a leaf function
9154 we can just examine the registers used by the RTL, but
9155 otherwise we have to assume that whatever function is
9156 called might clobber anything, and so we have to save
9157 all the call-clobbered registers as well. */
9158 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
9159 /* FIQ handlers have registers r8 - r12 banked, so
9160 we only need to check r0 - r7. Normal ISRs only
9161 bank r14 and r15, so we must check up to r12.
9162 r13 is the stack pointer which is always preserved,
9163 so we do not need to consider it here. */
9164 max_reg = 7;
9165 else
9166 max_reg = 12;
9168 for (reg = 0; reg <= max_reg; reg++)
9169 if (regs_ever_live[reg]
9170 || (! current_function_is_leaf && call_used_regs [reg]))
9171 save_reg_mask |= (1 << reg);
9173 /* Also save the pic base register if necessary. */
9174 if (flag_pic
9175 && !TARGET_SINGLE_PIC_BASE
9176 && current_function_uses_pic_offset_table)
9177 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9179 else
9181 /* In the normal case we only need to save those registers
9182 which are call saved and which are used by this function. */
9183 for (reg = 0; reg <= 10; reg++)
9184 if (regs_ever_live[reg] && ! call_used_regs [reg])
9185 save_reg_mask |= (1 << reg);
9187 /* Handle the frame pointer as a special case. */
9188 if (! TARGET_APCS_FRAME
9189 && ! frame_pointer_needed
9190 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
9191 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9192 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9194 /* If we aren't loading the PIC register,
9195 don't stack it even though it may be live. */
9196 if (flag_pic
9197 && !TARGET_SINGLE_PIC_BASE
9198 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
9199 || current_function_uses_pic_offset_table))
9200 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9203 /* Save registers so the exception handler can modify them. */
9204 if (current_function_calls_eh_return)
9206 unsigned int i;
9208 for (i = 0; ; i++)
9210 reg = EH_RETURN_DATA_REGNO (i);
9211 if (reg == INVALID_REGNUM)
9212 break;
9213 save_reg_mask |= 1 << reg;
9217 return save_reg_mask;
9220 /* Compute a bit mask of which registers need to be
9221 saved on the stack for the current function. */
9223 static unsigned long
9224 arm_compute_save_reg_mask (void)
9226 unsigned int save_reg_mask = 0;
9227 unsigned long func_type = arm_current_func_type ();
9229 if (IS_NAKED (func_type))
9230 /* This should never really happen. */
9231 return 0;
9233 /* If we are creating a stack frame, then we must save the frame pointer,
9234 IP (which will hold the old stack pointer), LR and the PC. */
9235 if (frame_pointer_needed)
9236 save_reg_mask |=
9237 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9238 | (1 << IP_REGNUM)
9239 | (1 << LR_REGNUM)
9240 | (1 << PC_REGNUM);
9242 /* Volatile functions do not return, so there
9243 is no need to save any other registers. */
9244 if (IS_VOLATILE (func_type))
9245 return save_reg_mask;
9247 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9249 /* Decide if we need to save the link register.
9250 Interrupt routines have their own banked link register,
9251 so they never need to save it.
9252 Otherwise if we do not use the link register we do not need to save
9253 it. If we are pushing other registers onto the stack however, we
9254 can save an instruction in the epilogue by pushing the link register
9255 now and then popping it back into the PC. This incurs extra memory
9256 accesses though, so we only do it when optimizing for size, and only
9257 if we know that we will not need a fancy return sequence. */
9258 if (regs_ever_live [LR_REGNUM]
9259 || (save_reg_mask
9260 && optimize_size
9261 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9262 && !current_function_calls_eh_return))
9263 save_reg_mask |= 1 << LR_REGNUM;
9265 if (cfun->machine->lr_save_eliminated)
9266 save_reg_mask &= ~ (1 << LR_REGNUM);
9268 if (TARGET_REALLY_IWMMXT
9269 && ((bit_count (save_reg_mask)
9270 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
9272 unsigned int reg;
9274 /* The total number of registers that are going to be pushed
9275 onto the stack is odd. We need to ensure that the stack
9276 is 64-bit aligned before we start to save iWMMXt registers,
9277 and also before we start to create locals. (A local variable
9278 might be a double or long long which we will load/store using
9279 an iWMMXt instruction). Therefore we need to push another
9280 ARM register, so that the stack will be 64-bit aligned. We
9281 try to avoid using the arg registers (r0 -r3) as they might be
9282 used to pass values in a tail call. */
9283 for (reg = 4; reg <= 12; reg++)
9284 if ((save_reg_mask & (1 << reg)) == 0)
9285 break;
9287 if (reg <= 12)
9288 save_reg_mask |= (1 << reg);
9289 else
9291 cfun->machine->sibcall_blocked = 1;
9292 save_reg_mask |= (1 << 3);
9296 return save_reg_mask;
9300 /* Compute a bit mask of which registers need to be
9301 saved on the stack for the current function. */
9302 static unsigned long
9303 thumb_compute_save_reg_mask (void)
9305 unsigned long mask;
9306 unsigned reg;
9308 mask = 0;
9309 for (reg = 0; reg < 12; reg ++)
9310 if (regs_ever_live[reg] && !call_used_regs[reg])
9311 mask |= 1 << reg;
9313 if (flag_pic
9314 && !TARGET_SINGLE_PIC_BASE
9315 && current_function_uses_pic_offset_table)
9316 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9318 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
9319 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
9320 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
9322 /* LR will also be pushed if any lo regs are pushed. */
9323 if (mask & 0xff || thumb_force_lr_save ())
9324 mask |= (1 << LR_REGNUM);
9326 /* Make sure we have a low work register if we need one.
9327 We will need one if we are going to push a high register,
9328 but we are not currently intending to push a low register. */
9329 if ((mask & 0xff) == 0
9330 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9332 /* Use thumb_find_work_register to choose which register
9333 we will use. If the register is live then we will
9334 have to push it. Use LAST_LO_REGNUM as our fallback
9335 choice for the register to select. */
9336 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9338 if (! call_used_regs[reg])
9339 mask |= 1 << reg;
9342 return mask;
9346 /* Return the number of bytes required to save VFP registers. */
9347 static int
9348 arm_get_vfp_saved_size (void)
9350 unsigned int regno;
9351 int count;
9352 int saved;
9354 saved = 0;
9355 /* Space for saved VFP registers. */
9356 if (TARGET_HARD_FLOAT && TARGET_VFP)
9358 count = 0;
9359 for (regno = FIRST_VFP_REGNUM;
9360 regno < LAST_VFP_REGNUM;
9361 regno += 2)
9363 if ((!regs_ever_live[regno] || call_used_regs[regno])
9364 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9366 if (count > 0)
9368 /* Work around the ARM10 VFPr1 bug. */
9369 if (count == 2 && !arm_arch6)
9370 count++;
9371 saved += count * 8 + 4;
9373 count = 0;
9375 else
9376 count++;
9378 if (count > 0)
9380 if (count == 2 && !arm_arch6)
9381 count++;
9382 saved += count * 8 + 4;
9385 return saved;
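/* Example of the arithmetic: if d8, d9 and d10 are the only live
   call-saved VFP registers, they form one contiguous block and the
   function returns 3 * 8 + 4 = 28 bytes; a block of exactly two
   registers on a pre-ARMv6 core is first padded to three by the
   VFPr1 workaround. */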
9389 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9390 everything bar the final return instruction. */
9391 const char *
9392 output_return_instruction (rtx operand, int really_return, int reverse)
9394 char conditional[10];
9395 char instr[100];
9396 unsigned reg;
9397 unsigned long live_regs_mask;
9398 unsigned long func_type;
9399 arm_stack_offsets *offsets;
9401 func_type = arm_current_func_type ();
9403 if (IS_NAKED (func_type))
9404 return "";
9406 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9408 /* If this function was declared non-returning, and we have
9409 found a tail call, then we have to trust that the called
9410 function won't return. */
9411 if (really_return)
9413 rtx ops[2];
9415 /* Otherwise, trap an attempted return by aborting. */
9416 ops[0] = operand;
9417 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9418 : "abort");
9419 assemble_external_libcall (ops[1]);
9420 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9423 return "";
9426 gcc_assert (!current_function_calls_alloca || really_return);
9428 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9430 return_used_this_function = 1;
9432 live_regs_mask = arm_compute_save_reg_mask ();
9434 if (live_regs_mask)
9436 const char * return_reg;
9438 /* If we do not have any special requirements for function exit
9439 (e.g. interworking, or ISR) then we can load the return address
9440 directly into the PC. Otherwise we must load it into LR. */
9441 if (really_return
9442 && ! TARGET_INTERWORK)
9443 return_reg = reg_names[PC_REGNUM];
9444 else
9445 return_reg = reg_names[LR_REGNUM];
9447 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9449 /* There are three possible reasons for the IP register
9450 being saved. 1) a stack frame was created, in which case
9451 IP contains the old stack pointer, or 2) an ISR routine
9452 corrupted it, or 3) it was saved to align the stack on
9453 iWMMXt. In case 1, restore IP into SP, otherwise just
9454 restore IP. */
9455 if (frame_pointer_needed)
9457 live_regs_mask &= ~ (1 << IP_REGNUM);
9458 live_regs_mask |= (1 << SP_REGNUM);
9460 else
9461 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9464 /* On some ARM architectures it is faster to use LDR rather than
9465 LDM to load a single register. On other architectures, the
9466 cost is the same. In 26 bit mode, or for exception handlers,
9467 we have to use LDM to load the PC so that the CPSR is also
9468 restored. */
9469 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9470 if (live_regs_mask == (1U << reg))
9471 break;
9473 if (reg <= LAST_ARM_REGNUM
9474 && (reg != LR_REGNUM
9475 || ! really_return
9476 || ! IS_INTERRUPT (func_type)))
9478 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9479 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9481 else
9483 char *p;
9484 int first = 1;
9486 /* Generate the load multiple instruction to restore the
9487 registers. Note we can get here, even if
9488 frame_pointer_needed is true, but only if sp already
9489 points to the base of the saved core registers. */
9490 if (live_regs_mask & (1 << SP_REGNUM))
9492 unsigned HOST_WIDE_INT stack_adjust;
9494 offsets = arm_get_frame_offsets ();
9495 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9496 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9498 if (stack_adjust && arm_arch5)
9499 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9500 else
9502 /* If we can't use ldmib (SA110 bug),
9503 then try to pop r3 instead. */
9504 if (stack_adjust)
9505 live_regs_mask |= 1 << 3;
9506 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9509 else
9510 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9512 p = instr + strlen (instr);
9514 for (reg = 0; reg <= SP_REGNUM; reg++)
9515 if (live_regs_mask & (1 << reg))
9517 int l = strlen (reg_names[reg]);
9519 if (first)
9520 first = 0;
9521 else
9523 memcpy (p, ", ", 2);
9524 p += 2;
9527 memcpy (p, "%|", 2);
9528 memcpy (p + 2, reg_names[reg], l);
9529 p += l + 2;
9532 if (live_regs_mask & (1 << LR_REGNUM))
9534 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9535 /* If returning from an interrupt, restore the CPSR. */
9536 if (IS_INTERRUPT (func_type))
9537 strcat (p, "^");
9539 else
9540 strcpy (p, "}");
9543 output_asm_insn (instr, & operand);
9545 /* See if we need to generate an extra instruction to
9546 perform the actual function return. */
9547 if (really_return
9548 && func_type != ARM_FT_INTERWORKED
9549 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9551 /* The return has already been handled
9552 by loading the LR into the PC. */
9553 really_return = 0;
9557 if (really_return)
9559 switch ((int) ARM_FUNC_TYPE (func_type))
9561 case ARM_FT_ISR:
9562 case ARM_FT_FIQ:
9563 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9564 break;
9566 case ARM_FT_INTERWORKED:
9567 sprintf (instr, "bx%s\t%%|lr", conditional);
9568 break;
9570 case ARM_FT_EXCEPTION:
9571 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9572 break;
9574 default:
9575 /* Use bx if it's available. */
9576 if (arm_arch5 || arm_arch4t)
9577 sprintf (instr, "bx%s\t%%|lr", conditional);
9578 else
9579 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9580 break;
9583 output_asm_insn (instr, & operand);
9586 return "";
9589 /* Write the function name into the code section, directly preceding
9590 the function prologue.
9592 Code will be output similar to this:
9594 .ascii "arm_poke_function_name", 0
9595 .align
9597 .word 0xff000000 + (t1 - t0)
9598 arm_poke_function_name
9599 mov ip, sp
9600 stmfd sp!, {fp, ip, lr, pc}
9601 sub fp, ip, #4
9603 When performing a stack backtrace, code can inspect the value
9604 of 'pc' stored at 'fp' + 0. If the trace function then looks
9605 at location pc - 12 and the top 8 bits are set, then we know
9606 that there is a function name embedded immediately preceding this
9607 location, and that its length is (pc[-3] & 0x00ffffff).
9609 We assume that pc is declared as a pointer to an unsigned long.
9611 It is of no benefit to output the function name if we are assembling
9612 a leaf function. These function types will not contain a stack
9613 backtrace structure, therefore it is not possible to determine the
9614 function name. */
9615 void
9616 arm_poke_function_name (FILE *stream, const char *name)
9618 unsigned long alignlength;
9619 unsigned long length;
9620 rtx x;
9622 length = strlen (name) + 1;
9623 alignlength = ROUND_UP_WORD (length);
9625 ASM_OUTPUT_ASCII (stream, name, length);
9626 ASM_OUTPUT_ALIGN (stream, 2);
9627 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9628 assemble_aligned_integer (UNITS_PER_WORD, x);
9631 /* Place some comments into the assembler stream
9632 describing the current function. */
9633 static void
9634 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9636 unsigned long func_type;
9638 if (!TARGET_ARM)
9640 thumb_output_function_prologue (f, frame_size);
9641 return;
9644 /* Sanity check. */
9645 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9647 func_type = arm_current_func_type ();
9649 switch ((int) ARM_FUNC_TYPE (func_type))
9651 default:
9652 case ARM_FT_NORMAL:
9653 break;
9654 case ARM_FT_INTERWORKED:
9655 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9656 break;
9657 case ARM_FT_ISR:
9658 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9659 break;
9660 case ARM_FT_FIQ:
9661 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9662 break;
9663 case ARM_FT_EXCEPTION:
9664 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9665 break;
9668 if (IS_NAKED (func_type))
9669 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9671 if (IS_VOLATILE (func_type))
9672 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9674 if (IS_NESTED (func_type))
9675 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9677 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9678 current_function_args_size,
9679 current_function_pretend_args_size, frame_size);
9681 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9682 frame_pointer_needed,
9683 cfun->machine->uses_anonymous_args);
9685 if (cfun->machine->lr_save_eliminated)
9686 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9688 if (current_function_calls_eh_return)
9689 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9691 #ifdef AOF_ASSEMBLER
9692 if (flag_pic)
9693 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9694 #endif
9696 return_used_this_function = 0;
9699 const char *
9700 arm_output_epilogue (rtx sibling)
9702 int reg;
9703 unsigned long saved_regs_mask;
9704 unsigned long func_type;
9705 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9706 frame that is $fp + 4 for a non-variadic function. */
9707 int floats_offset = 0;
9708 rtx operands[3];
9709 FILE * f = asm_out_file;
9710 unsigned int lrm_count = 0;
9711 int really_return = (sibling == NULL);
9712 int start_reg;
9713 arm_stack_offsets *offsets;
9715 /* If we have already generated the return instruction
9716 then it is futile to generate anything else. */
9717 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9718 return "";
9720 func_type = arm_current_func_type ();
9722 if (IS_NAKED (func_type))
9723 /* Naked functions don't have epilogues. */
9724 return "";
9726 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9728 rtx op;
9730 /* A volatile function should never return. Call abort. */
9731 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9732 assemble_external_libcall (op);
9733 output_asm_insn ("bl\t%a0", &op);
9735 return "";
9738 /* If we are throwing an exception, then we really must be doing a
9739 return, so we can't tail-call. */
9740 gcc_assert (!current_function_calls_eh_return || really_return);
9742 offsets = arm_get_frame_offsets ();
9743 saved_regs_mask = arm_compute_save_reg_mask ();
9745 if (TARGET_IWMMXT)
9746 lrm_count = bit_count (saved_regs_mask);
9748 floats_offset = offsets->saved_args;
9749 /* Compute how far away the floats will be. */
9750 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9751 if (saved_regs_mask & (1 << reg))
9752 floats_offset += 4;
9754 if (frame_pointer_needed)
9756 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9757 int vfp_offset = offsets->frame;
9759 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9761 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9762 if (regs_ever_live[reg] && !call_used_regs[reg])
9764 floats_offset += 12;
9765 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9766 reg, FP_REGNUM, floats_offset - vfp_offset);
9769 else
9771 start_reg = LAST_FPA_REGNUM;
9773 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9775 if (regs_ever_live[reg] && !call_used_regs[reg])
9777 floats_offset += 12;
9779 /* We can't unstack more than four registers at once. */
9780 if (start_reg - reg == 3)
9782 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9783 reg, FP_REGNUM, floats_offset - vfp_offset);
9784 start_reg = reg - 1;
9787 else
9789 if (reg != start_reg)
9790 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9791 reg + 1, start_reg - reg,
9792 FP_REGNUM, floats_offset - vfp_offset);
9793 start_reg = reg - 1;
9797 /* Just in case the last register checked also needs unstacking. */
9798 if (reg != start_reg)
9799 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9800 reg + 1, start_reg - reg,
9801 FP_REGNUM, floats_offset - vfp_offset);
9804 if (TARGET_HARD_FLOAT && TARGET_VFP)
9806 int saved_size;
9808 /* The fldmx insn does not have base+offset addressing modes,
9809 so we use IP to hold the address. */
9810 saved_size = arm_get_vfp_saved_size ();
9812 if (saved_size > 0)
9814 floats_offset += saved_size;
9815 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9816 FP_REGNUM, floats_offset - vfp_offset);
9818 start_reg = FIRST_VFP_REGNUM;
9819 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9821 if ((!regs_ever_live[reg] || call_used_regs[reg])
9822 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9824 if (start_reg != reg)
9825 arm_output_fldmx (f, IP_REGNUM,
9826 (start_reg - FIRST_VFP_REGNUM) / 2,
9827 (reg - start_reg) / 2);
9828 start_reg = reg + 2;
9831 if (start_reg != reg)
9832 arm_output_fldmx (f, IP_REGNUM,
9833 (start_reg - FIRST_VFP_REGNUM) / 2,
9834 (reg - start_reg) / 2);
9837 if (TARGET_IWMMXT)
9839 /* The frame pointer is guaranteed to be non-double-word aligned.
9840 This is because it is set to (old_stack_pointer - 4) and the
9841 old_stack_pointer was double word aligned. Thus the offset to
9842 the iWMMXt registers to be loaded must also be non-double-word
9843 sized, so that the resultant address *is* double-word aligned.
9844 We can ignore floats_offset since that was already included in
9845 the live_regs_mask. */
9846 lrm_count += (lrm_count % 2 ? 2 : 1);
9848 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9849 if (regs_ever_live[reg] && !call_used_regs[reg])
9851 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9852 reg, FP_REGNUM, lrm_count * 4);
9853 lrm_count += 2;
9857 /* saved_regs_mask should contain the IP, which at the time of stack
9858 frame generation actually contains the old stack pointer. So a
9859 quick way to unwind the stack is just pop the IP register directly
9860 into the stack pointer. */
9861 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9862 saved_regs_mask &= ~ (1 << IP_REGNUM);
9863 saved_regs_mask |= (1 << SP_REGNUM);
9865 /* There are two registers left in saved_regs_mask - LR and PC. We
9866 only need to restore the LR register (the return address), but to
9867 save time we can load it directly into the PC, unless we need a
9868 special function exit sequence, or we are not really returning. */
9869 if (really_return
9870 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9871 && !current_function_calls_eh_return)
9872 /* Delete the LR from the register mask, so that the LR on
9873 the stack is loaded into the PC in the register mask. */
9874 saved_regs_mask &= ~ (1 << LR_REGNUM);
9875 else
9876 saved_regs_mask &= ~ (1 << PC_REGNUM);
9878 /* We must use SP as the base register, because SP is one of the
9879 registers being restored. If an interrupt or page fault
9880 happens in the ldm instruction, the SP might or might not
9881 have been restored. That would be bad, as then SP will no
9882 longer indicate the safe area of stack, and we can get stack
9883 corruption. Using SP as the base register means that it will
9884 be reset correctly to the original value, should an interrupt
9885 occur. If the stack pointer already points at the right
9886 place, then omit the subtraction. */
9887 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9888 || current_function_calls_alloca)
9889 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9890 4 * bit_count (saved_regs_mask));
9891 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9893 if (IS_INTERRUPT (func_type))
9894 /* Interrupt handlers will have pushed the
9895 IP onto the stack, so restore it now. */
9896 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9898 else
9900 /* Restore stack pointer if necessary. */
9901 if (offsets->outgoing_args != offsets->saved_regs)
9903 operands[0] = operands[1] = stack_pointer_rtx;
9904 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9905 output_add_immediate (operands);
9908 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9910 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9911 if (regs_ever_live[reg] && !call_used_regs[reg])
9912 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9913 reg, SP_REGNUM);
9915 else
9917 start_reg = FIRST_FPA_REGNUM;
9919 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9921 if (regs_ever_live[reg] && !call_used_regs[reg])
9923 if (reg - start_reg == 3)
9925 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9926 start_reg, SP_REGNUM);
9927 start_reg = reg + 1;
9930 else
9932 if (reg != start_reg)
9933 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9934 start_reg, reg - start_reg,
9935 SP_REGNUM);
9937 start_reg = reg + 1;
9941 /* Just in case the last register checked also needs unstacking. */
9942 if (reg != start_reg)
9943 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9944 start_reg, reg - start_reg, SP_REGNUM);
9947 if (TARGET_HARD_FLOAT && TARGET_VFP)
9949 start_reg = FIRST_VFP_REGNUM;
9950 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9952 if ((!regs_ever_live[reg] || call_used_regs[reg])
9953 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9955 if (start_reg != reg)
9956 arm_output_fldmx (f, SP_REGNUM,
9957 (start_reg - FIRST_VFP_REGNUM) / 2,
9958 (reg - start_reg) / 2);
9959 start_reg = reg + 2;
9962 if (start_reg != reg)
9963 arm_output_fldmx (f, SP_REGNUM,
9964 (start_reg - FIRST_VFP_REGNUM) / 2,
9965 (reg - start_reg) / 2);
9967 if (TARGET_IWMMXT)
9968 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9969 if (regs_ever_live[reg] && !call_used_regs[reg])
9970 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9972 /* If we can, restore the LR into the PC. */
9973 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9974 && really_return
9975 && current_function_pretend_args_size == 0
9976 && saved_regs_mask & (1 << LR_REGNUM)
9977 && !current_function_calls_eh_return)
9979 saved_regs_mask &= ~ (1 << LR_REGNUM);
9980 saved_regs_mask |= (1 << PC_REGNUM);
9983 /* Load the registers off the stack. If we only have one register
9984 to load use the LDR instruction - it is faster. */
9985 if (saved_regs_mask == (1 << LR_REGNUM))
9987 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9989 else if (saved_regs_mask)
9991 if (saved_regs_mask & (1 << SP_REGNUM))
9992 /* Note - write back to the stack register is not enabled
9993 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9994 in the list of registers and if we add writeback the
9995 instruction becomes UNPREDICTABLE. */
9996 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9997 else
9998 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
10001 if (current_function_pretend_args_size)
10003 /* Unwind the pre-pushed regs. */
10004 operands[0] = operands[1] = stack_pointer_rtx;
10005 operands[2] = GEN_INT (current_function_pretend_args_size);
10006 output_add_immediate (operands);
10010 /* We may have already restored PC directly from the stack. */
10011 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
10012 return "";
10014 /* Stack adjustment for exception handler. */
10015 if (current_function_calls_eh_return)
10016 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
10017 ARM_EH_STACKADJ_REGNUM);
10019 /* Generate the return instruction. */
10020 switch ((int) ARM_FUNC_TYPE (func_type))
10022 case ARM_FT_ISR:
10023 case ARM_FT_FIQ:
10024 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
10025 break;
10027 case ARM_FT_EXCEPTION:
10028 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10029 break;
10031 case ARM_FT_INTERWORKED:
10032 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10033 break;
10035 default:
10036 if (arm_arch5 || arm_arch4t)
10037 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10038 else
10039 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10040 break;
10043 return "";
10046 static void
10047 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10048 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
10050 arm_stack_offsets *offsets;
10052 if (TARGET_THUMB)
10054 int regno;
10056 /* Emit any call-via-reg trampolines that are needed for v4t support
10057 of call_reg and call_value_reg type insns. */
10058 for (regno = 0; regno < LR_REGNUM; regno++)
10060 rtx label = cfun->machine->call_via[regno];
10062 if (label != NULL)
10064 switch_to_section (function_section (current_function_decl));
10065 targetm.asm_out.internal_label (asm_out_file, "L",
10066 CODE_LABEL_NUMBER (label));
10067 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
10071 /* ??? Probably not safe to set this here, since it assumes that a
10072 function will be emitted as assembly immediately after we generate
10073 RTL for it. This does not happen for inline functions. */
10074 return_used_this_function = 0;
10076 else
10078 /* We need to take into account any stack-frame rounding. */
10079 offsets = arm_get_frame_offsets ();
10081 gcc_assert (!use_return_insn (FALSE, NULL)
10082 || !return_used_this_function
10083 || offsets->saved_regs == offsets->outgoing_args
10084 || frame_pointer_needed);
10086 /* Reset the ARM-specific per-function variables. */
10087 after_arm_reorg = 0;
10091 /* Generate and emit an insn that we will recognize as a push_multi.
10092 Unfortunately, since this insn does not reflect very well the actual
10093 semantics of the operation, we need to annotate the insn for the benefit
10094 of DWARF2 frame unwind information. */
10095 static rtx
10096 emit_multi_reg_push (unsigned long mask)
10098 int num_regs = 0;
10099 int num_dwarf_regs;
10100 int i, j;
10101 rtx par;
10102 rtx dwarf;
10103 int dwarf_par_index;
10104 rtx tmp, reg;
10106 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10107 if (mask & (1 << i))
10108 num_regs++;
10110 gcc_assert (num_regs && num_regs <= 16);
10112 /* We don't record the PC in the dwarf frame information. */
10113 num_dwarf_regs = num_regs;
10114 if (mask & (1 << PC_REGNUM))
10115 num_dwarf_regs--;
10117 /* For the body of the insn we are going to generate an UNSPEC in
10118 parallel with several USEs. This allows the insn to be recognized
10119 by the push_multi pattern in the arm.md file. The insn looks
10120 something like this:
10122 (parallel [
10123 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
10124 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
10125 (use (reg:SI 11 fp))
10126 (use (reg:SI 12 ip))
10127 (use (reg:SI 14 lr))
10128 (use (reg:SI 15 pc))
10131 For the frame note however, we try to be more explicit and actually
10132 show each register being stored into the stack frame, plus a (single)
10133 decrement of the stack pointer. We do it this way in order to be
10134 friendly to the stack unwinding code, which only wants to see a single
10135 stack decrement per instruction. The RTL we generate for the note looks
10136 something like this:
10138 (sequence [
10139 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
10140 (set (mem:SI (reg:SI sp)) (reg:SI r4))
10141 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
10142 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
10143 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
10146 This sequence is used both by the code to support stack unwinding for
10147 exception handlers and the code to generate dwarf2 frame debugging. */
10149 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10150 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10151 dwarf_par_index = 1;
10153 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10155 if (mask & (1 << i))
10157 reg = gen_rtx_REG (SImode, i);
10159 XVECEXP (par, 0, 0)
10160 = gen_rtx_SET (VOIDmode,
10161 gen_frame_mem (BLKmode,
10162 gen_rtx_PRE_DEC (BLKmode,
10163 stack_pointer_rtx)),
10164 gen_rtx_UNSPEC (BLKmode,
10165 gen_rtvec (1, reg),
10166 UNSPEC_PUSH_MULT));
10168 if (i != PC_REGNUM)
10170 tmp = gen_rtx_SET (VOIDmode,
10171 gen_frame_mem (SImode, stack_pointer_rtx),
10172 reg);
10173 RTX_FRAME_RELATED_P (tmp) = 1;
10174 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10175 dwarf_par_index++;
10178 break;
10182 for (j = 1, i++; j < num_regs; i++)
10184 if (mask & (1 << i))
10186 reg = gen_rtx_REG (SImode, i);
10188 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10190 if (i != PC_REGNUM)
10192 tmp
10193 = gen_rtx_SET (VOIDmode,
10194 gen_frame_mem (SImode,
10195 plus_constant (stack_pointer_rtx,
10196 4 * j)),
10197 reg);
10198 RTX_FRAME_RELATED_P (tmp) = 1;
10199 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10202 j++;
10206 par = emit_insn (par);
10208 tmp = gen_rtx_SET (VOIDmode,
10209 stack_pointer_rtx,
10210 plus_constant (stack_pointer_rtx, -4 * num_regs));
10211 RTX_FRAME_RELATED_P (tmp) = 1;
10212 XVECEXP (dwarf, 0, 0) = tmp;
10214 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10215 REG_NOTES (par));
10216 return par;
10219 /* Calculate the size of the return value that is passed in registers. */
10220 static int
10221 arm_size_return_regs (void)
10223 enum machine_mode mode;
10225 if (current_function_return_rtx != 0)
10226 mode = GET_MODE (current_function_return_rtx);
10227 else
10228 mode = DECL_MODE (DECL_RESULT (current_function_decl));
10230 return GET_MODE_SIZE (mode);
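/* Generate and emit an insn that pushes COUNT consecutive FPA
   registers, starting at BASE_REG, with a single "sfm"
   store-multiple. As with emit_multi_reg_push, the insn body is an
   UNSPEC_PUSH_MULT parallel, and a REG_FRAME_RELATED_EXPR note
   spells out the individual 12-byte stores plus the single stack
   decrement of 12 * COUNT bytes for the dwarf2 unwinder.  */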
10233 static rtx
10234 emit_sfm (int base_reg, int count)
10236 rtx par;
10237 rtx dwarf;
10238 rtx tmp, reg;
10239 int i;
10241 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10242 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10244 reg = gen_rtx_REG (XFmode, base_reg++);
10246 XVECEXP (par, 0, 0)
10247 = gen_rtx_SET (VOIDmode,
10248 gen_frame_mem (BLKmode,
10249 gen_rtx_PRE_DEC (BLKmode,
10250 stack_pointer_rtx)),
10251 gen_rtx_UNSPEC (BLKmode,
10252 gen_rtvec (1, reg),
10253 UNSPEC_PUSH_MULT));
10254 tmp = gen_rtx_SET (VOIDmode,
10255 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
10256 RTX_FRAME_RELATED_P (tmp) = 1;
10257 XVECEXP (dwarf, 0, 1) = tmp;
10259 for (i = 1; i < count; i++)
10261 reg = gen_rtx_REG (XFmode, base_reg++);
10262 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10264 tmp = gen_rtx_SET (VOIDmode,
10265 gen_frame_mem (XFmode,
10266 plus_constant (stack_pointer_rtx,
10267 i * 12)),
10268 reg);
10269 RTX_FRAME_RELATED_P (tmp) = 1;
10270 XVECEXP (dwarf, 0, i + 1) = tmp;
10273 tmp = gen_rtx_SET (VOIDmode,
10274 stack_pointer_rtx,
10275 plus_constant (stack_pointer_rtx, -12 * count));
10277 RTX_FRAME_RELATED_P (tmp) = 1;
10278 XVECEXP (dwarf, 0, 0) = tmp;
10280 par = emit_insn (par);
10281 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10282 REG_NOTES (par));
10283 return par;
10287 /* Return true if the current function needs to save/restore LR. */
10289 static bool
10290 thumb_force_lr_save (void)
10292 return !cfun->machine->lr_save_eliminated
10293 && (!leaf_function_p ()
10294 || thumb_far_jump_used_p ()
10295 || regs_ever_live [LR_REGNUM]);
10299 /* Compute the distance from register FROM to register TO.
10300 These can be the arg pointer (26), the soft frame pointer (25),
10301 the stack pointer (13) or the hard frame pointer (11).
10302 In thumb mode r7 is used as the soft frame pointer, if needed.
10303 Typical stack layout looks like this:
10305 old stack pointer -> | |
10306 ----
10307 | | \
10308 | | saved arguments for
10309 | | vararg functions
10310 | | /
10312 hard FP & arg pointer -> | | \
10313 | | stack
10314 | | frame
10315 | | /
10317 | | \
10318 | | call saved
10319 | | registers
10320 soft frame pointer -> | | /
10322 | | \
10323 | | local
10324 | | variables
10325 locals base pointer -> | | /
10327 | | \
10328 | | outgoing
10329 | | arguments
10330 current stack pointer -> | | /
10333 For a given function some or all of these stack components
10334 may not be needed, giving rise to the possibility of
10335 eliminating some of the registers.
10337 The values returned by this function must reflect the behavior
10338 of arm_expand_prologue() and arm_compute_save_reg_mask().
10340 The sign of the number returned reflects the direction of stack
10341 growth, so the values are positive for all eliminations except
10342 from the soft frame pointer to the hard frame pointer.
10344 SFP may point just inside the local variables block to ensure correct
10345 alignment. */
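/* A worked example, assuming CALLER_INTERWORKING_SLOT_SIZE is 0 and
   no doubleword padding is needed: a non-varargs ARM function that
   saves {fp, lr}, has 8 bytes of local variables and no outgoing
   arguments gets saved_args == 0, saved_regs == 8, soft_frame == 8
   and outgoing_args == 16, so the elimination from the arg pointer
   to the stack pointer yields 16 - (0 + 4) == 12, and from the soft
   frame pointer to the stack pointer 16 - 8 == 8.  */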
10348 /* Calculate stack offsets. These are used to calculate register elimination
10349 offsets and in prologue/epilogue code. */
10351 static arm_stack_offsets *
10352 arm_get_frame_offsets (void)
10354 struct arm_stack_offsets *offsets;
10355 unsigned long func_type;
10356 int leaf;
10357 int saved;
10358 HOST_WIDE_INT frame_size;
10360 offsets = &cfun->machine->stack_offsets;
10362 /* We need to know if we are a leaf function. Unfortunately, it
10363 is possible to be called after start_sequence has been called,
10364 which causes get_insns to return the insns for the sequence,
10365 not the function, which will cause leaf_function_p to return
10366 the incorrect result.
10368 We do not need to know about leaf functions once reload has
10369 completed, and the frame size cannot be changed after that time,
10370 so we can safely use the cached value. */
10372 if (reload_completed)
10373 return offsets;
10375 /* Initially this is the size of the local variables. It will be translated
10376 into an offset once we have determined the size of preceding data. */
10377 frame_size = ROUND_UP_WORD (get_frame_size ());
10379 leaf = leaf_function_p ();
10381 /* Space for variadic functions. */
10382 offsets->saved_args = current_function_pretend_args_size;
10384 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10386 if (TARGET_ARM)
10388 unsigned int regno;
10390 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10392 /* We know that SP will be doubleword aligned on entry, and we must
10393 preserve that condition at any subroutine call. We also require the
10394 soft frame pointer to be doubleword aligned. */
10396 if (TARGET_REALLY_IWMMXT)
10398 /* Check for the call-saved iWMMXt registers. */
10399 for (regno = FIRST_IWMMXT_REGNUM;
10400 regno <= LAST_IWMMXT_REGNUM;
10401 regno++)
10402 if (regs_ever_live [regno] && ! call_used_regs [regno])
10403 saved += 8;
10406 func_type = arm_current_func_type ();
10407 if (! IS_VOLATILE (func_type))
10409 /* Space for saved FPA registers. */
10410 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10411 if (regs_ever_live[regno] && ! call_used_regs[regno])
10412 saved += 12;
10414 /* Space for saved VFP registers. */
10415 if (TARGET_HARD_FLOAT && TARGET_VFP)
10416 saved += arm_get_vfp_saved_size ();
10419 else /* TARGET_THUMB */
10421 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10422 if (TARGET_BACKTRACE)
10423 saved += 16;
10426 /* Saved registers include the stack frame. */
10427 offsets->saved_regs = offsets->saved_args + saved;
10428 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10429 /* A leaf function does not need any stack alignment if it has nothing
10430 on the stack. */
10431 if (leaf && frame_size == 0)
10433 offsets->outgoing_args = offsets->soft_frame;
10434 return offsets;
10437 /* Ensure SFP has the correct alignment. */
10438 if (ARM_DOUBLEWORD_ALIGN
10439 && (offsets->soft_frame & 7))
10440 offsets->soft_frame += 4;
10442 offsets->locals_base = offsets->soft_frame + frame_size;
10443 offsets->outgoing_args = (offsets->locals_base
10444 + current_function_outgoing_args_size);
10446 if (ARM_DOUBLEWORD_ALIGN)
10448 /* Ensure SP remains doubleword aligned. */
10449 if (offsets->outgoing_args & 7)
10450 offsets->outgoing_args += 4;
10451 gcc_assert (!(offsets->outgoing_args & 7));
10454 return offsets;
10458 /* Calculate the relative offsets for the different stack pointers. Positive
10459 offsets are in the direction of stack growth. */
10461 HOST_WIDE_INT
10462 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10464 arm_stack_offsets *offsets;
10466 offsets = arm_get_frame_offsets ();
10468 /* OK, now we have enough information to compute the distances.
10469 There must be an entry in these switch tables for each pair
10470 of registers in ELIMINABLE_REGS, even if some of the entries
10471 seem to be redundant or useless. */
10472 switch (from)
10474 case ARG_POINTER_REGNUM:
10475 switch (to)
10477 case THUMB_HARD_FRAME_POINTER_REGNUM:
10478 return 0;
10480 case FRAME_POINTER_REGNUM:
10481 /* This is the reverse of the soft frame pointer
10482 to hard frame pointer elimination below. */
10483 return offsets->soft_frame - offsets->saved_args;
10485 case ARM_HARD_FRAME_POINTER_REGNUM:
10486 /* If there is no stack frame then the hard
10487 frame pointer and the arg pointer coincide. */
10488 if (offsets->frame == offsets->saved_regs)
10489 return 0;
10490 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10491 return (frame_pointer_needed
10492 && cfun->static_chain_decl != NULL
10493 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10495 case STACK_POINTER_REGNUM:
10496 /* If nothing has been pushed on the stack at all
10497 then this will return -4. This *is* correct! */
10498 return offsets->outgoing_args - (offsets->saved_args + 4);
10500 default:
10501 gcc_unreachable ();
10503 gcc_unreachable ();
10505 case FRAME_POINTER_REGNUM:
10506 switch (to)
10508 case THUMB_HARD_FRAME_POINTER_REGNUM:
10509 return 0;
10511 case ARM_HARD_FRAME_POINTER_REGNUM:
10512 /* The hard frame pointer points to the top entry in the
10513 stack frame. The soft frame pointer points to the bottom entry
10514 in the stack frame. If there is no stack frame at all,
10515 then they are identical. */
10517 return offsets->frame - offsets->soft_frame;
10519 case STACK_POINTER_REGNUM:
10520 return offsets->outgoing_args - offsets->soft_frame;
10522 default:
10523 gcc_unreachable ();
10525 gcc_unreachable ();
10527 default:
10528 /* You cannot eliminate from the stack pointer.
10529 In theory you could eliminate from the hard frame
10530 pointer to the stack pointer, but this will never
10531 happen, since if a stack frame is not needed the
10532 hard frame pointer will never be used. */
10533 gcc_unreachable ();
10538 /* Generate the prologue instructions for entry into an ARM function. */
10539 void
10540 arm_expand_prologue (void)
10542 int reg;
10543 rtx amount;
10544 rtx insn;
10545 rtx ip_rtx;
10546 unsigned long live_regs_mask;
10547 unsigned long func_type;
10548 int fp_offset = 0;
10549 int saved_pretend_args = 0;
10550 int saved_regs = 0;
10551 unsigned HOST_WIDE_INT args_to_push;
10552 arm_stack_offsets *offsets;
10554 func_type = arm_current_func_type ();
10556 /* Naked functions don't have prologues. */
10557 if (IS_NAKED (func_type))
10558 return;
10560 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10561 args_to_push = current_function_pretend_args_size;
10563 /* Compute which registers we will have to save onto the stack. */
10564 live_regs_mask = arm_compute_save_reg_mask ();
10566 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10568 if (frame_pointer_needed)
10570 if (IS_INTERRUPT (func_type))
10572 /* Interrupt functions must not corrupt any registers.
10573 Creating a frame pointer, however, corrupts the IP
10574 register, so we must push it first. */
10575 insn = emit_multi_reg_push (1 << IP_REGNUM);
10577 /* Do not set RTX_FRAME_RELATED_P on this insn.
10578 The dwarf stack unwinding code only wants to see one
10579 stack decrement per function, and this is not it. If
10580 this instruction is labeled as being part of the frame
10581 creation sequence then dwarf2out_frame_debug_expr will
10582 die when it encounters the assignment of IP to FP
10583 later on, since the use of SP here establishes SP as
10584 the CFA register and not IP.
10586 Anyway this instruction is not really part of the stack
10587 frame creation although it is part of the prologue. */
10589 else if (IS_NESTED (func_type))
10591 /* The static chain register is the same as the IP register
10592 used as a scratch register during stack frame creation.
10593 To get around this we need to find somewhere to store IP
10594 whilst the frame is being created. We try the following
10595 places in order:
10597 1. The last argument register.
10598 2. A slot on the stack above the frame. (This only
10599 works if the function is not a varargs function).
10600 3. Register r3, after pushing the argument registers
10601 onto the stack.
10603 Note - we only need to tell the dwarf2 backend about the SP
10604 adjustment in the second variant; the static chain register
10605 doesn't need to be unwound, as it doesn't contain a value
10606 inherited from the caller. */
10608 if (regs_ever_live[3] == 0)
10609 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10610 else if (args_to_push == 0)
10612 rtx dwarf;
10614 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10615 insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
10616 fp_offset = 4;
10618 /* Just tell the dwarf backend that we adjusted SP. */
10619 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10620 plus_constant (stack_pointer_rtx,
10621 -fp_offset));
10622 RTX_FRAME_RELATED_P (insn) = 1;
10623 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10624 dwarf, REG_NOTES (insn));
10626 else
10628 /* Store the args on the stack. */
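/* An example of the mask computation below: with 8 bytes of pretend
   args, (0xf0 >> (8 / 4)) & 0xf == 0xc, i.e. push {r2, r3}.  */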
10629 if (cfun->machine->uses_anonymous_args)
10630 insn = emit_multi_reg_push
10631 ((0xf0 >> (args_to_push / 4)) & 0xf);
10632 else
10633 insn = emit_insn
10634 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10635 GEN_INT (- args_to_push)));
10637 RTX_FRAME_RELATED_P (insn) = 1;
10639 saved_pretend_args = 1;
10640 fp_offset = args_to_push;
10641 args_to_push = 0;
10643 /* Now reuse r3 to preserve IP. */
10644 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10648 insn = emit_set_insn (ip_rtx,
10649 plus_constant (stack_pointer_rtx, fp_offset));
10650 RTX_FRAME_RELATED_P (insn) = 1;
10653 if (args_to_push)
10655 /* Push the argument registers, or reserve space for them. */
10656 if (cfun->machine->uses_anonymous_args)
10657 insn = emit_multi_reg_push
10658 ((0xf0 >> (args_to_push / 4)) & 0xf);
10659 else
10660 insn = emit_insn
10661 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10662 GEN_INT (- args_to_push)));
10663 RTX_FRAME_RELATED_P (insn) = 1;
10666 /* If this is an interrupt service routine, and the link register
10667 is going to be pushed, and we are not creating a stack frame,
10668 (which would involve an extra push of IP and a pop in the epilogue)
10669 subtracting four from LR now will mean that the function return
10670 can be done with a single instruction. */
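/* The return can then typically be a single
   "ldmfd sp!, {..., pc}^", rather than popping LR and issuing a
   separate "subs pc, lr, #4".  */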
10671 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10672 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10673 && ! frame_pointer_needed)
10675 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
10677 emit_set_insn (lr, plus_constant (lr, -4));
10680 if (live_regs_mask)
10682 insn = emit_multi_reg_push (live_regs_mask);
10683 saved_regs += bit_count (live_regs_mask) * 4;
10684 RTX_FRAME_RELATED_P (insn) = 1;
10687 if (TARGET_IWMMXT)
10688 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10689 if (regs_ever_live[reg] && ! call_used_regs [reg])
10691 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10692 insn = gen_frame_mem (V2SImode, insn);
10693 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
10694 RTX_FRAME_RELATED_P (insn) = 1;
10695 saved_regs += 8;
10698 if (! IS_VOLATILE (func_type))
10700 int start_reg;
10702 /* Save any floating point call-saved registers used by this
10703 function. */
10704 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10706 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10707 if (regs_ever_live[reg] && !call_used_regs[reg])
10709 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10710 insn = gen_frame_mem (XFmode, insn);
10711 insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
10712 RTX_FRAME_RELATED_P (insn) = 1;
10713 saved_regs += 12;
10716 else
10718 start_reg = LAST_FPA_REGNUM;
10720 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10722 if (regs_ever_live[reg] && !call_used_regs[reg])
10724 if (start_reg - reg == 3)
10726 insn = emit_sfm (reg, 4);
10727 RTX_FRAME_RELATED_P (insn) = 1;
10728 saved_regs += 48;
10729 start_reg = reg - 1;
10732 else
10734 if (start_reg != reg)
10736 insn = emit_sfm (reg + 1, start_reg - reg);
10737 RTX_FRAME_RELATED_P (insn) = 1;
10738 saved_regs += (start_reg - reg) * 12;
10740 start_reg = reg - 1;
10744 if (start_reg != reg)
10746 insn = emit_sfm (reg + 1, start_reg - reg);
10747 saved_regs += (start_reg - reg) * 12;
10748 RTX_FRAME_RELATED_P (insn) = 1;
10751 if (TARGET_HARD_FLOAT && TARGET_VFP)
10753 start_reg = FIRST_VFP_REGNUM;
10755 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10757 if ((!regs_ever_live[reg] || call_used_regs[reg])
10758 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10760 if (start_reg != reg)
10761 saved_regs += vfp_emit_fstmx (start_reg,
10762 (reg - start_reg) / 2);
10763 start_reg = reg + 2;
10766 if (start_reg != reg)
10767 saved_regs += vfp_emit_fstmx (start_reg,
10768 (reg - start_reg) / 2);
10772 if (frame_pointer_needed)
10774 /* Create the new frame pointer. */
10775 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10776 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10777 RTX_FRAME_RELATED_P (insn) = 1;
10779 if (IS_NESTED (func_type))
10781 /* Recover the static chain register. */
10782 if (regs_ever_live [3] == 0
10783 || saved_pretend_args)
10784 insn = gen_rtx_REG (SImode, 3);
10785 else /* if (current_function_pretend_args_size == 0) */
10787 insn = plus_constant (hard_frame_pointer_rtx, 4);
10788 insn = gen_frame_mem (SImode, insn);
10791 emit_set_insn (ip_rtx, insn);
10792 /* Add a USE to stop propagate_one_insn() from barfing. */
10793 emit_insn (gen_prologue_use (ip_rtx));
10797 offsets = arm_get_frame_offsets ();
10798 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10800 /* This add can produce multiple insns for a large constant, so we
10801 need to get tricky. */
10802 rtx last = get_last_insn ();
10804 amount = GEN_INT (offsets->saved_args + saved_regs
10805 - offsets->outgoing_args);
10807 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10808 amount));
10809 do
10810 {
10811 last = last ? NEXT_INSN (last) : get_insns ();
10812 RTX_FRAME_RELATED_P (last) = 1;
10813 }
10814 while (last != insn);
10816 /* If the frame pointer is needed, emit a special barrier that
10817 will prevent the scheduler from moving stores to the frame
10818 before the stack adjustment. */
10819 if (frame_pointer_needed)
10820 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10821 hard_frame_pointer_rtx));
10825 if (flag_pic)
10826 arm_load_pic_register (0UL);
10828 /* If we are profiling, make sure no instructions are scheduled before
10829 the call to mcount. Similarly if the user has requested no
10830 scheduling in the prologue. Similarly if we want non-call exceptions
10831 using the EABI unwinder, to prevent faulting instructions from being
10832 swapped with a stack adjustment. */
10833 if (current_function_profile || !TARGET_SCHED_PROLOG
10834 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
10835 emit_insn (gen_blockage ());
10837 /* If the link register is being kept alive, with the return address in it,
10838 then make sure that it does not get reused by the ce2 pass. */
10839 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10841 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10842 cfun->machine->lr_save_eliminated = 1;
10846 /* If CODE is 'd', then X is a condition operand and the instruction
10847 should only be executed if the condition is true.
10848 If CODE is 'D', then X is a condition operand and the instruction
10849 should only be executed if the condition is false: however, if the mode
10850 of the comparison is CCFPEmode, then always execute the instruction -- we
10851 do this because in these circumstances !GE does not necessarily imply LT;
10852 in these cases the instruction pattern will take care to make sure that
10853 an instruction containing %d will follow, thereby undoing the effects of
10854 doing this instruction unconditionally.
10855 If CODE is 'N' then X is a floating point operand that must be negated
10856 before output.
10857 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10858 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
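/* For example, %M applied to a DImode value held in r0 prints
   "{r0-r1}", and %B applied to (const_int 0) prints -1.  */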
10859 void
10860 arm_print_operand (FILE *stream, rtx x, int code)
10862 switch (code)
10864 case '@':
10865 fputs (ASM_COMMENT_START, stream);
10866 return;
10868 case '_':
10869 fputs (user_label_prefix, stream);
10870 return;
10872 case '|':
10873 fputs (REGISTER_PREFIX, stream);
10874 return;
10876 case '?':
10877 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10879 if (TARGET_THUMB)
10881 output_operand_lossage ("predicated Thumb instruction");
10882 break;
10884 if (current_insn_predicate != NULL)
10886 output_operand_lossage
10887 ("predicated instruction in conditional sequence");
10888 break;
10891 fputs (arm_condition_codes[arm_current_cc], stream);
10893 else if (current_insn_predicate)
10895 enum arm_cond_code code;
10897 if (TARGET_THUMB)
10899 output_operand_lossage ("predicated Thumb instruction");
10900 break;
10903 code = get_arm_condition_code (current_insn_predicate);
10904 fputs (arm_condition_codes[code], stream);
10906 return;
10908 case 'N':
10910 REAL_VALUE_TYPE r;
10911 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10912 r = REAL_VALUE_NEGATE (r);
10913 fprintf (stream, "%s", fp_const_from_val (&r));
10915 return;
10917 case 'B':
10918 if (GET_CODE (x) == CONST_INT)
10920 HOST_WIDE_INT val;
10921 val = ARM_SIGN_EXTEND (~INTVAL (x));
10922 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10924 else
10926 putc ('~', stream);
10927 output_addr_const (stream, x);
10929 return;
10931 case 'i':
10932 fprintf (stream, "%s", arithmetic_instr (x, 1));
10933 return;
10935 /* Truncate Cirrus shift counts. */
10936 case 's':
10937 if (GET_CODE (x) == CONST_INT)
10939 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10940 return;
10942 arm_print_operand (stream, x, 0);
10943 return;
10945 case 'I':
10946 fprintf (stream, "%s", arithmetic_instr (x, 0));
10947 return;
10949 case 'S':
10951 HOST_WIDE_INT val;
10952 const char * shift = shift_op (x, &val);
10954 if (shift)
10956 fprintf (stream, ", %s ", shift_op (x, &val));
10957 if (val == -1)
10958 arm_print_operand (stream, XEXP (x, 1), 0);
10959 else
10960 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10963 return;
10965 /* An explanation of the 'Q', 'R' and 'H' register operands:
10967 In a pair of registers containing a DI or DF value the 'Q'
10968 operand returns the register number of the register containing
10969 the least significant part of the value. The 'R' operand returns
10970 the register number of the register containing the most
10971 significant part of the value.
10973 The 'H' operand returns the higher of the two register numbers.
10974 On a target where WORDS_BIG_ENDIAN is true the 'H' operand is the
10975 same as the 'Q' operand, since the most significant part of the
10976 value is held in the lower number register. The reverse is true
10977 on systems where WORDS_BIG_ENDIAN is false.
10979 The purpose of these operands is to distinguish between cases
10980 where the endian-ness of the values is important (for example
10981 when they are added together), and cases where the endian-ness
10982 is irrelevant, but the order of register operations is important.
10983 For example when loading a value from memory into a register
10984 pair, the endian-ness does not matter. Provided that the value
10985 from the lower memory address is put into the lower numbered
10986 register, and the value from the higher address is put into the
10987 higher numbered register, the load will work regardless of whether
10988 the value being loaded is big-wordian or little-wordian. The
10989 order of the two register loads can matter however, if the address
10990 of the memory location is actually held in one of the registers
10991 being overwritten by the load. */
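/* So, on a little-endian target, for a DImode value held in r0/r1
   the 'Q' operand is r0, the 'R' operand is r1 and the 'H' operand
   is r1; with WORDS_BIG_ENDIAN, 'Q' is r1, 'R' is r0 and 'H' is
   still r1.  */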
10992 case 'Q':
10993 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10995 output_operand_lossage ("invalid operand for code '%c'", code);
10996 return;
10999 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
11000 return;
11002 case 'R':
11003 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11005 output_operand_lossage ("invalid operand for code '%c'", code);
11006 return;
11009 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
11010 return;
11012 case 'H':
11013 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11015 output_operand_lossage ("invalid operand for code '%c'", code);
11016 return;
11019 asm_fprintf (stream, "%r", REGNO (x) + 1);
11020 return;
11022 case 'm':
11023 asm_fprintf (stream, "%r",
11024 GET_CODE (XEXP (x, 0)) == REG
11025 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
11026 return;
11028 case 'M':
11029 asm_fprintf (stream, "{%r-%r}",
11030 REGNO (x),
11031 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
11032 return;
11034 case 'd':
11035 /* CONST_TRUE_RTX means always -- that's the default. */
11036 if (x == const_true_rtx)
11037 return;
11039 if (!COMPARISON_P (x))
11041 output_operand_lossage ("invalid operand for code '%c'", code);
11042 return;
11045 fputs (arm_condition_codes[get_arm_condition_code (x)],
11046 stream);
11047 return;
11049 case 'D':
11050 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11051 want to do that. */
11052 if (x == const_true_rtx)
11054 output_operand_lossage ("instruction never executed");
11055 return;
11057 if (!COMPARISON_P (x))
11059 output_operand_lossage ("invalid operand for code '%c'", code);
11060 return;
11063 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
11064 (get_arm_condition_code (x))],
11065 stream);
11066 return;
11068 /* Cirrus registers can be accessed in a variety of ways:
11069 single floating point (f)
11070 double floating point (d)
11071 32-bit integer (fx)
11072 64-bit integer (dx). */
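/* Assuming reg_names[] entries of the form "mvN" for the Maverick
   registers (hence the "+ 2" below to skip the "mv" prefix),
   register mv5 printed with code 'X' comes out as "mvd5".  */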
11073 case 'W': /* Cirrus register in F mode. */
11074 case 'X': /* Cirrus register in D mode. */
11075 case 'Y': /* Cirrus register in FX mode. */
11076 case 'Z': /* Cirrus register in DX mode. */
11077 gcc_assert (GET_CODE (x) == REG
11078 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
11080 fprintf (stream, "mv%s%s",
11081 code == 'W' ? "f"
11082 : code == 'X' ? "d"
11083 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
11085 return;
11087 /* Print a Cirrus register using the access mode implied by the register's machine mode. */
11088 case 'V':
11090 int mode = GET_MODE (x);
11092 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
11094 output_operand_lossage ("invalid operand for code '%c'", code);
11095 return;
11098 fprintf (stream, "mv%s%s",
11099 mode == DFmode ? "d"
11100 : mode == SImode ? "fx"
11101 : mode == DImode ? "dx"
11102 : "f", reg_names[REGNO (x)] + 2);
11104 return;
11107 case 'U':
11108 if (GET_CODE (x) != REG
11109 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
11110 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
11111 /* Bad value for wCG register number. */
11113 output_operand_lossage ("invalid operand for code '%c'", code);
11114 return;
11117 else
11118 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
11119 return;
11121 /* Print an iWMMXt control register name. */
11122 case 'w':
11123 if (GET_CODE (x) != CONST_INT
11124 || INTVAL (x) < 0
11125 || INTVAL (x) >= 16)
11126 /* Bad value for wC register number. */
11128 output_operand_lossage ("invalid operand for code '%c'", code);
11129 return;
11132 else
11134 static const char * wc_reg_names [16] =
11136 "wCID", "wCon", "wCSSF", "wCASF",
11137 "wC4", "wC5", "wC6", "wC7",
11138 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
11139 "wC12", "wC13", "wC14", "wC15"
11142 fprintf (stream, "%s", wc_reg_names [INTVAL (x)]);
11144 return;
11146 /* Print a VFP double precision register name. */
11147 case 'P':
11149 int mode = GET_MODE (x);
11150 int num;
11152 if (mode != DImode && mode != DFmode)
11154 output_operand_lossage ("invalid operand for code '%c'", code);
11155 return;
11158 if (GET_CODE (x) != REG
11159 || !IS_VFP_REGNUM (REGNO (x)))
11161 output_operand_lossage ("invalid operand for code '%c'", code);
11162 return;
11165 num = REGNO (x) - FIRST_VFP_REGNUM;
11166 if (num & 1)
11168 output_operand_lossage ("invalid operand for code '%c'", code);
11169 return;
11172 fprintf (stream, "d%d", num >> 1);
11174 return;
11176 default:
11177 if (x == 0)
11179 output_operand_lossage ("missing operand");
11180 return;
11183 switch (GET_CODE (x))
11185 case REG:
11186 asm_fprintf (stream, "%r", REGNO (x));
11187 break;
11189 case MEM:
11190 output_memory_reference_mode = GET_MODE (x);
11191 output_address (XEXP (x, 0));
11192 break;
11194 case CONST_DOUBLE:
11195 fprintf (stream, "#%s", fp_immediate_constant (x));
11196 break;
11198 default:
11199 gcc_assert (GET_CODE (x) != NEG);
11200 fputc ('#', stream);
11201 output_addr_const (stream, x);
11202 break;
11207 #ifndef AOF_ASSEMBLER
11208 /* Target hook for assembling integer objects. The ARM version needs to
11209 handle word-sized values specially. */
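/* For example, a PIC word reference from the constant pool to a
   locally-bound symbol "foo" is emitted as "\t.word\tfoo(GOTOFF)",
   while a reference to a global symbol gets "(GOT)" instead.  */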
11210 static bool
11211 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
11213 if (size == UNITS_PER_WORD && aligned_p)
11215 fputs ("\t.word\t", asm_out_file);
11216 output_addr_const (asm_out_file, x);
11218 /* Mark symbols as position independent. We only do this in the
11219 .text segment, not in the .data segment. */
11220 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
11221 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
11223 if (GET_CODE (x) == SYMBOL_REF
11224 && (CONSTANT_POOL_ADDRESS_P (x)
11225 || SYMBOL_REF_LOCAL_P (x)))
11226 fputs ("(GOTOFF)", asm_out_file);
11227 else if (GET_CODE (x) == LABEL_REF)
11228 fputs ("(GOTOFF)", asm_out_file);
11229 else
11230 fputs ("(GOT)", asm_out_file);
11232 fputc ('\n', asm_out_file);
11233 return true;
11236 if (arm_vector_mode_supported_p (GET_MODE (x)))
11238 int i, units;
11240 gcc_assert (GET_CODE (x) == CONST_VECTOR);
11242 units = CONST_VECTOR_NUNITS (x);
11244 switch (GET_MODE (x))
11246 case V2SImode: size = 4; break;
11247 case V4HImode: size = 2; break;
11248 case V8QImode: size = 1; break;
11249 default:
11250 gcc_unreachable ();
11253 for (i = 0; i < units; i++)
11255 rtx elt;
11257 elt = CONST_VECTOR_ELT (x, i);
11258 assemble_integer
11259 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
11262 return true;
11265 return default_assemble_integer (x, size, aligned_p);
11269 /* Add a function to the list of static constructors. */
11271 static void
11272 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
11274 if (!TARGET_AAPCS_BASED)
11276 default_named_section_asm_out_constructor (symbol, priority);
11277 return;
11280 /* Put these in the .init_array section, using a special relocation. */
11281 switch_to_section (ctors_section);
11282 assemble_align (POINTER_SIZE);
11283 fputs ("\t.word\t", asm_out_file);
11284 output_addr_const (asm_out_file, symbol);
11285 fputs ("(target1)\n", asm_out_file);
11287 #endif
11289 /* A finite state machine takes care of noticing whether or not instructions
11290 can be conditionally executed, and thus decreases execution time and code
11291 size by deleting branch instructions. The fsm is controlled by
11292 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
11294 /* The states of the fsm controlling condition codes are:
11295 0: normal, do nothing special
11296 1: make ASM_OUTPUT_OPCODE not output this instruction
11297 2: make ASM_OUTPUT_OPCODE not output this instruction
11298 3: make instructions conditional
11299 4: make instructions conditional
11301 State transitions (state->state by whom under condition):
11302 0 -> 1 final_prescan_insn if the `target' is a label
11303 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
11304 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
11305 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
11306 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
11307 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
11308 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
11309 (the target insn is arm_target_insn).
11311 If the jump clobbers the conditions then we use states 2 and 4.
11313 A similar thing can be done with conditional return insns.
11315 XXX In case the `target' is an unconditional branch, this conditionalising
11316 of the instructions always reduces code size, but not always execution
11317 time. But then, I want to reduce the code size to somewhere near what
11318 /bin/cc produces. */
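/* As an illustration, when the fsm succeeds it transforms

        cmp     r0, #0
        beq     .L1
        mov     r1, #1
   .L1:

   into

        cmp     r0, #0
        movne   r1, #1

   eliminating the branch entirely.  */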
11320 /* Returns the index of the ARM condition code string in
11321 `arm_condition_codes'. COMPARISON should be an rtx like
11322 `(eq (...) (...))'. */
11323 static enum arm_cond_code
11324 get_arm_condition_code (rtx comparison)
11326 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11327 int code;
11328 enum rtx_code comp_code = GET_CODE (comparison);
11330 if (GET_MODE_CLASS (mode) != MODE_CC)
11331 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11332 XEXP (comparison, 1));
11334 switch (mode)
11336 case CC_DNEmode: code = ARM_NE; goto dominance;
11337 case CC_DEQmode: code = ARM_EQ; goto dominance;
11338 case CC_DGEmode: code = ARM_GE; goto dominance;
11339 case CC_DGTmode: code = ARM_GT; goto dominance;
11340 case CC_DLEmode: code = ARM_LE; goto dominance;
11341 case CC_DLTmode: code = ARM_LT; goto dominance;
11342 case CC_DGEUmode: code = ARM_CS; goto dominance;
11343 case CC_DGTUmode: code = ARM_HI; goto dominance;
11344 case CC_DLEUmode: code = ARM_LS; goto dominance;
11345 case CC_DLTUmode: code = ARM_CC;
11347 dominance:
11348 gcc_assert (comp_code == EQ || comp_code == NE);
11350 if (comp_code == EQ)
11351 return ARM_INVERSE_CONDITION_CODE (code);
11352 return code;
11354 case CC_NOOVmode:
11355 switch (comp_code)
11357 case NE: return ARM_NE;
11358 case EQ: return ARM_EQ;
11359 case GE: return ARM_PL;
11360 case LT: return ARM_MI;
11361 default: gcc_unreachable ();
11364 case CC_Zmode:
11365 switch (comp_code)
11367 case NE: return ARM_NE;
11368 case EQ: return ARM_EQ;
11369 default: gcc_unreachable ();
11372 case CC_Nmode:
11373 switch (comp_code)
11375 case NE: return ARM_MI;
11376 case EQ: return ARM_PL;
11377 default: gcc_unreachable ();
11380 case CCFPEmode:
11381 case CCFPmode:
11382 /* These encodings assume that AC=1 in the FPA system control
11383 byte. This allows us to handle all cases except UNEQ and
11384 LTGT. */
11385 switch (comp_code)
11387 case GE: return ARM_GE;
11388 case GT: return ARM_GT;
11389 case LE: return ARM_LS;
11390 case LT: return ARM_MI;
11391 case NE: return ARM_NE;
11392 case EQ: return ARM_EQ;
11393 case ORDERED: return ARM_VC;
11394 case UNORDERED: return ARM_VS;
11395 case UNLT: return ARM_LT;
11396 case UNLE: return ARM_LE;
11397 case UNGT: return ARM_HI;
11398 case UNGE: return ARM_PL;
11399 /* UNEQ and LTGT do not have a representation. */
11400 case UNEQ: /* Fall through. */
11401 case LTGT: /* Fall through. */
11402 default: gcc_unreachable ();
11405 case CC_SWPmode:
11406 switch (comp_code)
11408 case NE: return ARM_NE;
11409 case EQ: return ARM_EQ;
11410 case GE: return ARM_LE;
11411 case GT: return ARM_LT;
11412 case LE: return ARM_GE;
11413 case LT: return ARM_GT;
11414 case GEU: return ARM_LS;
11415 case GTU: return ARM_CC;
11416 case LEU: return ARM_CS;
11417 case LTU: return ARM_HI;
11418 default: gcc_unreachable ();
11421 case CC_Cmode:
11422 switch (comp_code)
11424 case LTU: return ARM_CS;
11425 case GEU: return ARM_CC;
11426 default: gcc_unreachable ();
11429 case CCmode:
11430 switch (comp_code)
11432 case NE: return ARM_NE;
11433 case EQ: return ARM_EQ;
11434 case GE: return ARM_GE;
11435 case GT: return ARM_GT;
11436 case LE: return ARM_LE;
11437 case LT: return ARM_LT;
11438 case GEU: return ARM_CS;
11439 case GTU: return ARM_HI;
11440 case LEU: return ARM_LS;
11441 case LTU: return ARM_CC;
11442 default: gcc_unreachable ();
11445 default: gcc_unreachable ();
11449 void
11450 arm_final_prescan_insn (rtx insn)
11452 /* BODY will hold the body of INSN. */
11453 rtx body = PATTERN (insn);
11455 /* This will be 1 if trying to repeat the trick, and things need to be
11456 reversed if it appears to fail. */
11457 int reverse = 0;
11459 /* JUMP_CLOBBERS will be one if the condition codes are clobbered when
11460 the branch is taken, even if the rtl suggests otherwise. It also
11461 means that we have to grub around within the jump expression to find
11462 out what the conditions are when the jump isn't taken. */
11463 int jump_clobbers = 0;
11465 /* If we start with a return insn, we only succeed if we find another one. */
11466 int seeking_return = 0;
11468 /* START_INSN will hold the insn from where we start looking. This is the
11469 first insn after the following code_label if REVERSE is true. */
11470 rtx start_insn = insn;
11472 /* If in state 4, check if the target branch is reached, in order to
11473 change back to state 0. */
11474 if (arm_ccfsm_state == 4)
11476 if (insn == arm_target_insn)
11478 arm_target_insn = NULL;
11479 arm_ccfsm_state = 0;
11481 return;
11484 /* If in state 3, it is possible to repeat the trick, if this insn is an
11485 unconditional branch to a label, and immediately following this branch
11486 is the previous target label which is only used once, and the label this
11487 branch jumps to is not too far off. */
11488 if (arm_ccfsm_state == 3)
11490 if (simplejump_p (insn))
11492 start_insn = next_nonnote_insn (start_insn);
11493 if (GET_CODE (start_insn) == BARRIER)
11495 /* XXX Isn't this always a barrier? */
11496 start_insn = next_nonnote_insn (start_insn);
11498 if (GET_CODE (start_insn) == CODE_LABEL
11499 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11500 && LABEL_NUSES (start_insn) == 1)
11501 reverse = TRUE;
11502 else
11503 return;
11505 else if (GET_CODE (body) == RETURN)
11507 start_insn = next_nonnote_insn (start_insn);
11508 if (GET_CODE (start_insn) == BARRIER)
11509 start_insn = next_nonnote_insn (start_insn);
11510 if (GET_CODE (start_insn) == CODE_LABEL
11511 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11512 && LABEL_NUSES (start_insn) == 1)
11514 reverse = TRUE;
11515 seeking_return = 1;
11517 else
11518 return;
11520 else
11521 return;
11524 gcc_assert (!arm_ccfsm_state || reverse);
11525 if (GET_CODE (insn) != JUMP_INSN)
11526 return;
11528 /* This jump might be paralleled with a clobber of the condition codes;
11529 the jump should always come first. */
11530 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11531 body = XVECEXP (body, 0, 0);
11533 if (reverse
11534 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11535 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11537 int insns_skipped;
11538 int fail = FALSE, succeed = FALSE;
11539 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11540 int then_not_else = TRUE;
11541 rtx this_insn = start_insn, label = 0;
11543 /* If the jump cannot be done with one instruction, we cannot
11544 conditionally execute the instruction in the inverse case. */
11545 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11547 jump_clobbers = 1;
11548 return;
11551 /* Register the insn jumped to. */
11552 if (reverse)
11554 if (!seeking_return)
11555 label = XEXP (SET_SRC (body), 0);
11557 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11558 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11559 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11561 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11562 then_not_else = FALSE;
11564 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11565 seeking_return = 1;
11566 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11568 seeking_return = 1;
11569 then_not_else = FALSE;
11571 else
11572 gcc_unreachable ();
11574 /* See how many insns this branch skips, and what kind of insns. If all
11575 insns are okay, and the label or unconditional branch to the same
11576 label is not too far away, succeed. */
11577 for (insns_skipped = 0;
11578 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11580 rtx scanbody;
11582 this_insn = next_nonnote_insn (this_insn);
11583 if (!this_insn)
11584 break;
11586 switch (GET_CODE (this_insn))
11588 case CODE_LABEL:
11589 /* Succeed if it is the target label, otherwise fail since
11590 control falls in from somewhere else. */
11591 if (this_insn == label)
11593 if (jump_clobbers)
11595 arm_ccfsm_state = 2;
11596 this_insn = next_nonnote_insn (this_insn);
11598 else
11599 arm_ccfsm_state = 1;
11600 succeed = TRUE;
11602 else
11603 fail = TRUE;
11604 break;
11606 case BARRIER:
11607 /* Succeed if the following insn is the target label.
11608 Otherwise fail.
11609 If return insns are used then the last insn in a function
11610 will be a barrier. */
11611 this_insn = next_nonnote_insn (this_insn);
11612 if (this_insn && this_insn == label)
11614 if (jump_clobbers)
11616 arm_ccfsm_state = 2;
11617 this_insn = next_nonnote_insn (this_insn);
11619 else
11620 arm_ccfsm_state = 1;
11621 succeed = TRUE;
11623 else
11624 fail = TRUE;
11625 break;
11627 case CALL_INSN:
11628 /* The AAPCS says that conditional calls should not be
11629 used since they make interworking inefficient (the
11630 linker can't transform BL<cond> into BLX). That's
11631 only a problem if the machine has BLX. */
11632 if (arm_arch5)
11634 fail = TRUE;
11635 break;
11638 /* Succeed if the following insn is the target label, or
11639 if the following two insns are a barrier and the
11640 target label. */
11641 this_insn = next_nonnote_insn (this_insn);
11642 if (this_insn && GET_CODE (this_insn) == BARRIER)
11643 this_insn = next_nonnote_insn (this_insn);
11645 if (this_insn && this_insn == label
11646 && insns_skipped < max_insns_skipped)
11648 if (jump_clobbers)
11650 arm_ccfsm_state = 2;
11651 this_insn = next_nonnote_insn (this_insn);
11653 else
11654 arm_ccfsm_state = 1;
11655 succeed = TRUE;
11657 else
11658 fail = TRUE;
11659 break;
11661 case JUMP_INSN:
11662 /* If this is an unconditional branch to the same label, succeed.
11663 If it is to another label, do nothing. If it is conditional,
11664 fail. */
11665 /* XXX Probably, the tests for SET and the PC are
11666 unnecessary. */
11668 scanbody = PATTERN (this_insn);
11669 if (GET_CODE (scanbody) == SET
11670 && GET_CODE (SET_DEST (scanbody)) == PC)
11672 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11673 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11675 arm_ccfsm_state = 2;
11676 succeed = TRUE;
11678 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11679 fail = TRUE;
11681 /* Fail if a conditional return is undesirable (e.g. on a
11682 StrongARM), but still allow this if optimizing for size. */
11683 else if (GET_CODE (scanbody) == RETURN
11684 && !use_return_insn (TRUE, NULL)
11685 && !optimize_size)
11686 fail = TRUE;
11687 else if (GET_CODE (scanbody) == RETURN
11688 && seeking_return)
11690 arm_ccfsm_state = 2;
11691 succeed = TRUE;
11693 else if (GET_CODE (scanbody) == PARALLEL)
11695 switch (get_attr_conds (this_insn))
11697 case CONDS_NOCOND:
11698 break;
11699 default:
11700 fail = TRUE;
11701 break;
11704 else
11705 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11707 break;
11709 case INSN:
11710 /* Instructions using or affecting the condition codes make it
11711 fail. */
11712 scanbody = PATTERN (this_insn);
11713 if (!(GET_CODE (scanbody) == SET
11714 || GET_CODE (scanbody) == PARALLEL)
11715 || get_attr_conds (this_insn) != CONDS_NOCOND)
11716 fail = TRUE;
11718 /* A conditional Cirrus instruction must be followed by
11719 a non-Cirrus instruction. However, since we
11720 conditionalize instructions in this function, and
11721 since by the time we get here we can't add
11722 instructions (nops) because shorten_branches() has
11723 already been called, we simply disable
11724 conditionalizing Cirrus instructions to be safe. */
11725 if (GET_CODE (scanbody) != USE
11726 && GET_CODE (scanbody) != CLOBBER
11727 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11728 fail = TRUE;
11729 break;
11731 default:
11732 break;
11735 if (succeed)
11737 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11738 arm_target_label = CODE_LABEL_NUMBER (label);
11739 else
11741 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11743 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11745 this_insn = next_nonnote_insn (this_insn);
11746 gcc_assert (!this_insn
11747 || (GET_CODE (this_insn) != BARRIER
11748 && GET_CODE (this_insn) != CODE_LABEL));
11750 if (!this_insn)
11752 /* Oh, dear! We ran off the end... give up. */
11753 recog (PATTERN (insn), insn, NULL);
11754 arm_ccfsm_state = 0;
11755 arm_target_insn = NULL;
11756 return;
11758 arm_target_insn = this_insn;
11760 if (jump_clobbers)
11762 gcc_assert (!reverse);
11763 arm_current_cc =
11764 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11765 0), 0), 1));
11766 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11767 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11768 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11769 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11771 else
11773 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11774 what it was. */
11775 if (!reverse)
11776 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11777 0));
11780 if (reverse || then_not_else)
11781 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11784 /* Restore recog_data (getting the attributes of other insns can
11785 destroy this array, but final.c assumes that it remains intact
11786 across this call; since the insn has been recognized already we
11787 call recog directly). */
11788 recog (PATTERN (insn), insn, NULL);
11792 /* Returns true if REGNO is a valid register
11793 for holding a quantity of type MODE. */
11794 int
11795 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11797 if (GET_MODE_CLASS (mode) == MODE_CC)
11798 return (regno == CC_REGNUM
11799 || (TARGET_HARD_FLOAT && TARGET_VFP
11800 && regno == VFPCC_REGNUM));
11802 if (TARGET_THUMB)
11803 /* For the Thumb we only allow values bigger than SImode in
11804 registers 0 - 6, so that there is always a second low
11805 register available to hold the upper part of the value.
11806 We probably ought to ensure that the register is the
11807 start of an even-numbered register pair. */
11808 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11810 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
11811 && IS_CIRRUS_REGNUM (regno))
11812 /* We have outlawed SI values in Cirrus registers because they
11813 reside in the lower 32 bits, but SF values reside in the
11814 upper 32 bits. This causes gcc all sorts of grief. We can't
11815 even split the registers into pairs because Cirrus SI values
11816 get sign extended to 64 bits -- aldyh. */
11817 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11819 if (TARGET_HARD_FLOAT && TARGET_VFP
11820 && IS_VFP_REGNUM (regno))
11822 if (mode == SFmode || mode == SImode)
11823 return TRUE;
11825 /* DFmode values are only valid in even register pairs. */
11826 if (mode == DFmode)
11827 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11828 return FALSE;
11831 if (TARGET_REALLY_IWMMXT)
11833 if (IS_IWMMXT_GR_REGNUM (regno))
11834 return mode == SImode;
11836 if (IS_IWMMXT_REGNUM (regno))
11837 return VALID_IWMMXT_REG_MODE (mode);
11840 /* We allow any value to be stored in the general registers.
11841 Restrict doubleword quantities to even register pairs so that we can
11842 use ldrd. */
11843 if (regno <= LAST_ARM_REGNUM)
11844 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11846 if (regno == FRAME_POINTER_REGNUM
11847 || regno == ARG_POINTER_REGNUM)
11848 /* We only allow integers in the fake hard registers. */
11849 return GET_MODE_CLASS (mode) == MODE_INT;
11851 /* The only registers left are the FPA registers
11852 which we only allow to hold FP values. */
11853 return (TARGET_HARD_FLOAT && TARGET_FPA
11854 && GET_MODE_CLASS (mode) == MODE_FLOAT
11855 && regno >= FIRST_FPA_REGNUM
11856 && regno <= LAST_FPA_REGNUM);
11859 enum reg_class
11860 arm_regno_class (int regno)
11862 if (TARGET_THUMB)
11864 if (regno == STACK_POINTER_REGNUM)
11865 return STACK_REG;
11866 if (regno == CC_REGNUM)
11867 return CC_REG;
11868 if (regno < 8)
11869 return LO_REGS;
11870 return HI_REGS;
11873 if ( regno <= LAST_ARM_REGNUM
11874 || regno == FRAME_POINTER_REGNUM
11875 || regno == ARG_POINTER_REGNUM)
11876 return GENERAL_REGS;
11878 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11879 return NO_REGS;
11881 if (IS_CIRRUS_REGNUM (regno))
11882 return CIRRUS_REGS;
11884 if (IS_VFP_REGNUM (regno))
11885 return VFP_REGS;
11887 if (IS_IWMMXT_REGNUM (regno))
11888 return IWMMXT_REGS;
11890 if (IS_IWMMXT_GR_REGNUM (regno))
11891 return IWMMXT_GR_REGS;
11893 return FPA_REGS;
11896 /* Handle a special case when computing the offset
11897 of an argument from the frame pointer. */
11898 int
11899 arm_debugger_arg_offset (int value, rtx addr)
11901 rtx insn;
11903 /* We are only interested if dbxout_parms() failed to compute the offset. */
11904 if (value != 0)
11905 return 0;
11907 /* We can only cope with the case where the address is held in a register. */
11908 if (GET_CODE (addr) != REG)
11909 return 0;
11911 /* If we are using the frame pointer to point at the argument, then
11912 an offset of 0 is correct. */
11913 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11914 return 0;
11916 /* If we are using the stack pointer to point at the
11917 argument, then an offset of 0 is correct. */
11918 if ((TARGET_THUMB || !frame_pointer_needed)
11919 && REGNO (addr) == SP_REGNUM)
11920 return 0;
11922 /* Oh dear. The argument is pointed to by a register rather
11923 than being held in a register, or being stored at a known
11924 offset from the frame pointer. Since GDB only understands
11925 those two kinds of argument we must translate the address
11926 held in the register into an offset from the frame pointer.
11927 We do this by searching through the insns for the function
11928 looking to see where this register gets its value. If the
11929 register is initialized from the frame pointer plus an offset
11930 then we are in luck and we can continue, otherwise we give up.
11932 This code is exercised by producing debugging information
11933 for a function with arguments like this:
11935 double func (double a, double b, int c, double d) {return d;}
11937 Without this code the stab for parameter 'd' will be set to
11938 an offset of 0 from the frame pointer, rather than 8. */
11940 /* The if() statement says:
11942 If the insn is a normal instruction
11943 and if the insn is setting the value in a register
11944 and if the register being set is the register holding the address of the argument
11945 and if the address is computed by an addition
11946 that involves adding to a register
11947 which is the frame pointer
11948 a constant integer
11950 then... */
11952 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11954 if ( GET_CODE (insn) == INSN
11955 && GET_CODE (PATTERN (insn)) == SET
11956 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11957 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11958 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11959 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11960 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11963 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11965 break;
11969 if (value == 0)
11971 debug_rtx (addr);
11972 warning (0, "unable to compute real location of stacked parameter");
11973 value = 8; /* XXX magic hack */
11976 return value;
11979 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11980 do \
11982 if ((MASK) & insn_flags) \
11983 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11984 BUILT_IN_MD, NULL, NULL_TREE); \
11986 while (0)
11988 struct builtin_description
11990 const unsigned int mask;
11991 const enum insn_code icode;
11992 const char * const name;
11993 const enum arm_builtins code;
11994 const enum rtx_code comparison;
11995 const unsigned int flag;
11998 static const struct builtin_description bdesc_2arg[] =
12000 #define IWMMXT_BUILTIN(code, string, builtin) \
12001 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
12002 ARM_BUILTIN_##builtin, 0, 0 },
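/* For example, IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB) expands to
   { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
   ARM_BUILTIN_WADDB, 0, 0 },  */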
12004 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
12005 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
12006 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
12007 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
12008 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
12009 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
12010 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
12011 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
12012 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
12013 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
12014 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
12015 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
12016 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
12017 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
12018 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
12019 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
12020 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
12021 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
12022 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
12023 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
12024 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
12025 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
12026 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
12027 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
12028 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
12029 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
12030 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
12031 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
12032 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
12033 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
12034 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
12035 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
12036 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
12037 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
12038 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
12039 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
12040 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
12041 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
12042 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
12043 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
12044 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
12045 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
12046 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
12047 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
12048 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
12049 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
12050 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
12051 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
12052 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
12053 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
12054 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
12055 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
12056 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
12057 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
12058 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
12059 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
12060 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
12061 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
12063 #define IWMMXT_BUILTIN2(code, builtin) \
12064 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
12066 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
12067 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
12068 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
12069 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
12070 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
12071 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
12072 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
12073 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
12074 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
12075 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
12076 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
12077 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
12078 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
12079 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
12080 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
12081 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
12082 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
12083 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
12084 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
12085 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
12086 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
12087 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
12088 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
12089 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
12090 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
12091 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
12092 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
12093 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
12094 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
12095 IWMMXT_BUILTIN2 (rordi3, WRORDI)
12096 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
12097 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
12100 static const struct builtin_description bdesc_1arg[] =
12102 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
12103 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
12104 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
12105 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
12106 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
12107 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
12108 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
12109 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
12110 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
12111 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
12112 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
12113 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
12114 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
12115 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
12116 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
12117 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
12118 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
12119 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
12122 /* Set up all the iWMMXt builtins. This is
12123 not called if TARGET_IWMMXT is zero. */
12125 static void
12126 arm_init_iwmmxt_builtins (void)
12128 const struct builtin_description * d;
12129 size_t i;
12130 tree endlink = void_list_node;
12132 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
12133 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
12134 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
12136 tree int_ftype_int
12137 = build_function_type (integer_type_node,
12138 tree_cons (NULL_TREE, integer_type_node, endlink));
12139 tree v8qi_ftype_v8qi_v8qi_int
12140 = build_function_type (V8QI_type_node,
12141 tree_cons (NULL_TREE, V8QI_type_node,
12142 tree_cons (NULL_TREE, V8QI_type_node,
12143 tree_cons (NULL_TREE,
12144 integer_type_node,
12145 endlink))));
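/* The tree_cons chains above and below read like C prototypes; e.g.
   v8qi_ftype_v8qi_v8qi_int describes

       V8QI f (V8QI, V8QI, int);

   with endlink (void_list_node) terminating the argument list.  */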
12146 tree v4hi_ftype_v4hi_int
12147 = build_function_type (V4HI_type_node,
12148 tree_cons (NULL_TREE, V4HI_type_node,
12149 tree_cons (NULL_TREE, integer_type_node,
12150 endlink)));
12151 tree v2si_ftype_v2si_int
12152 = build_function_type (V2SI_type_node,
12153 tree_cons (NULL_TREE, V2SI_type_node,
12154 tree_cons (NULL_TREE, integer_type_node,
12155 endlink)));
12156 tree v2si_ftype_di_di
12157 = build_function_type (V2SI_type_node,
12158 tree_cons (NULL_TREE, long_long_integer_type_node,
12159 tree_cons (NULL_TREE, long_long_integer_type_node,
12160 endlink)));
12161 tree di_ftype_di_int
12162 = build_function_type (long_long_integer_type_node,
12163 tree_cons (NULL_TREE, long_long_integer_type_node,
12164 tree_cons (NULL_TREE, integer_type_node,
12165 endlink)));
12166 tree di_ftype_di_int_int
12167 = build_function_type (long_long_integer_type_node,
12168 tree_cons (NULL_TREE, long_long_integer_type_node,
12169 tree_cons (NULL_TREE, integer_type_node,
12170 tree_cons (NULL_TREE,
12171 integer_type_node,
12172 endlink))));
12173 tree int_ftype_v8qi
12174 = build_function_type (integer_type_node,
12175 tree_cons (NULL_TREE, V8QI_type_node,
12176 endlink));
12177 tree int_ftype_v4hi
12178 = build_function_type (integer_type_node,
12179 tree_cons (NULL_TREE, V4HI_type_node,
12180 endlink));
12181 tree int_ftype_v2si
12182 = build_function_type (integer_type_node,
12183 tree_cons (NULL_TREE, V2SI_type_node,
12184 endlink));
12185 tree int_ftype_v8qi_int
12186 = build_function_type (integer_type_node,
12187 tree_cons (NULL_TREE, V8QI_type_node,
12188 tree_cons (NULL_TREE, integer_type_node,
12189 endlink)));
12190 tree int_ftype_v4hi_int
12191 = build_function_type (integer_type_node,
12192 tree_cons (NULL_TREE, V4HI_type_node,
12193 tree_cons (NULL_TREE, integer_type_node,
12194 endlink)));
12195 tree int_ftype_v2si_int
12196 = build_function_type (integer_type_node,
12197 tree_cons (NULL_TREE, V2SI_type_node,
12198 tree_cons (NULL_TREE, integer_type_node,
12199 endlink)));
12200 tree v8qi_ftype_v8qi_int_int
12201 = build_function_type (V8QI_type_node,
12202 tree_cons (NULL_TREE, V8QI_type_node,
12203 tree_cons (NULL_TREE, integer_type_node,
12204 tree_cons (NULL_TREE,
12205 integer_type_node,
12206 endlink))));
12207 tree v4hi_ftype_v4hi_int_int
12208 = build_function_type (V4HI_type_node,
12209 tree_cons (NULL_TREE, V4HI_type_node,
12210 tree_cons (NULL_TREE, integer_type_node,
12211 tree_cons (NULL_TREE,
12212 integer_type_node,
12213 endlink))));
12214 tree v2si_ftype_v2si_int_int
12215 = build_function_type (V2SI_type_node,
12216 tree_cons (NULL_TREE, V2SI_type_node,
12217 tree_cons (NULL_TREE, integer_type_node,
12218 tree_cons (NULL_TREE,
12219 integer_type_node,
12220 endlink))));
12221 /* Miscellaneous. */
12222 tree v8qi_ftype_v4hi_v4hi
12223 = build_function_type (V8QI_type_node,
12224 tree_cons (NULL_TREE, V4HI_type_node,
12225 tree_cons (NULL_TREE, V4HI_type_node,
12226 endlink)));
12227 tree v4hi_ftype_v2si_v2si
12228 = build_function_type (V4HI_type_node,
12229 tree_cons (NULL_TREE, V2SI_type_node,
12230 tree_cons (NULL_TREE, V2SI_type_node,
12231 endlink)));
12232 tree v2si_ftype_v4hi_v4hi
12233 = build_function_type (V2SI_type_node,
12234 tree_cons (NULL_TREE, V4HI_type_node,
12235 tree_cons (NULL_TREE, V4HI_type_node,
12236 endlink)));
12237 tree v2si_ftype_v8qi_v8qi
12238 = build_function_type (V2SI_type_node,
12239 tree_cons (NULL_TREE, V8QI_type_node,
12240 tree_cons (NULL_TREE, V8QI_type_node,
12241 endlink)));
12242 tree v4hi_ftype_v4hi_di
12243 = build_function_type (V4HI_type_node,
12244 tree_cons (NULL_TREE, V4HI_type_node,
12245 tree_cons (NULL_TREE,
12246 long_long_integer_type_node,
12247 endlink)));
12248 tree v2si_ftype_v2si_di
12249 = build_function_type (V2SI_type_node,
12250 tree_cons (NULL_TREE, V2SI_type_node,
12251 tree_cons (NULL_TREE,
12252 long_long_integer_type_node,
12253 endlink)));
12254 tree void_ftype_int_int
12255 = build_function_type (void_type_node,
12256 tree_cons (NULL_TREE, integer_type_node,
12257 tree_cons (NULL_TREE, integer_type_node,
12258 endlink)));
12259 tree di_ftype_void
12260 = build_function_type (long_long_unsigned_type_node, endlink);
12261 tree di_ftype_v8qi
12262 = build_function_type (long_long_integer_type_node,
12263 tree_cons (NULL_TREE, V8QI_type_node,
12264 endlink));
12265 tree di_ftype_v4hi
12266 = build_function_type (long_long_integer_type_node,
12267 tree_cons (NULL_TREE, V4HI_type_node,
12268 endlink));
12269 tree di_ftype_v2si
12270 = build_function_type (long_long_integer_type_node,
12271 tree_cons (NULL_TREE, V2SI_type_node,
12272 endlink));
12273 tree v2si_ftype_v4hi
12274 = build_function_type (V2SI_type_node,
12275 tree_cons (NULL_TREE, V4HI_type_node,
12276 endlink));
12277 tree v4hi_ftype_v8qi
12278 = build_function_type (V4HI_type_node,
12279 tree_cons (NULL_TREE, V8QI_type_node,
12280 endlink));
12282 tree di_ftype_di_v4hi_v4hi
12283 = build_function_type (long_long_unsigned_type_node,
12284 tree_cons (NULL_TREE,
12285 long_long_unsigned_type_node,
12286 tree_cons (NULL_TREE, V4HI_type_node,
12287 tree_cons (NULL_TREE,
12288 V4HI_type_node,
12289 endlink))));
12291 tree di_ftype_v4hi_v4hi
12292 = build_function_type (long_long_unsigned_type_node,
12293 tree_cons (NULL_TREE, V4HI_type_node,
12294 tree_cons (NULL_TREE, V4HI_type_node,
12295 endlink)));
12297 /* Normal vector binops. */
12298 tree v8qi_ftype_v8qi_v8qi
12299 = build_function_type (V8QI_type_node,
12300 tree_cons (NULL_TREE, V8QI_type_node,
12301 tree_cons (NULL_TREE, V8QI_type_node,
12302 endlink)));
12303 tree v4hi_ftype_v4hi_v4hi
12304 = build_function_type (V4HI_type_node,
12305 tree_cons (NULL_TREE, V4HI_type_node,
12306 tree_cons (NULL_TREE, V4HI_type_node,
12307 endlink)));
12308 tree v2si_ftype_v2si_v2si
12309 = build_function_type (V2SI_type_node,
12310 tree_cons (NULL_TREE, V2SI_type_node,
12311 tree_cons (NULL_TREE, V2SI_type_node,
12312 endlink)));
12313 tree di_ftype_di_di
12314 = build_function_type (long_long_unsigned_type_node,
12315 tree_cons (NULL_TREE, long_long_unsigned_type_node,
12316 tree_cons (NULL_TREE,
12317 long_long_unsigned_type_node,
12318 endlink)));
12320 /* Add all builtins that are more or less simple operations on two
12321 operands. */
12322 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12324 /* Use one of the operands; the target can have a different mode for
12325 mask-generating compares. */
12326 enum machine_mode mode;
12327 tree type;
12329 if (d->name == 0)
12330 continue;
12332 mode = insn_data[d->icode].operand[1].mode;
12334 switch (mode)
12336 case V8QImode:
12337 type = v8qi_ftype_v8qi_v8qi;
12338 break;
12339 case V4HImode:
12340 type = v4hi_ftype_v4hi_v4hi;
12341 break;
12342 case V2SImode:
12343 type = v2si_ftype_v2si_v2si;
12344 break;
12345 case DImode:
12346 type = di_ftype_di_di;
12347 break;
12349 default:
12350 gcc_unreachable ();
12353 def_mbuiltin (d->mask, d->name, type, d->code);
12356 /* Add the remaining MMX insns with somewhat more complicated types. */
12357 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12358 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12359 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12361 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12362 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12363 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12364 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12365 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12366 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12368 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12369 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12370 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12371 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12372 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12373 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12375 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12376 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12377 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12378 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12379 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12380 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12382 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12383 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12384 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12385 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12386 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12387 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12389 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12391 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12392 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12393 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12394 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12396 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12397 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12398 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12399 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12400 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12401 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12402 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12403 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12404 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12406 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12407 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12408 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12410 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12411 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12412 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12414 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12415 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12416 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12417 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12418 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12419 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12421 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12422 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12423 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12424 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12425 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12426 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12427 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12428 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12429 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12430 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12431 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12432 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12434 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12435 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12436 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12437 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12439 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12440 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12441 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12442 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12443 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12444 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12445 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
12448 static void
12449 arm_init_tls_builtins (void)
12451 tree ftype;
12452 tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
12453 tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
12455 ftype = build_function_type (ptr_type_node, void_list_node);
12456 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
12457 ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
12458 NULL, const_nothrow);
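/* Illustrative use of the builtin registered above (assuming a target
   with TLS support):

       void *tp = __builtin_thread_pointer ();

   It takes no arguments and returns the thread pointer as void *, and
   is marked const and nothrow via the attribute chain built above.  */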
12461 static void
12462 arm_init_builtins (void)
12464 arm_init_tls_builtins ();
12466 if (TARGET_REALLY_IWMMXT)
12467 arm_init_iwmmxt_builtins ();
12470 /* Errors in the source file can cause expand_expr to return const0_rtx
12471 where we expect a vector. To avoid crashing, use one of the vector
12472 clear instructions. */
12474 static rtx
12475 safe_vector_operand (rtx x, enum machine_mode mode)
12477 if (x != const0_rtx)
12478 return x;
12479 x = gen_reg_rtx (mode);
12481 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12482 : gen_rtx_SUBREG (DImode, x, 0)));
12483 return x;
12486 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12488 static rtx
12489 arm_expand_binop_builtin (enum insn_code icode,
12490 tree arglist, rtx target)
12492 rtx pat;
12493 tree arg0 = TREE_VALUE (arglist);
12494 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12495 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12496 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12497 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12498 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12499 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12501 if (VECTOR_MODE_P (mode0))
12502 op0 = safe_vector_operand (op0, mode0);
12503 if (VECTOR_MODE_P (mode1))
12504 op1 = safe_vector_operand (op1, mode1);
12506 if (! target
12507 || GET_MODE (target) != tmode
12508 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12509 target = gen_reg_rtx (tmode);
12511 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12513 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12514 op0 = copy_to_mode_reg (mode0, op0);
12515 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12516 op1 = copy_to_mode_reg (mode1, op1);
12518 pat = GEN_FCN (icode) (target, op0, op1);
12519 if (! pat)
12520 return 0;
12521 emit_insn (pat);
12522 return target;
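/* Illustrative only: at the source level a two-operand iWMMXt builtin
   handled by the routine above might be used like this (assuming the
   compiler targets iWMMXt, e.g. -mcpu=iwmmxt):

       typedef int __v2si __attribute__ ((vector_size (8)));

       __v2si add_words (__v2si a, __v2si b)
       {
         return __builtin_arm_waddw (a, b);
       }

   which is expanded through the addv2si3 pattern named in bdesc_2arg.  */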
12525 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12527 static rtx
12528 arm_expand_unop_builtin (enum insn_code icode,
12529 tree arglist, rtx target, int do_load)
12531 rtx pat;
12532 tree arg0 = TREE_VALUE (arglist);
12533 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12534 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12535 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12537 if (! target
12538 || GET_MODE (target) != tmode
12539 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12540 target = gen_reg_rtx (tmode);
12541 if (do_load)
12542 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12543 else
12545 if (VECTOR_MODE_P (mode0))
12546 op0 = safe_vector_operand (op0, mode0);
12548 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12549 op0 = copy_to_mode_reg (mode0, op0);
12552 pat = GEN_FCN (icode) (target, op0);
12553 if (! pat)
12554 return 0;
12555 emit_insn (pat);
12556 return target;
12559 /* Expand an expression EXP that calls a built-in function,
12560 with result going to TARGET if that's convenient
12561 (and in mode MODE if that's convenient).
12562 SUBTARGET may be used as the target for computing one of EXP's operands.
12563 IGNORE is nonzero if the value is to be ignored. */
12565 static rtx
12566 arm_expand_builtin (tree exp,
12567 rtx target,
12568 rtx subtarget ATTRIBUTE_UNUSED,
12569 enum machine_mode mode ATTRIBUTE_UNUSED,
12570 int ignore ATTRIBUTE_UNUSED)
12572 const struct builtin_description * d;
12573 enum insn_code icode;
12574 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12575 tree arglist = TREE_OPERAND (exp, 1);
12576 tree arg0;
12577 tree arg1;
12578 tree arg2;
12579 rtx op0;
12580 rtx op1;
12581 rtx op2;
12582 rtx pat;
12583 int fcode = DECL_FUNCTION_CODE (fndecl);
12584 size_t i;
12585 enum machine_mode tmode;
12586 enum machine_mode mode0;
12587 enum machine_mode mode1;
12588 enum machine_mode mode2;
12590 switch (fcode)
12592 case ARM_BUILTIN_TEXTRMSB:
12593 case ARM_BUILTIN_TEXTRMUB:
12594 case ARM_BUILTIN_TEXTRMSH:
12595 case ARM_BUILTIN_TEXTRMUH:
12596 case ARM_BUILTIN_TEXTRMSW:
12597 case ARM_BUILTIN_TEXTRMUW:
12598 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12599 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12600 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12601 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12602 : CODE_FOR_iwmmxt_textrmw);
12604 arg0 = TREE_VALUE (arglist);
12605 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12606 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12607 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12608 tmode = insn_data[icode].operand[0].mode;
12609 mode0 = insn_data[icode].operand[1].mode;
12610 mode1 = insn_data[icode].operand[2].mode;
12612 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12613 op0 = copy_to_mode_reg (mode0, op0);
12614 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12616 /* @@@ better error message */
12617 error ("selector must be an immediate");
12618 return gen_reg_rtx (tmode);
12620 if (target == 0
12621 || GET_MODE (target) != tmode
12622 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12623 target = gen_reg_rtx (tmode);
12624 pat = GEN_FCN (icode) (target, op0, op1);
12625 if (! pat)
12626 return 0;
12627 emit_insn (pat);
12628 return target;
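/* Illustrative use of the extract cases above: the selector operand
   must be a compile-time constant, so

       int b = __builtin_arm_textrmsb (v, 3);

   (extract byte 3 of v, sign-extended) is accepted, while passing a
   variable selector triggers the "selector must be an immediate"
   error reported above.  */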
12630 case ARM_BUILTIN_TINSRB:
12631 case ARM_BUILTIN_TINSRH:
12632 case ARM_BUILTIN_TINSRW:
12633 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12634 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12635 : CODE_FOR_iwmmxt_tinsrw);
12636 arg0 = TREE_VALUE (arglist);
12637 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12638 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12639 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12640 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12641 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12642 tmode = insn_data[icode].operand[0].mode;
12643 mode0 = insn_data[icode].operand[1].mode;
12644 mode1 = insn_data[icode].operand[2].mode;
12645 mode2 = insn_data[icode].operand[3].mode;
12647 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12648 op0 = copy_to_mode_reg (mode0, op0);
12649 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12650 op1 = copy_to_mode_reg (mode1, op1);
12651 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12653 /* @@@ better error message */
12654 error ("selector must be an immediate");
12655 return const0_rtx;
12657 if (target == 0
12658 || GET_MODE (target) != tmode
12659 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12660 target = gen_reg_rtx (tmode);
12661 pat = GEN_FCN (icode) (target, op0, op1, op2);
12662 if (! pat)
12663 return 0;
12664 emit_insn (pat);
12665 return target;
12667 case ARM_BUILTIN_SETWCX:
12668 arg0 = TREE_VALUE (arglist);
12669 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12670 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12671 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12672 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12673 return 0;
12675 case ARM_BUILTIN_GETWCX:
12676 arg0 = TREE_VALUE (arglist);
12677 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12678 target = gen_reg_rtx (SImode);
12679 emit_insn (gen_iwmmxt_tmrc (target, op0));
12680 return target;
12682 case ARM_BUILTIN_WSHUFH:
12683 icode = CODE_FOR_iwmmxt_wshufh;
12684 arg0 = TREE_VALUE (arglist);
12685 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12686 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12687 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12688 tmode = insn_data[icode].operand[0].mode;
12689 mode1 = insn_data[icode].operand[1].mode;
12690 mode2 = insn_data[icode].operand[2].mode;
12692 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12693 op0 = copy_to_mode_reg (mode1, op0);
12694 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12696 /* @@@ better error message */
12697 error ("mask must be an immediate");
12698 return const0_rtx;
12700 if (target == 0
12701 || GET_MODE (target) != tmode
12702 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12703 target = gen_reg_rtx (tmode);
12704 pat = GEN_FCN (icode) (target, op0, op1);
12705 if (! pat)
12706 return 0;
12707 emit_insn (pat);
12708 return target;
12710 case ARM_BUILTIN_WSADB:
12711 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12712 case ARM_BUILTIN_WSADH:
12713 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12714 case ARM_BUILTIN_WSADBZ:
12715 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12716 case ARM_BUILTIN_WSADHZ:
12717 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12719 /* Several three-argument builtins. */
12720 case ARM_BUILTIN_WMACS:
12721 case ARM_BUILTIN_WMACU:
12722 case ARM_BUILTIN_WALIGN:
12723 case ARM_BUILTIN_TMIA:
12724 case ARM_BUILTIN_TMIAPH:
12725 case ARM_BUILTIN_TMIATT:
12726 case ARM_BUILTIN_TMIATB:
12727 case ARM_BUILTIN_TMIABT:
12728 case ARM_BUILTIN_TMIABB:
12729 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12730 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12731 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12732 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12733 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12734 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12735 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12736 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12737 : CODE_FOR_iwmmxt_walign);
12738 arg0 = TREE_VALUE (arglist);
12739 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12740 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12741 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12742 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12743 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12744 tmode = insn_data[icode].operand[0].mode;
12745 mode0 = insn_data[icode].operand[1].mode;
12746 mode1 = insn_data[icode].operand[2].mode;
12747 mode2 = insn_data[icode].operand[3].mode;
12749 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12750 op0 = copy_to_mode_reg (mode0, op0);
12751 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12752 op1 = copy_to_mode_reg (mode1, op1);
12753 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12754 op2 = copy_to_mode_reg (mode2, op2);
12755 if (target == 0
12756 || GET_MODE (target) != tmode
12757 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12758 target = gen_reg_rtx (tmode);
12759 pat = GEN_FCN (icode) (target, op0, op1, op2);
12760 if (! pat)
12761 return 0;
12762 emit_insn (pat);
12763 return target;
12765 case ARM_BUILTIN_WZERO:
12766 target = gen_reg_rtx (DImode);
12767 emit_insn (gen_iwmmxt_clrdi (target));
12768 return target;
12770 case ARM_BUILTIN_THREAD_POINTER:
12771 return arm_load_tp (target);
12773 default:
12774 break;
12777 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12778 if (d->code == (const enum arm_builtins) fcode)
12779 return arm_expand_binop_builtin (d->icode, arglist, target);
12781 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12782 if (d->code == (const enum arm_builtins) fcode)
12783 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12785 /* @@@ Should really do something sensible here. */
12786 return NULL_RTX;
12789 /* Return the number (counting from 0) of
12790 the least significant set bit in MASK. */
12792 inline static int
12793 number_of_first_bit_set (unsigned mask)
12795 int bit;
12797 for (bit = 0;
12798 (mask & (1 << bit)) == 0;
12799 ++bit)
12800 continue;
12802 return bit;
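/* For example, number_of_first_bit_set (0x28) returns 3, since the
   least significant set bit of binary 101000 is bit 3.  A zero mask
   would loop forever, so callers must pass a nonzero MASK.  */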
12805 /* Emit code to push or pop registers to or from the stack. F is the
12806 assembly file. MASK is the registers to push or pop. PUSH is
12807 nonzero if we should push, and zero if we should pop. For debugging
12808 output, if pushing, adjust CFA_OFFSET by the amount of space added
12809 to the stack. REAL_REGS should have the same number of bits set as
12810 MASK, and will be used instead (in the same order) to describe which
12811 registers were saved - this is used to mark the save slots when we
12812 push high registers after moving them to low registers. */
12813 static void
12814 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12815 unsigned long real_regs)
12817 int regno;
12818 int lo_mask = mask & 0xFF;
12819 int pushed_words = 0;
12821 gcc_assert (mask);
12823 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12825 /* Special case.  Do not generate a POP PC statement here; do it in
12826 thumb_exit ().  */
12827 thumb_exit (f, -1);
12828 return;
12831 if (ARM_EABI_UNWIND_TABLES && push)
12833 fprintf (f, "\t.save\t{");
12834 for (regno = 0; regno < 15; regno++)
12836 if (real_regs & (1 << regno))
12838 if (real_regs & ((1 << regno) -1))
12839 fprintf (f, ", ");
12840 asm_fprintf (f, "%r", regno);
12843 fprintf (f, "}\n");
12846 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12848 /* Look at the low registers first. */
12849 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12851 if (lo_mask & 1)
12853 asm_fprintf (f, "%r", regno);
12855 if ((lo_mask & ~1) != 0)
12856 fprintf (f, ", ");
12858 pushed_words++;
12862 if (push && (mask & (1 << LR_REGNUM)))
12864 /* Catch pushing the LR. */
12865 if (mask & 0xFF)
12866 fprintf (f, ", ");
12868 asm_fprintf (f, "%r", LR_REGNUM);
12870 pushed_words++;
12872 else if (!push && (mask & (1 << PC_REGNUM)))
12874 /* Catch popping the PC. */
12875 if (TARGET_INTERWORK || TARGET_BACKTRACE
12876 || current_function_calls_eh_return)
12878 /* The PC is never popped directly; instead
12879 it is popped into r3 and then BX is used. */
12880 fprintf (f, "}\n");
12882 thumb_exit (f, -1);
12884 return;
12886 else
12888 if (mask & 0xFF)
12889 fprintf (f, ", ");
12891 asm_fprintf (f, "%r", PC_REGNUM);
12895 fprintf (f, "}\n");
12897 if (push && pushed_words && dwarf2out_do_frame ())
12899 char *l = dwarf2out_cfi_label ();
12900 int pushed_mask = real_regs;
12902 *cfa_offset += pushed_words * 4;
12903 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12905 pushed_words = 0;
12906 pushed_mask = real_regs;
12907 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12909 if (pushed_mask & 1)
12910 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
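/* For example (illustrative), a call such as

       thumb_pushpop (f, 0x40f0, 1, &offset, 0x40f0)

   with bits set for r4-r7 and LR emits

       push    {r4, r5, r6, r7, lr}

   plus, for ARM_EABI_UNWIND_TABLES, a matching ".save" directive, and
   advances the CFA offset by the five words pushed when dwarf frame
   information is being generated.  */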
12915 /* Generate code to return from a thumb function.
12916 If 'reg_containing_return_addr' is -1, then the return address is
12917 actually on the stack, at the stack pointer. */
12918 static void
12919 thumb_exit (FILE *f, int reg_containing_return_addr)
12921 unsigned regs_available_for_popping;
12922 unsigned regs_to_pop;
12923 int pops_needed;
12924 unsigned available;
12925 unsigned required;
12926 int mode;
12927 int size;
12928 int restore_a4 = FALSE;
12930 /* Compute the registers we need to pop. */
12931 regs_to_pop = 0;
12932 pops_needed = 0;
12934 if (reg_containing_return_addr == -1)
12936 regs_to_pop |= 1 << LR_REGNUM;
12937 ++pops_needed;
12940 if (TARGET_BACKTRACE)
12942 /* Restore the (ARM) frame pointer and stack pointer. */
12943 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12944 pops_needed += 2;
12947 /* If there is nothing to pop then just emit the BX instruction and
12948 return. */
12949 if (pops_needed == 0)
12951 if (current_function_calls_eh_return)
12952 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12954 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12955 return;
12957 /* Otherwise if we are not supporting interworking and we have not created
12958 a backtrace structure and the function was not entered in ARM mode then
12959 just pop the return address straight into the PC. */
12960 else if (!TARGET_INTERWORK
12961 && !TARGET_BACKTRACE
12962 && !is_called_in_ARM_mode (current_function_decl)
12963 && !current_function_calls_eh_return)
12965 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12966 return;
12969 /* Find out how many of the (return) argument registers we can corrupt. */
12970 regs_available_for_popping = 0;
12972 /* If returning via __builtin_eh_return, the bottom three registers
12973 all contain information needed for the return. */
12974 if (current_function_calls_eh_return)
12975 size = 12;
12976 else
12978 /* Deduce the registers used from the function's
12979 return value.  This is more reliable than examining
12980 regs_ever_live[] because that will be set if the register is
12981 ever used in the function, not just if the register is used
12982 to hold a return value. */
12984 if (current_function_return_rtx != 0)
12985 mode = GET_MODE (current_function_return_rtx);
12986 else
12987 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12989 size = GET_MODE_SIZE (mode);
12991 if (size == 0)
12993 /* In a void function we can use any argument register.
12994 In a function that returns a structure on the stack
12995 we can use the second and third argument registers. */
12996 if (mode == VOIDmode)
12997 regs_available_for_popping =
12998 (1 << ARG_REGISTER (1))
12999 | (1 << ARG_REGISTER (2))
13000 | (1 << ARG_REGISTER (3));
13001 else
13002 regs_available_for_popping =
13003 (1 << ARG_REGISTER (2))
13004 | (1 << ARG_REGISTER (3));
13006 else if (size <= 4)
13007 regs_available_for_popping =
13008 (1 << ARG_REGISTER (2))
13009 | (1 << ARG_REGISTER (3));
13010 else if (size <= 8)
13011 regs_available_for_popping =
13012 (1 << ARG_REGISTER (3));
13015 /* Match registers to be popped with registers into which we pop them. */
13016 for (available = regs_available_for_popping,
13017 required = regs_to_pop;
13018 required != 0 && available != 0;
13019 available &= ~(available & - available),
13020 required &= ~(required & - required))
13021 -- pops_needed;
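/* The step expressions above rely on the identity that X & -X isolates
   the least significant set bit of X, so X &= ~(X & -X) clears it; e.g.
   an AVAILABLE mask of binary 0110 becomes 0100 after one iteration.
   Each iteration therefore pairs one required register with one
   available popping register.  */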
13023 /* If we have any popping registers left over, remove them. */
13024 if (available > 0)
13025 regs_available_for_popping &= ~available;
13027 /* Otherwise if we need another popping register we can use
13028 the fourth argument register. */
13029 else if (pops_needed)
13031 /* If we have not found any free argument registers and
13032 reg a4 contains the return address, we must move it. */
13033 if (regs_available_for_popping == 0
13034 && reg_containing_return_addr == LAST_ARG_REGNUM)
13036 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13037 reg_containing_return_addr = LR_REGNUM;
13039 else if (size > 12)
13041 /* Register a4 is being used to hold part of the return value,
13042 but we have dire need of a free, low register. */
13043 restore_a4 = TRUE;
13045 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
13048 if (reg_containing_return_addr != LAST_ARG_REGNUM)
13050 /* The fourth argument register is available. */
13051 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
13053 --pops_needed;
13057 /* Pop as many registers as we can. */
13058 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13059 regs_available_for_popping);
13061 /* Process the registers we popped. */
13062 if (reg_containing_return_addr == -1)
13064 /* The return address was popped into the lowest numbered register. */
13065 regs_to_pop &= ~(1 << LR_REGNUM);
13067 reg_containing_return_addr =
13068 number_of_first_bit_set (regs_available_for_popping);
13070 /* Remove this register from the mask of available registers, so that
13071 the return address will not be corrupted by further pops. */
13072 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
13075 /* If we popped other registers then handle them here. */
13076 if (regs_available_for_popping)
13078 int frame_pointer;
13080 /* Work out which register currently contains the frame pointer. */
13081 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
13083 /* Move it into the correct place. */
13084 asm_fprintf (f, "\tmov\t%r, %r\n",
13085 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
13087 /* (Temporarily) remove it from the mask of popped registers. */
13088 regs_available_for_popping &= ~(1 << frame_pointer);
13089 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
13091 if (regs_available_for_popping)
13093 int stack_pointer;
13095 /* We popped the stack pointer as well;
13096 find the register that contains it. */
13097 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
13099 /* Move it into the stack register. */
13100 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
13102 /* At this point we have popped all necessary registers, so
13103 do not worry about restoring regs_available_for_popping
13104 to its correct value:
13106 assert (pops_needed == 0)
13107 assert (regs_available_for_popping == (1 << frame_pointer))
13108 assert (regs_to_pop == (1 << STACK_POINTER)) */
13110 else
13112 /* Since we have just moved the popped value into the frame
13113 pointer, the popping register is available for reuse, and
13114 we know that we still have the stack pointer left to pop. */
13115 regs_available_for_popping |= (1 << frame_pointer);
13119 /* If we still have registers left on the stack, but we no longer have
13120 any registers into which we can pop them, then we must move the return
13121 address into the link register and make available the register that
13122 contained it. */
13123 if (regs_available_for_popping == 0 && pops_needed > 0)
13125 regs_available_for_popping |= 1 << reg_containing_return_addr;
13127 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
13128 reg_containing_return_addr);
13130 reg_containing_return_addr = LR_REGNUM;
13133 /* If we have registers left on the stack then pop some more.
13134 We know that at most we will want to pop FP and SP. */
13135 if (pops_needed > 0)
13137 int popped_into;
13138 int move_to;
13140 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13141 regs_available_for_popping);
13143 /* We have popped either FP or SP.
13144 Move whichever one it is into the correct register. */
13145 popped_into = number_of_first_bit_set (regs_available_for_popping);
13146 move_to = number_of_first_bit_set (regs_to_pop);
13148 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
13150 regs_to_pop &= ~(1 << move_to);
13152 --pops_needed;
13155 /* If we still have not popped everything then we must have only
13156 had one register available to us and we are now popping the SP. */
13157 if (pops_needed > 0)
13159 int popped_into;
13161 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13162 regs_available_for_popping);
13164 popped_into = number_of_first_bit_set (regs_available_for_popping);
13166 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
13168 /* assert (regs_to_pop == (1 << STACK_POINTER))
13169 assert (pops_needed == 1) */
13173 /* If necessary restore the a4 register. */
13174 if (restore_a4)
13176 if (reg_containing_return_addr != LR_REGNUM)
13178 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13179 reg_containing_return_addr = LR_REGNUM;
13182 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13185 if (current_function_calls_eh_return)
13186 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13188 /* Return to caller. */
13189 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
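/* For example (illustrative), for a plain function compiled with
   interworking the code above can emit an epilogue along the lines of

       pop     {r3}
       bx      r3

   popping the return address into a corruptible argument register and
   branching through it, since the PC cannot be popped directly on
   this path.  */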
13193 void
13194 thumb_final_prescan_insn (rtx insn)
13196 if (flag_print_asm_name)
13197 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
13198 INSN_ADDRESSES (INSN_UID (insn)));
13201 int
13202 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
13204 unsigned HOST_WIDE_INT mask = 0xff;
13205 int i;
13207 if (val == 0) /* XXX */
13208 return 0;
13210 for (i = 0; i < 25; i++)
13211 if ((val & (mask << i)) == val)
13212 return 1;
13214 return 0;
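/* For example, 0xFF00 (0xFF << 8) and 0x3FC00000 (0xFF << 22) are
   accepted, but 0x1FF is rejected because its set bits span nine bits
   and so cannot fit within a single shifted byte.  */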
13217 /* Returns nonzero if the current function contains,
13218 or might contain a far jump. */
13219 static int
13220 thumb_far_jump_used_p (void)
13222 rtx insn;
13224 /* This test is only important for leaf functions. */
13225 /* assert (!leaf_function_p ()); */
13227 /* If we have already decided that far jumps may be used,
13228 do not bother checking again, and always return true even if
13229 it turns out that they are not being used. Once we have made
13230 the decision that far jumps are present (and that hence the link
13231 register will be pushed onto the stack) we cannot go back on it. */
13232 if (cfun->machine->far_jump_used)
13233 return 1;
13235 /* If this function is not being called from the prologue/epilogue
13236 generation code then it must be being called from the
13237 INITIAL_ELIMINATION_OFFSET macro. */
13238 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
13240 /* In this case we know that we are being asked about the elimination
13241 of the arg pointer register. If that register is not being used,
13242 then there are no arguments on the stack, and we do not have to
13243 worry that a far jump might force the prologue to push the link
13244 register, changing the stack offsets. In this case we can just
13245 return false, since the presence of far jumps in the function will
13246 not affect stack offsets.
13248 If the arg pointer is live (or if it was live, but has now been
13249 eliminated and so set to dead) then we do have to test to see if
13250 the function might contain a far jump. This test can lead to some
13251 false negatives, since before reload is completed, the length of
13252 branch instructions is not known, so gcc defaults to returning their
13253 longest length, which in turn sets the far jump attribute to true.
13255 A false negative will not result in bad code being generated, but it
13256 will result in a needless push and pop of the link register. We
13257 hope that this does not occur too often.
13259 If we need doubleword stack alignment this could affect the other
13260 elimination offsets so we can't risk getting it wrong. */
13261 if (regs_ever_live [ARG_POINTER_REGNUM])
13262 cfun->machine->arg_pointer_live = 1;
13263 else if (!cfun->machine->arg_pointer_live)
13264 return 0;
13267 /* Check to see if the function contains a branch
13268 insn with the far jump attribute set. */
13269 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13271 if (GET_CODE (insn) == JUMP_INSN
13272 /* Ignore tablejump patterns. */
13273 && GET_CODE (PATTERN (insn)) != ADDR_VEC
13274 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
13275 && get_attr_far_jump (insn) == FAR_JUMP_YES
13278 /* Record the fact that we have decided that
13279 the function does use far jumps. */
13280 cfun->machine->far_jump_used = 1;
13281 return 1;
13285 return 0;
13288 /* Return nonzero if FUNC must be entered in ARM mode. */
13289 int
13290 is_called_in_ARM_mode (tree func)
13292 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
13294 /* Ignore the problem of functions whose address is taken. */
13295 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
13296 return TRUE;
13298 #ifdef ARM_PE
13299 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
13300 #else
13301 return FALSE;
13302 #endif
13305 /* The bits which aren't usefully expanded as rtl. */
13306 const char *
13307 thumb_unexpanded_epilogue (void)
13309 int regno;
13310 unsigned long live_regs_mask = 0;
13311 int high_regs_pushed = 0;
13312 int had_to_push_lr;
13313 int size;
13315 if (return_used_this_function)
13316 return "";
13318 if (IS_NAKED (arm_current_func_type ()))
13319 return "";
13321 live_regs_mask = thumb_compute_save_reg_mask ();
13322 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13324 /* Deduce the registers used from the function's return value.
13325 This is more reliable than examining regs_ever_live[] because that
13326 will be set if the register is ever used in the function, not just if
13327 the register is used to hold a return value. */
13328 size = arm_size_return_regs ();
13330 /* The prolog may have pushed some high registers to use as
13331 work registers; e.g. the testsuite file:
13332 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
13333 compiles to produce:
13334 push {r4, r5, r6, r7, lr}
13335 mov r7, r9
13336 mov r6, r8
13337 push {r6, r7}
13338 as part of the prolog. We have to undo that pushing here. */
13340 if (high_regs_pushed)
13342 unsigned long mask = live_regs_mask & 0xff;
13343 int next_hi_reg;
13345 /* The available low registers depend on the size of the value we are
13346 returning. */
13347 if (size <= 12)
13348 mask |= 1 << 3;
13349 if (size <= 8)
13350 mask |= 1 << 2;
13352 if (mask == 0)
13353 /* Oh dear! We have no low registers into which we can pop
13354 high registers! */
13355 internal_error
13356 ("no low registers available for popping high registers");
13358 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13359 if (live_regs_mask & (1 << next_hi_reg))
13360 break;
13362 while (high_regs_pushed)
13364 /* Find lo register(s) into which the high register(s) can
13365 be popped. */
13366 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13368 if (mask & (1 << regno))
13369 high_regs_pushed--;
13370 if (high_regs_pushed == 0)
13371 break;
13374 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13376 /* Pop the values into the low register(s). */
13377 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13379 /* Move the value(s) into the high registers. */
13380 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13382 if (mask & (1 << regno))
13384 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13385 regno);
13387 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13388 if (live_regs_mask & (1 << next_hi_reg))
13389 break;
13393 live_regs_mask &= ~0x0f00;
13396 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13397 live_regs_mask &= 0xff;
13399 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13401 /* Pop the return address into the PC. */
13402 if (had_to_push_lr)
13403 live_regs_mask |= 1 << PC_REGNUM;
13405 /* Either no argument registers were pushed or a backtrace
13406 structure was created which includes an adjusted stack
13407 pointer, so just pop everything. */
13408 if (live_regs_mask)
13409 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13410 live_regs_mask);
13412 /* We have either just popped the return address into the
13413 PC or it was kept in LR for the entire function. */
13414 if (!had_to_push_lr)
13415 thumb_exit (asm_out_file, LR_REGNUM);
13417 else
13419 /* Pop everything but the return address. */
13420 if (live_regs_mask)
13421 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13422 live_regs_mask);
13424 if (had_to_push_lr)
13426 if (size > 12)
13428 /* We have no free low regs, so save one. */
13429 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13430 LAST_ARG_REGNUM);
13433 /* Get the return address into a temporary register. */
13434 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13435 1 << LAST_ARG_REGNUM);
13437 if (size > 12)
13439 /* Move the return address to lr. */
13440 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13441 LAST_ARG_REGNUM);
13442 /* Restore the low register. */
13443 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13444 IP_REGNUM);
13445 regno = LR_REGNUM;
13447 else
13448 regno = LAST_ARG_REGNUM;
13450 else
13451 regno = LR_REGNUM;
13453 /* Remove the argument registers that were pushed onto the stack. */
13454 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13455 SP_REGNUM, SP_REGNUM,
13456 current_function_pretend_args_size);
13458 thumb_exit (asm_out_file, regno);
13461 return "";
13464 /* Functions to save and restore machine-specific function data. */
13465 static struct machine_function *
13466 arm_init_machine_status (void)
13468 struct machine_function *machine;
13469 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13471 #if ARM_FT_UNKNOWN != 0
13472 machine->func_type = ARM_FT_UNKNOWN;
13473 #endif
13474 return machine;
13477 /* Return an RTX indicating where the return address to the
13478 calling function can be found. */
13479 rtx
13480 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13482 if (count != 0)
13483 return NULL_RTX;
13485 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13488 /* Do anything needed before RTL is emitted for each function. */
13489 void
13490 arm_init_expanders (void)
13492 /* Arrange to initialize and mark the machine per-function status. */
13493 init_machine_status = arm_init_machine_status;
13495 /* This is to stop the combine pass optimizing away the alignment
13496 adjustment of va_arg. */
13497 /* ??? It is claimed that this should not be necessary. */
13498 if (cfun)
13499 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13503 /* Like arm_compute_initial_elimination_offset.  Simpler because there
13504 isn't an ABI-specified frame pointer for Thumb.  Instead, we set it
13505 to point at the base of the local variables after static stack
13506 space for a function has been allocated. */
13508 HOST_WIDE_INT
13509 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13511 arm_stack_offsets *offsets;
13513 offsets = arm_get_frame_offsets ();
13515 switch (from)
13517 case ARG_POINTER_REGNUM:
13518 switch (to)
13520 case STACK_POINTER_REGNUM:
13521 return offsets->outgoing_args - offsets->saved_args;
13523 case FRAME_POINTER_REGNUM:
13524 return offsets->soft_frame - offsets->saved_args;
13526 case ARM_HARD_FRAME_POINTER_REGNUM:
13527 return offsets->saved_regs - offsets->saved_args;
13529 case THUMB_HARD_FRAME_POINTER_REGNUM:
13530 return offsets->locals_base - offsets->saved_args;
13532 default:
13533 gcc_unreachable ();
13535 break;
13537 case FRAME_POINTER_REGNUM:
13538 switch (to)
13540 case STACK_POINTER_REGNUM:
13541 return offsets->outgoing_args - offsets->soft_frame;
13543 case ARM_HARD_FRAME_POINTER_REGNUM:
13544 return offsets->saved_regs - offsets->soft_frame;
13546 case THUMB_HARD_FRAME_POINTER_REGNUM:
13547 return offsets->locals_base - offsets->soft_frame;
13549 default:
13550 gcc_unreachable ();
13552 break;
13554 default:
13555 gcc_unreachable ();
13560 /* Generate the rest of a function's prologue. */
13561 void
13562 thumb_expand_prologue (void)
13564 rtx insn, dwarf;
13566 HOST_WIDE_INT amount;
13567 arm_stack_offsets *offsets;
13568 unsigned long func_type;
13569 int regno;
13570 unsigned long live_regs_mask;
13572 func_type = arm_current_func_type ();
13574 /* Naked functions don't have prologues. */
13575 if (IS_NAKED (func_type))
13576 return;
13578 if (IS_INTERRUPT (func_type))
13580 error ("interrupt Service Routines cannot be coded in Thumb mode");
13581 return;
13584 live_regs_mask = thumb_compute_save_reg_mask ();
13585 /* Load the pic register before setting the frame pointer,
13586 so we can use r7 as a temporary work register. */
13587 if (flag_pic)
13588 arm_load_pic_register (live_regs_mask);
13590 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
13591 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13592 stack_pointer_rtx);
13594 offsets = arm_get_frame_offsets ();
13595 amount = offsets->outgoing_args - offsets->saved_regs;
13596 if (amount)
13598 if (amount < 512)
13600 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13601 GEN_INT (- amount)));
13602 RTX_FRAME_RELATED_P (insn) = 1;
13604 else
13606 rtx reg;
13608 /* The stack decrement is too big for an immediate value in a single
13609 insn. In theory we could issue multiple subtracts, but after
13610 three of them it becomes more space efficient to place the full
13611 value in the constant pool and load into a register. (Also the
13612 ARM debugger really likes to see only one stack decrement per
13613 function). So instead we look for a scratch register into which
13614 we can load the decrement, and then we subtract this from the
13615 stack pointer. Unfortunately on the thumb the only available
13616 scratch registers are the argument registers, and we cannot use
13617 these as they may hold arguments to the function. Instead we
13618 attempt to locate a call preserved register which is used by this
13619 function. If we can find one, then we know that it will have
13620 been pushed at the start of the prologue and so we can corrupt
13621 it now. */
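/* Illustratively, if r4 is found to be live below and AMOUNT is 2048,
   the emitted sequence is roughly

       ldr     r4, .Lnnn       (.Lnnn holding the constant -2048)
       add     sp, sp, r4

   where r4 may be corrupted because the prologue has already pushed
   it and the epilogue will restore its real value.  */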
13622 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13623 if (live_regs_mask & (1 << regno)
13624 && !(frame_pointer_needed
13625 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13626 break;
13628 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13630 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13632 /* Choose an arbitrary, non-argument low register. */
13633 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13635 /* Save it by copying it into a high, scratch register. */
13636 emit_insn (gen_movsi (spare, reg));
13637 /* Add a USE to stop propagate_one_insn() from barfing. */
13638 emit_insn (gen_prologue_use (spare));
13640 /* Decrement the stack. */
13641 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13642 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13643 stack_pointer_rtx, reg));
13644 RTX_FRAME_RELATED_P (insn) = 1;
13645 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13646 plus_constant (stack_pointer_rtx,
13647 -amount));
13648 RTX_FRAME_RELATED_P (dwarf) = 1;
13649 REG_NOTES (insn)
13650 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13651 REG_NOTES (insn));
13653 /* Restore the low register's original value. */
13654 emit_insn (gen_movsi (reg, spare));
13656 /* Emit a USE of the restored scratch register, so that flow
13657 analysis will not consider the restore redundant. The
13658 register won't be used again in this function and isn't
13659 restored by the epilogue. */
13660 emit_insn (gen_prologue_use (reg));
13662 else
13664 reg = gen_rtx_REG (SImode, regno);
13666 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13668 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13669 stack_pointer_rtx, reg));
13670 RTX_FRAME_RELATED_P (insn) = 1;
13671 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13672 plus_constant (stack_pointer_rtx,
13673 -amount));
13674 RTX_FRAME_RELATED_P (dwarf) = 1;
13675 REG_NOTES (insn)
13676 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13677 REG_NOTES (insn));
13682 if (frame_pointer_needed)
13684 amount = offsets->outgoing_args - offsets->locals_base;
13686 if (amount < 1024)
13687 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13688 stack_pointer_rtx, GEN_INT (amount)));
13689 else
13691 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
13692 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13693 hard_frame_pointer_rtx,
13694 stack_pointer_rtx));
13695 dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
13696 plus_constant (stack_pointer_rtx, amount));
13697 RTX_FRAME_RELATED_P (dwarf) = 1;
13698 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13699 REG_NOTES (insn));
13702 RTX_FRAME_RELATED_P (insn) = 1;
13705 /* If we are profiling, make sure no instructions are scheduled before
13706 the call to mcount. Similarly if the user has requested no
13707 scheduling in the prolog. Similarly if we want non-call exceptions
13708 using the EABI unwinder, to prevent faulting instructions from being
13709 swapped with a stack adjustment. */
13710 if (current_function_profile || !TARGET_SCHED_PROLOG
13711 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
13712 emit_insn (gen_blockage ());
13714 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13715 if (live_regs_mask & 0xff)
13716 cfun->machine->lr_save_eliminated = 0;
13718 /* If the link register is being kept alive, with the return address in it,
13719 then make sure that it does not get reused by the ce2 pass. */
13720 if (cfun->machine->lr_save_eliminated)
13721 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
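/* Illustrative expansion of the large-decrement case above, assuming
   amount == 1024 and r4 live (and therefore already pushed) in this
   function:

       ldr  r4, =-1024     @ gen_movsi; the value comes from the pool
       add  sp, sp, r4     @ the stack decrement itself

   together with a REG_FRAME_RELATED_EXPR note telling the unwinder
   that the addition means sp = sp - 1024; r4 itself is recovered by
   the epilogue pop. */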
13725 void
13726 thumb_expand_epilogue (void)
13728 HOST_WIDE_INT amount;
13729 arm_stack_offsets *offsets;
13730 int regno;
13732 /* Naked functions don't have epilogues. */
13733 if (IS_NAKED (arm_current_func_type ()))
13734 return;
13736 offsets = arm_get_frame_offsets ();
13737 amount = offsets->outgoing_args - offsets->saved_regs;
13739 if (frame_pointer_needed)
13741 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13742 amount = offsets->locals_base - offsets->saved_regs;
13745 if (amount)
13747 if (amount < 512)
13748 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13749 GEN_INT (amount)));
13750 else
13752 /* r3 is always free in the epilogue. */
13753 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13755 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13756 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13760 /* Emit a USE (stack_pointer_rtx), so that
13761 the stack adjustment will not be deleted. */
13762 emit_insn (gen_prologue_use (stack_pointer_rtx));
13764 if (current_function_profile || !TARGET_SCHED_PROLOG)
13765 emit_insn (gen_blockage ());
13767 /* Emit a clobber for each register that will be restored in the epilogue,
13768 so that flow2 will get register lifetimes correct. */
13769 for (regno = 0; regno < 13; regno++)
13770 if (regs_ever_live[regno] && !call_used_regs[regno])
13771 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13773 if (! regs_ever_live[LR_REGNUM])
13774 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
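/* As in the prologue, a stack increment of 512 or more cannot be
   encoded as an immediate; for example, amount == 1024 is loaded into
   r3 (LAST_ARG_REGNUM, free in the epilogue as noted above) and then
   added to the stack pointer. */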
13777 static void
13778 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13780 unsigned long live_regs_mask = 0;
13781 unsigned long l_mask;
13782 unsigned high_regs_pushed = 0;
13783 int cfa_offset = 0;
13784 int regno;
13786 if (IS_NAKED (arm_current_func_type ()))
13787 return;
13789 if (is_called_in_ARM_mode (current_function_decl))
13791 const char * name;
13793 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13794 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13795 == SYMBOL_REF);
13796 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13798 /* Generate code sequence to switch us into Thumb mode. */
13799 /* The .code 32 directive has already been emitted by
13800 ASM_DECLARE_FUNCTION_NAME. */
13801 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13802 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13804 /* Generate a label, so that the debugger will notice the
13805 change in instruction sets. This label is also used by
13806 the assembler to bypass the ARM code when this function
13807 is called from a Thumb encoded function elsewhere in the
13808 same file. Hence the definition of STUB_NAME here must
13809 agree with the definition in gas/config/tc-arm.c. */
13811 #define STUB_NAME ".real_start_of"
13813 fprintf (f, "\t.code\t16\n");
13814 #ifdef ARM_PE
13815 if (arm_dllexport_name_p (name))
13816 name = arm_strip_name_encoding (name);
13817 #endif
13818 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13819 fprintf (f, "\t.thumb_func\n");
13820 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13823 if (current_function_pretend_args_size)
13825 /* Output unwind directive for the stack adjustment. */
13826 if (ARM_EABI_UNWIND_TABLES)
13827 fprintf (f, "\t.pad #%d\n",
13828 current_function_pretend_args_size);
13830 if (cfun->machine->uses_anonymous_args)
13832 int num_pushes;
13834 fprintf (f, "\tpush\t{");
13836 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13838 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13839 regno <= LAST_ARG_REGNUM;
13840 regno++)
13841 asm_fprintf (f, "%r%s", regno,
13842 regno == LAST_ARG_REGNUM ? "" : ", ");
13844 fprintf (f, "}\n");
13846 else
13847 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13848 SP_REGNUM, SP_REGNUM,
13849 current_function_pretend_args_size);
13851 /* We don't need to record the stores for unwinding (would it
13852 help the debugger any if we did?), but record the change in
13853 the stack pointer. */
13854 if (dwarf2out_do_frame ())
13856 char *l = dwarf2out_cfi_label ();
13858 cfa_offset = cfa_offset + current_function_pretend_args_size;
13859 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13863 /* Get the registers we are going to push. */
13864 live_regs_mask = thumb_compute_save_reg_mask ();
13865 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13866 l_mask = live_regs_mask & 0x40ff;
13867 /* Then count how many other high registers will need to be pushed. */
13868 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13870 if (TARGET_BACKTRACE)
13872 unsigned offset;
13873 unsigned work_register;
13875 /* We have been asked to create a stack backtrace structure.
13876 The code looks like this:
13878 0 .align 2
13879 0 func:
13880 0 sub SP, #16 Reserve space for 4 registers.
13881 2 push {R7} Push low registers.
13882 4 add R7, SP, #20 Get the stack pointer before the push.
13883 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13884 8 mov R7, PC Get hold of the start of this code plus 12.
13885 10 str R7, [SP, #16] Store it.
13886 12 mov R7, FP Get hold of the current frame pointer.
13887 14 str R7, [SP, #4] Store it.
13888 16 mov R7, LR Get hold of the current return address.
13889 18 str R7, [SP, #12] Store it.
13890 20 add R7, SP, #16 Point at the start of the backtrace structure.
13891 22 mov FP, R7 Put this value into the frame pointer. */
13893 work_register = thumb_find_work_register (live_regs_mask);
13895 if (ARM_EABI_UNWIND_TABLES)
13896 asm_fprintf (f, "\t.pad #16\n");
13898 asm_fprintf
13899 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13900 SP_REGNUM, SP_REGNUM);
13902 if (dwarf2out_do_frame ())
13904 char *l = dwarf2out_cfi_label ();
13906 cfa_offset = cfa_offset + 16;
13907 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13910 if (l_mask)
13912 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13913 offset = bit_count (l_mask) * UNITS_PER_WORD;
13915 else
13916 offset = 0;
13918 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13919 offset + 16 + current_function_pretend_args_size);
13921 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13922 offset + 4);
13924 /* Make sure that the instruction fetching the PC is in the right place
13925 to calculate "start of backtrace creation code + 12". */
13926 if (l_mask)
13928 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13929 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13930 offset + 12);
13931 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13932 ARM_HARD_FRAME_POINTER_REGNUM);
13933 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13934 offset);
13936 else
13938 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13939 ARM_HARD_FRAME_POINTER_REGNUM);
13940 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13941 offset);
13942 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13943 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13944 offset + 12);
13947 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13948 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13949 offset + 8);
13950 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13951 offset + 12);
13952 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13953 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13955 /* Optimization: If we are not pushing any low registers but we are going
13956 to push some high registers then delay our first push. This will just
13957 be a push of LR and we can combine it with the push of the first high
13958 register. */
13959 else if ((l_mask & 0xff) != 0
13960 || (high_regs_pushed == 0 && l_mask))
13961 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13963 if (high_regs_pushed)
13965 unsigned pushable_regs;
13966 unsigned next_hi_reg;
13968 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13969 if (live_regs_mask & (1 << next_hi_reg))
13970 break;
13972 pushable_regs = l_mask & 0xff;
13974 if (pushable_regs == 0)
13975 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13977 while (high_regs_pushed > 0)
13979 unsigned long real_regs_mask = 0;
13981 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13983 if (pushable_regs & (1 << regno))
13985 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13987 high_regs_pushed --;
13988 real_regs_mask |= (1 << next_hi_reg);
13990 if (high_regs_pushed)
13992 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13993 next_hi_reg --)
13994 if (live_regs_mask & (1 << next_hi_reg))
13995 break;
13997 else
13999 pushable_regs &= ~((1 << regno) - 1);
14000 break;
14005 /* If we had to find a work register and we have not yet
14006 saved the LR then add it to the list of regs to push. */
14007 if (l_mask == (1 << LR_REGNUM))
14009 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
14010 1, &cfa_offset,
14011 real_regs_mask | (1 << LR_REGNUM));
14012 l_mask = 0;
14014 else
14015 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
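/* Illustrative trace of the high-register loop above: with l_mask
   covering {r4, r5, lr} (already pushed) and high registers r8 and r9
   live, one pass moves r9 into r5 and r8 into r4, then emits a single
   "push {r4, r5}", recording r8 and r9 in real_regs_mask so that the
   unwind information describes the registers actually saved. */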
14020 /* Handle the case of a double word load into a low register from
14021 a computed memory address. The computed address may involve a
14022 register which is overwritten by the load. */
14023 const char *
14024 thumb_load_double_from_address (rtx *operands)
14026 rtx addr;
14027 rtx base;
14028 rtx offset;
14029 rtx arg1;
14030 rtx arg2;
14032 gcc_assert (GET_CODE (operands[0]) == REG);
14033 gcc_assert (GET_CODE (operands[1]) == MEM);
14035 /* Get the memory address. */
14036 addr = XEXP (operands[1], 0);
14038 /* Work out how the memory address is computed. */
14039 switch (GET_CODE (addr))
14041 case REG:
14042 operands[2] = adjust_address (operands[1], SImode, 4);
14044 if (REGNO (operands[0]) == REGNO (addr))
14046 output_asm_insn ("ldr\t%H0, %2", operands);
14047 output_asm_insn ("ldr\t%0, %1", operands);
14049 else
14051 output_asm_insn ("ldr\t%0, %1", operands);
14052 output_asm_insn ("ldr\t%H0, %2", operands);
14054 break;
14056 case CONST:
14057 /* Compute <address> + 4 for the high order load. */
14058 operands[2] = adjust_address (operands[1], SImode, 4);
14060 output_asm_insn ("ldr\t%0, %1", operands);
14061 output_asm_insn ("ldr\t%H0, %2", operands);
14062 break;
14064 case PLUS:
14065 arg1 = XEXP (addr, 0);
14066 arg2 = XEXP (addr, 1);
14068 if (CONSTANT_P (arg1))
14069 base = arg2, offset = arg1;
14070 else
14071 base = arg1, offset = arg2;
14073 gcc_assert (GET_CODE (base) == REG);
14075 /* Catch the case of <address> = <reg> + <reg> */
14076 if (GET_CODE (offset) == REG)
14078 int reg_offset = REGNO (offset);
14079 int reg_base = REGNO (base);
14080 int reg_dest = REGNO (operands[0]);
14082 /* Add the base and offset registers together into the
14083 higher destination register. */
14084 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
14085 reg_dest + 1, reg_base, reg_offset);
14087 /* Load the lower destination register from the address in
14088 the higher destination register. */
14089 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
14090 reg_dest, reg_dest + 1);
14092 /* Load the higher destination register from its own address
14093 plus 4. */
14094 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
14095 reg_dest + 1, reg_dest + 1);
14097 else
14099 /* Compute <address> + 4 for the high order load. */
14100 operands[2] = adjust_address (operands[1], SImode, 4);
14102 /* If the computed address is held in the low order register
14103 then load the high order register first, otherwise always
14104 load the low order register first. */
14105 if (REGNO (operands[0]) == REGNO (base))
14107 output_asm_insn ("ldr\t%H0, %2", operands);
14108 output_asm_insn ("ldr\t%0, %1", operands);
14110 else
14112 output_asm_insn ("ldr\t%0, %1", operands);
14113 output_asm_insn ("ldr\t%H0, %2", operands);
14116 break;
14118 case LABEL_REF:
14119 /* With no registers to worry about we can just load the value
14120 directly. */
14121 operands[2] = adjust_address (operands[1], SImode, 4);
14123 output_asm_insn ("ldr\t%H0, %2", operands);
14124 output_asm_insn ("ldr\t%0, %1", operands);
14125 break;
14127 default:
14128 gcc_unreachable ();
14131 return "";
14134 const char *
14135 thumb_output_move_mem_multiple (int n, rtx *operands)
14137 rtx tmp;
14139 switch (n)
14141 case 2:
14142 if (REGNO (operands[4]) > REGNO (operands[5]))
14144 tmp = operands[4];
14145 operands[4] = operands[5];
14146 operands[5] = tmp;
14148 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
14149 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
14150 break;
14152 case 3:
14153 if (REGNO (operands[4]) > REGNO (operands[5]))
14155 tmp = operands[4];
14156 operands[4] = operands[5];
14157 operands[5] = tmp;
14159 if (REGNO (operands[5]) > REGNO (operands[6]))
14161 tmp = operands[5];
14162 operands[5] = operands[6];
14163 operands[6] = tmp;
14165 if (REGNO (operands[4]) > REGNO (operands[5]))
14167 tmp = operands[4];
14168 operands[4] = operands[5];
14169 operands[5] = tmp;
14172 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
14173 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
14174 break;
14176 default:
14177 gcc_unreachable ();
14180 return "";
14183 /* Output a call-via instruction for thumb state. */
14184 const char *
14185 thumb_call_via_reg (rtx reg)
14187 int regno = REGNO (reg);
14188 rtx *labelp;
14190 gcc_assert (regno < LR_REGNUM);
14192 /* If we are in the normal text section we can use a single instance
14193 per compilation unit. If we are doing function sections, then we need
14194 an entry per section, since we can't rely on reachability. */
14195 if (in_section == text_section)
14197 thumb_call_reg_needed = 1;
14199 if (thumb_call_via_label[regno] == NULL)
14200 thumb_call_via_label[regno] = gen_label_rtx ();
14201 labelp = thumb_call_via_label + regno;
14203 else
14205 if (cfun->machine->call_via[regno] == NULL)
14206 cfun->machine->call_via[regno] = gen_label_rtx ();
14207 labelp = cfun->machine->call_via + regno;
14210 output_asm_insn ("bl\t%a0", labelp);
14211 return "";
14214 /* Routines for generating rtl. */
14215 void
14216 thumb_expand_movmemqi (rtx *operands)
14218 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
14219 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
14220 HOST_WIDE_INT len = INTVAL (operands[2]);
14221 HOST_WIDE_INT offset = 0;
14223 while (len >= 12)
14225 emit_insn (gen_movmem12b (out, in, out, in));
14226 len -= 12;
14229 if (len >= 8)
14231 emit_insn (gen_movmem8b (out, in, out, in));
14232 len -= 8;
14235 if (len >= 4)
14237 rtx reg = gen_reg_rtx (SImode);
14238 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
14239 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
14240 len -= 4;
14241 offset += 4;
14244 if (len >= 2)
14246 rtx reg = gen_reg_rtx (HImode);
14247 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
14248 plus_constant (in, offset))));
14249 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
14250 reg));
14251 len -= 2;
14252 offset += 2;
14255 if (len)
14257 rtx reg = gen_reg_rtx (QImode);
14258 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
14259 plus_constant (in, offset))));
14260 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
14261 reg));
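/* Worked example of the decomposition above: a 27 byte copy emits two
   12-byte block moves (which post-increment both pointers), then a
   halfword move at offset 0 and a byte move at offset 2 relative to
   the updated pointers: 27 = 12 + 12 + 2 + 1. */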
14265 void
14266 thumb_reload_out_hi (rtx *operands)
14268 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
14271 /* Handle reading a half-word from memory during reload. */
14272 void
14273 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
14275 gcc_unreachable ();
14278 /* Return the length of a function name prefix
14279 that starts with the character 'c'. */
14280 static int
14281 arm_get_strip_length (int c)
14283 switch (c)
14285 ARM_NAME_ENCODING_LENGTHS
14286 default: return 0;
14290 /* Return a pointer to a function's name with any
14291 and all prefix encodings stripped from it. */
14292 const char *
14293 arm_strip_name_encoding (const char *name)
14295 int skip;
14297 while ((skip = arm_get_strip_length (* name)))
14298 name += skip;
14300 return name;
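/* For example, if the target's ARM_NAME_ENCODING_LENGTHS gives the
   character '*' a strip length of 1 (a hypothetical mapping, for
   illustration), then arm_strip_name_encoding ("**foo") returns "foo",
   consuming one prefix character per loop iteration. */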
14303 /* If there is a '*' anywhere in the name's prefix, then
14304 emit the stripped name verbatim, otherwise prepend an
14305 underscore if leading underscores are being used. */
14306 void
14307 arm_asm_output_labelref (FILE *stream, const char *name)
14309 int skip;
14310 int verbatim = 0;
14312 while ((skip = arm_get_strip_length (* name)))
14314 verbatim |= (*name == '*');
14315 name += skip;
14318 if (verbatim)
14319 fputs (name, stream);
14320 else
14321 asm_fprintf (stream, "%U%s", name);
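/* Under the same hypothetical mapping as above, "*foo" is emitted
   verbatim as "foo" because a '*' was seen in the prefix, whereas a
   plain "foo" is emitted with the user label prefix via %U. */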
14324 static void
14325 arm_file_end (void)
14327 int regno;
14329 if (! thumb_call_reg_needed)
14330 return;
14332 switch_to_section (text_section);
14333 asm_fprintf (asm_out_file, "\t.code 16\n");
14334 ASM_OUTPUT_ALIGN (asm_out_file, 1);
14336 for (regno = 0; regno < LR_REGNUM; regno++)
14338 rtx label = thumb_call_via_label[regno];
14340 if (label != 0)
14342 targetm.asm_out.internal_label (asm_out_file, "L",
14343 CODE_LABEL_NUMBER (label));
14344 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
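/* Sketch of how this pairs with thumb_call_via_reg (the label number
   is illustrative): each call through r4 in the text section emits a
   "bl" to a shared internal label, and at end of file that label is
   defined exactly once in front of a "bx r4" instruction, so all such
   calls in the compilation unit share one trampoline. */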
14349 rtx aof_pic_label;
14351 #ifdef AOF_ASSEMBLER
14352 /* Special functions only needed when producing AOF syntax assembler. */
14354 struct pic_chain
14356 struct pic_chain * next;
14357 const char * symname;
14360 static struct pic_chain * aof_pic_chain = NULL;
14363 aof_pic_entry (rtx x)
14365 struct pic_chain ** chainp;
14366 int offset;
14368 if (aof_pic_label == NULL_RTX)
14370 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14373 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14374 offset += 4, chainp = &(*chainp)->next)
14375 if ((*chainp)->symname == XSTR (x, 0))
14376 return plus_constant (aof_pic_label, offset);
14378 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14379 (*chainp)->next = NULL;
14380 (*chainp)->symname = XSTR (x, 0);
14381 return plus_constant (aof_pic_label, offset);
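/* Sketch of the resulting layout: the first three distinct symbols
   passed to aof_pic_entry receive offsets 0, 4 and 8 from the
   x$adcons base, and looking up a symbol already on the chain returns
   its existing offset instead of growing the chain. */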
14384 void
14385 aof_dump_pic_table (FILE *f)
14387 struct pic_chain * chain;
14389 if (aof_pic_chain == NULL)
14390 return;
14392 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14393 PIC_OFFSET_TABLE_REGNUM,
14394 PIC_OFFSET_TABLE_REGNUM);
14395 fputs ("|x$adcons|\n", f);
14397 for (chain = aof_pic_chain; chain; chain = chain->next)
14399 fputs ("\tDCD\t", f);
14400 assemble_name (f, chain->symname);
14401 fputs ("\n", f);
14405 int arm_text_section_count = 1;
14407 /* A get_unnamed_section callback for switching to the text section. */
14409 static void
14410 aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14412 fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
14413 arm_text_section_count++);
14414 if (flag_pic)
14415 fprintf (asm_out_file, ", PIC, REENTRANT");
14416 fprintf (asm_out_file, "\n");
14419 static int arm_data_section_count = 1;
14421 /* A get_unnamed_section callback for switching to the data section. */
14423 static void
14424 aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14426 fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
14427 arm_data_section_count++);
14430 /* Implement TARGET_ASM_INIT_SECTIONS.
14432 AOF Assembler syntax is a nightmare when it comes to areas, since once
14433 we change from one area to another, we can't go back again. Instead,
14434 we must create a new area with the same attributes and add the new output
14435 to that. Unfortunately, there is nothing we can do here to guarantee that
14436 two areas with the same attributes will be linked adjacently in the
14437 resulting executable, so we have to be careful not to do pc-relative
14438 addressing across such boundaries. */
14440 static void
14441 aof_asm_init_sections (void)
14443 text_section = get_unnamed_section (SECTION_CODE,
14444 aof_output_text_section_asm_op, NULL);
14445 data_section = get_unnamed_section (SECTION_WRITE,
14446 aof_output_data_section_asm_op, NULL);
14447 readonly_data_section = text_section;
14450 void
14451 zero_init_section (void)
14453 static int zero_init_count = 1;
14455 fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
14456 in_section = NULL;
14459 /* The AOF assembler is religiously strict about declarations of
14460 imported and exported symbols, so that it is impossible to declare
14461 a function as imported near the beginning of the file, and then to
14462 export it later on. It is, however, possible to delay the decision
14463 until all the functions in the file have been compiled. To get
14464 around this, we maintain a list of the imports and exports, and
14465 delete from it any that are subsequently defined. At the end of
14466 compilation we spit the remainder of the list out before the END
14467 directive. */
14469 struct import
14471 struct import * next;
14472 const char * name;
14475 static struct import * imports_list = NULL;
14477 void
14478 aof_add_import (const char *name)
14480 struct import * new;
14482 for (new = imports_list; new; new = new->next)
14483 if (new->name == name)
14484 return;
14486 new = (struct import *) xmalloc (sizeof (struct import));
14487 new->next = imports_list;
14488 imports_list = new;
14489 new->name = name;
14492 void
14493 aof_delete_import (const char *name)
14495 struct import ** old;
14497 for (old = &imports_list; *old; old = & (*old)->next)
14499 if ((*old)->name == name)
14501 *old = (*old)->next;
14502 return;
14507 int arm_main_function = 0;
14509 static void
14510 aof_dump_imports (FILE *f)
14512 /* The AOF assembler needs this to cause the startup code to be extracted
14513 from the library. Bringing in __main causes the whole thing to work
14514 automagically. */
14515 if (arm_main_function)
14517 switch_to_section (text_section);
14518 fputs ("\tIMPORT __main\n", f);
14519 fputs ("\tDCD __main\n", f);
14522 /* Now dump the remaining imports. */
14523 while (imports_list)
14525 fprintf (f, "\tIMPORT\t");
14526 assemble_name (f, imports_list->name);
14527 fputc ('\n', f);
14528 imports_list = imports_list->next;
14532 static void
14533 aof_globalize_label (FILE *stream, const char *name)
14535 default_globalize_label (stream, name);
14536 if (! strcmp (name, "main"))
14537 arm_main_function = 1;
14540 static void
14541 aof_file_start (void)
14543 fputs ("__r0\tRN\t0\n", asm_out_file);
14544 fputs ("__a1\tRN\t0\n", asm_out_file);
14545 fputs ("__a2\tRN\t1\n", asm_out_file);
14546 fputs ("__a3\tRN\t2\n", asm_out_file);
14547 fputs ("__a4\tRN\t3\n", asm_out_file);
14548 fputs ("__v1\tRN\t4\n", asm_out_file);
14549 fputs ("__v2\tRN\t5\n", asm_out_file);
14550 fputs ("__v3\tRN\t6\n", asm_out_file);
14551 fputs ("__v4\tRN\t7\n", asm_out_file);
14552 fputs ("__v5\tRN\t8\n", asm_out_file);
14553 fputs ("__v6\tRN\t9\n", asm_out_file);
14554 fputs ("__sl\tRN\t10\n", asm_out_file);
14555 fputs ("__fp\tRN\t11\n", asm_out_file);
14556 fputs ("__ip\tRN\t12\n", asm_out_file);
14557 fputs ("__sp\tRN\t13\n", asm_out_file);
14558 fputs ("__lr\tRN\t14\n", asm_out_file);
14559 fputs ("__pc\tRN\t15\n", asm_out_file);
14560 fputs ("__f0\tFN\t0\n", asm_out_file);
14561 fputs ("__f1\tFN\t1\n", asm_out_file);
14562 fputs ("__f2\tFN\t2\n", asm_out_file);
14563 fputs ("__f3\tFN\t3\n", asm_out_file);
14564 fputs ("__f4\tFN\t4\n", asm_out_file);
14565 fputs ("__f5\tFN\t5\n", asm_out_file);
14566 fputs ("__f6\tFN\t6\n", asm_out_file);
14567 fputs ("__f7\tFN\t7\n", asm_out_file);
14568 switch_to_section (text_section);
14571 static void
14572 aof_file_end (void)
14574 if (flag_pic)
14575 aof_dump_pic_table (asm_out_file);
14576 arm_file_end ();
14577 aof_dump_imports (asm_out_file);
14578 fputs ("\tEND\n", asm_out_file);
14580 #endif /* AOF_ASSEMBLER */
14582 #ifndef ARM_PE
14583 /* Symbols in the text segment can be accessed without indirecting via the
14584 constant pool; it may take an extra binary operation, but this is still
14585 faster than indirecting via memory. Don't do this when not optimizing,
14586 since we won't be calculating all of the offsets necessary to do this
14587 simplification. */
14589 static void
14590 arm_encode_section_info (tree decl, rtx rtl, int first)
14592 /* This doesn't work with AOF syntax, since the string table may be in
14593 a different AREA. */
14594 #ifndef AOF_ASSEMBLER
14595 if (optimize > 0 && TREE_CONSTANT (decl))
14596 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14597 #endif
14599 /* If we are referencing a function that is weak then encode a long call
14600 flag in the function name, otherwise if the function is static or
14601 known to be defined in this file then encode a short call flag. */
14602 if (first && DECL_P (decl))
14604 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14605 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14606 else if (! TREE_PUBLIC (decl))
14607 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14610 default_encode_section_info (decl, rtl, first);
14612 #endif /* !ARM_PE */
14614 static void
14615 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14617 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14618 && !strcmp (prefix, "L"))
14620 arm_ccfsm_state = 0;
14621 arm_target_insn = NULL;
14623 default_internal_label (stream, prefix, labelno);
14626 /* Output code to add DELTA to the first argument, and then jump
14627 to FUNCTION. Used for C++ multiple inheritance. */
14628 static void
14629 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14630 HOST_WIDE_INT delta,
14631 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14632 tree function)
14634 static int thunk_label = 0;
14635 char label[256];
14636 int mi_delta = delta;
14637 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14638 int shift = 0;
14639 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14640 ? 1 : 0);
14641 if (mi_delta < 0)
14642 mi_delta = - mi_delta;
14643 if (TARGET_THUMB)
14645 int labelno = thunk_label++;
14646 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14647 fputs ("\tldr\tr12, ", file);
14648 assemble_name (file, label);
14649 fputc ('\n', file);
14651 while (mi_delta != 0)
14653 if ((mi_delta & (3 << shift)) == 0)
14654 shift += 2;
14655 else
14657 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14658 mi_op, this_regno, this_regno,
14659 mi_delta & (0xff << shift));
14660 mi_delta &= ~(0xff << shift);
14661 shift += 8;
14664 if (TARGET_THUMB)
14666 fprintf (file, "\tbx\tr12\n");
14667 ASM_OUTPUT_ALIGN (file, 2);
14668 assemble_name (file, label);
14669 fputs (":\n", file);
14670 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14672 else
14674 fputs ("\tb\t", file);
14675 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14676 if (NEED_PLT_RELOC)
14677 fputs ("(PLT)", file);
14678 fputc ('\n', file);
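/* Worked example of the delta decomposition loop above, assuming a
   non-aggregate return (this_regno == 0) and delta == 0x1234: the loop
   skips the bit-pairs that are zero and emits

       add  r0, r0, #0x234    @ 0x1234 & (0xff << 2)
       add  r0, r0, #0x1000   @ 0x1000 & (0xff << 12)

   each operand being an 8-bit value shifted by an even amount, and
   hence a valid ARM immediate. */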
14683 arm_emit_vector_const (FILE *file, rtx x)
14685 int i;
14686 const char * pattern;
14688 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14690 switch (GET_MODE (x))
14692 case V2SImode: pattern = "%08x"; break;
14693 case V4HImode: pattern = "%04x"; break;
14694 case V8QImode: pattern = "%02x"; break;
14695 default: gcc_unreachable ();
14698 fprintf (file, "0x");
14699 for (i = CONST_VECTOR_NUNITS (x); i--;)
14701 rtx element;
14703 element = CONST_VECTOR_ELT (x, i);
14704 fprintf (file, pattern, INTVAL (element));
14707 return 1;
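/* Example: a V4HImode constant holding elements {1, 2, 3, 4} (element
   0 first) is printed as 0x0004000300020001 -- the loop runs from the
   highest-numbered element down, four hex digits per element, so
   element 0 lands in the least significant digits. */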
14710 const char *
14711 arm_output_load_gr (rtx *operands)
14713 rtx reg;
14714 rtx offset;
14715 rtx wcgr;
14716 rtx sum;
14718 if (GET_CODE (operands [1]) != MEM
14719 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14720 || GET_CODE (reg = XEXP (sum, 0)) != REG
14721 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14722 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14723 return "wldrw%?\t%0, %1";
14725 /* Fix up an out-of-range load of a GR register. */
14726 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14727 wcgr = operands[0];
14728 operands[0] = reg;
14729 output_asm_insn ("ldr%?\t%0, %1", operands);
14731 operands[0] = wcgr;
14732 operands[1] = reg;
14733 output_asm_insn ("tmcr%?\t%0, %1", operands);
14734 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14736 return "";
14739 static rtx
14740 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14741 int incoming ATTRIBUTE_UNUSED)
14743 #if 0
14744 /* FIXME: The ARM backend has special code to handle structure
14745 returns, and will reserve its own hidden first argument. So
14746 if this macro is enabled a *second* hidden argument will be
14747 reserved, which will break binary compatibility with old
14748 toolchains and also thunk handling. One day this should be
14749 fixed. */
14750 return 0;
14751 #else
14752 /* Register in which address to store a structure value
14753 is passed to a function. */
14754 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14755 #endif
14758 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14760 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14761 named arg and all anonymous args onto the stack.
14762 XXX I know the prologue shouldn't be pushing registers, but it is faster
14763 that way. */
14765 static void
14766 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14767 enum machine_mode mode ATTRIBUTE_UNUSED,
14768 tree type ATTRIBUTE_UNUSED,
14769 int *pretend_size,
14770 int second_time ATTRIBUTE_UNUSED)
14772 cfun->machine->uses_anonymous_args = 1;
14773 if (cum->nregs < NUM_ARG_REGS)
14774 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14777 /* Return nonzero if the CONSUMER instruction (a store) does not need
14778 PRODUCER's value to calculate the address. */
14781 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14783 rtx value = PATTERN (producer);
14784 rtx addr = PATTERN (consumer);
14786 if (GET_CODE (value) == COND_EXEC)
14787 value = COND_EXEC_CODE (value);
14788 if (GET_CODE (value) == PARALLEL)
14789 value = XVECEXP (value, 0, 0);
14790 value = XEXP (value, 0);
14791 if (GET_CODE (addr) == COND_EXEC)
14792 addr = COND_EXEC_CODE (addr);
14793 if (GET_CODE (addr) == PARALLEL)
14794 addr = XVECEXP (addr, 0, 0);
14795 addr = XEXP (addr, 0);
14797 return !reg_overlap_mentioned_p (value, addr);
14800 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14801 have an early register shift value or amount dependency on the
14802 result of PRODUCER. */
14805 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14807 rtx value = PATTERN (producer);
14808 rtx op = PATTERN (consumer);
14809 rtx early_op;
14811 if (GET_CODE (value) == COND_EXEC)
14812 value = COND_EXEC_CODE (value);
14813 if (GET_CODE (value) == PARALLEL)
14814 value = XVECEXP (value, 0, 0);
14815 value = XEXP (value, 0);
14816 if (GET_CODE (op) == COND_EXEC)
14817 op = COND_EXEC_CODE (op);
14818 if (GET_CODE (op) == PARALLEL)
14819 op = XVECEXP (op, 0, 0);
14820 op = XEXP (op, 1);
14822 early_op = XEXP (op, 0);
14823 /* This is either an actual independent shift, or a shift applied to
14824 the first operand of another operation. We want the whole shift
14825 operation. */
14826 if (GET_CODE (early_op) == REG)
14827 early_op = op;
14829 return !reg_overlap_mentioned_p (value, early_op);
14832 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14833 have an early register shift value dependency on the result of
14834 PRODUCER. */
14837 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14839 rtx value = PATTERN (producer);
14840 rtx op = PATTERN (consumer);
14841 rtx early_op;
14843 if (GET_CODE (value) == COND_EXEC)
14844 value = COND_EXEC_CODE (value);
14845 if (GET_CODE (value) == PARALLEL)
14846 value = XVECEXP (value, 0, 0);
14847 value = XEXP (value, 0);
14848 if (GET_CODE (op) == COND_EXEC)
14849 op = COND_EXEC_CODE (op);
14850 if (GET_CODE (op) == PARALLEL)
14851 op = XVECEXP (op, 0, 0);
14852 op = XEXP (op, 1);
14854 early_op = XEXP (op, 0);
14856 /* This is either an actual independent shift, or a shift applied to
14857 the first operand of another operation. We want the value being
14858 shifted, in either case. */
14859 if (GET_CODE (early_op) != REG)
14860 early_op = XEXP (early_op, 0);
14862 return !reg_overlap_mentioned_p (value, early_op);
14865 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14866 have an early register mult dependency on the result of
14867 PRODUCER. */
14870 arm_no_early_mul_dep (rtx producer, rtx consumer)
14872 rtx value = PATTERN (producer);
14873 rtx op = PATTERN (consumer);
14875 if (GET_CODE (value) == COND_EXEC)
14876 value = COND_EXEC_CODE (value);
14877 if (GET_CODE (value) == PARALLEL)
14878 value = XVECEXP (value, 0, 0);
14879 value = XEXP (value, 0);
14880 if (GET_CODE (op) == COND_EXEC)
14881 op = COND_EXEC_CODE (op);
14882 if (GET_CODE (op) == PARALLEL)
14883 op = XVECEXP (op, 0, 0);
14884 op = XEXP (op, 1);
14886 return (GET_CODE (op) == PLUS
14887 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14891 /* We can't rely on the caller doing the proper promotion when
14892 using APCS or ATPCS. */
14894 static bool
14895 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14897 return !TARGET_AAPCS_BASED;
14901 /* AAPCS based ABIs use short enums by default. */
14903 static bool
14904 arm_default_short_enums (void)
14906 return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
14910 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14912 static bool
14913 arm_align_anon_bitfield (void)
14915 return TARGET_AAPCS_BASED;
14919 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14921 static tree
14922 arm_cxx_guard_type (void)
14924 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14928 /* The EABI says test the least significant bit of a guard variable. */
14930 static bool
14931 arm_cxx_guard_mask_bit (void)
14933 return TARGET_AAPCS_BASED;
14937 /* The EABI specifies that all array cookies are 8 bytes long. */
14939 static tree
14940 arm_get_cookie_size (tree type)
14942 tree size;
14944 if (!TARGET_AAPCS_BASED)
14945 return default_cxx_get_cookie_size (type);
14947 size = build_int_cst (sizetype, 8);
14948 return size;
14952 /* The EABI says that array cookies should also contain the element size. */
14954 static bool
14955 arm_cookie_has_size (void)
14957 return TARGET_AAPCS_BASED;
14961 /* The EABI says constructors and destructors should return a pointer to
14962 the object constructed/destroyed. */
14964 static bool
14965 arm_cxx_cdtor_returns_this (void)
14967 return TARGET_AAPCS_BASED;
14970 /* The EABI says that an inline function may never be the key
14971 method. */
14973 static bool
14974 arm_cxx_key_method_may_be_inline (void)
14976 return !TARGET_AAPCS_BASED;
14979 static void
14980 arm_cxx_determine_class_data_visibility (tree decl)
14982 if (!TARGET_AAPCS_BASED)
14983 return;
14985 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
14986 is exported. However, on systems without dynamic vague linkage,
14987 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
14988 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
14989 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
14990 else
14991 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
14992 DECL_VISIBILITY_SPECIFIED (decl) = 1;
14995 static bool
14996 arm_cxx_class_data_always_comdat (void)
14998 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
14999 vague linkage if the class has no key function. */
15000 return !TARGET_AAPCS_BASED;
15004 /* The EABI says __aeabi_atexit should be used to register static
15005 destructors. */
15007 static bool
15008 arm_cxx_use_aeabi_atexit (void)
15010 return TARGET_AAPCS_BASED;
15014 void
15015 arm_set_return_address (rtx source, rtx scratch)
15017 arm_stack_offsets *offsets;
15018 HOST_WIDE_INT delta;
15019 rtx addr;
15020 unsigned long saved_regs;
15022 saved_regs = arm_compute_save_reg_mask ();
15024 if ((saved_regs & (1 << LR_REGNUM)) == 0)
15025 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15026 else
15028 if (frame_pointer_needed)
15029 addr = plus_constant (hard_frame_pointer_rtx, -4);
15030 else
15032 /* LR will be the first saved register. */
15033 offsets = arm_get_frame_offsets ();
15034 delta = offsets->outgoing_args - (offsets->frame + 4);
15037 if (delta >= 4096)
15039 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
15040 GEN_INT (delta & ~4095)));
15041 addr = scratch;
15042 delta &= 4095;
15044 else
15045 addr = stack_pointer_rtx;
15047 addr = plus_constant (addr, delta);
15049 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15054 void
15055 thumb_set_return_address (rtx source, rtx scratch)
15057 arm_stack_offsets *offsets;
15058 HOST_WIDE_INT delta;
15059 int reg;
15060 rtx addr;
15061 unsigned long mask;
15063 emit_insn (gen_rtx_USE (VOIDmode, source));
15065 mask = thumb_compute_save_reg_mask ();
15066 if (mask & (1 << LR_REGNUM))
15068 offsets = arm_get_frame_offsets ();
15070 /* Find the saved regs. */
15071 if (frame_pointer_needed)
15073 delta = offsets->soft_frame - offsets->saved_args;
15074 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
15076 else
15078 delta = offsets->outgoing_args - offsets->saved_args;
15079 reg = SP_REGNUM;
15081 /* Allow for the stack frame. */
15082 if (TARGET_BACKTRACE)
15083 delta -= 16;
15084 /* The link register is always the first saved register. */
15085 delta -= 4;
15087 /* Construct the address. */
15088 addr = gen_rtx_REG (SImode, reg);
15089 if ((reg != SP_REGNUM && delta >= 128)
15090 || delta >= 1024)
15092 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
15093 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
15094 addr = scratch;
15096 else
15097 addr = plus_constant (addr, delta);
15099 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15101 else
15102 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15105 /* Implements target hook vector_mode_supported_p. */
15106 bool
15107 arm_vector_mode_supported_p (enum machine_mode mode)
15109 if ((mode == V2SImode)
15110 || (mode == V4HImode)
15111 || (mode == V8QImode))
15112 return true;
15114 return false;
15117 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
15118 ARM insns and therefore guarantee that the shift count is modulo 256.
15119 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
15120 guarantee no particular behavior for out-of-range counts. */
15122 static unsigned HOST_WIDE_INT
15123 arm_shift_truncation_mask (enum machine_mode mode)
15125 return mode == SImode ? 255 : 0;
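/* Consequence of the mask above: for SImode the middle end may assume
   that a variable shift by, say, 260 behaves like a shift by
   260 & 255 == 4, matching the way ARM register-specified shifts use
   only the least significant byte of the count; for DImode no such
   truncation is promised. */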
15129 /* Map internal gcc register numbers to DWARF2 register numbers. */
15131 unsigned int
15132 arm_dbx_register_number (unsigned int regno)
15134 if (regno < 16)
15135 return regno;
15137 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
15138 compatibility. The EABI defines them as registers 96-103. */
15139 if (IS_FPA_REGNUM (regno))
15140 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
15142 if (IS_VFP_REGNUM (regno))
15143 return 64 + regno - FIRST_VFP_REGNUM;
15145 if (IS_IWMMXT_GR_REGNUM (regno))
15146 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
15148 if (IS_IWMMXT_REGNUM (regno))
15149 return 112 + regno - FIRST_IWMMXT_REGNUM;
15151 gcc_unreachable ();
15155 #ifdef TARGET_UNWIND_INFO
15156 /* Emit unwind directives for a store-multiple instruction. This should
15157 only ever be generated by the function prologue code, so we expect it
15158 to have a particular form. */
15160 static void
15161 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
15163 int i;
15164 HOST_WIDE_INT offset;
15165 HOST_WIDE_INT nregs;
15166 int reg_size;
15167 unsigned reg;
15168 unsigned lastreg;
15169 rtx e;
15171 /* First insn will adjust the stack pointer. */
15172 e = XVECEXP (p, 0, 0);
15173 if (GET_CODE (e) != SET
15174 || GET_CODE (XEXP (e, 0)) != REG
15175 || REGNO (XEXP (e, 0)) != SP_REGNUM
15176 || GET_CODE (XEXP (e, 1)) != PLUS)
15177 abort ();
15179 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
15180 nregs = XVECLEN (p, 0) - 1;
15182 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
15183 if (reg < 16)
15185 /* The function prologue may also push pc, but does not annotate it, as it is
15186 never restored. We turn this into a stack pointer adjustment. */
15187 if (nregs * 4 == offset - 4)
15189 fprintf (asm_out_file, "\t.pad #4\n");
15190 offset -= 4;
15192 reg_size = 4;
15194 else if (IS_VFP_REGNUM (reg))
15196 /* VFP register saves (FSTMX format) use an additional word. */
15197 offset -= 4;
15198 reg_size = 8;
15200 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
15202 /* FPA registers are done differently. */
15203 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
15204 return;
15206 else
15207 /* Unknown register type. */
15208 abort ();
15210 /* If the stack increment doesn't match the size of the saved registers,
15211 something has gone horribly wrong. */
15212 if (offset != nregs * reg_size)
15213 abort ();
15215 fprintf (asm_out_file, "\t.save {");
15217 offset = 0;
15218 lastreg = 0;
15219 /* The remaining insns will describe the stores. */
15220 for (i = 1; i <= nregs; i++)
15222 /* Expect (set (mem <addr>) (reg)).
15223 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
15224 e = XVECEXP (p, 0, i);
15225 if (GET_CODE (e) != SET
15226 || GET_CODE (XEXP (e, 0)) != MEM
15227 || GET_CODE (XEXP (e, 1)) != REG)
15228 abort ();
15230 reg = REGNO (XEXP (e, 1));
15231 if (reg < lastreg)
15232 abort ();
15234 if (i != 1)
15235 fprintf (asm_out_file, ", ");
15236 /* We can't use %r for vfp because we need to use the
15237 double precision register names. */
15238 if (IS_VFP_REGNUM (reg))
15239 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
15240 else
15241 asm_fprintf (asm_out_file, "%r", reg);
15243 #ifdef ENABLE_CHECKING
15244 /* Check that the addresses are consecutive. */
15245 e = XEXP (XEXP (e, 0), 0);
15246 if (GET_CODE (e) == PLUS)
15248 offset += reg_size;
15249 if (GET_CODE (XEXP (e, 0)) != REG
15250 || REGNO (XEXP (e, 0)) != SP_REGNUM
15251 || GET_CODE (XEXP (e, 1)) != CONST_INT
15252 || offset != INTVAL (XEXP (e, 1)))
15253 abort ();
15255 else if (i != 1
15256 || GET_CODE (e) != REG
15257 || REGNO (e) != SP_REGNUM)
15258 abort ();
15259 #endif
15261 fprintf (asm_out_file, "}\n");
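/* Illustrative output: a prologue "push {r4, r5, lr}" produces

       .save {r4, r5, lr}

   and when the prologue also pushes pc (which is never restored, and
   is not annotated as a register save) the extra word is emitted as a
   ".pad #4" directive beforehand. */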
15264 /* Emit unwind directives for a SET. */
15266 static void
15267 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
15269 rtx e0;
15270 rtx e1;
15272 e0 = XEXP (p, 0);
15273 e1 = XEXP (p, 1);
15274 switch (GET_CODE (e0))
15276 case MEM:
15277 /* Pushing a single register. */
15278 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
15279 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
15280 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
15281 abort ();
15283 asm_fprintf (asm_out_file, "\t.save ");
15284 if (IS_VFP_REGNUM (REGNO (e1)))
15285 asm_fprintf (asm_out_file, "{d%d}\n",
15286 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
15287 else
15288 asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
15289 break;
15291 case REG:
15292 if (REGNO (e0) == SP_REGNUM)
15294 /* A stack increment. */
15295 if (GET_CODE (e1) != PLUS
15296 || GET_CODE (XEXP (e1, 0)) != REG
15297 || REGNO (XEXP (e1, 0)) != SP_REGNUM
15298 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15299 abort ();
15301 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
15302 -INTVAL (XEXP (e1, 1)));
15304 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
15306 HOST_WIDE_INT offset;
15307 unsigned reg;
15309 if (GET_CODE (e1) == PLUS)
15311 if (GET_CODE (XEXP (e1, 0)) != REG
15312 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15313 abort ();
15314 reg = REGNO (XEXP (e1, 0));
15315 offset = INTVAL (XEXP (e1, 1));
15316 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
15317 HARD_FRAME_POINTER_REGNUM, reg,
15318 offset);
15320 else if (GET_CODE (e1) == REG)
15322 reg = REGNO (e1);
15323 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
15324 HARD_FRAME_POINTER_REGNUM, reg);
15326 else
15327 abort ();
15329 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
15331 /* Move from sp to reg. */
15332 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
15334 else
15335 abort ();
15336 break;
15338 default:
15339 abort ();
15344 /* Emit unwind directives for the given insn. */
15346 static void
15347 arm_unwind_emit (FILE * asm_out_file, rtx insn)
15349 rtx pat;
15351 if (!ARM_EABI_UNWIND_TABLES)
15352 return;
15354 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
15355 return;
15357 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
15358 if (pat)
15359 pat = XEXP (pat, 0);
15360 else
15361 pat = PATTERN (insn);
15363 switch (GET_CODE (pat))
15365 case SET:
15366 arm_unwind_emit_set (asm_out_file, pat);
15367 break;
15369 case SEQUENCE:
15370 /* Store multiple. */
15371 arm_unwind_emit_stm (asm_out_file, pat);
15372 break;
15374 default:
15375 abort ();
15380 /* Output a reference from a function exception table to the type_info
15381 object X. The EABI specifies that the symbol should be relocated by
15382 an R_ARM_TARGET2 relocation. */
15384 static bool
15385 arm_output_ttype (rtx x)
15387 fputs ("\t.word\t", asm_out_file);
15388 output_addr_const (asm_out_file, x);
15389 /* Use special relocations for symbol references. */
15390 if (GET_CODE (x) != CONST_INT)
15391 fputs ("(TARGET2)", asm_out_file);
15392 fputc ('\n', asm_out_file);
15394 return TRUE;
15396 #endif /* TARGET_UNWIND_INFO */
15399 /* Output unwind directives for the start/end of a function. */
15401 void
15402 arm_output_fn_unwind (FILE * f, bool prologue)
15404 if (!ARM_EABI_UNWIND_TABLES)
15405 return;
15407 if (prologue)
15408 fputs ("\t.fnstart\n", f);
15409 else
15410 fputs ("\t.fnend\n", f);
15413 static bool
15414 arm_emit_tls_decoration (FILE *fp, rtx x)
15416 enum tls_reloc reloc;
15417 rtx val;
15419 val = XVECEXP (x, 0, 0);
15420 reloc = INTVAL (XVECEXP (x, 0, 1));
15422 output_addr_const (fp, val);
15424 switch (reloc)
15426 case TLS_GD32:
15427 fputs ("(tlsgd)", fp);
15428 break;
15429 case TLS_LDM32:
15430 fputs ("(tlsldm)", fp);
15431 break;
15432 case TLS_LDO32:
15433 fputs ("(tlsldo)", fp);
15434 break;
15435 case TLS_IE32:
15436 fputs ("(gottpoff)", fp);
15437 break;
15438 case TLS_LE32:
15439 fputs ("(tpoff)", fp);
15440 break;
15441 default:
15442 gcc_unreachable ();
15445 switch (reloc)
15447 case TLS_GD32:
15448 case TLS_LDM32:
15449 case TLS_IE32:
15450 fputs (" + (. - ", fp);
15451 output_addr_const (fp, XVECEXP (x, 0, 2));
15452 fputs (" - ", fp);
15453 output_addr_const (fp, XVECEXP (x, 0, 3));
15454 fputc (')', fp);
15455 break;
15456 default:
15457 break;
15460 return TRUE;
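/* Example of the decorated output, with illustrative symbol and label
   names: a TLS_GD32 reference to "x" whose operands 2 and 3 are the
   labels .LPIC4 and .LPIC5 prints as

       x(tlsgd) + (. - .LPIC4 - .LPIC5)

   while a TLS_LE32 reference prints simply as x(tpoff). */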
15463 bool
15464 arm_output_addr_const_extra (FILE *fp, rtx x)
15466 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
15467 return arm_emit_tls_decoration (fp, x);
15468 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
15470 char label[256];
15471 int labelno = INTVAL (XVECEXP (x, 0, 0));
15473 ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
15474 assemble_name_raw (fp, label);
15476 return TRUE;
15478 else if (GET_CODE (x) == CONST_VECTOR)
15479 return arm_emit_vector_const (fp, x);
15481 return FALSE;
15484 #include "gt-arm.h"