/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);

/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char * minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   FL_FOR_ARCH6
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
  {NULL, arm_none, NULL, 0 , NULL}
};

struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2

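/* Illustrative sketch, not part of the original file (the driver name
   is an assumption): a command line such as

       arm-elf-gcc -march=armv5te -mtune=xscale foo.c

   leaves arm_select[ARM_OPT_SET_CPU].string == NULL,
   arm_select[ARM_OPT_SET_ARCH].string == "armv5te" and
   arm_select[ARM_OPT_SET_TUNE].string == "xscale"; arm_override_options
   below then derives insn_flags from the architecture entry and the
   tuning parameters from the XScale core entry.  */
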
/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",  ARM_ABI_APCS},
  {"atpcs",     ARM_ABI_ATPCS},
  {"aapcs",     ARM_ABI_AAPCS},
  {"iwmmxt",    ARM_ABI_IWMMXT}
};

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}

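/* Illustrative note, not part of the original file: the
   "value &= value - 1" idiom above clears exactly one set bit per
   iteration, so the loop runs once per set bit.  For example,
   bit_count (0x29) proceeds 0x29 -> 0x28 -> 0x20 -> 0x0 and
   returns 3.  */
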
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}

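/* Illustrative sketch, not part of the original file: because the mod
   optabs are cleared above while the div and divmod optabs all point at
   the AEABI divmod routines, code such as

       int q = n / d;
       int r = n % d;

   compiles to calls to __aeabi_idivmod; the quotient is taken from r0
   and the remainder from r1, with no separate modulo libcall.  */
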
/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              gcc_assert (best_fit);
              sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking");
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
1292 { "ABORT", ARM_FT_ISR },
1293 { "abort", ARM_FT_ISR },
1294 { "UNDEF", ARM_FT_EXCEPTION },
1295 { "undef", ARM_FT_EXCEPTION },
1296 { "SWI", ARM_FT_EXCEPTION },
1297 { "swi", ARM_FT_EXCEPTION },
1298 { NULL, ARM_FT_NORMAL }
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

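/* Usage sketch, not part of the original file: the strings in the table
   above correspond to the argument of the "isr"/"interrupt" function
   attribute, e.g.

       void handler (void) __attribute__ ((interrupt ("IRQ")));

   which this function maps to ARM_FT_ISR; when the attribute is given
   with no argument, the function defaults to an IRQ handler.  */
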
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ...  */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}

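/* Illustrative note, not part of the original file: when this function
   returns 1 the epilogue can be a single instruction, e.g. "mov pc, lr"
   in a leaf function with no saved registers, or one load-multiple
   whose register list ends in PC when registers were stacked.  */
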
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
           && ((i & ~0xc000003f) == 0
               || (i & ~0xf000000f) == 0
               || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}

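/* Worked examples, not part of the original file: an ARM data-processing
   immediate is an 8-bit value rotated right by an even amount, which is
   what the tests above implement.  So 0x000000ff, 0x0000ff00 and
   0xff000000 are valid, 0xf000000f is valid via the wrap-around masks,
   while 0x000001fe (which needs an odd rotation) and 0x00012300 (nine
   significant bits) are rejected.  */
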
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are dyadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source,
                                                        temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}

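/* Worked example, not part of the original file: val == 0x00000fff fails
   const_ok_for_arm, so a SET is synthesized in two instructions, e.g.

       mov  rD, #0xf00
       orr  rD, rD, #0xff

   arm_gen_constant below performs this synthesis (and, when GENERATE is
   clear, merely counts the instructions that would be needed).  */
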
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}

/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}

1680 /* As above, but extra parameter GENERATE which, if clear, suppresses
1681 RTL generation. */
1683 static int
1684 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1685 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1686 int generate)
1688 int can_invert = 0;
1689 int can_negate = 0;
1690 int can_negate_initial = 0;
1691 int can_shift = 0;
1692 int i;
1693 int num_bits_set = 0;
1694 int set_sign_bit_copies = 0;
1695 int clear_sign_bit_copies = 0;
1696 int clear_zero_bit_copies = 0;
1697 int set_zero_bit_copies = 0;
1698 int insns = 0;
1699 unsigned HOST_WIDE_INT temp1, temp2;
1700 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1702 /* Find out which operations are safe for a given CODE. Also do a quick
1703 check for degenerate cases; these can occur when DImode operations
1704 are split. */
1705 switch (code)
1707 case SET:
1708 can_invert = 1;
1709 can_shift = 1;
1710 can_negate = 1;
1711 break;
1713 case PLUS:
1714 can_negate = 1;
1715 can_negate_initial = 1;
1716 break;
1718 case IOR:
1719 if (remainder == 0xffffffff)
1721 if (generate)
1722 emit_constant_insn (cond,
1723 gen_rtx_SET (VOIDmode, target,
1724 GEN_INT (ARM_SIGN_EXTEND (val))));
1725 return 1;
1727 if (remainder == 0)
1729 if (reload_completed && rtx_equal_p (target, source))
1730 return 0;
1731 if (generate)
1732 emit_constant_insn (cond,
1733 gen_rtx_SET (VOIDmode, target, source));
1734 return 1;
1736 break;
1738 case AND:
1739 if (remainder == 0)
1741 if (generate)
1742 emit_constant_insn (cond,
1743 gen_rtx_SET (VOIDmode, target, const0_rtx));
1744 return 1;
1746 if (remainder == 0xffffffff)
1748 if (reload_completed && rtx_equal_p (target, source))
1749 return 0;
1750 if (generate)
1751 emit_constant_insn (cond,
1752 gen_rtx_SET (VOIDmode, target, source));
1753 return 1;
1755 can_invert = 1;
1756 break;
1758 case XOR:
1759 if (remainder == 0)
1761 if (reload_completed && rtx_equal_p (target, source))
1762 return 0;
1763 if (generate)
1764 emit_constant_insn (cond,
1765 gen_rtx_SET (VOIDmode, target, source));
1766 return 1;
1769 /* We don't know how to handle other cases yet. */
1770 gcc_assert (remainder == 0xffffffff);
1772 if (generate)
1773 emit_constant_insn (cond,
1774 gen_rtx_SET (VOIDmode, target,
1775 gen_rtx_NOT (mode, source)));
1776 return 1;
1778 case MINUS:
1779 /* We treat MINUS as (val - source), since (source - val) is always
1780 passed as (source + (-val)). */
1781 if (remainder == 0)
1783 if (generate)
1784 emit_constant_insn (cond,
1785 gen_rtx_SET (VOIDmode, target,
1786 gen_rtx_NEG (mode, source)));
1787 return 1;
1789 if (const_ok_for_arm (val))
1791 if (generate)
1792 emit_constant_insn (cond,
1793 gen_rtx_SET (VOIDmode, target,
1794 gen_rtx_MINUS (mode, GEN_INT (val),
1795 source)));
1796 return 1;
1798 can_negate = 1;
1800 break;
1802 default:
1803 gcc_unreachable ();
1806 /* If we can do it in one insn, get out quickly. */
1807 if (const_ok_for_arm (val)
1808 || (can_negate_initial && const_ok_for_arm (-val))
1809 || (can_invert && const_ok_for_arm (~val)))
1811 if (generate)
1812 emit_constant_insn (cond,
1813 gen_rtx_SET (VOIDmode, target,
1814 (source
1815 ? gen_rtx_fmt_ee (code, mode, source,
1816 GEN_INT (val))
1817 : GEN_INT (val))));
1818 return 1;
1821 /* Calculate a few attributes that may be useful for specific
1822 optimizations. */
1823 for (i = 31; i >= 0; i--)
1825 if ((remainder & (1 << i)) == 0)
1826 clear_sign_bit_copies++;
1827 else
1828 break;
1831 for (i = 31; i >= 0; i--)
1833 if ((remainder & (1 << i)) != 0)
1834 set_sign_bit_copies++;
1835 else
1836 break;
1839 for (i = 0; i <= 31; i++)
1841 if ((remainder & (1 << i)) == 0)
1842 clear_zero_bit_copies++;
1843 else
1844 break;
1847 for (i = 0; i <= 31; i++)
1849 if ((remainder & (1 << i)) != 0)
1850 set_zero_bit_copies++;
1851 else
1852 break;
1855 switch (code)
1857 case SET:
1858 /* See if we can do this by sign_extending a constant that is known
1859 to be negative. This is a good way of doing it, since the shift
1860 may well merge into a subsequent insn. */
1861 if (set_sign_bit_copies > 1)
1863 if (const_ok_for_arm
1864 (temp1 = ARM_SIGN_EXTEND (remainder
1865 << (set_sign_bit_copies - 1))))
1867 if (generate)
1869 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1870 emit_constant_insn (cond,
1871 gen_rtx_SET (VOIDmode, new_src,
1872 GEN_INT (temp1)));
1873 emit_constant_insn (cond,
1874 gen_ashrsi3 (target, new_src,
1875 GEN_INT (set_sign_bit_copies - 1)));
1877 return 2;
1879 /* For an inverted constant, we will need to set the low bits,
1880 these will be shifted out of harm's way. */
1881 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1882 if (const_ok_for_arm (~temp1))
1884 if (generate)
1886 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1887 emit_constant_insn (cond,
1888 gen_rtx_SET (VOIDmode, new_src,
1889 GEN_INT (temp1)));
1890 emit_constant_insn (cond,
1891 gen_ashrsi3 (target, new_src,
1892 GEN_INT (set_sign_bit_copies - 1)));
1894 return 2;
1898 /* See if we can calculate the value as the difference between two
1899 valid immediates. */
1900 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1902 int topshift = clear_sign_bit_copies & ~1;
1904 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1905 & (0xff000000 >> topshift));
1907 /* If temp1 is zero, then that means the 9 most significant
1908 bits of remainder were 1 and we've caused it to overflow.
1909 When topshift is 0 we don't need to do anything since we
1910 can borrow from 'bit 32'. */
1911 if (temp1 == 0 && topshift != 0)
1912 temp1 = 0x80000000 >> (topshift - 1);
1914 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
1916 if (const_ok_for_arm (temp2))
1918 if (generate)
1920 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1921 emit_constant_insn (cond,
1922 gen_rtx_SET (VOIDmode, new_src,
1923 GEN_INT (temp1)));
1924 emit_constant_insn (cond,
1925 gen_addsi3 (target, new_src,
1926 GEN_INT (-temp2)));
1929 return 2;
1933 /* See if we can generate this by setting the bottom (or the top)
1934 16 bits, and then shifting these into the other half of the
1935 word. We only look for the simplest cases, to do more would cost
1936 too much. Be careful, however, not to generate this when the
1937 alternative would take fewer insns. */
1938 if (val & 0xffff0000)
1940 temp1 = remainder & 0xffff0000;
1941 temp2 = remainder & 0x0000ffff;
1943 /* Overlaps outside this range are best done using other methods. */
1944 for (i = 9; i < 24; i++)
1946 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1947 && !const_ok_for_arm (temp2))
1949 rtx new_src = (subtargets
1950 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1951 : target);
1952 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1953 source, subtargets, generate);
1954 source = new_src;
1955 if (generate)
1956 emit_constant_insn
1957 (cond,
1958 gen_rtx_SET
1959 (VOIDmode, target,
1960 gen_rtx_IOR (mode,
1961 gen_rtx_ASHIFT (mode, source,
1962 GEN_INT (i)),
1963 source)));
1964 return insns + 1;
1968 /* Don't duplicate cases already considered. */
1969 for (i = 17; i < 24; i++)
1971 if (((temp1 | (temp1 >> i)) == remainder)
1972 && !const_ok_for_arm (temp1))
1974 rtx new_src = (subtargets
1975 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1976 : target);
1977 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1978 source, subtargets, generate);
1979 source = new_src;
1980 if (generate)
1981 emit_constant_insn
1982 (cond,
1983 gen_rtx_SET (VOIDmode, target,
1984 gen_rtx_IOR
1985 (mode,
1986 gen_rtx_LSHIFTRT (mode, source,
1987 GEN_INT (i)),
1988 source)));
1989 return insns + 1;
1993 break;
1995 case IOR:
1996 case XOR:
1997 /* If we have IOR or XOR, and the constant can be loaded in a
1998 single instruction, and we can find a temporary to put it in,
1999 then this can be done in two instructions instead of 3-4. */
2000 if (subtargets
2001 /* TARGET can't be NULL if SUBTARGETS is 0. */
2002 || (reload_completed && !reg_mentioned_p (target, source)))
2004 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2006 if (generate)
2008 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2010 emit_constant_insn (cond,
2011 gen_rtx_SET (VOIDmode, sub,
2012 GEN_INT (val)));
2013 emit_constant_insn (cond,
2014 gen_rtx_SET (VOIDmode, target,
2015 gen_rtx_fmt_ee (code, mode,
2016 source, sub)));
2018 return 2;
2022 if (code == XOR)
2023 break;
2025 if (set_sign_bit_copies > 8
2026 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2028 if (generate)
2030 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2031 rtx shift = GEN_INT (set_sign_bit_copies);
2033 emit_constant_insn
2034 (cond,
2035 gen_rtx_SET (VOIDmode, sub,
2036 gen_rtx_NOT (mode,
2037 gen_rtx_ASHIFT (mode,
2038 source,
2039 shift))));
2040 emit_constant_insn
2041 (cond,
2042 gen_rtx_SET (VOIDmode, target,
2043 gen_rtx_NOT (mode,
2044 gen_rtx_LSHIFTRT (mode, sub,
2045 shift))));
2047 return 2;
2050 if (set_zero_bit_copies > 8
2051 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2053 if (generate)
2055 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2056 rtx shift = GEN_INT (set_zero_bit_copies);
2058 emit_constant_insn
2059 (cond,
2060 gen_rtx_SET (VOIDmode, sub,
2061 gen_rtx_NOT (mode,
2062 gen_rtx_LSHIFTRT (mode,
2063 source,
2064 shift))));
2065 emit_constant_insn
2066 (cond,
2067 gen_rtx_SET (VOIDmode, target,
2068 gen_rtx_NOT (mode,
2069 gen_rtx_ASHIFT (mode, sub,
2070 shift))));
2072 return 2;
2075 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2077 if (generate)
2079 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2080 emit_constant_insn (cond,
2081 gen_rtx_SET (VOIDmode, sub,
2082 gen_rtx_NOT (mode, source)));
2083 source = sub;
2084 if (subtargets)
2085 sub = gen_reg_rtx (mode);
2086 emit_constant_insn (cond,
2087 gen_rtx_SET (VOIDmode, sub,
2088 gen_rtx_AND (mode, source,
2089 GEN_INT (temp1))));
2090 emit_constant_insn (cond,
2091 gen_rtx_SET (VOIDmode, target,
2092 gen_rtx_NOT (mode, sub)));
2094 return 3;
2096 break;
2098 case AND:
2099 /* See if two shifts will do two or more insns' worth of work. */
2100 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2102 HOST_WIDE_INT shift_mask = ((0xffffffff
2103 << (32 - clear_sign_bit_copies))
2104 & 0xffffffff);
2106 if ((remainder | shift_mask) != 0xffffffff)
2108 if (generate)
2110 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2111 insns = arm_gen_constant (AND, mode, cond,
2112 remainder | shift_mask,
2113 new_src, source, subtargets, 1);
2114 source = new_src;
2116 else
2118 rtx targ = subtargets ? NULL_RTX : target;
2119 insns = arm_gen_constant (AND, mode, cond,
2120 remainder | shift_mask,
2121 targ, source, subtargets, 0);
2125 if (generate)
2127 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2128 rtx shift = GEN_INT (clear_sign_bit_copies);
2130 emit_insn (gen_ashlsi3 (new_src, source, shift));
2131 emit_insn (gen_lshrsi3 (target, new_src, shift));
2134 return insns + 2;
2137 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2139 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2141 if ((remainder | shift_mask) != 0xffffffff)
2143 if (generate)
2145 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2147 insns = arm_gen_constant (AND, mode, cond,
2148 remainder | shift_mask,
2149 new_src, source, subtargets, 1);
2150 source = new_src;
2152 else
2154 rtx targ = subtargets ? NULL_RTX : target;
2156 insns = arm_gen_constant (AND, mode, cond,
2157 remainder | shift_mask,
2158 targ, source, subtargets, 0);
2162 if (generate)
2164 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2165 rtx shift = GEN_INT (clear_zero_bit_copies);
2167 emit_insn (gen_lshrsi3 (new_src, source, shift));
2168 emit_insn (gen_ashlsi3 (target, new_src, shift));
2171 return insns + 2;
2174 break;
2176 default:
2177 break;
2180 for (i = 0; i < 32; i++)
2181 if (remainder & (1 << i))
2182 num_bits_set++;
2184 if (code == AND || (can_invert && num_bits_set > 16))
2185 remainder = (~remainder) & 0xffffffff;
2186 else if (code == PLUS && num_bits_set > 16)
2187 remainder = (-remainder) & 0xffffffff;
2188 else
2190 can_invert = 0;
2191 can_negate = 0;
2194 /* Now try and find a way of doing the job in either two or three
2195 instructions.
2196 We start by looking for the largest block of zeros that are aligned on
2197 a 2-bit boundary; we then fill up the temps, wrapping around to the
2198 top of the word when we drop off the bottom.
2199 In the worst case this code should produce no more than four insns. */
2201 int best_start = 0;
2202 int best_consecutive_zeros = 0;
2204 for (i = 0; i < 32; i += 2)
2206 int consecutive_zeros = 0;
2208 if (!(remainder & (3 << i)))
2210 while ((i < 32) && !(remainder & (3 << i)))
2212 consecutive_zeros += 2;
2213 i += 2;
2215 if (consecutive_zeros > best_consecutive_zeros)
2217 best_consecutive_zeros = consecutive_zeros;
2218 best_start = i - consecutive_zeros;
2220 i -= 2;
2224 /* So long as it won't require any more insns to do so, it's
2225 desirable to emit a small constant (in bits 0...9) in the last
2226 insn. This way there is more chance that it can be combined with
2227 a later addressing insn to form a pre-indexed load or store
2228 operation. Consider:
2230 *((volatile int *)0xe0000100) = 1;
2231 *((volatile int *)0xe0000110) = 2;
2233 We want this to wind up as:
2235 mov rA, #0xe0000000
2236 mov rB, #1
2237 str rB, [rA, #0x100]
2238 mov rB, #2
2239 str rB, [rA, #0x110]
2241 rather than having to synthesize both large constants from scratch.
2243 Therefore, we calculate how many insns would be required to emit
2244 the constant starting from `best_start', and also starting from
2245 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2246 yield a shorter sequence, we may as well use zero. */
2247 if (best_start != 0
2248 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2249 && (count_insns_for_constant (remainder, 0) <=
2250 count_insns_for_constant (remainder, best_start)))
2251 best_start = 0;
2253 /* Now start emitting the insns. */
2254 i = best_start;
2257 int end;
2259 if (i <= 0)
2260 i += 32;
2261 if (remainder & (3 << (i - 2)))
2263 end = i - 8;
2264 if (end < 0)
2265 end += 32;
2266 temp1 = remainder & ((0x0ff << end)
2267 | ((i < end) ? (0xff >> (32 - end)) : 0));
2268 remainder &= ~temp1;
2270 if (generate)
2272 rtx new_src, temp1_rtx;
2274 if (code == SET || code == MINUS)
2276 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2277 if (can_invert && code != MINUS)
2278 temp1 = ~temp1;
2280 else
2282 if (remainder && subtargets)
2283 new_src = gen_reg_rtx (mode);
2284 else
2285 new_src = target;
2286 if (can_invert)
2287 temp1 = ~temp1;
2288 else if (can_negate)
2289 temp1 = -temp1;
2292 temp1 = trunc_int_for_mode (temp1, mode);
2293 temp1_rtx = GEN_INT (temp1);
2295 if (code == SET)
2297 else if (code == MINUS)
2298 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2299 else
2300 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2302 emit_constant_insn (cond,
2303 gen_rtx_SET (VOIDmode, new_src,
2304 temp1_rtx));
2305 source = new_src;
2308 if (code == SET)
2310 can_invert = 0;
2311 code = PLUS;
2313 else if (code == MINUS)
2314 code = PLUS;
2316 insns++;
2317 i -= 6;
2319 i -= 2;
2321 while (remainder);
2324 return insns;
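/* A minimal standalone sketch (hypothetical name, not part of this
   file, assuming a 32-bit unsigned int) of the immediate test behind
   the const_ok_for_arm calls above: an ARM data-processing immediate
   is an 8-bit value rotated right by an even amount.  */
static int
sketch_const_ok_for_arm (unsigned int i)
{
  int rot;

  for (rot = 0; rot < 32; rot += 2)
    {
      /* Rotating I left by ROT undoes an "imm8 ROR #rot" encoding;
	 if the result fits in 8 bits, I is encodable.  */
      unsigned int v = rot ? ((i << rot) | (i >> (32 - rot))) : i;

      if ((v & ~0xffu) == 0)
	return 1;
    }

  return 0;
}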
2327 /* Canonicalize a comparison so that we are more likely to recognize it.
2328 This can be done for a few constant compares, where we can make the
2329 immediate value easier to load. */
2331 enum rtx_code
2332 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2333 rtx * op1)
2335 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2336 unsigned HOST_WIDE_INT maxval;
2337 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2339 switch (code)
2341 case EQ:
2342 case NE:
2343 return code;
2345 case GT:
2346 case LE:
2347 if (i != maxval
2348 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2350 *op1 = GEN_INT (i + 1);
2351 return code == GT ? GE : LT;
2353 break;
2355 case GE:
2356 case LT:
2357 if (i != ~maxval
2358 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2360 *op1 = GEN_INT (i - 1);
2361 return code == GE ? GT : LE;
2363 break;
2365 case GTU:
2366 case LEU:
2367 if (i != ~((unsigned HOST_WIDE_INT) 0)
2368 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2370 *op1 = GEN_INT (i + 1);
2371 return code == GTU ? GEU : LTU;
2373 break;
2375 case GEU:
2376 case LTU:
2377 if (i != 0
2378 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2380 *op1 = GEN_INT (i - 1);
2381 return code == GEU ? GTU : LEU;
2383 break;
2385 default:
2386 gcc_unreachable ();
2389 return code;
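/* Worked example (illustrative): "x <= 0xfff" cannot use 0xfff as an
   immediate (twelve set bits do not fit an 8-bit rotated field), but
   0x1000 is encodable, so the GT/LE case above rewrites the test as
   "x < 0x1000" -- the same predicate, satisfiable by a single cmp.  */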
2393 /* Define how to find the value returned by a function. */
2396 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2398 enum machine_mode mode;
2399 int unsignedp ATTRIBUTE_UNUSED;
2400 rtx r ATTRIBUTE_UNUSED;
2402 mode = TYPE_MODE (type);
2403 /* Promote integer types. */
2404 if (INTEGRAL_TYPE_P (type))
2405 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2407 /* Promote small structs returned in a register to full-word size
2408 for big-endian AAPCS. */
2409 if (arm_return_in_msb (type))
2411 HOST_WIDE_INT size = int_size_in_bytes (type);
2412 if (size % UNITS_PER_WORD != 0)
2414 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2415 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2419 return LIBCALL_VALUE(mode);
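/* Illustrative example: on a big-endian AAPCS target a 3-byte struct
   returned in a register is widened here to a full word (SImode), so
   the return-in-MSB machinery can place the value in the most
   significant bytes of r0 as the ABI requires.  */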
2422 /* Determine the amount of memory needed to store the possible return
2423 registers of an untyped call. */
2425 arm_apply_result_size (void)
2427 int size = 16;
2429 if (TARGET_ARM)
2431 if (TARGET_HARD_FLOAT_ABI)
2433 if (TARGET_FPA)
2434 size += 12;
2435 if (TARGET_MAVERICK)
2436 size += 8;
2438 if (TARGET_IWMMXT_ABI)
2439 size += 8;
2442 return size;
2445 /* Decide whether a type should be returned in memory (true)
2446 or in a register (false). This is called by the macro
2447 RETURN_IN_MEMORY. */
2449 arm_return_in_memory (tree type)
2451 HOST_WIDE_INT size;
2453 if (!AGGREGATE_TYPE_P (type) &&
2454 (TREE_CODE (type) != VECTOR_TYPE) &&
2455 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2456 /* All simple types are returned in registers.
2457 For AAPCS, complex types are treated the same as aggregates. */
2458 return 0;
2460 size = int_size_in_bytes (type);
2462 if (arm_abi != ARM_ABI_APCS)
2464 /* ATPCS and later return aggregate types in memory only if they are
2465 larger than a word (or are variable size). */
2466 return (size < 0 || size > UNITS_PER_WORD);
2469 /* To maximize backwards compatibility with previous versions of gcc,
2470 return vectors up to 4 words in registers. */
2471 if (TREE_CODE (type) == VECTOR_TYPE)
2472 return (size < 0 || size > (4 * UNITS_PER_WORD));
2474 /* For the arm-wince targets we choose to be compatible with Microsoft's
2475 ARM and Thumb compilers, which always return aggregates in memory. */
2476 #ifndef ARM_WINCE
2477 /* All structures/unions bigger than one word are returned in memory.
2478 Also catch the case where int_size_in_bytes returns -1. In this case
2479 the aggregate is either huge or of variable size, and in either case
2480 we will want to return it via memory and not in a register. */
2481 if (size < 0 || size > UNITS_PER_WORD)
2482 return 1;
2484 if (TREE_CODE (type) == RECORD_TYPE)
2486 tree field;
2488 /* For a struct the APCS says that we only return in a register
2489 if the type is 'integer like' and every addressable element
2490 has an offset of zero. For practical purposes this means
2491 that the structure can have at most one non bit-field element
2492 and that this element must be the first one in the structure. */
2494 /* Find the first field, ignoring non FIELD_DECL things which will
2495 have been created by C++. */
2496 for (field = TYPE_FIELDS (type);
2497 field && TREE_CODE (field) != FIELD_DECL;
2498 field = TREE_CHAIN (field))
2499 continue;
2501 if (field == NULL)
2502 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2504 /* Check that the first field is valid for returning in a register. */
2506 /* ... Floats are not allowed */
2507 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2508 return 1;
2510 /* ... Aggregates that are not themselves valid for returning in
2511 a register are not allowed. */
2512 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2513 return 1;
2515 /* Now check the remaining fields, if any. Only bitfields are allowed,
2516 since they are not addressable. */
2517 for (field = TREE_CHAIN (field);
2518 field;
2519 field = TREE_CHAIN (field))
2521 if (TREE_CODE (field) != FIELD_DECL)
2522 continue;
2524 if (!DECL_BIT_FIELD_TYPE (field))
2525 return 1;
2528 return 0;
2531 if (TREE_CODE (type) == UNION_TYPE)
2533 tree field;
2535 /* Unions can be returned in registers if every element is
2536 integral, or can be returned in an integer register. */
2537 for (field = TYPE_FIELDS (type);
2538 field;
2539 field = TREE_CHAIN (field))
2541 if (TREE_CODE (field) != FIELD_DECL)
2542 continue;
2544 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2545 return 1;
2547 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2548 return 1;
2551 return 0;
2553 #endif /* not ARM_WINCE */
2555 /* Return all other types in memory. */
2556 return 1;
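/* Illustrative APCS cases (assuming arm_abi == ARM_ABI_APCS and a
   non-wince target, so the code above applies):

     struct a { int x; };          integer-like, one word -> register
     struct b { float f; };        first field is a float -> memory
     struct c { int x, y; };       larger than one word   -> memory
     union  d { int i; char c; };  all integral           -> register  */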
2559 /* Indicate whether or not words of a double are in big-endian order. */
2562 arm_float_words_big_endian (void)
2564 if (TARGET_MAVERICK)
2565 return 0;
2567 /* For FPA, float words are always big-endian. For VFP, float words
2568 follow the memory system mode. */
2570 if (TARGET_FPA)
2572 return 1;
2575 if (TARGET_VFP)
2576 return (TARGET_BIG_END ? 1 : 0);
2578 return 1;
2581 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2582 for a call to a function whose data type is FNTYPE.
2583 For a library call, FNTYPE is NULL. */
2584 void
2585 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2586 rtx libname ATTRIBUTE_UNUSED,
2587 tree fndecl ATTRIBUTE_UNUSED)
2589 /* On the ARM, the offset starts at 0. */
2590 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2591 pcum->iwmmxt_nregs = 0;
2592 pcum->can_split = true;
2594 pcum->call_cookie = CALL_NORMAL;
2596 if (TARGET_LONG_CALLS)
2597 pcum->call_cookie = CALL_LONG;
2599 /* Check for long call/short call attributes. The attributes
2600 override any command line option. */
2601 if (fntype)
2603 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2604 pcum->call_cookie = CALL_SHORT;
2605 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2606 pcum->call_cookie = CALL_LONG;
2609 /* Varargs vectors are treated the same as long long.
2610 named_count avoids having to change the way arm handles 'named'. */
2611 pcum->named_count = 0;
2612 pcum->nargs = 0;
2614 if (TARGET_REALLY_IWMMXT && fntype)
2616 tree fn_arg;
2618 for (fn_arg = TYPE_ARG_TYPES (fntype);
2619 fn_arg;
2620 fn_arg = TREE_CHAIN (fn_arg))
2621 pcum->named_count += 1;
2623 if (! pcum->named_count)
2624 pcum->named_count = INT_MAX;
2629 /* Return true if mode/type need doubleword alignment. */
2630 bool
2631 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2633 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2634 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2638 /* Determine where to put an argument to a function.
2639 Value is zero to push the argument on the stack,
2640 or a hard register in which to store the argument.
2642 MODE is the argument's machine mode.
2643 TYPE is the data type of the argument (as a tree).
2644 This is null for libcalls where that information may
2645 not be available.
2646 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2647 the preceding args and about the function being called.
2648 NAMED is nonzero if this argument is a named parameter
2649 (otherwise it is an extra parameter matching an ellipsis). */
2652 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2653 tree type, int named)
2655 int nregs;
2657 /* Varargs vectors are treated the same as long long.
2658 named_count avoids having to change the way arm handles 'named'. */
2659 if (TARGET_IWMMXT_ABI
2660 && arm_vector_mode_supported_p (mode)
2661 && pcum->named_count > pcum->nargs + 1)
2663 if (pcum->iwmmxt_nregs <= 9)
2664 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2665 else
2667 pcum->can_split = false;
2668 return NULL_RTX;
2672 /* Put doubleword aligned quantities in even register pairs. */
2673 if (pcum->nregs & 1
2674 && ARM_DOUBLEWORD_ALIGN
2675 && arm_needs_doubleword_align (mode, type))
2676 pcum->nregs++;
2678 if (mode == VOIDmode)
2679 /* Compute operand 2 of the call insn. */
2680 return GEN_INT (pcum->call_cookie);
2682 /* Only allow splitting an arg between regs and memory if all preceding
2683 args were allocated to regs. For args passed by reference we only count
2684 the reference pointer. */
2685 if (pcum->can_split)
2686 nregs = 1;
2687 else
2688 nregs = ARM_NUM_REGS2 (mode, type);
2690 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2691 return NULL_RTX;
2693 return gen_rtx_REG (mode, pcum->nregs);
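/* Illustrative AAPCS examples of the rules above (assuming
   ARM_DOUBLEWORD_ALIGN and core-register argument passing):

     void f (int a, double b);         a -> r0; b needs an even pair,
                                       so r1 is skipped and b -> r2/r3.
     void g (int a, int b, double c);  a -> r0, b -> r1, c -> r2/r3.  */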
2696 static int
2697 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2698 tree type, bool named ATTRIBUTE_UNUSED)
2700 int nregs = pcum->nregs;
2702 if (arm_vector_mode_supported_p (mode))
2703 return 0;
2705 if (NUM_ARG_REGS > nregs
2706 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2707 && pcum->can_split)
2708 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2710 return 0;
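/* Illustrative split (assuming an ABI without doubleword alignment,
   e.g. old APCS): for "void h (int a, int b, int c, double d)", a..c
   take r0-r2; d needs two words but only r3 is left, so this returns
   UNITS_PER_WORD -- the first word of d goes in r3 and the second on
   the stack.  */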
2713 /* Variable sized types are passed by reference. This is a GCC
2714 extension to the ARM ABI. */
2716 static bool
2717 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2718 enum machine_mode mode ATTRIBUTE_UNUSED,
2719 tree type, bool named ATTRIBUTE_UNUSED)
2721 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2724 /* Encode the current state of the #pragma [no_]long_calls. */
2725 typedef enum
2727 OFF, /* No #pragma [no_]long_calls is in effect. */
2728 LONG, /* #pragma long_calls is in effect. */
2729 SHORT /* #pragma no_long_calls is in effect. */
2730 } arm_pragma_enum;
2732 static arm_pragma_enum arm_pragma_long_calls = OFF;
2734 void
2735 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2737 arm_pragma_long_calls = LONG;
2740 void
2741 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2743 arm_pragma_long_calls = SHORT;
2746 void
2747 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2749 arm_pragma_long_calls = OFF;
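/* Illustrative source-level usage of the pragmas handled above and
   of the equivalent per-declaration attributes:

     #pragma long_calls
     void far_away (void);                        -- gets long_call
     #pragma no_long_calls
     void near_by (void);                         -- gets short_call
     #pragma long_calls_off
     void normal (void);                          -- command-line default

     void f (void) __attribute__ ((long_call));   -- per declaration  */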
2752 /* Table of machine attributes. */
2753 const struct attribute_spec arm_attribute_table[] =
2755 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2756 /* Function calls made to this symbol must be done indirectly, because
2757 it may lie outside of the 26 bit addressing range of a normal function
2758 call. */
2759 { "long_call", 0, 0, false, true, true, NULL },
2760 /* These functions, by contrast, are always known to reside within the
2761 26 bit addressing range. */
2762 { "short_call", 0, 0, false, true, true, NULL },
2763 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2764 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2765 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2766 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2767 #ifdef ARM_PE
2768 /* ARM/PE has three new attributes:
2769 interfacearm - ?
2770 dllexport - for exporting a function/variable that will live in a dll
2771 dllimport - for importing a function/variable from a dll
2773 Microsoft allows multiple declspecs in one __declspec, separating
2774 them with spaces. We do NOT support this. Instead, use __declspec
2775 multiple times.
2777 { "dllimport", 0, 0, true, false, false, NULL },
2778 { "dllexport", 0, 0, true, false, false, NULL },
2779 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2780 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2781 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2782 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2783 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2784 #endif
2785 { NULL, 0, 0, false, false, false, NULL }
2788 /* Handle an attribute requiring a FUNCTION_DECL;
2789 arguments as in struct attribute_spec.handler. */
2790 static tree
2791 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2792 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2794 if (TREE_CODE (*node) != FUNCTION_DECL)
2796 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2797 IDENTIFIER_POINTER (name));
2798 *no_add_attrs = true;
2801 return NULL_TREE;
2804 /* Handle an "interrupt" or "isr" attribute;
2805 arguments as in struct attribute_spec.handler. */
2806 static tree
2807 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2808 bool *no_add_attrs)
2810 if (DECL_P (*node))
2812 if (TREE_CODE (*node) != FUNCTION_DECL)
2814 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2815 IDENTIFIER_POINTER (name));
2816 *no_add_attrs = true;
2818 /* FIXME: the argument if any is checked for type attributes;
2819 should it be checked for decl ones? */
2821 else
2823 if (TREE_CODE (*node) == FUNCTION_TYPE
2824 || TREE_CODE (*node) == METHOD_TYPE)
2826 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2828 warning (OPT_Wattributes, "%qs attribute ignored",
2829 IDENTIFIER_POINTER (name));
2830 *no_add_attrs = true;
2833 else if (TREE_CODE (*node) == POINTER_TYPE
2834 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2835 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2836 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2838 *node = build_variant_type_copy (*node);
2839 TREE_TYPE (*node) = build_type_attribute_variant
2840 (TREE_TYPE (*node),
2841 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2842 *no_add_attrs = true;
2844 else
2846 /* Possibly pass this attribute on from the type to a decl. */
2847 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2848 | (int) ATTR_FLAG_FUNCTION_NEXT
2849 | (int) ATTR_FLAG_ARRAY_NEXT))
2851 *no_add_attrs = true;
2852 return tree_cons (name, args, NULL_TREE);
2854 else
2856 warning (OPT_Wattributes, "%qs attribute ignored",
2857 IDENTIFIER_POINTER (name));
2862 return NULL_TREE;
2865 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2866 /* Handle the "notshared" attribute. This attribute is another way of
2867 requesting hidden visibility. ARM's compiler supports
2868 "__declspec(notshared)"; we support the same thing via an
2869 attribute. */
2871 static tree
2872 arm_handle_notshared_attribute (tree *node,
2873 tree name ATTRIBUTE_UNUSED,
2874 tree args ATTRIBUTE_UNUSED,
2875 int flags ATTRIBUTE_UNUSED,
2876 bool *no_add_attrs)
2878 tree decl = TYPE_NAME (*node);
2880 if (decl)
2882 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2883 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2884 *no_add_attrs = false;
2886 return NULL_TREE;
2888 #endif
2890 /* Return 0 if the attributes for two types are incompatible, 1 if they
2891 are compatible, and 2 if they are nearly compatible (which causes a
2892 warning to be generated). */
2893 static int
2894 arm_comp_type_attributes (tree type1, tree type2)
2896 int l1, l2, s1, s2;
2898 /* Check for mismatch of non-default calling convention. */
2899 if (TREE_CODE (type1) != FUNCTION_TYPE)
2900 return 1;
2902 /* Check for mismatched call attributes. */
2903 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2904 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2905 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2906 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2908 /* Only bother to check if an attribute is defined. */
2909 if (l1 | l2 | s1 | s2)
2911 /* If one type has an attribute, the other must have the same attribute. */
2912 if ((l1 != l2) || (s1 != s2))
2913 return 0;
2915 /* Disallow mixed attributes. */
2916 if ((l1 & s2) || (l2 & s1))
2917 return 0;
2920 /* Check for mismatched ISR attribute. */
2921 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2922 if (! l1)
2923 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2924 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2925 if (! l2)
2926 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2927 if (l1 != l2)
2928 return 0;
2930 return 1;
2933 /* Encode long_call or short_call attribute by prefixing
2934 symbol name in DECL with a special character FLAG. */
2935 void
2936 arm_encode_call_attribute (tree decl, int flag)
2938 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2939 int len = strlen (str);
2940 char * newstr;
2942 /* Do not allow weak functions to be treated as short call. */
2943 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2944 return;
2946 newstr = alloca (len + 2);
2947 newstr[0] = flag;
2948 strcpy (newstr + 1, str);
2950 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2951 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2954 /* Assign default attributes to a newly defined type. This is used to
2955 set short_call/long_call attributes for function types of
2956 functions defined inside corresponding #pragma scopes. */
2957 static void
2958 arm_set_default_type_attributes (tree type)
2960 /* Add __attribute__ ((long_call)) to all functions when inside
2961 #pragma long_calls, or __attribute__ ((short_call)) when inside
2962 #pragma no_long_calls. */
2963 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2965 tree type_attr_list, attr_name;
2966 type_attr_list = TYPE_ATTRIBUTES (type);
2968 if (arm_pragma_long_calls == LONG)
2969 attr_name = get_identifier ("long_call");
2970 else if (arm_pragma_long_calls == SHORT)
2971 attr_name = get_identifier ("short_call");
2972 else
2973 return;
2975 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2976 TYPE_ATTRIBUTES (type) = type_attr_list;
2980 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2981 defined within the current compilation unit. If this cannot be
2982 determined, then 0 is returned. */
2983 static int
2984 current_file_function_operand (rtx sym_ref)
2986 /* This is a bit of a fib. A function will have a short call flag
2987 applied to its name if it has the short call attribute, or it has
2988 already been defined within the current compilation unit. */
2989 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2990 return 1;
2992 /* The current function is always defined within the current compilation
2993 unit. If it is a weak definition, however, then this may not be the real
2994 definition of the function, and so we have to say no. */
2995 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2996 && !DECL_WEAK (current_function_decl))
2997 return 1;
2999 /* We cannot make the determination - default to returning 0. */
3000 return 0;
3003 /* Return nonzero if a 32 bit "long_call" should be generated for
3004 this call. We generate a long_call if the function:
3006 a. has an __attribute__ ((long_call))
3007 or b. is within the scope of a #pragma long_calls
3008 or c. the -mlong-calls command line switch has been specified,
3009 and either:
3010 1. -ffunction-sections is in effect
3011 or 2. the current function has __attribute__ ((section))
3012 or 3. the target function has __attribute__ ((section))
3014 However we do not generate a long call if the function:
3016 d. has an __attribute__ ((short_call))
3017 or e. is inside the scope of a #pragma no_long_calls
3018 or f. is defined within the current compilation unit.
3020 This function will be called by C fragments contained in the machine
3021 description file. SYM_REF and CALL_COOKIE correspond to the matched
3022 rtl operands. CALL_SYMBOL is used to distinguish between
3023 two different callers of the function. It is set to 1 in the
3024 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3025 and "call_value" patterns. This is because of the difference in the
3026 SYM_REFs passed by these patterns. */
3028 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3030 if (!call_symbol)
3032 if (GET_CODE (sym_ref) != MEM)
3033 return 0;
3035 sym_ref = XEXP (sym_ref, 0);
3038 if (GET_CODE (sym_ref) != SYMBOL_REF)
3039 return 0;
3041 if (call_cookie & CALL_SHORT)
3042 return 0;
3044 if (TARGET_LONG_CALLS)
3046 if (flag_function_sections
3047 || DECL_SECTION_NAME (current_function_decl))
3048 /* c.3 is handled by the definition of the
3049 ARM_DECLARE_FUNCTION_SIZE macro. */
3050 return 1;
3053 if (current_file_function_operand (sym_ref))
3054 return 0;
3056 return (call_cookie & CALL_LONG)
3057 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3058 || TARGET_LONG_CALLS;
3061 /* Return nonzero if it is ok to make a tail-call to DECL. */
3062 static bool
3063 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3065 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3067 if (cfun->machine->sibcall_blocked)
3068 return false;
3070 /* Never tailcall something for which we have no decl, or if we
3071 are in Thumb mode. */
3072 if (decl == NULL || TARGET_THUMB)
3073 return false;
3075 /* Get the calling method. */
3076 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3077 call_type = CALL_SHORT;
3078 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3079 call_type = CALL_LONG;
3081 /* Cannot tail-call to long calls, since these are out of range of
3082 a branch instruction. However, if not compiling PIC, we know
3083 we can reach the symbol if it is in this compilation unit. */
3084 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3085 return false;
3087 /* If we are interworking and the function is not declared static
3088 then we can't tail-call it unless we know that it exists in this
3089 compilation unit (since it might be a Thumb routine). */
3090 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3091 return false;
3093 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3094 if (IS_INTERRUPT (arm_current_func_type ()))
3095 return false;
3097 /* Everything else is ok. */
3098 return true;
3102 /* Addressing mode support functions. */
3104 /* Return nonzero if X is a legitimate immediate operand when compiling
3105 for PIC. */
3107 legitimate_pic_operand_p (rtx x)
3109 if (CONSTANT_P (x)
3110 && flag_pic
3111 && (GET_CODE (x) == SYMBOL_REF
3112 || (GET_CODE (x) == CONST
3113 && GET_CODE (XEXP (x, 0)) == PLUS
3114 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
3115 return 0;
3117 return 1;
3121 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3123 if (GET_CODE (orig) == SYMBOL_REF
3124 || GET_CODE (orig) == LABEL_REF)
3126 #ifndef AOF_ASSEMBLER
3127 rtx pic_ref, address;
3128 #endif
3129 rtx insn;
3130 int subregs = 0;
3132 if (reg == 0)
3134 gcc_assert (!no_new_pseudos);
3135 reg = gen_reg_rtx (Pmode);
3137 subregs = 1;
3140 #ifdef AOF_ASSEMBLER
3141 /* The AOF assembler can generate relocations for these directly, and
3142 understands that the PIC register has to be added into the offset. */
3143 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3144 #else
3145 if (subregs)
3146 address = gen_reg_rtx (Pmode);
3147 else
3148 address = reg;
3150 if (TARGET_ARM)
3151 emit_insn (gen_pic_load_addr_arm (address, orig));
3152 else
3153 emit_insn (gen_pic_load_addr_thumb (address, orig));
3155 if ((GET_CODE (orig) == LABEL_REF
3156 || (GET_CODE (orig) == SYMBOL_REF &&
3157 SYMBOL_REF_LOCAL_P (orig)))
3158 && NEED_GOT_RELOC)
3159 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3160 else
3162 pic_ref = gen_const_mem (Pmode,
3163 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3164 address));
3167 insn = emit_move_insn (reg, pic_ref);
3168 #endif
3169 current_function_uses_pic_offset_table = 1;
3170 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3171 by the loop pass. */
3172 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3173 REG_NOTES (insn));
3174 return reg;
3176 else if (GET_CODE (orig) == CONST)
3178 rtx base, offset;
3180 if (GET_CODE (XEXP (orig, 0)) == PLUS
3181 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3182 return orig;
3184 if (reg == 0)
3186 gcc_assert (!no_new_pseudos);
3187 reg = gen_reg_rtx (Pmode);
3190 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3192 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3193 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3194 base == reg ? 0 : reg);
3196 if (GET_CODE (offset) == CONST_INT)
3198 /* The base register doesn't really matter; we only want to
3199 test the index for the appropriate mode. */
3200 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3202 gcc_assert (!no_new_pseudos);
3203 offset = force_reg (Pmode, offset);
3206 if (GET_CODE (offset) == CONST_INT)
3207 return plus_constant (base, INTVAL (offset));
3210 if (GET_MODE_SIZE (mode) > 4
3211 && (GET_MODE_CLASS (mode) == MODE_INT
3212 || TARGET_SOFT_FLOAT))
3214 emit_insn (gen_addsi3 (reg, base, offset));
3215 return reg;
3218 return gen_rtx_PLUS (Pmode, base, offset);
3221 return orig;
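/* Schematic result (illustrative; register names are placeholders):
   for a non-local global symbol this path loads the address from the
   GOT, roughly

       ldr rT, .LCx            @ GOT-relative offset of the symbol
       ldr rD, [rPIC, rT]      @ rPIC = PIC base register

   while a local symbol or label needs only the address computation,
   skipping the GOT load (the PLUS vs. gen_const_mem split above).  */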
3225 /* Find a spare low register to use during the prolog of a function. */
3227 static int
3228 thumb_find_work_register (unsigned long pushed_regs_mask)
3230 int reg;
3232 /* Check the argument registers first as these are call-used. The
3233 register allocation order means that sometimes r3 might be used
3234 but earlier argument registers might not, so check them all. */
3235 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3236 if (!regs_ever_live[reg])
3237 return reg;
3239 /* Before going on to check the call-saved registers we can try a couple
3240 more ways of deducing that r3 is available. The first is when we are
3241 pushing anonymous arguments onto the stack and we have fewer than 4
3242 registers' worth of fixed arguments (*). In this case r3 will be part of
3243 the variable argument list and so we can be sure that it will be
3244 pushed right at the start of the function. Hence it will be available
3245 for the rest of the prologue.
3246 (*): i.e. current_function_pretend_args_size is greater than 0. */
3247 if (cfun->machine->uses_anonymous_args
3248 && current_function_pretend_args_size > 0)
3249 return LAST_ARG_REGNUM;
3251 /* The other case is when we have fixed arguments but fewer than 4 registers'
3252 worth. In this case r3 might be used in the body of the function, but
3253 it is not being used to convey an argument into the function. In theory
3254 we could just check current_function_args_size to see how many bytes are
3255 being passed in argument registers, but it seems that it is unreliable.
3256 Sometimes it will have the value 0 when in fact arguments are being
3257 passed. (See testcase execute/20021111-1.c for an example). So we also
3258 check the args_info.nregs field as well. The problem with this field is
3259 that it makes no allowances for arguments that are passed to the
3260 function but which are not used. Hence we could miss an opportunity
3261 when a function has an unused argument in r3. But it is better to be
3262 safe than to be sorry. */
3263 if (! cfun->machine->uses_anonymous_args
3264 && current_function_args_size >= 0
3265 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3266 && cfun->args_info.nregs < 4)
3267 return LAST_ARG_REGNUM;
3269 /* Otherwise look for a call-saved register that is going to be pushed. */
3270 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3271 if (pushed_regs_mask & (1 << reg))
3272 return reg;
3274 /* Something went wrong - thumb_compute_save_reg_mask()
3275 should have arranged for a suitable register to be pushed. */
3276 gcc_unreachable ();
3280 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3281 low register. */
3283 void
3284 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3286 #ifndef AOF_ASSEMBLER
3287 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3288 rtx global_offset_table;
3290 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3291 return;
3293 gcc_assert (flag_pic);
3295 l1 = gen_label_rtx ();
3297 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3298 /* On the ARM the PC register contains 'dot + 8' at the time of the
3299 addition, on the Thumb it is 'dot + 4'. */
3300 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3301 if (GOT_PCREL)
3302 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3303 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3304 else
3305 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3307 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3309 if (TARGET_ARM)
3311 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3312 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3314 else
3316 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3318 /* We will have pushed the pic register, so we should always be
3319 able to find a work register. */
3320 pic_tmp = gen_rtx_REG (SImode,
3321 thumb_find_work_register (saved_regs));
3322 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3323 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3325 else
3326 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3327 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3330 /* Need to emit this whether or not we obey regdecls,
3331 since setjmp/longjmp can cause life info to screw up. */
3332 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3333 #endif /* AOF_ASSEMBLER */
3337 /* Return nonzero if X is valid as an ARM state addressing register. */
3338 static int
3339 arm_address_register_rtx_p (rtx x, int strict_p)
3341 int regno;
3343 if (GET_CODE (x) != REG)
3344 return 0;
3346 regno = REGNO (x);
3348 if (strict_p)
3349 return ARM_REGNO_OK_FOR_BASE_P (regno);
3351 return (regno <= LAST_ARM_REGNUM
3352 || regno >= FIRST_PSEUDO_REGISTER
3353 || regno == FRAME_POINTER_REGNUM
3354 || regno == ARG_POINTER_REGNUM);
3357 /* Return nonzero if X is a valid ARM state address operand. */
3359 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3360 int strict_p)
3362 bool use_ldrd;
3363 enum rtx_code code = GET_CODE (x);
3365 if (arm_address_register_rtx_p (x, strict_p))
3366 return 1;
3368 use_ldrd = (TARGET_LDRD
3369 && (mode == DImode
3370 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3372 if (code == POST_INC || code == PRE_DEC
3373 || ((code == PRE_INC || code == POST_DEC)
3374 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3375 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3377 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3378 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3379 && GET_CODE (XEXP (x, 1)) == PLUS
3380 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3382 rtx addend = XEXP (XEXP (x, 1), 1);
3384 /* Don't allow ldrd post increment by register because it's hard
3385 to fix up invalid register choices. */
3386 if (use_ldrd
3387 && GET_CODE (x) == POST_MODIFY
3388 && GET_CODE (addend) == REG)
3389 return 0;
3391 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3392 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3395 /* After reload constants split into minipools will have addresses
3396 from a LABEL_REF. */
3397 else if (reload_completed
3398 && (code == LABEL_REF
3399 || (code == CONST
3400 && GET_CODE (XEXP (x, 0)) == PLUS
3401 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3402 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3403 return 1;
3405 else if (mode == TImode)
3406 return 0;
3408 else if (code == PLUS)
3410 rtx xop0 = XEXP (x, 0);
3411 rtx xop1 = XEXP (x, 1);
3413 return ((arm_address_register_rtx_p (xop0, strict_p)
3414 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3415 || (arm_address_register_rtx_p (xop1, strict_p)
3416 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3419 #if 0
3420 /* Reload currently can't handle MINUS, so disable this for now */
3421 else if (GET_CODE (x) == MINUS)
3423 rtx xop0 = XEXP (x, 0);
3424 rtx xop1 = XEXP (x, 1);
3426 return (arm_address_register_rtx_p (xop0, strict_p)
3427 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3429 #endif
3431 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3432 && code == SYMBOL_REF
3433 && CONSTANT_POOL_ADDRESS_P (x)
3434 && ! (flag_pic
3435 && symbol_mentioned_p (get_pool_constant (x))))
3436 return 1;
3438 return 0;
3441 /* Return nonzero if INDEX is valid for an address index operand in
3442 ARM state. */
3443 static int
3444 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3445 int strict_p)
3447 HOST_WIDE_INT range;
3448 enum rtx_code code = GET_CODE (index);
3450 /* Standard coprocessor addressing modes. */
3451 if (TARGET_HARD_FLOAT
3452 && (TARGET_FPA || TARGET_MAVERICK)
3453 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3454 || (TARGET_MAVERICK && mode == DImode)))
3455 return (code == CONST_INT && INTVAL (index) < 1024
3456 && INTVAL (index) > -1024
3457 && (INTVAL (index) & 3) == 0);
3459 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3460 return (code == CONST_INT
3461 && INTVAL (index) < 1024
3462 && INTVAL (index) > -1024
3463 && (INTVAL (index) & 3) == 0);
3465 if (arm_address_register_rtx_p (index, strict_p)
3466 && (GET_MODE_SIZE (mode) <= 4))
3467 return 1;
3469 if (mode == DImode || mode == DFmode)
3471 if (code == CONST_INT)
3473 HOST_WIDE_INT val = INTVAL (index);
3475 if (TARGET_LDRD)
3476 return val > -256 && val < 256;
3477 else
3478 return val > -4096 && val < 4092;
3481 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3484 if (GET_MODE_SIZE (mode) <= 4
3485 && ! (arm_arch4
3486 && (mode == HImode
3487 || (mode == QImode && outer == SIGN_EXTEND))))
3489 if (code == MULT)
3491 rtx xiop0 = XEXP (index, 0);
3492 rtx xiop1 = XEXP (index, 1);
3494 return ((arm_address_register_rtx_p (xiop0, strict_p)
3495 && power_of_two_operand (xiop1, SImode))
3496 || (arm_address_register_rtx_p (xiop1, strict_p)
3497 && power_of_two_operand (xiop0, SImode)));
3499 else if (code == LSHIFTRT || code == ASHIFTRT
3500 || code == ASHIFT || code == ROTATERT)
3502 rtx op = XEXP (index, 1);
3504 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3505 && GET_CODE (op) == CONST_INT
3506 && INTVAL (op) > 0
3507 && INTVAL (op) <= 31);
3511 /* For ARM v4 we may be doing a sign-extend operation during the
3512 load. */
3513 if (arm_arch4)
3515 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3516 range = 256;
3517 else
3518 range = 4096;
3520 else
3521 range = (mode == HImode) ? 4095 : 4096;
3523 return (code == CONST_INT
3524 && INTVAL (index) < range
3525 && INTVAL (index) > -range);
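/* Illustrative ARM-state addresses accepted above (assuming arm_arch4
   and a word-sized, non-coprocessor access):

     ldr  r0, [r1, r2]            @ REG + REG index
     ldr  r0, [r1, #4095]         @ 12-bit immediate range for SImode
     ldr  r0, [r1, r2, lsl #2]    @ index scaled by a power of two
     ldrh r0, [r1, #255]          @ HImode is limited to 8-bit offsets  */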
3528 /* Return nonzero if X is valid as a Thumb state base register. */
3529 static int
3530 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3532 int regno;
3534 if (GET_CODE (x) != REG)
3535 return 0;
3537 regno = REGNO (x);
3539 if (strict_p)
3540 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3542 return (regno <= LAST_LO_REGNUM
3543 || regno > LAST_VIRTUAL_REGISTER
3544 || regno == FRAME_POINTER_REGNUM
3545 || (GET_MODE_SIZE (mode) >= 4
3546 && (regno == STACK_POINTER_REGNUM
3547 || regno >= FIRST_PSEUDO_REGISTER
3548 || x == hard_frame_pointer_rtx
3549 || x == arg_pointer_rtx)));
3552 /* Return nonzero if x is a legitimate index register. This is the case
3553 for any base register that can access a QImode object. */
3554 inline static int
3555 thumb_index_register_rtx_p (rtx x, int strict_p)
3557 return thumb_base_register_rtx_p (x, QImode, strict_p);
3560 /* Return nonzero if x is a legitimate Thumb-state address.
3562 The AP may be eliminated to either the SP or the FP, so we use the
3563 least common denominator, e.g. SImode, and offsets from 0 to 64.
3565 ??? Verify whether the above is the right approach.
3567 ??? Also, the FP may be eliminated to the SP, so perhaps that
3568 needs special handling also.
3570 ??? Look at how the mips16 port solves this problem. It probably uses
3571 better ways to solve some of these problems.
3573 Although it is not incorrect, we don't accept QImode and HImode
3574 addresses based on the frame pointer or arg pointer until the
3575 reload pass starts. This is so that eliminating such addresses
3576 into stack based ones won't produce impossible code. */
3578 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3580 /* ??? Not clear if this is right. Experiment. */
3581 if (GET_MODE_SIZE (mode) < 4
3582 && !(reload_in_progress || reload_completed)
3583 && (reg_mentioned_p (frame_pointer_rtx, x)
3584 || reg_mentioned_p (arg_pointer_rtx, x)
3585 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3586 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3587 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3588 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3589 return 0;
3591 /* Accept any base register. SP only in SImode or larger. */
3592 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3593 return 1;
3595 /* This is PC relative data before arm_reorg runs. */
3596 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3597 && GET_CODE (x) == SYMBOL_REF
3598 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3599 return 1;
3601 /* This is PC relative data after arm_reorg runs. */
3602 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3603 && (GET_CODE (x) == LABEL_REF
3604 || (GET_CODE (x) == CONST
3605 && GET_CODE (XEXP (x, 0)) == PLUS
3606 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3607 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3608 return 1;
3610 /* Post-inc indexing only supported for SImode and larger. */
3611 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3612 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3613 return 1;
3615 else if (GET_CODE (x) == PLUS)
3617 /* REG+REG address can be any two index registers. */
3618 /* We disallow FRAME+REG addressing since we know that FRAME
3619 will be replaced with STACK, and SP relative addressing only
3620 permits SP+OFFSET. */
3621 if (GET_MODE_SIZE (mode) <= 4
3622 && XEXP (x, 0) != frame_pointer_rtx
3623 && XEXP (x, 1) != frame_pointer_rtx
3624 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3625 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3626 return 1;
3628 /* REG+const has 5-7 bit offset for non-SP registers. */
3629 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3630 || XEXP (x, 0) == arg_pointer_rtx)
3631 && GET_CODE (XEXP (x, 1)) == CONST_INT
3632 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3633 return 1;
3635 /* REG+const has 10 bit offset for SP, but only SImode and
3636 larger is supported. */
3637 /* ??? Should probably check for DI/DFmode overflow here
3638 just like GO_IF_LEGITIMATE_OFFSET does. */
3639 else if (GET_CODE (XEXP (x, 0)) == REG
3640 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3641 && GET_MODE_SIZE (mode) >= 4
3642 && GET_CODE (XEXP (x, 1)) == CONST_INT
3643 && INTVAL (XEXP (x, 1)) >= 0
3644 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3645 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3646 return 1;
3648 else if (GET_CODE (XEXP (x, 0)) == REG
3649 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3650 && GET_MODE_SIZE (mode) >= 4
3651 && GET_CODE (XEXP (x, 1)) == CONST_INT
3652 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3653 return 1;
3656 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3657 && GET_MODE_SIZE (mode) == 4
3658 && GET_CODE (x) == SYMBOL_REF
3659 && CONSTANT_POOL_ADDRESS_P (x)
3660 && !(flag_pic
3661 && symbol_mentioned_p (get_pool_constant (x))))
3662 return 1;
3664 return 0;
3667 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3668 instruction of mode MODE. */
3670 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3672 switch (GET_MODE_SIZE (mode))
3674 case 1:
3675 return val >= 0 && val < 32;
3677 case 2:
3678 return val >= 0 && val < 64 && (val & 1) == 0;
3680 default:
3681 return (val >= 0
3682 && (val + GET_MODE_SIZE (mode)) <= 128
3683 && (val & 3) == 0);
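/* Worked ranges (illustrative) implied by the predicate above,
   matching the 5-bit scaled offset fields of the Thumb load/store
   encodings:

     byte access:  #0 .. #31
     half access:  #0 .. #62, even
     word access:  #0 .. #124, multiple of 4  */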
3687 /* Try machine-dependent ways of modifying an illegitimate address
3688 to be legitimate. If we find one, return the new, valid address. */
3690 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3692 if (GET_CODE (x) == PLUS)
3694 rtx xop0 = XEXP (x, 0);
3695 rtx xop1 = XEXP (x, 1);
3697 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3698 xop0 = force_reg (SImode, xop0);
3700 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3701 xop1 = force_reg (SImode, xop1);
3703 if (ARM_BASE_REGISTER_RTX_P (xop0)
3704 && GET_CODE (xop1) == CONST_INT)
3706 HOST_WIDE_INT n, low_n;
3707 rtx base_reg, val;
3708 n = INTVAL (xop1);
3710 /* VFP addressing modes actually allow greater offsets, but for
3711 now we just stick with the lowest common denominator. */
3712 if (mode == DImode
3713 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3715 low_n = n & 0x0f;
3716 n &= ~0x0f;
3717 if (low_n > 4)
3719 n += 16;
3720 low_n -= 16;
3723 else
3725 low_n = ((mode) == TImode ? 0
3726 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3727 n -= low_n;
3730 base_reg = gen_reg_rtx (SImode);
3731 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3732 GEN_INT (n)), NULL_RTX);
3733 emit_move_insn (base_reg, val);
3734 x = (low_n == 0 ? base_reg
3735 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3737 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3738 x = gen_rtx_PLUS (SImode, xop0, xop1);
3741 /* XXX We don't allow MINUS any more -- see comment in
3742 arm_legitimate_address_p (). */
3743 else if (GET_CODE (x) == MINUS)
3745 rtx xop0 = XEXP (x, 0);
3746 rtx xop1 = XEXP (x, 1);
3748 if (CONSTANT_P (xop0))
3749 xop0 = force_reg (SImode, xop0);
3751 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3752 xop1 = force_reg (SImode, xop1);
3754 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3755 x = gen_rtx_MINUS (SImode, xop0, xop1);
3758 /* Make sure to take full advantage of the pre-indexed addressing mode
3759 with absolute addresses which often allows for the base register to
3760 be factorized for multiple adjacent memory references, and it might
3761 even allow for the mini pool to be avoided entirely. */
3762 else if (GET_CODE (x) == CONST_INT && optimize > 0)
3764 unsigned int bits;
3765 HOST_WIDE_INT mask, base, index;
3766 rtx base_reg;
3768 /* ldr and ldrb can use a 12-bit index, ldrsb and the rest can only
3769 use an 8-bit index. So let's use a 12-bit index for SImode only and
3770 hope that arm_gen_constant will enable ldrb to use more bits. */
3771 bits = (mode == SImode) ? 12 : 8;
3772 mask = (1 << bits) - 1;
3773 base = INTVAL (x) & ~mask;
3774 index = INTVAL (x) & mask;
3775 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
3777 /* It'll most probably be more efficient to generate the base
3778 with more bits set and use a negative index instead. */
3779 base |= mask;
3780 index -= mask;
3782 base_reg = force_reg (SImode, GEN_INT (base));
3783 x = gen_rtx_PLUS (SImode, base_reg, GEN_INT (index));
3786 if (flag_pic)
3788 /* We need to find and carefully transform any SYMBOL and LABEL
3789 references; so go back to the original address expression. */
3790 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3792 if (new_x != orig_x)
3793 x = new_x;
3796 return x;
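/* Worked example of the offset split above (illustrative only): for
   SImode, REG + 0x1234 becomes

       base_reg = REG + 0x1000       (one arithmetic insn)
       x        = base_reg + 0x234   (fits the 12-bit ldr offset)

   and for DImode, REG + 30 has low_n = 14 > 4, so it becomes
   base_reg = REG + 32 with x = base_reg - 2, keeping the residual
   offset small enough for all the doubleword addressing modes.
   Adjacent references can then share BASE_REG.  */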
3800 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3801 to be legitimate. If we find one, return the new, valid address. */
3803 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3805 if (GET_CODE (x) == PLUS
3806 && GET_CODE (XEXP (x, 1)) == CONST_INT
3807 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3808 || INTVAL (XEXP (x, 1)) < 0))
3810 rtx xop0 = XEXP (x, 0);
3811 rtx xop1 = XEXP (x, 1);
3812 HOST_WIDE_INT offset = INTVAL (xop1);
3814 /* Try and fold the offset into a biasing of the base register and
3815 then offsetting that. Don't do this when optimizing for space
3816 since it can cause too many CSEs. */
3817 if (optimize_size && offset >= 0
3818 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3820 HOST_WIDE_INT delta;
3822 if (offset >= 256)
3823 delta = offset - (256 - GET_MODE_SIZE (mode));
3824 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3825 delta = 31 * GET_MODE_SIZE (mode);
3826 else
3827 delta = offset & (~31 * GET_MODE_SIZE (mode));
3829 xop0 = force_operand (plus_constant (xop0, offset - delta),
3830 NULL_RTX);
3831 x = plus_constant (xop0, delta);
3833 else if (offset < 0 && offset > -256)
3834 /* Small negative offsets are best done with a subtract before the
3835 dereference, forcing these into a register normally takes two
3836 instructions. */
3837 x = force_operand (x, NULL_RTX);
3838 else
3840 /* For the remaining cases, force the constant into a register. */
3841 xop1 = force_reg (SImode, xop1);
3842 x = gen_rtx_PLUS (SImode, xop0, xop1);
3845 else if (GET_CODE (x) == PLUS
3846 && s_register_operand (XEXP (x, 1), SImode)
3847 && !s_register_operand (XEXP (x, 0), SImode))
3849 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3851 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3854 if (flag_pic)
3856 /* We need to find and carefully transform any SYMBOL and LABEL
3857 references; so go back to the original address expression. */
3858 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3860 if (new_x != orig_x)
3861 x = new_x;
3864 return x;
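/* Worked example of the biasing above (illustrative only): for
   SImode with X = (plus REG 300) and optimize_size set, OFFSET >= 256
   gives DELTA = 300 - (256 - 4) = 48, so the address is rebuilt as

       xop0 = REG + 252
       x    = xop0 + 48

   and 48 is a legitimate word offset for the final access.  */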
3868 thumb_legitimize_reload_address (rtx *x_p,
3869 enum machine_mode mode,
3870 int opnum, int type,
3871 int ind_levels ATTRIBUTE_UNUSED)
3873 rtx x = *x_p;
3875 if (GET_CODE (x) == PLUS
3876 && GET_MODE_SIZE (mode) < 4
3877 && REG_P (XEXP (x, 0))
3878 && XEXP (x, 0) == stack_pointer_rtx
3879 && GET_CODE (XEXP (x, 1)) == CONST_INT
3880 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3882 rtx orig_x = x;
3884 x = copy_rtx (x);
3885 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
3886 Pmode, VOIDmode, 0, 0, opnum, type);
3887 return x;
3890 /* If both registers are hi-regs, then it's better to reload the
3891 entire expression rather than each register individually. That
3892 only requires one reload register rather than two. */
3893 if (GET_CODE (x) == PLUS
3894 && REG_P (XEXP (x, 0))
3895 && REG_P (XEXP (x, 1))
3896 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
3897 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
3899 rtx orig_x = x;
3901 x = copy_rtx (x);
3902 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
3903 Pmode, VOIDmode, 0, 0, opnum, type);
3904 return x;
3907 return NULL;
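/* Example of the SP case above (illustrative only): an HImode
   reference to (plus SP 1024) is not a legitimate Thumb address,
   since halfword offsets stop at 62, so the whole PLUS is handed to
   push_reload and materialized in a base register.  */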
3912 #define REG_OR_SUBREG_REG(X) \
3913 (GET_CODE (X) == REG \
3914 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3916 #define REG_OR_SUBREG_RTX(X) \
3917 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3919 #ifndef COSTS_N_INSNS
3920 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3921 #endif
3922 static inline int
3923 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3925 enum machine_mode mode = GET_MODE (x);
3927 switch (code)
3929 case ASHIFT:
3930 case ASHIFTRT:
3931 case LSHIFTRT:
3932 case ROTATERT:
3933 case PLUS:
3934 case MINUS:
3935 case COMPARE:
3936 case NEG:
3937 case NOT:
3938 return COSTS_N_INSNS (1);
3940 case MULT:
3941 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3943 int cycles = 0;
3944 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3946 while (i)
3948 i >>= 2;
3949 cycles++;
3951 return COSTS_N_INSNS (2) + cycles;
3953 return COSTS_N_INSNS (1) + 16;
3955 case SET:
3956 return (COSTS_N_INSNS (1)
3957 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3958 + (GET_CODE (SET_DEST (x)) == MEM)));
3960 case CONST_INT:
3961 if (outer == SET)
3963 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3964 return 0;
3965 if (thumb_shiftable_const (INTVAL (x)))
3966 return COSTS_N_INSNS (2);
3967 return COSTS_N_INSNS (3);
3969 else if ((outer == PLUS || outer == COMPARE)
3970 && INTVAL (x) < 256 && INTVAL (x) > -256)
3971 return 0;
3972 else if (outer == AND
3973 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3974 return COSTS_N_INSNS (1);
3975 else if (outer == ASHIFT || outer == ASHIFTRT
3976 || outer == LSHIFTRT)
3977 return 0;
3978 return COSTS_N_INSNS (2);
3980 case CONST:
3981 case CONST_DOUBLE:
3982 case LABEL_REF:
3983 case SYMBOL_REF:
3984 return COSTS_N_INSNS (3);
3986 case UDIV:
3987 case UMOD:
3988 case DIV:
3989 case MOD:
3990 return 100;
3992 case TRUNCATE:
3993 return 99;
3995 case AND:
3996 case XOR:
3997 case IOR:
3998 /* XXX guess. */
3999 return 8;
4001 case MEM:
4002 /* XXX another guess. */
4003 /* Memory costs quite a lot for the first word, but subsequent words
4004 load at the equivalent of a single insn each. */
4005 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4006 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4007 ? 4 : 0));
4009 case IF_THEN_ELSE:
4010 /* XXX a guess. */
4011 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4012 return 14;
4013 return 2;
4015 case ZERO_EXTEND:
4016 /* XXX still guessing. */
4017 switch (GET_MODE (XEXP (x, 0)))
4019 case QImode:
4020 return (1 + (mode == DImode ? 4 : 0)
4021 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4023 case HImode:
4024 return (4 + (mode == DImode ? 4 : 0)
4025 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4027 case SImode:
4028 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4030 default:
4031 return 99;
4034 default:
4035 return 99;
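/* Worked example of the MULT cost loop above (illustrative only):
   for a constant multiplier of 100 (0x64), I is shifted right two
   bits per iteration, 0x64 -> 0x19 -> 0x6 -> 0x1 -> 0, i.e. four
   iterations, so the cost is COSTS_N_INSNS (2) + 4.  */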
4040 /* Worker routine for arm_rtx_costs. */
4041 static inline int
4042 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4044 enum machine_mode mode = GET_MODE (x);
4045 enum rtx_code subcode;
4046 int extra_cost;
4048 switch (code)
4050 case MEM:
4051 /* Memory costs quite a lot for the first word, but subsequent words
4052 load at the equivalent of a single insn each. */
4053 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4054 + (GET_CODE (x) == SYMBOL_REF
4055 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4057 case DIV:
4058 case MOD:
4059 case UDIV:
4060 case UMOD:
4061 return optimize_size ? COSTS_N_INSNS (2) : 100;
4063 case ROTATE:
4064 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4065 return 4;
4066 /* Fall through */
4067 case ROTATERT:
4068 if (mode != SImode)
4069 return 8;
4070 /* Fall through */
4071 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4072 if (mode == DImode)
4073 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4074 + ((GET_CODE (XEXP (x, 0)) == REG
4075 || (GET_CODE (XEXP (x, 0)) == SUBREG
4076 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4077 ? 0 : 8));
4078 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4079 || (GET_CODE (XEXP (x, 0)) == SUBREG
4080 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4081 ? 0 : 4)
4082 + ((GET_CODE (XEXP (x, 1)) == REG
4083 || (GET_CODE (XEXP (x, 1)) == SUBREG
4084 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4085 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4086 ? 0 : 4));
4088 case MINUS:
4089 if (mode == DImode)
4090 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4091 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4092 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4093 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4094 ? 0 : 8));
4096 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4097 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4098 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4099 && arm_const_double_rtx (XEXP (x, 1))))
4100 ? 0 : 8)
4101 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4102 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4103 && arm_const_double_rtx (XEXP (x, 0))))
4104 ? 0 : 8));
4106 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4107 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4108 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4109 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4110 || subcode == ASHIFTRT || subcode == LSHIFTRT
4111 || subcode == ROTATE || subcode == ROTATERT
4112 || (subcode == MULT
4113 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4114 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4115 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4116 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4117 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4118 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4119 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4120 return 1;
4121 /* Fall through */
4123 case PLUS:
4124 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4125 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4126 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4127 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4128 && arm_const_double_rtx (XEXP (x, 1))))
4129 ? 0 : 8));
4131 /* Fall through */
4132 case AND: case XOR: case IOR:
4133 extra_cost = 0;
4135 /* Normally the frame registers will be split into reg+const during
4136 reload, so it is a bad idea to combine them with other instructions,
4137 since then they might not be moved outside of loops. As a compromise
4138 we allow integration with ops that have a constant as their second
4139 operand. */
4140 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4141 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4142 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4143 || (REG_OR_SUBREG_REG (XEXP (x, 1))
4144 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
4145 extra_cost = 4;
4147 if (mode == DImode)
4148 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4149 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4150 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4151 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4152 ? 0 : 8));
4154 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4155 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4156 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4157 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4158 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4159 ? 0 : 4));
4161 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4162 return (1 + extra_cost
4163 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4164 || subcode == LSHIFTRT || subcode == ASHIFTRT
4165 || subcode == ROTATE || subcode == ROTATERT
4166 || (subcode == MULT
4167 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4168 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4169 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4170 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4171 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4172 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4173 ? 0 : 4));
4175 return 8;
4177 case MULT:
4178 /* This should have been handled by the CPU specific routines. */
4179 gcc_unreachable ();
4181 case TRUNCATE:
4182 if (arm_arch3m && mode == SImode
4183 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4184 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4185 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4186 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4187 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4188 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4189 return 8;
4190 return 99;
4192 case NEG:
4193 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4194 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4195 /* Fall through */
4196 case NOT:
4197 if (mode == DImode)
4198 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4200 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4202 case IF_THEN_ELSE:
4203 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4204 return 14;
4205 return 2;
4207 case COMPARE:
4208 return 1;
4210 case ABS:
4211 return 4 + (mode == DImode ? 4 : 0);
4213 case SIGN_EXTEND:
4214 if (GET_MODE (XEXP (x, 0)) == QImode)
4215 return (4 + (mode == DImode ? 4 : 0)
4216 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4217 /* Fall through */
4218 case ZERO_EXTEND:
4219 switch (GET_MODE (XEXP (x, 0)))
4221 case QImode:
4222 return (1 + (mode == DImode ? 4 : 0)
4223 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4225 case HImode:
4226 return (4 + (mode == DImode ? 4 : 0)
4227 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4229 case SImode:
4230 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4232 case V8QImode:
4233 case V4HImode:
4234 case V2SImode:
4235 case V4QImode:
4236 case V2HImode:
4237 return 1;
4239 default:
4240 gcc_unreachable ();
4242 gcc_unreachable ();
4244 case CONST_INT:
4245 if (const_ok_for_arm (INTVAL (x)))
4246 return outer == SET ? 2 : -1;
4247 else if (outer == AND
4248 && const_ok_for_arm (~INTVAL (x)))
4249 return -1;
4250 else if ((outer == COMPARE
4251 || outer == PLUS || outer == MINUS)
4252 && const_ok_for_arm (-INTVAL (x)))
4253 return -1;
4254 else
4255 return 5;
4257 case CONST:
4258 case LABEL_REF:
4259 case SYMBOL_REF:
4260 return 6;
4262 case CONST_DOUBLE:
4263 if (arm_const_double_rtx (x))
4264 return outer == SET ? 2 : -1;
4265 else if ((outer == COMPARE || outer == PLUS)
4266 && neg_const_double_rtx_ok_for_fpa (x))
4267 return -1;
4268 return 7;
4270 default:
4271 return 99;
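/* Illustrative values for the CONST_INT case above: 0xff000000 is an
   8-bit value rotated by an even amount, so const_ok_for_arm accepts
   it and it costs 2 in a SET but -1 (a saving) elsewhere; 0x101
   spans nine bits, and neither its complement nor its negation is
   valid either, so it costs 5.  */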
4275 /* RTX costs when optimizing for size. */
4276 static bool
4277 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4279 enum machine_mode mode = GET_MODE (x);
4281 if (TARGET_THUMB)
4283 /* XXX TBD. For now, use the standard costs. */
4284 *total = thumb_rtx_costs (x, code, outer_code);
4285 return true;
4288 switch (code)
4290 case MEM:
4291 /* A memory access costs 1 insn if the mode is small, or the address is
4292 a single register, otherwise it costs one insn per word. */
4293 if (REG_P (XEXP (x, 0)))
4294 *total = COSTS_N_INSNS (1);
4295 else
4296 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4297 return true;
4299 case DIV:
4300 case MOD:
4301 case UDIV:
4302 case UMOD:
4303 /* Needs a libcall, so it costs about this. */
4304 *total = COSTS_N_INSNS (2);
4305 return false;
4307 case ROTATE:
4308 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4310 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4311 return true;
4313 /* Fall through */
4314 case ROTATERT:
4315 case ASHIFT:
4316 case LSHIFTRT:
4317 case ASHIFTRT:
4318 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4320 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4321 return true;
4323 else if (mode == SImode)
4325 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4326 /* Slightly disparage register shifts, but not by much. */
4327 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4328 *total += 1 + rtx_cost (XEXP (x, 1), code);
4329 return true;
4332 /* Needs a libcall. */
4333 *total = COSTS_N_INSNS (2);
4334 return false;
4336 case MINUS:
4337 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4339 *total = COSTS_N_INSNS (1);
4340 return false;
4343 if (mode == SImode)
4345 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4346 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4348 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4349 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4350 || subcode1 == ROTATE || subcode1 == ROTATERT
4351 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4352 || subcode1 == ASHIFTRT)
4354 /* It's just the cost of the two operands. */
4355 *total = 0;
4356 return false;
4359 *total = COSTS_N_INSNS (1);
4360 return false;
4363 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4364 return false;
4366 case PLUS:
4367 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4369 *total = COSTS_N_INSNS (1);
4370 return false;
4373 /* Fall through */
4374 case AND: case XOR: case IOR:
4375 if (mode == SImode)
4377 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4379 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4380 || subcode == LSHIFTRT || subcode == ASHIFTRT
4381 || (code == AND && subcode == NOT))
4383 /* It's just the cost of the two operands. */
4384 *total = 0;
4385 return false;
4389 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4390 return false;
4392 case MULT:
4393 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4394 return false;
4396 case NEG:
4397 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4398 *total = COSTS_N_INSNS (1);
4399 /* Fall through */
4400 case NOT:
4401 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4403 return false;
4405 case IF_THEN_ELSE:
4406 *total = 0;
4407 return false;
4409 case COMPARE:
4410 if (cc_register (XEXP (x, 0), VOIDmode))
4411 *total = 0;
4412 else
4413 *total = COSTS_N_INSNS (1);
4414 return false;
4416 case ABS:
4417 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4418 *total = COSTS_N_INSNS (1);
4419 else
4420 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4421 return false;
4423 case SIGN_EXTEND:
4424 *total = 0;
4425 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4427 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4428 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4430 if (mode == DImode)
4431 *total += COSTS_N_INSNS (1);
4432 return false;
4434 case ZERO_EXTEND:
4435 *total = 0;
4436 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4438 switch (GET_MODE (XEXP (x, 0)))
4440 case QImode:
4441 *total += COSTS_N_INSNS (1);
4442 break;
4444 case HImode:
4445 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4447 case SImode:
4448 break;
4450 default:
4451 *total += COSTS_N_INSNS (2);
4455 if (mode == DImode)
4456 *total += COSTS_N_INSNS (1);
4458 return false;
4460 case CONST_INT:
4461 if (const_ok_for_arm (INTVAL (x)))
4462 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4463 else if (const_ok_for_arm (~INTVAL (x)))
4464 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4465 else if (const_ok_for_arm (-INTVAL (x)))
4467 if (outer_code == COMPARE || outer_code == PLUS
4468 || outer_code == MINUS)
4469 *total = 0;
4470 else
4471 *total = COSTS_N_INSNS (1);
4473 else
4474 *total = COSTS_N_INSNS (2);
4475 return true;
4477 case CONST:
4478 case LABEL_REF:
4479 case SYMBOL_REF:
4480 *total = COSTS_N_INSNS (2);
4481 return true;
4483 case CONST_DOUBLE:
4484 *total = COSTS_N_INSNS (4);
4485 return true;
4487 default:
4488 if (mode != VOIDmode)
4489 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4490 else
4491 *total = COSTS_N_INSNS (4); /* Who knows? */
4492 return false;
4496 /* RTX costs for cores with a slow MUL implementation. */
4498 static bool
4499 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4501 enum machine_mode mode = GET_MODE (x);
4503 if (TARGET_THUMB)
4505 *total = thumb_rtx_costs (x, code, outer_code);
4506 return true;
4509 switch (code)
4511 case MULT:
4512 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4513 || mode == DImode)
4515 *total = 30;
4516 return true;
4519 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4521 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4522 & (unsigned HOST_WIDE_INT) 0xffffffff);
4523 int cost, const_ok = const_ok_for_arm (i);
4524 int j, booth_unit_size;
4526 /* Tune as appropriate. */
4527 cost = const_ok ? 4 : 8;
4528 booth_unit_size = 2;
4529 for (j = 0; i && j < 32; j += booth_unit_size)
4531 i >>= booth_unit_size;
4532 cost += 2;
4535 *total = cost;
4536 return true;
4539 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4540 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4541 return true;
4543 default:
4544 *total = arm_rtx_costs_1 (x, code, outer_code);
4545 return true;
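/* Worked example of the Booth loop above (illustrative only): for a
   multiplier of 0x55, BOOTH_UNIT_SIZE is 2 and I takes the values
   0x55 -> 0x15 -> 0x5 -> 0x1 -> 0, four steps of cost 2 each on top
   of the base cost of 4 (const_ok_for_arm (0x55) holds), giving a
   total of 12.  */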
4550 /* RTX cost for cores with a fast multiply unit (M variants). */
4552 static bool
4553 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4555 enum machine_mode mode = GET_MODE (x);
4557 if (TARGET_THUMB)
4559 *total = thumb_rtx_costs (x, code, outer_code);
4560 return true;
4563 switch (code)
4565 case MULT:
4566 /* There is no point basing this on the tuning, since it is always the
4567 fast variant if it exists at all. */
4568 if (mode == DImode
4569 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4570 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4571 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4573 *total = 8;
4574 return true;
4578 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4579 || mode == DImode)
4581 *total = 30;
4582 return true;
4585 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4587 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4588 & (unsigned HOST_WIDE_INT) 0xffffffff);
4589 int cost, const_ok = const_ok_for_arm (i);
4590 int j, booth_unit_size;
4592 /* Tune as appropriate. */
4593 cost = const_ok ? 4 : 8;
4594 booth_unit_size = 8;
4595 for (j = 0; i && j < 32; j += booth_unit_size)
4597 i >>= booth_unit_size;
4598 cost += 2;
4601 *total = cost;
4602 return true;
4605 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4606 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4607 return true;
4609 default:
4610 *total = arm_rtx_costs_1 (x, code, outer_code);
4611 return true;
4616 /* RTX cost for XScale CPUs. */
4618 static bool
4619 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4621 enum machine_mode mode = GET_MODE (x);
4623 if (TARGET_THUMB)
4625 *total = thumb_rtx_costs (x, code, outer_code);
4626 return true;
4629 switch (code)
4631 case MULT:
4632 /* There is no point basing this on the tuning, since it is always the
4633 fast variant if it exists at all. */
4634 if (mode == DImode
4635 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4636 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4637 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4639 *total = 8;
4640 return true;
4644 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4645 || mode == DImode)
4647 *total = 30;
4648 return true;
4651 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4653 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4654 & (unsigned HOST_WIDE_INT) 0xffffffff);
4655 int cost, const_ok = const_ok_for_arm (i);
4656 unsigned HOST_WIDE_INT masked_const;
4658 /* The cost will be related to two insns.
4659 First a load of the constant (MOV or LDR), then a multiply. */
4660 cost = 2;
4661 if (! const_ok)
4662 cost += 1; /* LDR is probably more expensive because
4663 of longer result latency. */
4664 masked_const = i & 0xffff8000;
4665 if (masked_const != 0 && masked_const != 0xffff8000)
4667 masked_const = i & 0xf8000000;
4668 if (masked_const == 0 || masked_const == 0xf8000000)
4669 cost += 1;
4670 else
4671 cost += 2;
4673 *total = cost;
4674 return true;
4677 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4678 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4679 return true;
4681 case COMPARE:
4682 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4683 will stall until the multiplication is complete. */
4684 if (GET_CODE (XEXP (x, 0)) == MULT)
4685 *total = 4 + rtx_cost (XEXP (x, 0), code);
4686 else
4687 *total = arm_rtx_costs_1 (x, code, outer_code);
4688 return true;
4690 default:
4691 *total = arm_rtx_costs_1 (x, code, outer_code);
4692 return true;
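/* Worked example of the masking above (illustrative only): for a
   multiplier of 0x12345678, I & 0xffff8000 is 0x12340000, which is
   neither 0 nor 0xffff8000, so the constant is not a sign-extended
   16-bit value; I & 0xf8000000 is 0x10000000, which is neither 0 nor
   0xf8000000 either, so two further cycles are added on top of the
   constant load (an LDR here, since const_ok_for_arm fails).  */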
4697 /* RTX costs for 9e (and later) cores. */
4699 static bool
4700 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4702 enum machine_mode mode = GET_MODE (x);
4703 int nonreg_cost;
4704 int cost;
4706 if (TARGET_THUMB)
4708 switch (code)
4710 case MULT:
4711 *total = COSTS_N_INSNS (3);
4712 return true;
4714 default:
4715 *total = thumb_rtx_costs (x, code, outer_code);
4716 return true;
4720 switch (code)
4722 case MULT:
4723 /* There is no point basing this on the tuning, since it is always the
4724 fast variant if it exists at all. */
4725 if (mode == DImode
4726 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4727 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4728 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4730 *total = 3;
4731 return true;
4735 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4737 *total = 30;
4738 return true;
4740 if (mode == DImode)
4742 cost = 7;
4743 nonreg_cost = 8;
4745 else
4747 cost = 2;
4748 nonreg_cost = 4;
4752 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4753 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4754 return true;
4756 default:
4757 *total = arm_rtx_costs_1 (x, code, outer_code);
4758 return true;
4761 /* All address computations that can be done are essentially free, but
4762 rtx_cost returns the same for practically all of them. So we weight the
4763 different types of address here in the order (most preferred first):
4764 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4765 static inline int
4766 arm_arm_address_cost (rtx x)
4768 enum rtx_code c = GET_CODE (x);
4770 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4771 return 0;
4772 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4773 return 10;
4775 if (c == PLUS || c == MINUS)
4777 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4778 return 2;
4780 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4781 return 3;
4783 return 4;
4786 return 6;
4789 static inline int
4790 arm_thumb_address_cost (rtx x)
4792 enum rtx_code c = GET_CODE (x);
4794 if (c == REG)
4795 return 1;
4796 if (c == PLUS
4797 && GET_CODE (XEXP (x, 0)) == REG
4798 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4799 return 1;
4801 return 2;
4804 static int
4805 arm_address_cost (rtx x)
4807 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4810 static int
4811 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4813 rtx i_pat, d_pat;
4815 /* Some true dependencies can have a higher cost depending
4816 on precisely how certain input operands are used. */
4817 if (arm_tune_xscale
4818 && REG_NOTE_KIND (link) == 0
4819 && recog_memoized (insn) >= 0
4820 && recog_memoized (dep) >= 0)
4822 int shift_opnum = get_attr_shift (insn);
4823 enum attr_type attr_type = get_attr_type (dep);
4825 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4826 operand for INSN. If we have a shifted input operand and the
4827 instruction we depend on is another ALU instruction, then we may
4828 have to account for an additional stall. */
4829 if (shift_opnum != 0
4830 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4832 rtx shifted_operand;
4833 int opno;
4835 /* Get the shifted operand. */
4836 extract_insn (insn);
4837 shifted_operand = recog_data.operand[shift_opnum];
4839 /* Iterate over all the operands in DEP. If we write an operand
4840 that overlaps with SHIFTED_OPERAND, then we have to increase the
4841 cost of this dependency. */
4842 extract_insn (dep);
4843 preprocess_constraints ();
4844 for (opno = 0; opno < recog_data.n_operands; opno++)
4846 /* We can ignore strict inputs. */
4847 if (recog_data.operand_type[opno] == OP_IN)
4848 continue;
4850 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4851 shifted_operand))
4852 return 2;
4857 /* XXX This is not strictly true for the FPA. */
4858 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4859 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4860 return 0;
4862 /* Call insns don't incur a stall, even if they follow a load. */
4863 if (REG_NOTE_KIND (link) == 0
4864 && GET_CODE (insn) == CALL_INSN)
4865 return 1;
4867 if ((i_pat = single_set (insn)) != NULL
4868 && GET_CODE (SET_SRC (i_pat)) == MEM
4869 && (d_pat = single_set (dep)) != NULL
4870 && GET_CODE (SET_DEST (d_pat)) == MEM)
4872 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4873 /* This is a load after a store; there is no conflict if the load reads
4874 from a cached area. Assume that loads from the stack, and from the
4875 constant pool are cached, and that others will miss. This is a
4876 hack. */
4878 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4879 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4880 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4881 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4882 return 1;
4885 return cost;
4888 static int fp_consts_inited = 0;
4890 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4891 static const char * const strings_fp[8] =
4893 "0", "1", "2", "3",
4894 "4", "5", "0.5", "10"
4897 static REAL_VALUE_TYPE values_fp[8];
4899 static void
4900 init_fp_table (void)
4902 int i;
4903 REAL_VALUE_TYPE r;
4905 if (TARGET_VFP)
4906 fp_consts_inited = 1;
4907 else
4908 fp_consts_inited = 8;
4910 for (i = 0; i < fp_consts_inited; i++)
4912 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4913 values_fp[i] = r;
4917 /* Return TRUE if rtx X is a valid immediate FP constant. */
4919 arm_const_double_rtx (rtx x)
4921 REAL_VALUE_TYPE r;
4922 int i;
4924 if (!fp_consts_inited)
4925 init_fp_table ();
4927 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4928 if (REAL_VALUE_MINUS_ZERO (r))
4929 return 0;
4931 for (i = 0; i < fp_consts_inited; i++)
4932 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4933 return 1;
4935 return 0;
4938 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4940 neg_const_double_rtx_ok_for_fpa (rtx x)
4942 REAL_VALUE_TYPE r;
4943 int i;
4945 if (!fp_consts_inited)
4946 init_fp_table ();
4948 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4949 r = REAL_VALUE_NEGATE (r);
4950 if (REAL_VALUE_MINUS_ZERO (r))
4951 return 0;
4953 for (i = 0; i < 8; i++)
4954 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4955 return 1;
4957 return 0;
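/* Example (illustrative only): -1.0 is not itself in the table, but
   its negation 1.0 is, so neg_const_double_rtx_ok_for_fpa accepts it
   and the caller can use the negated-immediate form of the FPA
   instruction instead of loading the constant.  */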
4960 /* Predicates for `match_operand' and `match_operator'. */
4962 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4964 cirrus_memory_offset (rtx op)
4966 /* Reject eliminable registers. */
4967 if (! (reload_in_progress || reload_completed)
4968 && ( reg_mentioned_p (frame_pointer_rtx, op)
4969 || reg_mentioned_p (arg_pointer_rtx, op)
4970 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4971 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4972 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4973 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4974 return 0;
4976 if (GET_CODE (op) == MEM)
4978 rtx ind;
4980 ind = XEXP (op, 0);
4982 /* Match: (mem (reg)). */
4983 if (GET_CODE (ind) == REG)
4984 return 1;
4986 /* Match:
4987 (mem (plus (reg)
4988 (const))). */
4989 if (GET_CODE (ind) == PLUS
4990 && GET_CODE (XEXP (ind, 0)) == REG
4991 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4992 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4993 return 1;
4996 return 0;
4999 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5000 WB is true if writeback address modes are allowed. */
5003 arm_coproc_mem_operand (rtx op, bool wb)
5005 rtx ind;
5007 /* Reject eliminable registers. */
5008 if (! (reload_in_progress || reload_completed)
5009 && ( reg_mentioned_p (frame_pointer_rtx, op)
5010 || reg_mentioned_p (arg_pointer_rtx, op)
5011 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5012 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5013 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5014 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5015 return FALSE;
5017 /* Constants are converted into offsets from labels. */
5018 if (GET_CODE (op) != MEM)
5019 return FALSE;
5021 ind = XEXP (op, 0);
5023 if (reload_completed
5024 && (GET_CODE (ind) == LABEL_REF
5025 || (GET_CODE (ind) == CONST
5026 && GET_CODE (XEXP (ind, 0)) == PLUS
5027 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5028 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5029 return TRUE;
5031 /* Match: (mem (reg)). */
5032 if (GET_CODE (ind) == REG)
5033 return arm_address_register_rtx_p (ind, 0);
5035 /* Autoincrement addressing modes. */
5036 if (wb
5037 && (GET_CODE (ind) == PRE_INC
5038 || GET_CODE (ind) == POST_INC
5039 || GET_CODE (ind) == PRE_DEC
5040 || GET_CODE (ind) == POST_DEC))
5041 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5043 if (wb
5044 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5045 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5046 && GET_CODE (XEXP (ind, 1)) == PLUS
5047 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5048 ind = XEXP (ind, 1);
5050 /* Match:
5051 (plus (reg)
5052 (const)). */
5053 if (GET_CODE (ind) == PLUS
5054 && GET_CODE (XEXP (ind, 0)) == REG
5055 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5056 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5057 && INTVAL (XEXP (ind, 1)) > -1024
5058 && INTVAL (XEXP (ind, 1)) < 1024
5059 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5060 return TRUE;
5062 return FALSE;
5065 /* Return true if X is a register that will be eliminated later on. */
5067 arm_eliminable_register (rtx x)
5069 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5070 || REGNO (x) == ARG_POINTER_REGNUM
5071 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5072 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5075 /* Return GENERAL_REGS if a scratch register required to reload x to/from
5076 VFP registers. Otherwise return NO_REGS. */
5078 enum reg_class
5079 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5081 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5082 return NO_REGS;
5084 return GENERAL_REGS;
5087 /* Values which must be returned in the most-significant end of the return
5088 register. */
5090 static bool
5091 arm_return_in_msb (tree valtype)
5093 return (TARGET_AAPCS_BASED
5094 && BYTES_BIG_ENDIAN
5095 && (AGGREGATE_TYPE_P (valtype)
5096 || TREE_CODE (valtype) == COMPLEX_TYPE));
5099 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5100 Used by the Cirrus Maverick code, which has to work around
5101 a hardware bug triggered by such instructions. */
5102 static bool
5103 arm_memory_load_p (rtx insn)
5105 rtx body, lhs, rhs;
5107 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5108 return false;
5110 body = PATTERN (insn);
5112 if (GET_CODE (body) != SET)
5113 return false;
5115 lhs = XEXP (body, 0);
5116 rhs = XEXP (body, 1);
5118 lhs = REG_OR_SUBREG_RTX (lhs);
5120 /* If the destination is not a general purpose
5121 register we do not have to worry. */
5122 if (GET_CODE (lhs) != REG
5123 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5124 return false;
5126 /* As well as loads from memory we also have to react
5127 to loads of invalid constants which will be turned
5128 into loads from the minipool. */
5129 return (GET_CODE (rhs) == MEM
5130 || GET_CODE (rhs) == SYMBOL_REF
5131 || note_invalid_constants (insn, -1, false));
5134 /* Return TRUE if INSN is a Cirrus instruction. */
5135 static bool
5136 arm_cirrus_insn_p (rtx insn)
5138 enum attr_cirrus attr;
5140 /* get_attr cannot accept USE or CLOBBER. */
5141 if (!insn
5142 || GET_CODE (insn) != INSN
5143 || GET_CODE (PATTERN (insn)) == USE
5144 || GET_CODE (PATTERN (insn)) == CLOBBER)
5145 return 0;
5147 attr = get_attr_cirrus (insn);
5149 return attr != CIRRUS_NOT;
5152 /* Cirrus reorg for invalid instruction combinations. */
5153 static void
5154 cirrus_reorg (rtx first)
5156 enum attr_cirrus attr;
5157 rtx body = PATTERN (first);
5158 rtx t;
5159 int nops;
5161 /* Any branch must be followed by 2 non Cirrus instructions. */
5162 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5164 nops = 0;
5165 t = next_nonnote_insn (first);
5167 if (arm_cirrus_insn_p (t))
5168 ++ nops;
5170 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5171 ++ nops;
5173 while (nops --)
5174 emit_insn_after (gen_nop (), first);
5176 return;
5179 /* (float (blah)) is in parallel with a clobber. */
5180 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5181 body = XVECEXP (body, 0, 0);
5183 if (GET_CODE (body) == SET)
5185 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5187 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5188 be followed by a non Cirrus insn. */
5189 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5191 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5192 emit_insn_after (gen_nop (), first);
5194 return;
5196 else if (arm_memory_load_p (first))
5198 unsigned int arm_regno;
5200 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5201 ldr/cfmv64hr combination where the Rd field is the same
5202 in both instructions must be split with a non Cirrus
5203 insn. Example:
5205 ldr r0, blah
5207 cfmvsr mvf0, r0. */
5209 /* Get Arm register number for ldr insn. */
5210 if (GET_CODE (lhs) == REG)
5211 arm_regno = REGNO (lhs);
5212 else
5214 gcc_assert (GET_CODE (rhs) == REG);
5215 arm_regno = REGNO (rhs);
5218 /* Next insn. */
5219 first = next_nonnote_insn (first);
5221 if (! arm_cirrus_insn_p (first))
5222 return;
5224 body = PATTERN (first);
5226 /* (float (blah)) is in parallel with a clobber. */
5227 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5228 body = XVECEXP (body, 0, 0);
5230 if (GET_CODE (body) == FLOAT)
5231 body = XEXP (body, 0);
5233 if (get_attr_cirrus (first) == CIRRUS_MOVE
5234 && GET_CODE (XEXP (body, 1)) == REG
5235 && arm_regno == REGNO (XEXP (body, 1)))
5236 emit_insn_after (gen_nop (), first);
5238 return;
5242 /* get_attr cannot accept USE or CLOBBER. */
5243 if (!first
5244 || GET_CODE (first) != INSN
5245 || GET_CODE (PATTERN (first)) == USE
5246 || GET_CODE (PATTERN (first)) == CLOBBER)
5247 return;
5249 attr = get_attr_cirrus (first);
5251 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5252 must be followed by a non-coprocessor instruction. */
5253 if (attr == CIRRUS_COMPARE)
5255 nops = 0;
5257 t = next_nonnote_insn (first);
5259 if (arm_cirrus_insn_p (t))
5260 ++ nops;
5262 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5263 ++ nops;
5265 while (nops --)
5266 emit_insn_after (gen_nop (), first);
5268 return;
5272 /* Return TRUE if X references a SYMBOL_REF. */
5274 symbol_mentioned_p (rtx x)
5276 const char * fmt;
5277 int i;
5279 if (GET_CODE (x) == SYMBOL_REF)
5280 return 1;
5282 fmt = GET_RTX_FORMAT (GET_CODE (x));
5284 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5286 if (fmt[i] == 'E')
5288 int j;
5290 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5291 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5292 return 1;
5294 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5295 return 1;
5298 return 0;
5301 /* Return TRUE if X references a LABEL_REF. */
5303 label_mentioned_p (rtx x)
5305 const char * fmt;
5306 int i;
5308 if (GET_CODE (x) == LABEL_REF)
5309 return 1;
5311 fmt = GET_RTX_FORMAT (GET_CODE (x));
5312 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5314 if (fmt[i] == 'E')
5316 int j;
5318 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5319 if (label_mentioned_p (XVECEXP (x, i, j)))
5320 return 1;
5322 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5323 return 1;
5326 return 0;
5329 enum rtx_code
5330 minmax_code (rtx x)
5332 enum rtx_code code = GET_CODE (x);
5334 switch (code)
5336 case SMAX:
5337 return GE;
5338 case SMIN:
5339 return LE;
5340 case UMIN:
5341 return LEU;
5342 case UMAX:
5343 return GEU;
5344 default:
5345 gcc_unreachable ();
5349 /* Return 1 if memory locations are adjacent. */
5351 adjacent_mem_locations (rtx a, rtx b)
5353 /* We don't guarantee to preserve the order of these memory refs. */
5354 if (volatile_refs_p (a) || volatile_refs_p (b))
5355 return 0;
5357 if ((GET_CODE (XEXP (a, 0)) == REG
5358 || (GET_CODE (XEXP (a, 0)) == PLUS
5359 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5360 && (GET_CODE (XEXP (b, 0)) == REG
5361 || (GET_CODE (XEXP (b, 0)) == PLUS
5362 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5364 HOST_WIDE_INT val0 = 0, val1 = 0;
5365 rtx reg0, reg1;
5366 int val_diff;
5368 if (GET_CODE (XEXP (a, 0)) == PLUS)
5370 reg0 = XEXP (XEXP (a, 0), 0);
5371 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5373 else
5374 reg0 = XEXP (a, 0);
5376 if (GET_CODE (XEXP (b, 0)) == PLUS)
5378 reg1 = XEXP (XEXP (b, 0), 0);
5379 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5381 else
5382 reg1 = XEXP (b, 0);
5384 /* Don't accept any offset that will require multiple
5385 instructions to handle, since this would cause the
5386 arith_adjacentmem pattern to output an overlong sequence. */
5387 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
5388 return 0;
5390 /* Don't allow an eliminable register: register elimination can make
5391 the offset too large. */
5392 if (arm_eliminable_register (reg0))
5393 return 0;
5395 val_diff = val1 - val0;
5397 if (arm_ld_sched)
5399 /* If the target has load delay slots, then there's no benefit
5400 to using an ldm instruction unless the offset is zero and
5401 we are optimizing for size. */
5402 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5403 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5404 && (val_diff == 4 || val_diff == -4));
5407 return ((REGNO (reg0) == REGNO (reg1))
5408 && (val_diff == 4 || val_diff == -4));
5411 return 0;
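/* Examples (illustrative only, assuming a core without load delay
   slots): [r4] and [r4, #4] are adjacent, as are [r4, #8] and
   [r4, #4] (VAL_DIFF may be -4).  [r4] and [r5, #4] use different
   base registers, and [r4] and [r4, #8] are not contiguous, so both
   of those pairs are rejected.  */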
5415 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5416 HOST_WIDE_INT *load_offset)
5418 int unsorted_regs[4];
5419 HOST_WIDE_INT unsorted_offsets[4];
5420 int order[4];
5421 int base_reg = -1;
5422 int i;
5424 /* Can only handle 2, 3, or 4 insns at present,
5425 though could be easily extended if required. */
5426 gcc_assert (nops >= 2 && nops <= 4);
5428 /* Loop over the operands and check that the memory references are
5429 suitable (i.e. immediate offsets from the same base register). At
5430 the same time, extract the target register, and the memory
5431 offsets. */
5432 for (i = 0; i < nops; i++)
5434 rtx reg;
5435 rtx offset;
5437 /* Convert a subreg of a mem into the mem itself. */
5438 if (GET_CODE (operands[nops + i]) == SUBREG)
5439 operands[nops + i] = alter_subreg (operands + (nops + i));
5441 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5443 /* Don't reorder volatile memory references; it doesn't seem worth
5444 looking for the case where the order is ok anyway. */
5445 if (MEM_VOLATILE_P (operands[nops + i]))
5446 return 0;
5448 offset = const0_rtx;
5450 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5451 || (GET_CODE (reg) == SUBREG
5452 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5453 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5454 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5455 == REG)
5456 || (GET_CODE (reg) == SUBREG
5457 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5458 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5459 == CONST_INT)))
5461 if (i == 0)
5463 base_reg = REGNO (reg);
5464 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5465 ? REGNO (operands[i])
5466 : REGNO (SUBREG_REG (operands[i])));
5467 order[0] = 0;
5469 else
5471 if (base_reg != (int) REGNO (reg))
5472 /* Not addressed from the same base register. */
5473 return 0;
5475 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5476 ? REGNO (operands[i])
5477 : REGNO (SUBREG_REG (operands[i])));
5478 if (unsorted_regs[i] < unsorted_regs[order[0]])
5479 order[0] = i;
5482 /* If it isn't an integer register, or if it overwrites the
5483 base register but isn't the last insn in the list, then
5484 we can't do this. */
5485 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5486 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5487 return 0;
5489 unsorted_offsets[i] = INTVAL (offset);
5491 else
5492 /* Not a suitable memory address. */
5493 return 0;
5496 /* All the useful information has now been extracted from the
5497 operands into unsorted_regs and unsorted_offsets; additionally,
5498 order[0] has been set to the lowest numbered register in the
5499 list. Sort the registers into order, and check that the memory
5500 offsets are ascending and adjacent. */
5502 for (i = 1; i < nops; i++)
5504 int j;
5506 order[i] = order[i - 1];
5507 for (j = 0; j < nops; j++)
5508 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5509 && (order[i] == order[i - 1]
5510 || unsorted_regs[j] < unsorted_regs[order[i]]))
5511 order[i] = j;
5513 /* Have we found a suitable register? If not, one must be used more
5514 than once. */
5515 if (order[i] == order[i - 1])
5516 return 0;
5518 /* Is the memory address adjacent and ascending? */
5519 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5520 return 0;
5523 if (base)
5525 *base = base_reg;
5527 for (i = 0; i < nops; i++)
5528 regs[i] = unsorted_regs[order[i]];
5530 *load_offset = unsorted_offsets[order[0]];
5533 if (unsorted_offsets[order[0]] == 0)
5534 return 1; /* ldmia */
5536 if (unsorted_offsets[order[0]] == 4)
5537 return 2; /* ldmib */
5539 if (unsorted_offsets[order[nops - 1]] == 0)
5540 return 3; /* ldmda */
5542 if (unsorted_offsets[order[nops - 1]] == -4)
5543 return 4; /* ldmdb */
5545 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5546 if the offset isn't small enough. The reason 2 ldrs are faster
5547 is because these ARMs are able to do more than one cache access
5548 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5549 whilst the ARM8 has a double bandwidth cache. This means that
5550 these cores can do both an instruction fetch and a data fetch in
5551 a single cycle, so the trick of calculating the address into a
5552 scratch register (one of the result regs) and then doing a load
5553 multiple actually becomes slower (and no smaller in code size).
5554 That is the transformation
5556 ldr rd1, [rbase + offset]
5557 ldr rd2, [rbase + offset + 4]
5561 add rd1, rbase, offset
5562 ldmia rd1, {rd1, rd2}
5564 produces worse code -- '3 cycles + any stalls on rd2' instead of
5565 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5566 access per cycle, the first sequence could never complete in less
5567 than 6 cycles, whereas the ldm sequence would only take 5 and
5568 would make better use of sequential accesses if not hitting the
5569 cache.
5571 We cheat here and test 'arm_ld_sched' which we currently know to
5572 only be true for the ARM8, ARM9 and StrongARM. If this ever
5573 changes, then the test below needs to be reworked. */
5574 if (nops == 2 && arm_ld_sched)
5575 return 0;
5577 /* Can't do it without setting up the offset, only do this if it takes
5578 no more than one insn. */
5579 return (const_ok_for_arm (unsorted_offsets[order[0]])
5580 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
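/* Examples of the return codes above (illustrative only, again for a
   core without load scheduling): two loads at [r4] and [r4, #4] give
   1 (ldmia); at [r4, #4] and [r4, #8], 2 (ldmib); at [r4, #-4] and
   [r4], 3 (ldmda); at [r4, #-8] and [r4, #-4], 4 (ldmdb).  Offsets
   such as #16/#20 match none of these, so 5 is returned and
   emit_ldm_seq first adds #16 into the first destination register.  */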
5583 const char *
5584 emit_ldm_seq (rtx *operands, int nops)
5586 int regs[4];
5587 int base_reg;
5588 HOST_WIDE_INT offset;
5589 char buf[100];
5590 int i;
5592 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5594 case 1:
5595 strcpy (buf, "ldm%?ia\t");
5596 break;
5598 case 2:
5599 strcpy (buf, "ldm%?ib\t");
5600 break;
5602 case 3:
5603 strcpy (buf, "ldm%?da\t");
5604 break;
5606 case 4:
5607 strcpy (buf, "ldm%?db\t");
5608 break;
5610 case 5:
5611 if (offset >= 0)
5612 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5613 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5614 (long) offset);
5615 else
5616 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5617 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5618 (long) -offset);
5619 output_asm_insn (buf, operands);
5620 base_reg = regs[0];
5621 strcpy (buf, "ldm%?ia\t");
5622 break;
5624 default:
5625 gcc_unreachable ();
5628 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5629 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5631 for (i = 1; i < nops; i++)
5632 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5633 reg_names[regs[i]]);
5635 strcat (buf, "}\t%@ phole ldm");
5637 output_asm_insn (buf, operands);
5638 return "";
5642 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5643 HOST_WIDE_INT * load_offset)
5645 int unsorted_regs[4];
5646 HOST_WIDE_INT unsorted_offsets[4];
5647 int order[4];
5648 int base_reg = -1;
5649 int i;
5651 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5652 extended if required. */
5653 gcc_assert (nops >= 2 && nops <= 4);
5655 /* Loop over the operands and check that the memory references are
5656 suitable (i.e. immediate offsets from the same base register). At
5657 the same time, extract the target register, and the memory
5658 offsets. */
5659 for (i = 0; i < nops; i++)
5661 rtx reg;
5662 rtx offset;
5664 /* Convert a subreg of a mem into the mem itself. */
5665 if (GET_CODE (operands[nops + i]) == SUBREG)
5666 operands[nops + i] = alter_subreg (operands + (nops + i));
5668 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5670 /* Don't reorder volatile memory references; it doesn't seem worth
5671 looking for the case where the order is ok anyway. */
5672 if (MEM_VOLATILE_P (operands[nops + i]))
5673 return 0;
5675 offset = const0_rtx;
5677 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5678 || (GET_CODE (reg) == SUBREG
5679 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5680 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5681 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5682 == REG)
5683 || (GET_CODE (reg) == SUBREG
5684 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5685 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5686 == CONST_INT)))
5688 if (i == 0)
5690 base_reg = REGNO (reg);
5691 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5692 ? REGNO (operands[i])
5693 : REGNO (SUBREG_REG (operands[i])));
5694 order[0] = 0;
5696 else
5698 if (base_reg != (int) REGNO (reg))
5699 /* Not addressed from the same base register. */
5700 return 0;
5702 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5703 ? REGNO (operands[i])
5704 : REGNO (SUBREG_REG (operands[i])));
5705 if (unsorted_regs[i] < unsorted_regs[order[0]])
5706 order[0] = i;
5709 /* If it isn't an integer register, then we can't do this. */
5710 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5711 return 0;
5713 unsorted_offsets[i] = INTVAL (offset);
5715 else
5716 /* Not a suitable memory address. */
5717 return 0;
5720 /* All the useful information has now been extracted from the
5721 operands into unsorted_regs and unsorted_offsets; additionally,
5722 order[0] has been set to the lowest numbered register in the
5723 list. Sort the registers into order, and check that the memory
5724 offsets are ascending and adjacent. */
5726 for (i = 1; i < nops; i++)
5728 int j;
5730 order[i] = order[i - 1];
5731 for (j = 0; j < nops; j++)
5732 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5733 && (order[i] == order[i - 1]
5734 || unsorted_regs[j] < unsorted_regs[order[i]]))
5735 order[i] = j;
5737 /* Have we found a suitable register? If not, one must be used more
5738 than once. */
5739 if (order[i] == order[i - 1])
5740 return 0;
5742 /* Is the memory address adjacent and ascending? */
5743 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5744 return 0;
5747 if (base)
5749 *base = base_reg;
5751 for (i = 0; i < nops; i++)
5752 regs[i] = unsorted_regs[order[i]];
5754 *load_offset = unsorted_offsets[order[0]];
5757 if (unsorted_offsets[order[0]] == 0)
5758 return 1; /* stmia */
5760 if (unsorted_offsets[order[0]] == 4)
5761 return 2; /* stmib */
5763 if (unsorted_offsets[order[nops - 1]] == 0)
5764 return 3; /* stmda */
5766 if (unsorted_offsets[order[nops - 1]] == -4)
5767 return 4; /* stmdb */
5769 return 0;
5772 const char *
5773 emit_stm_seq (rtx *operands, int nops)
5775 int regs[4];
5776 int base_reg;
5777 HOST_WIDE_INT offset;
5778 char buf[100];
5779 int i;
5781 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5783 case 1:
5784 strcpy (buf, "stm%?ia\t");
5785 break;
5787 case 2:
5788 strcpy (buf, "stm%?ib\t");
5789 break;
5791 case 3:
5792 strcpy (buf, "stm%?da\t");
5793 break;
5795 case 4:
5796 strcpy (buf, "stm%?db\t");
5797 break;
5799 default:
5800 gcc_unreachable ();
5803 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5804 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5806 for (i = 1; i < nops; i++)
5807 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5808 reg_names[regs[i]]);
5810 strcat (buf, "}\t%@ phole stm");
5812 output_asm_insn (buf, operands);
5813 return "";
5817 /* Routines for use in generating RTL. */
5820 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5821 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5823 HOST_WIDE_INT offset = *offsetp;
5824 int i = 0, j;
5825 rtx result;
5826 int sign = up ? 1 : -1;
5827 rtx mem, addr;
5829 /* XScale has load-store double instructions, but they have stricter
5830 alignment requirements than load-store multiple, so we cannot
5831 use them.
5833 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5834 the pipeline until completion.
5836 NREGS CYCLES
5837 1 3
5838 2 4
5839 3 5
5840 4 6
5842 An ldr instruction takes 1-3 cycles, but does not block the
5843 pipeline.
5845 NREGS CYCLES
5846 1 1-3
5847 2 2-6
5848 3 3-9
5849 4 4-12
5851 Best case ldr will always win. However, the more ldr instructions
5852 we issue, the less likely we are to be able to schedule them well.
5853 Using ldr instructions also increases code size.
5855 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5856 for counts of 3 or 4 regs. */
5857 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5859 rtx seq;
5861 start_sequence ();
5863 for (i = 0; i < count; i++)
5865 addr = plus_constant (from, i * 4 * sign);
5866 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5867 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5868 offset += 4 * sign;
5871 if (write_back)
5873 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5874 *offsetp = offset;
5877 seq = get_insns ();
5878 end_sequence ();
5880 return seq;
5883 result = gen_rtx_PARALLEL (VOIDmode,
5884 rtvec_alloc (count + (write_back ? 1 : 0)));
5885 if (write_back)
5887 XVECEXP (result, 0, 0)
5888 = gen_rtx_SET (GET_MODE (from), from,
5889 plus_constant (from, count * 4 * sign));
5890 i = 1;
5891 count++;
5894 for (j = 0; i < count; i++, j++)
5896 addr = plus_constant (from, j * 4 * sign);
5897 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5898 XVECEXP (result, 0, i)
5899 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5900 offset += 4 * sign;
5903 if (write_back)
5904 *offsetp = offset;
5906 return result;
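/* Minimal usage sketch (illustrative only, not compiled; BASEMEM
   stands for whatever MEM rtx describes the object being copied,
   as in arm_gen_movmemqi below).  */
#if 0
  {
    rtx from = gen_rtx_REG (SImode, 4);		/* Base address in r4.  */
    HOST_WIDE_INT off = 0;
    /* Load r0..r2 from [r4], [r4, #4], [r4, #8], writing r4 back.  */
    emit_insn (arm_gen_load_multiple (0, 3, from, TRUE, TRUE,
				      basemem, &off));
  }
#endif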
5910 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5911 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5913 HOST_WIDE_INT offset = *offsetp;
5914 int i = 0, j;
5915 rtx result;
5916 int sign = up ? 1 : -1;
5917 rtx mem, addr;
5919 /* See arm_gen_load_multiple for discussion of
5920 the pros/cons of ldm/stm usage for XScale. */
5921 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5923 rtx seq;
5925 start_sequence ();
5927 for (i = 0; i < count; i++)
5929 addr = plus_constant (to, i * 4 * sign);
5930 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5931 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5932 offset += 4 * sign;
5935 if (write_back)
5937 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5938 *offsetp = offset;
5941 seq = get_insns ();
5942 end_sequence ();
5944 return seq;
5947 result = gen_rtx_PARALLEL (VOIDmode,
5948 rtvec_alloc (count + (write_back ? 1 : 0)));
5949 if (write_back)
5951 XVECEXP (result, 0, 0)
5952 = gen_rtx_SET (GET_MODE (to), to,
5953 plus_constant (to, count * 4 * sign));
5954 i = 1;
5955 count++;
5958 for (j = 0; i < count; i++, j++)
5960 addr = plus_constant (to, j * 4 * sign);
5961 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5962 XVECEXP (result, 0, i)
5963 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5964 offset += 4 * sign;
5967 if (write_back)
5968 *offsetp = offset;
5970 return result;
5974 arm_gen_movmemqi (rtx *operands)
5976 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5977 HOST_WIDE_INT srcoffset, dstoffset;
5978 int i;
5979 rtx src, dst, srcbase, dstbase;
5980 rtx part_bytes_reg = NULL;
5981 rtx mem;
5983 if (GET_CODE (operands[2]) != CONST_INT
5984 || GET_CODE (operands[3]) != CONST_INT
5985 || INTVAL (operands[2]) > 64
5986 || INTVAL (operands[3]) & 3)
5987 return 0;
5989 dstbase = operands[0];
5990 srcbase = operands[1];
5992 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5993 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5995 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5996 out_words_to_go = INTVAL (operands[2]) / 4;
5997 last_bytes = INTVAL (operands[2]) & 3;
5998 dstoffset = srcoffset = 0;
6000 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6001 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6003 for (i = 0; in_words_to_go >= 2; i+=4)
6005 if (in_words_to_go > 4)
6006 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6007 srcbase, &srcoffset));
6008 else
6009 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6010 FALSE, srcbase, &srcoffset));
6012 if (out_words_to_go)
6014 if (out_words_to_go > 4)
6015 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6016 dstbase, &dstoffset));
6017 else if (out_words_to_go != 1)
6018 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6019 dst, TRUE,
6020 (last_bytes == 0
6021 ? FALSE : TRUE),
6022 dstbase, &dstoffset));
6023 else
6025 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6026 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6027 if (last_bytes != 0)
6029 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6030 dstoffset += 4;
6035 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6036 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6039 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6040 if (out_words_to_go)
6042 rtx sreg;
6044 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6045 sreg = copy_to_reg (mem);
6047 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6048 emit_move_insn (mem, sreg);
6049 in_words_to_go--;
6051 gcc_assert (!in_words_to_go); /* Sanity check */
6054 if (in_words_to_go)
6056 gcc_assert (in_words_to_go > 0);
6058 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6059 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6062 gcc_assert (!last_bytes || part_bytes_reg);
6064 if (BYTES_BIG_ENDIAN && last_bytes)
6066 rtx tmp = gen_reg_rtx (SImode);
6068 /* The bytes we want are in the top end of the word. */
6069 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6070 GEN_INT (8 * (4 - last_bytes))));
6071 part_bytes_reg = tmp;
6073 while (last_bytes)
6075 mem = adjust_automodify_address (dstbase, QImode,
6076 plus_constant (dst, last_bytes - 1),
6077 dstoffset + last_bytes - 1);
6078 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6080 if (--last_bytes)
6082 tmp = gen_reg_rtx (SImode);
6083 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6084 part_bytes_reg = tmp;
6089 else
6091 if (last_bytes > 1)
6093 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6094 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6095 last_bytes -= 2;
6096 if (last_bytes)
6098 rtx tmp = gen_reg_rtx (SImode);
6099 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6100 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6101 part_bytes_reg = tmp;
6102 dstoffset += 2;
6106 if (last_bytes)
6108 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6109 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6113 return 1;
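/* A worked example (illustrative): copying 15 bytes gives
   in_words_to_go = 4, out_words_to_go = 3 and last_bytes = 3, so the
   loop above emits a 4-word ldm and a 3-word stm, leaving the fourth
   loaded word in part_bytes_reg to feed the trailing halfword and
   byte stores.  */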
6116 /* Generate a memory reference for a half word, such that it will be loaded
6117 into the top 16 bits of the word. We can assume that the address is
6118 known to be alignable and of the form reg, or plus (reg, const). */
6121 arm_gen_rotated_half_load (rtx memref)
6123 HOST_WIDE_INT offset = 0;
6124 rtx base = XEXP (memref, 0);
6126 if (GET_CODE (base) == PLUS)
6128 offset = INTVAL (XEXP (base, 1));
6129 base = XEXP (base, 0);
6132 /* If we aren't allowed to generate unaligned addresses, then fail. */
6133 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
6134 return NULL;
6136 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
6138 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
6139 return base;
6141 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
6144 /* Select a dominance comparison mode if possible for a test of the general
6145 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6146 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6147 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6148 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6149 In all cases OP will be either EQ or NE, but we don't need to know which
6150 here. If we are unable to support a dominance comparison we return
6151 CC mode. This will then fail to match for the RTL expressions that
6152 generate this call. */
6153 enum machine_mode
6154 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6156 enum rtx_code cond1, cond2;
6157 int swapped = 0;
6159 /* Currently we will probably get the wrong result if the individual
6160 comparisons are not simple. This also ensures that it is safe to
6161 reverse a comparison if necessary. */
6162 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6163 != CCmode)
6164 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6165 != CCmode))
6166 return CCmode;
6168 /* The if_then_else variant of this tests the second condition if the
6169 first passes, but is true if the first fails. Reverse the first
6170 condition to get a true "inclusive-or" expression. */
6171 if (cond_or == DOM_CC_NX_OR_Y)
6172 cond1 = reverse_condition (cond1);
6174 /* If the comparisons are not equal, and one doesn't dominate the other,
6175 then we can't do this. */
6176 if (cond1 != cond2
6177 && !comparison_dominates_p (cond1, cond2)
6178 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6179 return CCmode;
6181 if (swapped)
6183 enum rtx_code temp = cond1;
6184 cond1 = cond2;
6185 cond2 = temp;
6188 switch (cond1)
6190 case EQ:
6191 if (cond_or == DOM_CC_X_AND_Y)
6192 return CC_DEQmode;
6194 switch (cond2)
6196 case EQ: return CC_DEQmode;
6197 case LE: return CC_DLEmode;
6198 case LEU: return CC_DLEUmode;
6199 case GE: return CC_DGEmode;
6200 case GEU: return CC_DGEUmode;
6201 default: gcc_unreachable ();
6204 case LT:
6205 if (cond_or == DOM_CC_X_AND_Y)
6206 return CC_DLTmode;
6208 switch (cond2)
6210 case LT:
6211 return CC_DLTmode;
6212 case LE:
6213 return CC_DLEmode;
6214 case NE:
6215 return CC_DNEmode;
6216 default:
6217 gcc_unreachable ();
6220 case GT:
6221 if (cond_or == DOM_CC_X_AND_Y)
6222 return CC_DGTmode;
6224 switch (cond2)
6226 case GT:
6227 return CC_DGTmode;
6228 case GE:
6229 return CC_DGEmode;
6230 case NE:
6231 return CC_DNEmode;
6232 default:
6233 gcc_unreachable ();
6236 case LTU:
6237 if (cond_or == DOM_CC_X_AND_Y)
6238 return CC_DLTUmode;
6240 switch (cond2)
6242 case LTU:
6243 return CC_DLTUmode;
6244 case LEU:
6245 return CC_DLEUmode;
6246 case NE:
6247 return CC_DNEmode;
6248 default:
6249 gcc_unreachable ();
6252 case GTU:
6253 if (cond_or == DOM_CC_X_AND_Y)
6254 return CC_DGTUmode;
6256 switch (cond2)
6258 case GTU:
6259 return CC_DGTUmode;
6260 case GEU:
6261 return CC_DGEUmode;
6262 case NE:
6263 return CC_DNEmode;
6264 default:
6265 gcc_unreachable ();
6268 /* The remaining cases only occur when both comparisons are the
6269 same. */
6270 case NE:
6271 gcc_assert (cond1 == cond2);
6272 return CC_DNEmode;
6274 case LE:
6275 gcc_assert (cond1 == cond2);
6276 return CC_DLEmode;
6278 case GE:
6279 gcc_assert (cond1 == cond2);
6280 return CC_DGEmode;
6282 case LEU:
6283 gcc_assert (cond1 == cond2);
6284 return CC_DLEUmode;
6286 case GEU:
6287 gcc_assert (cond1 == cond2);
6288 return CC_DGEUmode;
6290 default:
6291 gcc_unreachable ();
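/* An illustrative case: for cond1 == EQ and cond2 == LE with
   DOM_CC_X_OR_Y, EQ implies LE so comparison_dominates_p (EQ, LE)
   holds, no swap is needed, and the EQ/LE entry above yields
   CC_DLEmode for the combined test.  */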
6295 enum machine_mode
6296 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6298 /* All floating point compares return CCFP if it is an equality
6299 comparison, and CCFPE otherwise. */
6300 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6302 switch (op)
6304 case EQ:
6305 case NE:
6306 case UNORDERED:
6307 case ORDERED:
6308 case UNLT:
6309 case UNLE:
6310 case UNGT:
6311 case UNGE:
6312 case UNEQ:
6313 case LTGT:
6314 return CCFPmode;
6316 case LT:
6317 case LE:
6318 case GT:
6319 case GE:
6320 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6321 return CCFPmode;
6322 return CCFPEmode;
6324 default:
6325 gcc_unreachable ();
6329 /* A compare with a shifted operand. Because of canonicalization, the
6330 comparison will have to be swapped when we emit the assembler. */
6331 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6332 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6333 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6334 || GET_CODE (x) == ROTATERT))
6335 return CC_SWPmode;
6337 /* This operation is performed swapped, but since we only rely on the Z
6338 flag we don't need an additional mode. */
6339 if (GET_MODE (y) == SImode && REG_P (y)
6340 && GET_CODE (x) == NEG
6341 && (op == EQ || op == NE))
6342 return CC_Zmode;
6344 /* This is a special case that is used by combine to allow a
6345 comparison of a shifted byte load to be split into a zero-extend
6346 followed by a comparison of the shifted integer (only valid for
6347 equalities and unsigned inequalities). */
6348 if (GET_MODE (x) == SImode
6349 && GET_CODE (x) == ASHIFT
6350 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6351 && GET_CODE (XEXP (x, 0)) == SUBREG
6352 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6353 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6354 && (op == EQ || op == NE
6355 || op == GEU || op == GTU || op == LTU || op == LEU)
6356 && GET_CODE (y) == CONST_INT)
6357 return CC_Zmode;
6359 /* A construct for a conditional compare, if the false arm contains
6360 0, then both conditions must be true, otherwise either condition
6361 must be true. Not all conditions are possible, so CCmode is
6362 returned if it can't be done. */
6363 if (GET_CODE (x) == IF_THEN_ELSE
6364 && (XEXP (x, 2) == const0_rtx
6365 || XEXP (x, 2) == const1_rtx)
6366 && COMPARISON_P (XEXP (x, 0))
6367 && COMPARISON_P (XEXP (x, 1)))
6368 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6369 INTVAL (XEXP (x, 2)));
6371 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6372 if (GET_CODE (x) == AND
6373 && COMPARISON_P (XEXP (x, 0))
6374 && COMPARISON_P (XEXP (x, 1)))
6375 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6376 DOM_CC_X_AND_Y);
6378 if (GET_CODE (x) == IOR
6379 && COMPARISON_P (XEXP (x, 0))
6380 && COMPARISON_P (XEXP (x, 1)))
6381 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6382 DOM_CC_X_OR_Y);
6384 /* An operation (on Thumb) where we want to test for a single bit.
6385 This is done by shifting that bit up into the top bit of a
6386 scratch register; we can then branch on the sign bit. */
6387 if (TARGET_THUMB
6388 && GET_MODE (x) == SImode
6389 && (op == EQ || op == NE)
6390 && (GET_CODE (x) == ZERO_EXTRACT))
6391 return CC_Nmode;
6393 /* An operation that sets the condition codes as a side-effect, the
6394 V flag is not set correctly, so we can only use comparisons where
6395 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6396 instead.) */
6397 if (GET_MODE (x) == SImode
6398 && y == const0_rtx
6399 && (op == EQ || op == NE || op == LT || op == GE)
6400 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6401 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6402 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6403 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6404 || GET_CODE (x) == LSHIFTRT
6405 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6406 || GET_CODE (x) == ROTATERT
6407 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6408 return CC_NOOVmode;
6410 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6411 return CC_Zmode;
6413 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6414 && GET_CODE (x) == PLUS
6415 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6416 return CC_Cmode;
6418 return CCmode;
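/* For instance (illustrative), a test such as
   (lt (plus (reg) (reg)) (const_int 0)) selects CC_NOOVmode above:
   an ADDS sets N and Z reliably but not V, and LT is later output
   using the "mi" condition.  */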
6421 /* X and Y are two things to compare using CODE. Emit the compare insn and
6422 return the rtx for register 0 in the proper mode. FP means this is a
6423 floating point compare: I don't think that it is needed on the arm. */
6425 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6427 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6428 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6430 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6431 gen_rtx_COMPARE (mode, x, y)));
6433 return cc_reg;
6436 /* Generate a sequence of insns that will generate the correct return
6437 address mask depending on the physical architecture that the program
6438 is running on. */
6440 arm_gen_return_addr_mask (void)
6442 rtx reg = gen_reg_rtx (Pmode);
6444 emit_insn (gen_return_addr_mask (reg));
6445 return reg;
6448 void
6449 arm_reload_in_hi (rtx *operands)
6451 rtx ref = operands[1];
6452 rtx base, scratch;
6453 HOST_WIDE_INT offset = 0;
6455 if (GET_CODE (ref) == SUBREG)
6457 offset = SUBREG_BYTE (ref);
6458 ref = SUBREG_REG (ref);
6461 if (GET_CODE (ref) == REG)
6463 /* We have a pseudo which has been spilt onto the stack; there
6464 are two cases here: the first where there is a simple
6465 stack-slot replacement and a second where the stack-slot is
6466 out of range, or is used as a subreg. */
6467 if (reg_equiv_mem[REGNO (ref)])
6469 ref = reg_equiv_mem[REGNO (ref)];
6470 base = find_replacement (&XEXP (ref, 0));
6472 else
6473 /* The slot is out of range, or was dressed up in a SUBREG. */
6474 base = reg_equiv_address[REGNO (ref)];
6476 else
6477 base = find_replacement (&XEXP (ref, 0));
6479 /* Handle the case where the address is too complex to be offset by 1. */
6480 if (GET_CODE (base) == MINUS
6481 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6483 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6485 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6486 base = base_plus;
6488 else if (GET_CODE (base) == PLUS)
6490 /* The addend must be CONST_INT, or we would have dealt with it above. */
6491 HOST_WIDE_INT hi, lo;
6493 offset += INTVAL (XEXP (base, 1));
6494 base = XEXP (base, 0);
6496 /* Rework the address into a legal sequence of insns. */
6497 /* Valid range for lo is -4095 -> 4095 */
6498 lo = (offset >= 0
6499 ? (offset & 0xfff)
6500 : -((-offset) & 0xfff));
6502 /* Corner case, if lo is the max offset then we would be out of range
6503 once we have added the additional 1 below, so bump the msb into the
6504 pre-loading insn(s). */
6505 if (lo == 4095)
6506 lo &= 0x7ff;
6508 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6509 ^ (HOST_WIDE_INT) 0x80000000)
6510 - (HOST_WIDE_INT) 0x80000000);
6512 gcc_assert (hi + lo == offset);
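/* Worked example (illustrative): offset == 4095 gives lo == 4095,
   which the corner case above trims to 2047 (0x7ff); hi is then
   2048, so base is advanced by 2048 and the two byte loads below
   use offsets 2047 and 2048, both within the +/-4095 range.  */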
6514 if (hi != 0)
6516 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6518 /* Get the base address; addsi3 knows how to handle constants
6519 that require more than one insn. */
6520 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6521 base = base_plus;
6522 offset = lo;
6526 /* Operands[2] may overlap operands[0] (though it won't overlap
6527 operands[1]), that's why we asked for a DImode reg -- so we can
6528 use the bit that does not overlap. */
6529 if (REGNO (operands[2]) == REGNO (operands[0]))
6530 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6531 else
6532 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6534 emit_insn (gen_zero_extendqisi2 (scratch,
6535 gen_rtx_MEM (QImode,
6536 plus_constant (base,
6537 offset))));
6538 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6539 gen_rtx_MEM (QImode,
6540 plus_constant (base,
6541 offset + 1))));
6542 if (!BYTES_BIG_ENDIAN)
6543 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6544 gen_rtx_IOR (SImode,
6545 gen_rtx_ASHIFT
6546 (SImode,
6547 gen_rtx_SUBREG (SImode, operands[0], 0),
6548 GEN_INT (8)),
6549 scratch)));
6550 else
6551 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6552 gen_rtx_IOR (SImode,
6553 gen_rtx_ASHIFT (SImode, scratch,
6554 GEN_INT (8)),
6555 gen_rtx_SUBREG (SImode, operands[0],
6556 0))));
6559 /* Handle storing a half-word to memory during reload by synthesizing as two
6560 byte stores. Take care not to clobber the input values until after we
6561 have moved them somewhere safe. This code assumes that if the DImode
6562 scratch in operands[2] overlaps either the input value or output address
6563 in some way, then that value must die in this insn (we absolutely need
6564 two scratch registers for some corner cases). */
6565 void
6566 arm_reload_out_hi (rtx *operands)
6568 rtx ref = operands[0];
6569 rtx outval = operands[1];
6570 rtx base, scratch;
6571 HOST_WIDE_INT offset = 0;
6573 if (GET_CODE (ref) == SUBREG)
6575 offset = SUBREG_BYTE (ref);
6576 ref = SUBREG_REG (ref);
6579 if (GET_CODE (ref) == REG)
6581 /* We have a pseudo which has been spilt onto the stack; there
6582 are two cases here: the first where there is a simple
6583 stack-slot replacement and a second where the stack-slot is
6584 out of range, or is used as a subreg. */
6585 if (reg_equiv_mem[REGNO (ref)])
6587 ref = reg_equiv_mem[REGNO (ref)];
6588 base = find_replacement (&XEXP (ref, 0));
6590 else
6591 /* The slot is out of range, or was dressed up in a SUBREG. */
6592 base = reg_equiv_address[REGNO (ref)];
6594 else
6595 base = find_replacement (&XEXP (ref, 0));
6597 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6599 /* Handle the case where the address is too complex to be offset by 1. */
6600 if (GET_CODE (base) == MINUS
6601 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6603 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6605 /* Be careful not to destroy OUTVAL. */
6606 if (reg_overlap_mentioned_p (base_plus, outval))
6608 /* Updating base_plus might destroy outval, see if we can
6609 swap the scratch and base_plus. */
6610 if (!reg_overlap_mentioned_p (scratch, outval))
6612 rtx tmp = scratch;
6613 scratch = base_plus;
6614 base_plus = tmp;
6616 else
6618 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6620 /* Be conservative and copy OUTVAL into the scratch now,
6621 this should only be necessary if outval is a subreg
6622 of something larger than a word. */
6623 /* XXX Might this clobber base? I can't see how it can,
6624 since scratch is known to overlap with OUTVAL, and
6625 must be wider than a word. */
6626 emit_insn (gen_movhi (scratch_hi, outval));
6627 outval = scratch_hi;
6631 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6632 base = base_plus;
6634 else if (GET_CODE (base) == PLUS)
6636 /* The addend must be CONST_INT, or we would have dealt with it above. */
6637 HOST_WIDE_INT hi, lo;
6639 offset += INTVAL (XEXP (base, 1));
6640 base = XEXP (base, 0);
6642 /* Rework the address into a legal sequence of insns. */
6643 /* Valid range for lo is -4095 -> 4095 */
6644 lo = (offset >= 0
6645 ? (offset & 0xfff)
6646 : -((-offset) & 0xfff));
6648 /* Corner case, if lo is the max offset then we would be out of range
6649 once we have added the additional 1 below, so bump the msb into the
6650 pre-loading insn(s). */
6651 if (lo == 4095)
6652 lo &= 0x7ff;
6654 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6655 ^ (HOST_WIDE_INT) 0x80000000)
6656 - (HOST_WIDE_INT) 0x80000000);
6658 gcc_assert (hi + lo == offset);
6660 if (hi != 0)
6662 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6664 /* Be careful not to destroy OUTVAL. */
6665 if (reg_overlap_mentioned_p (base_plus, outval))
6667 /* Updating base_plus might destroy outval, see if we
6668 can swap the scratch and base_plus. */
6669 if (!reg_overlap_mentioned_p (scratch, outval))
6671 rtx tmp = scratch;
6672 scratch = base_plus;
6673 base_plus = tmp;
6675 else
6677 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6679 /* Be conservative and copy outval into scratch now,
6680 this should only be necessary if outval is a
6681 subreg of something larger than a word. */
6682 /* XXX Might this clobber base? I can't see how it
6683 can, since scratch is known to overlap with
6684 outval. */
6685 emit_insn (gen_movhi (scratch_hi, outval));
6686 outval = scratch_hi;
6690 /* Get the base address; addsi3 knows how to handle constants
6691 that require more than one insn. */
6692 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6693 base = base_plus;
6694 offset = lo;
6698 if (BYTES_BIG_ENDIAN)
6700 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6701 plus_constant (base, offset + 1)),
6702 gen_lowpart (QImode, outval)));
6703 emit_insn (gen_lshrsi3 (scratch,
6704 gen_rtx_SUBREG (SImode, outval, 0),
6705 GEN_INT (8)));
6706 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6707 gen_lowpart (QImode, scratch)));
6709 else
6711 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6712 gen_lowpart (QImode, outval)));
6713 emit_insn (gen_lshrsi3 (scratch,
6714 gen_rtx_SUBREG (SImode, outval, 0),
6715 GEN_INT (8)));
6716 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6717 plus_constant (base, offset + 1)),
6718 gen_lowpart (QImode, scratch)));
6722 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
6723 (padded to the size of a word) should be passed in a register. */
6725 static bool
6726 arm_must_pass_in_stack (enum machine_mode mode, tree type)
6728 if (TARGET_AAPCS_BASED)
6729 return must_pass_in_stack_var_size (mode, type);
6730 else
6731 return must_pass_in_stack_var_size_or_pad (mode, type);
6735 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
6736 Return true if an argument passed on the stack should be padded upwards,
6737 i.e. if the least-significant byte has useful data.
6738 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
6739 aggregate types are placed in the lowest memory address. */
6741 bool
6742 arm_pad_arg_upward (enum machine_mode mode, tree type)
6744 if (!TARGET_AAPCS_BASED)
6745 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
6747 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
6748 return false;
6750 return true;
6754 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
6755 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
6756 byte of the register has useful data, and return the opposite if the
6757 most significant byte does.
6758 For AAPCS, small aggregates and small complex types are always padded
6759 upwards. */
6761 bool
6762 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
6763 tree type, int first ATTRIBUTE_UNUSED)
6765 if (TARGET_AAPCS_BASED
6766 && BYTES_BIG_ENDIAN
6767 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
6768 && int_size_in_bytes (type) <= 4)
6769 return true;
6771 /* Otherwise, use default padding. */
6772 return !BYTES_BIG_ENDIAN;
6777 /* Print a symbolic form of X to the debug file, F. */
6778 static void
6779 arm_print_value (FILE *f, rtx x)
6781 switch (GET_CODE (x))
6783 case CONST_INT:
6784 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6785 return;
6787 case CONST_DOUBLE:
6788 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6789 return;
6791 case CONST_VECTOR:
6793 int i;
6795 fprintf (f, "<");
6796 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6798 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6799 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6800 fputc (',', f);
6802 fprintf (f, ">");
6804 return;
6806 case CONST_STRING:
6807 fprintf (f, "\"%s\"", XSTR (x, 0));
6808 return;
6810 case SYMBOL_REF:
6811 fprintf (f, "`%s'", XSTR (x, 0));
6812 return;
6814 case LABEL_REF:
6815 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6816 return;
6818 case CONST:
6819 arm_print_value (f, XEXP (x, 0));
6820 return;
6822 case PLUS:
6823 arm_print_value (f, XEXP (x, 0));
6824 fprintf (f, "+");
6825 arm_print_value (f, XEXP (x, 1));
6826 return;
6828 case PC:
6829 fprintf (f, "pc");
6830 return;
6832 default:
6833 fprintf (f, "????");
6834 return;
6838 /* Routines for manipulation of the constant pool. */
6840 /* Arm instructions cannot load a large constant directly into a
6841 register; they have to come from a pc relative load. The constant
6842 must therefore be placed in the addressable range of the pc
6843 relative load. Depending on the precise pc relative load
6844 instruction the range is somewhere between 256 bytes and 4k. This
6845 means that we often have to dump a constant inside a function, and
6846 generate code to branch around it.
6848 It is important to minimize this, since the branches will slow
6849 things down and make the code larger.
6851 Normally we can hide the table after an existing unconditional
6852 branch so that there is no interruption of the flow, but in the
6853 worst case the code looks like this:
6855 ldr rn, L1
6857 b L2
6858 align
6859 L1: .long value
6863 ldr rn, L3
6865 b L4
6866 align
6867 L3: .long value
6871 We fix this by performing a scan after scheduling, which notices
6872 which instructions need to have their operands fetched from the
6873 constant table and builds the table.
6875 The algorithm starts by building a table of all the constants that
6876 need fixing up and all the natural barriers in the function (places
6877 where a constant table can be dropped without breaking the flow).
6878 For each fixup we note how far the pc-relative replacement will be
6879 able to reach and the offset of the instruction into the function.
6881 Having built the table we then group the fixes together to form
6882 tables that are as large as possible (subject to addressing
6883 constraints) and emit each table of constants after the last
6884 barrier that is within range of all the instructions in the group.
6885 If a group does not contain a barrier, then we forcibly create one
6886 by inserting a jump instruction into the flow. Once the table has
6887 been inserted, the insns are then modified to reference the
6888 relevant entry in the pool.
6890 Possible enhancements to the algorithm (not implemented) are:
6892 1) For some processors and object formats, there may be benefit in
6893 aligning the pools to the start of cache lines; this alignment
6894 would need to be taken into account when calculating addressability
6895 of a pool. */
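/* A hypothetical sketch of the rewrite described above (labels and
   value invented for the example):

       ldr     rn, .L1         @ pc-relative load from the pool
       ...
       b       .L2             @ branch around the dumped pool
   .L1:
       .long   0x12345678      @ minipool entry
   .L2:
       ...                                                           */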
6897 /* These typedefs are located at the start of this file, so that
6898 they can be used in the prototypes there. This comment is to
6899 remind readers of that fact so that the following structures
6900 can be understood more easily.
6902 typedef struct minipool_node Mnode;
6903 typedef struct minipool_fixup Mfix; */
6905 struct minipool_node
6907 /* Doubly linked chain of entries. */
6908 Mnode * next;
6909 Mnode * prev;
6910 /* The maximum offset into the code that this entry can be placed. While
6911 pushing fixes for forward references, all entries are sorted in order
6912 of increasing max_address. */
6913 HOST_WIDE_INT max_address;
6914 /* Similarly for an entry inserted for a backwards ref. */
6915 HOST_WIDE_INT min_address;
6916 /* The number of fixes referencing this entry. This can become zero
6917 if we "unpush" an entry. In this case we ignore the entry when we
6918 come to emit the code. */
6919 int refcount;
6920 /* The offset from the start of the minipool. */
6921 HOST_WIDE_INT offset;
6922 /* The value in the table. */
6923 rtx value;
6924 /* The mode of value. */
6925 enum machine_mode mode;
6926 /* The size of the value. With iWMMXt enabled
6927 sizes > 4 also imply an alignment of 8 bytes. */
6928 int fix_size;
6931 struct minipool_fixup
6933 Mfix * next;
6934 rtx insn;
6935 HOST_WIDE_INT address;
6936 rtx * loc;
6937 enum machine_mode mode;
6938 int fix_size;
6939 rtx value;
6940 Mnode * minipool;
6941 HOST_WIDE_INT forwards;
6942 HOST_WIDE_INT backwards;
6945 /* Fixes less than a word need padding out to a word boundary. */
6946 #define MINIPOOL_FIX_SIZE(mode) \
6947 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
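/* For example, MINIPOOL_FIX_SIZE (HImode) is 4 (a 2-byte value is
   padded out to a word), while MINIPOOL_FIX_SIZE (DFmode) is 8.  */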
6949 static Mnode * minipool_vector_head;
6950 static Mnode * minipool_vector_tail;
6951 static rtx minipool_vector_label;
6953 /* The linked list of all minipool fixes required for this function. */
6954 Mfix * minipool_fix_head;
6955 Mfix * minipool_fix_tail;
6956 /* The fix entry for the current minipool, once it has been placed. */
6957 Mfix * minipool_barrier;
6959 /* Determines if INSN is the start of a jump table. Returns the end
6960 of the TABLE or NULL_RTX. */
6961 static rtx
6962 is_jump_table (rtx insn)
6964 rtx table;
6966 if (GET_CODE (insn) == JUMP_INSN
6967 && JUMP_LABEL (insn) != NULL
6968 && ((table = next_real_insn (JUMP_LABEL (insn)))
6969 == next_real_insn (insn))
6970 && table != NULL
6971 && GET_CODE (table) == JUMP_INSN
6972 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6973 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6974 return table;
6976 return NULL_RTX;
6979 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6980 #define JUMP_TABLES_IN_TEXT_SECTION 0
6981 #endif
6983 static HOST_WIDE_INT
6984 get_jump_table_size (rtx insn)
6986 /* ADDR_VECs only take room if read-only data goes into the text
6987 section. */
6988 if (JUMP_TABLES_IN_TEXT_SECTION
6989 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6990 || 1
6991 #endif
6994 rtx body = PATTERN (insn);
6995 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6997 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
7000 return 0;
7003 /* Move a minipool fix MP from its current location to before MAX_MP.
7004 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7005 constraints may need updating. */
7006 static Mnode *
7007 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7008 HOST_WIDE_INT max_address)
7010 /* The code below assumes these are different. */
7011 gcc_assert (mp != max_mp);
7013 if (max_mp == NULL)
7015 if (max_address < mp->max_address)
7016 mp->max_address = max_address;
7018 else
7020 if (max_address > max_mp->max_address - mp->fix_size)
7021 mp->max_address = max_mp->max_address - mp->fix_size;
7022 else
7023 mp->max_address = max_address;
7025 /* Unlink MP from its current position. Since max_mp is non-null,
7026 mp->prev must be non-null. */
7027 mp->prev->next = mp->next;
7028 if (mp->next != NULL)
7029 mp->next->prev = mp->prev;
7030 else
7031 minipool_vector_tail = mp->prev;
7033 /* Re-insert it before MAX_MP. */
7034 mp->next = max_mp;
7035 mp->prev = max_mp->prev;
7036 max_mp->prev = mp;
7038 if (mp->prev != NULL)
7039 mp->prev->next = mp;
7040 else
7041 minipool_vector_head = mp;
7044 /* Save the new entry. */
7045 max_mp = mp;
7047 /* Scan over the preceding entries and adjust their addresses as
7048 required. */
7049 while (mp->prev != NULL
7050 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7052 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7053 mp = mp->prev;
7056 return max_mp;
7059 /* Add a constant to the minipool for a forward reference. Returns the
7060 node added or NULL if the constant will not fit in this pool. */
7061 static Mnode *
7062 add_minipool_forward_ref (Mfix *fix)
7064 /* If set, max_mp is the first pool_entry that has a lower
7065 constraint than the one we are trying to add. */
7066 Mnode * max_mp = NULL;
7067 HOST_WIDE_INT max_address = fix->address + fix->forwards;
7068 Mnode * mp;
7070 /* If this fix's address is greater than the address of the first
7071 entry, then we can't put the fix in this pool. We subtract the
7072 size of the current fix to ensure that if the table is fully
7073 packed we still have enough room to insert this value by shuffling
7074 the other fixes forwards. */
7075 if (minipool_vector_head &&
7076 fix->address >= minipool_vector_head->max_address - fix->fix_size)
7077 return NULL;
7079 /* Scan the pool to see if a constant with the same value has
7080 already been added. While we are doing this, also note the
7081 location where we must insert the constant if it doesn't already
7082 exist. */
7083 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7085 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7086 && fix->mode == mp->mode
7087 && (GET_CODE (fix->value) != CODE_LABEL
7088 || (CODE_LABEL_NUMBER (fix->value)
7089 == CODE_LABEL_NUMBER (mp->value)))
7090 && rtx_equal_p (fix->value, mp->value))
7092 /* More than one fix references this entry. */
7093 mp->refcount++;
7094 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7097 /* Note the insertion point if necessary. */
7098 if (max_mp == NULL
7099 && mp->max_address > max_address)
7100 max_mp = mp;
7102 /* If we are inserting an 8-byte aligned quantity and
7103 we have not already found an insertion point, then
7104 make sure that all such 8-byte aligned quantities are
7105 placed at the start of the pool. */
7106 if (ARM_DOUBLEWORD_ALIGN
7107 && max_mp == NULL
7108 && fix->fix_size == 8
7109 && mp->fix_size != 8)
7111 max_mp = mp;
7112 max_address = mp->max_address;
7116 /* The value is not currently in the minipool, so we need to create
7117 a new entry for it. If MAX_MP is NULL, the entry will be put on
7118 the end of the list since the placement is less constrained than
7119 any existing entry. Otherwise, we insert the new fix before
7120 MAX_MP and, if necessary, adjust the constraints on the other
7121 entries. */
7122 mp = xmalloc (sizeof (* mp));
7123 mp->fix_size = fix->fix_size;
7124 mp->mode = fix->mode;
7125 mp->value = fix->value;
7126 mp->refcount = 1;
7127 /* Not yet required for a backwards ref. */
7128 mp->min_address = -65536;
7130 if (max_mp == NULL)
7132 mp->max_address = max_address;
7133 mp->next = NULL;
7134 mp->prev = minipool_vector_tail;
7136 if (mp->prev == NULL)
7138 minipool_vector_head = mp;
7139 minipool_vector_label = gen_label_rtx ();
7141 else
7142 mp->prev->next = mp;
7144 minipool_vector_tail = mp;
7146 else
7148 if (max_address > max_mp->max_address - mp->fix_size)
7149 mp->max_address = max_mp->max_address - mp->fix_size;
7150 else
7151 mp->max_address = max_address;
7153 mp->next = max_mp;
7154 mp->prev = max_mp->prev;
7155 max_mp->prev = mp;
7156 if (mp->prev != NULL)
7157 mp->prev->next = mp;
7158 else
7159 minipool_vector_head = mp;
7162 /* Save the new entry. */
7163 max_mp = mp;
7165 /* Scan over the preceding entries and adjust their addresses as
7166 required. */
7167 while (mp->prev != NULL
7168 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7170 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7171 mp = mp->prev;
7174 return max_mp;
7177 static Mnode *
7178 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7179 HOST_WIDE_INT min_address)
7181 HOST_WIDE_INT offset;
7183 /* The code below assumes these are different. */
7184 gcc_assert (mp != min_mp);
7186 if (min_mp == NULL)
7188 if (min_address > mp->min_address)
7189 mp->min_address = min_address;
7191 else
7193 /* We will adjust this below if it is too loose. */
7194 mp->min_address = min_address;
7196 /* Unlink MP from its current position. Since min_mp is non-null,
7197 mp->next must be non-null. */
7198 mp->next->prev = mp->prev;
7199 if (mp->prev != NULL)
7200 mp->prev->next = mp->next;
7201 else
7202 minipool_vector_head = mp->next;
7204 /* Reinsert it after MIN_MP. */
7205 mp->prev = min_mp;
7206 mp->next = min_mp->next;
7207 min_mp->next = mp;
7208 if (mp->next != NULL)
7209 mp->next->prev = mp;
7210 else
7211 minipool_vector_tail = mp;
7214 min_mp = mp;
7216 offset = 0;
7217 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7219 mp->offset = offset;
7220 if (mp->refcount > 0)
7221 offset += mp->fix_size;
7223 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7224 mp->next->min_address = mp->min_address + mp->fix_size;
7227 return min_mp;
7230 /* Add a constant to the minipool for a backward reference. Returns the
7231 node added or NULL if the constant will not fit in this pool.
7233 Note that the code for insertion for a backwards reference can be
7234 somewhat confusing because the calculated offsets for each fix do
7235 not take into account the size of the pool (which is still under
7236 construction.) */
7237 static Mnode *
7238 add_minipool_backward_ref (Mfix *fix)
7240 /* If set, min_mp is the last pool_entry that has a lower constraint
7241 than the one we are trying to add. */
7242 Mnode *min_mp = NULL;
7243 /* This can be negative, since it is only a constraint. */
7244 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7245 Mnode *mp;
7247 /* If we can't reach the current pool from this insn, or if we can't
7248 insert this entry at the end of the pool without pushing other
7249 fixes out of range, then we don't try. This ensures that we
7250 can't fail later on. */
7251 if (min_address >= minipool_barrier->address
7252 || (minipool_vector_tail->min_address + fix->fix_size
7253 >= minipool_barrier->address))
7254 return NULL;
7256 /* Scan the pool to see if a constant with the same value has
7257 already been added. While we are doing this, also note the
7258 location where we must insert the constant if it doesn't already
7259 exist. */
7260 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7262 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7263 && fix->mode == mp->mode
7264 && (GET_CODE (fix->value) != CODE_LABEL
7265 || (CODE_LABEL_NUMBER (fix->value)
7266 == CODE_LABEL_NUMBER (mp->value)))
7267 && rtx_equal_p (fix->value, mp->value)
7268 /* Check that there is enough slack to move this entry to the
7269 end of the table (this is conservative). */
7270 && (mp->max_address
7271 > (minipool_barrier->address
7272 + minipool_vector_tail->offset
7273 + minipool_vector_tail->fix_size)))
7275 mp->refcount++;
7276 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7279 if (min_mp != NULL)
7280 mp->min_address += fix->fix_size;
7281 else
7283 /* Note the insertion point if necessary. */
7284 if (mp->min_address < min_address)
7286 /* For now, we do not allow the insertion of 8-byte alignment
7287 requiring nodes anywhere but at the start of the pool. */
7288 if (ARM_DOUBLEWORD_ALIGN
7289 && fix->fix_size == 8 && mp->fix_size != 8)
7290 return NULL;
7291 else
7292 min_mp = mp;
7294 else if (mp->max_address
7295 < minipool_barrier->address + mp->offset + fix->fix_size)
7297 /* Inserting before this entry would push the fix beyond
7298 its maximum address (which can happen if we have
7299 re-located a forwards fix); force the new fix to come
7300 after it. */
7301 min_mp = mp;
7302 min_address = mp->min_address + fix->fix_size;
7304 /* If we are inserting an 8-byte aligned quantity and
7305 we have not already found an insertion point, then
7306 make sure that all such 8-byte aligned quantities are
7307 placed at the start of the pool. */
7308 else if (ARM_DOUBLEWORD_ALIGN
7309 && min_mp == NULL
7310 && fix->fix_size == 8
7311 && mp->fix_size < 8)
7313 min_mp = mp;
7314 min_address = mp->min_address + fix->fix_size;
7319 /* We need to create a new entry. */
7320 mp = xmalloc (sizeof (* mp));
7321 mp->fix_size = fix->fix_size;
7322 mp->mode = fix->mode;
7323 mp->value = fix->value;
7324 mp->refcount = 1;
7325 mp->max_address = minipool_barrier->address + 65536;
7327 mp->min_address = min_address;
7329 if (min_mp == NULL)
7331 mp->prev = NULL;
7332 mp->next = minipool_vector_head;
7334 if (mp->next == NULL)
7336 minipool_vector_tail = mp;
7337 minipool_vector_label = gen_label_rtx ();
7339 else
7340 mp->next->prev = mp;
7342 minipool_vector_head = mp;
7344 else
7346 mp->next = min_mp->next;
7347 mp->prev = min_mp;
7348 min_mp->next = mp;
7350 if (mp->next != NULL)
7351 mp->next->prev = mp;
7352 else
7353 minipool_vector_tail = mp;
7356 /* Save the new entry. */
7357 min_mp = mp;
7359 if (mp->prev)
7360 mp = mp->prev;
7361 else
7362 mp->offset = 0;
7364 /* Scan over the following entries and adjust their offsets. */
7365 while (mp->next != NULL)
7367 if (mp->next->min_address < mp->min_address + mp->fix_size)
7368 mp->next->min_address = mp->min_address + mp->fix_size;
7370 if (mp->refcount)
7371 mp->next->offset = mp->offset + mp->fix_size;
7372 else
7373 mp->next->offset = mp->offset;
7375 mp = mp->next;
7378 return min_mp;
7381 static void
7382 assign_minipool_offsets (Mfix *barrier)
7384 HOST_WIDE_INT offset = 0;
7385 Mnode *mp;
7387 minipool_barrier = barrier;
7389 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7391 mp->offset = offset;
7393 if (mp->refcount > 0)
7394 offset += mp->fix_size;
7398 /* Output the literal table */
7399 static void
7400 dump_minipool (rtx scan)
7402 Mnode * mp;
7403 Mnode * nmp;
7404 int align64 = 0;
7406 if (ARM_DOUBLEWORD_ALIGN)
7407 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7408 if (mp->refcount > 0 && mp->fix_size == 8)
7410 align64 = 1;
7411 break;
7414 if (dump_file)
7415 fprintf (dump_file,
7416 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7417 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7419 scan = emit_label_after (gen_label_rtx (), scan);
7420 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7421 scan = emit_label_after (minipool_vector_label, scan);
7423 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7425 if (mp->refcount > 0)
7427 if (dump_file)
7429 fprintf (dump_file,
7430 ";; Offset %u, min %ld, max %ld ",
7431 (unsigned) mp->offset, (unsigned long) mp->min_address,
7432 (unsigned long) mp->max_address);
7433 arm_print_value (dump_file, mp->value);
7434 fputc ('\n', dump_file);
7437 switch (mp->fix_size)
7439 #ifdef HAVE_consttable_1
7440 case 1:
7441 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7442 break;
7444 #endif
7445 #ifdef HAVE_consttable_2
7446 case 2:
7447 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7448 break;
7450 #endif
7451 #ifdef HAVE_consttable_4
7452 case 4:
7453 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7454 break;
7456 #endif
7457 #ifdef HAVE_consttable_8
7458 case 8:
7459 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7460 break;
7462 #endif
7463 default:
7464 gcc_unreachable ();
7468 nmp = mp->next;
7469 free (mp);
7472 minipool_vector_head = minipool_vector_tail = NULL;
7473 scan = emit_insn_after (gen_consttable_end (), scan);
7474 scan = emit_barrier_after (scan);
7477 /* Return the cost of forcibly inserting a barrier after INSN. */
7478 static int
7479 arm_barrier_cost (rtx insn)
7481 /* Basing the location of the pool on the loop depth is preferable,
7482 but at the moment, the basic block information seems to be
7483 corrupted by this stage of the compilation. */
7484 int base_cost = 50;
7485 rtx next = next_nonnote_insn (insn);
7487 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7488 base_cost -= 20;
7490 switch (GET_CODE (insn))
7492 case CODE_LABEL:
7493 /* It will always be better to place the table before the label, rather
7494 than after it. */
7495 return 50;
7497 case INSN:
7498 case CALL_INSN:
7499 return base_cost;
7501 case JUMP_INSN:
7502 return base_cost - 10;
7504 default:
7505 return base_cost + 10;
7509 /* Find the best place in the insn stream in the range
7510 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7511 Create the barrier by inserting a jump and add a new fix entry for
7512 it. */
7513 static Mfix *
7514 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7516 HOST_WIDE_INT count = 0;
7517 rtx barrier;
7518 rtx from = fix->insn;
7519 rtx selected = from;
7520 int selected_cost;
7521 HOST_WIDE_INT selected_address;
7522 Mfix * new_fix;
7523 HOST_WIDE_INT max_count = max_address - fix->address;
7524 rtx label = gen_label_rtx ();
7526 selected_cost = arm_barrier_cost (from);
7527 selected_address = fix->address;
7529 while (from && count < max_count)
7531 rtx tmp;
7532 int new_cost;
7534 /* This code shouldn't have been called if there was a natural barrier
7535 within range. */
7536 gcc_assert (GET_CODE (from) != BARRIER);
7538 /* Count the length of this insn. */
7539 count += get_attr_length (from);
7541 /* If there is a jump table, add its length. */
7542 tmp = is_jump_table (from);
7543 if (tmp != NULL)
7545 count += get_jump_table_size (tmp);
7547 /* Jump tables aren't in a basic block, so base the cost on
7548 the dispatch insn. If we select this location, we will
7549 still put the pool after the table. */
7550 new_cost = arm_barrier_cost (from);
7552 if (count < max_count && new_cost <= selected_cost)
7554 selected = tmp;
7555 selected_cost = new_cost;
7556 selected_address = fix->address + count;
7559 /* Continue after the dispatch table. */
7560 from = NEXT_INSN (tmp);
7561 continue;
7564 new_cost = arm_barrier_cost (from);
7566 if (count < max_count && new_cost <= selected_cost)
7568 selected = from;
7569 selected_cost = new_cost;
7570 selected_address = fix->address + count;
7573 from = NEXT_INSN (from);
7576 /* Create a new JUMP_INSN that branches around a barrier. */
7577 from = emit_jump_insn_after (gen_jump (label), selected);
7578 JUMP_LABEL (from) = label;
7579 barrier = emit_barrier_after (from);
7580 emit_label_after (label, barrier);
7582 /* Create a minipool barrier entry for the new barrier. */
7583 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7584 new_fix->insn = barrier;
7585 new_fix->address = selected_address;
7586 new_fix->next = fix->next;
7587 fix->next = new_fix;
7589 return new_fix;
7592 /* Record that there is a natural barrier in the insn stream at
7593 ADDRESS. */
7594 static void
7595 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7597 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7599 fix->insn = insn;
7600 fix->address = address;
7602 fix->next = NULL;
7603 if (minipool_fix_head != NULL)
7604 minipool_fix_tail->next = fix;
7605 else
7606 minipool_fix_head = fix;
7608 minipool_fix_tail = fix;
7611 /* Record INSN, which will need fixing up to load a value from the
7612 minipool. ADDRESS is the offset of the insn since the start of the
7613 function; LOC is a pointer to the part of the insn which requires
7614 fixing; VALUE is the constant that must be loaded, which is of type
7615 MODE. */
7616 static void
7617 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7618 enum machine_mode mode, rtx value)
7620 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7622 #ifdef AOF_ASSEMBLER
7623 /* PIC symbol references need to be converted into offsets into the
7624 based area. */
7625 /* XXX This shouldn't be done here. */
7626 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7627 value = aof_pic_entry (value);
7628 #endif /* AOF_ASSEMBLER */
7630 fix->insn = insn;
7631 fix->address = address;
7632 fix->loc = loc;
7633 fix->mode = mode;
7634 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7635 fix->value = value;
7636 fix->forwards = get_attr_pool_range (insn);
7637 fix->backwards = get_attr_neg_pool_range (insn);
7638 fix->minipool = NULL;
7640 /* If an insn doesn't have a range defined for it, then it isn't
7641 expecting to be reworked by this code. Better to stop now than
7642 to generate duff assembly code. */
7643 gcc_assert (fix->forwards || fix->backwards);
7645 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7646 So there might be an empty word before the start of the pool.
7647 Hence we reduce the forward range by 4 to allow for this
7648 possibility. */
7649 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7650 fix->forwards -= 4;
7652 if (dump_file)
7654 fprintf (dump_file,
7655 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7656 GET_MODE_NAME (mode),
7657 INSN_UID (insn), (unsigned long) address,
7658 -1 * (long)fix->backwards, (long)fix->forwards);
7659 arm_print_value (dump_file, fix->value);
7660 fprintf (dump_file, "\n");
7663 /* Add it to the chain of fixes. */
7664 fix->next = NULL;
7666 if (minipool_fix_head != NULL)
7667 minipool_fix_tail->next = fix;
7668 else
7669 minipool_fix_head = fix;
7671 minipool_fix_tail = fix;
7674 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7675 Returns the number of insns needed, or 99 if we don't know how to
7676 do it. */
7678 arm_const_double_inline_cost (rtx val)
7680 rtx lowpart, highpart;
7681 enum machine_mode mode;
7683 mode = GET_MODE (val);
7685 if (mode == VOIDmode)
7686 mode = DImode;
7688 gcc_assert (GET_MODE_SIZE (mode) == 8);
7690 lowpart = gen_lowpart (SImode, val);
7691 highpart = gen_highpart_mode (SImode, mode, val);
7693 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7694 gcc_assert (GET_CODE (highpart) == CONST_INT);
7696 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7697 NULL_RTX, NULL_RTX, 0, 0)
7698 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7699 NULL_RTX, NULL_RTX, 0, 0));
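/* Example (illustrative): a DImode value whose halves are both 1
   costs 1 + 1 = 2 insns, since each half is a single valid
   immediate; a value with 0x12345678 in each half costs several
   insns per half and is likely better loaded from the pool.  */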
7702 /* Return true if it is worthwhile to split a 64-bit constant into two
7703 32-bit operations. This is the case if optimizing for size, or
7704 if we have load delay slots, or if one 32-bit part can be done with
7705 a single data operation. */
7706 bool
7707 arm_const_double_by_parts (rtx val)
7709 enum machine_mode mode = GET_MODE (val);
7710 rtx part;
7712 if (optimize_size || arm_ld_sched)
7713 return true;
7715 if (mode == VOIDmode)
7716 mode = DImode;
7718 part = gen_highpart_mode (SImode, mode, val);
7720 gcc_assert (GET_CODE (part) == CONST_INT);
7722 if (const_ok_for_arm (INTVAL (part))
7723 || const_ok_for_arm (~INTVAL (part)))
7724 return true;
7726 part = gen_lowpart (SImode, val);
7728 gcc_assert (GET_CODE (part) == CONST_INT);
7730 if (const_ok_for_arm (INTVAL (part))
7731 || const_ok_for_arm (~INTVAL (part)))
7732 return true;
7734 return false;
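/* Example (illustrative): a DImode constant with 0xff000000 in both
   halves returns true here even without optimize_size or
   arm_ld_sched, since 0xff000000 is a valid rotated immediate.  */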
7737 /* Scan INSN and note any of its operands that need fixing.
7738 If DO_PUSHES is false we do not actually push any of the fixups
7739 needed. The function returns TRUE if any fixups were needed/pushed.
7740 This is used by arm_memory_load_p() which needs to know about loads
7741 of constants that will be converted into minipool loads. */
7742 static bool
7743 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7745 bool result = false;
7746 int opno;
7748 extract_insn (insn);
7750 if (!constrain_operands (1))
7751 fatal_insn_not_found (insn);
7753 if (recog_data.n_alternatives == 0)
7754 return false;
7756 /* Fill in recog_op_alt with information about the constraints of
7757 this insn. */
7758 preprocess_constraints ();
7760 for (opno = 0; opno < recog_data.n_operands; opno++)
7762 /* Things we need to fix can only occur in inputs. */
7763 if (recog_data.operand_type[opno] != OP_IN)
7764 continue;
7766 /* If this alternative is a memory reference, then any mention
7767 of constants in this alternative is really to fool reload
7768 into allowing us to accept one there. We need to fix them up
7769 now so that we output the right code. */
7770 if (recog_op_alt[opno][which_alternative].memory_ok)
7772 rtx op = recog_data.operand[opno];
7774 if (CONSTANT_P (op))
7776 if (do_pushes)
7777 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7778 recog_data.operand_mode[opno], op);
7779 result = true;
7781 else if (GET_CODE (op) == MEM
7782 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7783 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7785 if (do_pushes)
7787 rtx cop = avoid_constant_pool_reference (op);
7789 /* Casting the address of something to a mode narrower
7790 than a word can cause avoid_constant_pool_reference()
7791 to return the pool reference itself. That's no good to
7792 us here. Let's just hope that we can use the
7793 constant pool value directly. */
7794 if (op == cop)
7795 cop = get_pool_constant (XEXP (op, 0));
7797 push_minipool_fix (insn, address,
7798 recog_data.operand_loc[opno],
7799 recog_data.operand_mode[opno], cop);
7802 result = true;
7807 return result;
7810 /* GCC puts the pool in the wrong place for ARM, since we can only
7811 load addresses a limited distance around the pc. We do some
7812 special munging to move the constant pool values to the correct
7813 point in the code. */
7814 static void
7815 arm_reorg (void)
7817 rtx insn;
7818 HOST_WIDE_INT address = 0;
7819 Mfix * fix;
7821 minipool_fix_head = minipool_fix_tail = NULL;
7823 /* The first insn must always be a note, or the code below won't
7824 scan it properly. */
7825 insn = get_insns ();
7826 gcc_assert (GET_CODE (insn) == NOTE);
7828 /* Scan all the insns and record the operands that will need fixing. */
7829 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7831 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7832 && (arm_cirrus_insn_p (insn)
7833 || GET_CODE (insn) == JUMP_INSN
7834 || arm_memory_load_p (insn)))
7835 cirrus_reorg (insn);
7837 if (GET_CODE (insn) == BARRIER)
7838 push_minipool_barrier (insn, address);
7839 else if (INSN_P (insn))
7841 rtx table;
7843 note_invalid_constants (insn, address, true);
7844 address += get_attr_length (insn);
7846 /* If the insn is a vector jump, add the size of the table
7847 and skip the table. */
7848 if ((table = is_jump_table (insn)) != NULL)
7850 address += get_jump_table_size (table);
7851 insn = table;
7856 fix = minipool_fix_head;
7858 /* Now scan the fixups and perform the required changes. */
7859 while (fix)
7861 Mfix * ftmp;
7862 Mfix * fdel;
7863 Mfix * last_added_fix;
7864 Mfix * last_barrier = NULL;
7865 Mfix * this_fix;
7867 /* Skip any further barriers before the next fix. */
7868 while (fix && GET_CODE (fix->insn) == BARRIER)
7869 fix = fix->next;
7871 /* No more fixes. */
7872 if (fix == NULL)
7873 break;
7875 last_added_fix = NULL;
7877 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7879 if (GET_CODE (ftmp->insn) == BARRIER)
7881 if (ftmp->address >= minipool_vector_head->max_address)
7882 break;
7884 last_barrier = ftmp;
7886 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7887 break;
7889 last_added_fix = ftmp; /* Keep track of the last fix added. */
7892 /* If we found a barrier, drop back to that; any fixes that we
7893 could have reached but come after the barrier will now go in
7894 the next mini-pool. */
7895 if (last_barrier != NULL)
7897 /* Reduce the refcount for those fixes that won't go into this
7898 pool after all. */
7899 for (fdel = last_barrier->next;
7900 fdel && fdel != ftmp;
7901 fdel = fdel->next)
7903 fdel->minipool->refcount--;
7904 fdel->minipool = NULL;
7907 ftmp = last_barrier;
7909 else
7911 /* ftmp is the first fix that we can't fit into this pool and
7912 there are no natural barriers that we could use. Insert a
7913 new barrier in the code somewhere between the previous
7914 fix and this one, and arrange to jump around it. */
7915 HOST_WIDE_INT max_address;
7917 /* The last item on the list of fixes must be a barrier, so
7918 we can never run off the end of the list of fixes without
7919 last_barrier being set. */
7920 gcc_assert (ftmp);
7922 max_address = minipool_vector_head->max_address;
7923 /* Check that there isn't another fix that is in range that
7924 we couldn't fit into this pool because the pool was
7925 already too large: we need to put the pool before such an
7926 instruction. */
7927 if (ftmp->address < max_address)
7928 max_address = ftmp->address;
7930 last_barrier = create_fix_barrier (last_added_fix, max_address);
7933 assign_minipool_offsets (last_barrier);
7935 while (ftmp)
7937 if (GET_CODE (ftmp->insn) != BARRIER
7938 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7939 == NULL))
7940 break;
7942 ftmp = ftmp->next;
7945 /* Scan over the fixes we have identified for this pool, fixing them
7946 up and adding the constants to the pool itself. */
7947 for (this_fix = fix; this_fix && ftmp != this_fix;
7948 this_fix = this_fix->next)
7949 if (GET_CODE (this_fix->insn) != BARRIER)
7951 rtx addr
7952 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7953 minipool_vector_label),
7954 this_fix->minipool->offset);
7955 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7958 dump_minipool (last_barrier->insn);
7959 fix = ftmp;
7962 /* From now on we must synthesize any constants that we can't handle
7963 directly. This can happen if the RTL gets split during final
7964 instruction generation. */
7965 after_arm_reorg = 1;
7967 /* Free the minipool memory. */
7968 obstack_free (&minipool_obstack, minipool_startobj);
7971 /* Routines to output assembly language. */
7973 /* If the rtx is one of the known FPA constants, return the string of its
7974 value. In this way we can ensure that valid double constants are generated
7975 even when cross-compiling. */
7976 const char *
7977 fp_immediate_constant (rtx x)
7979 REAL_VALUE_TYPE r;
7980 int i;
7982 if (!fp_consts_inited)
7983 init_fp_table ();
7985 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7986 for (i = 0; i < 8; i++)
7987 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7988 return strings_fp[i];
7990 gcc_unreachable ();
7993 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7994 static const char *
7995 fp_const_from_val (REAL_VALUE_TYPE *r)
7997 int i;
7999 if (!fp_consts_inited)
8000 init_fp_table ();
8002 for (i = 0; i < 8; i++)
8003 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
8004 return strings_fp[i];
8006 gcc_unreachable ();
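/* Editorial note (not part of the original source): values_fp and
   strings_fp hold the eight constants that the FPA instruction set
   can encode as immediates, believed to be 0.0, 1.0, 2.0, 3.0, 4.0,
   5.0, 0.5 and 10.0 (see init_fp_table), so e.g. an rtx holding 0.5
   yields the string "0.5".  */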
8009 /* Output the operands of an LDM/STM instruction to STREAM.
8010 MASK is the ARM register set mask, of which only bits 0-15 are important.
8011 REG is the base register, either the frame pointer or the stack pointer.
8012 INSTR is the possibly suffixed load or store instruction. */
8014 static void
8015 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
8016 unsigned long mask)
8018 unsigned i;
8019 bool not_first = FALSE;
8021 fputc ('\t', stream);
8022 asm_fprintf (stream, instr, reg);
8023 fputs (", {", stream);
8025 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8026 if (mask & (1 << i))
8028 if (not_first)
8029 fprintf (stream, ", ");
8031 asm_fprintf (stream, "%r", i);
8032 not_first = TRUE;
8035 fprintf (stream, "}\n");
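/* Illustrative example (not part of the original source): the call

   print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, (1 << 4) | (1 << LR_REGNUM));

   emits

   ldmfd	sp!, {r4, lr}

   i.e. INSTR is expanded with the base register, then the registers
   selected by MASK are listed in ascending order.  */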
8039 /* Output a FLDMX instruction to STREAM.
8040 BASE is the register containing the address.
8041 REG and COUNT specify the register range.
8042 Extra registers may be added to avoid hardware bugs. */
8044 static void
8045 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
8047 int i;
8049 /* Workaround ARM10 VFPr1 bug. */
8050 if (count == 2 && !arm_arch6)
8052 if (reg == 15)
8053 reg--;
8054 count++;
8057 fputc ('\t', stream);
8058 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
8060 for (i = reg; i < reg + count; i++)
8062 if (i > reg)
8063 fputs (", ", stream);
8064 asm_fprintf (stream, "d%d", i);
8066 fputs ("}\n", stream);
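/* Illustrative example (not part of the original source): on a pre-v6
   core, arm_output_fldmx with reg 8 and count 2 hits the erratum
   workaround above, widens the range to three registers and emits

   fldmfdx	ip!, {d8, d9, d10}

   assuming BASE is IP_REGNUM.  */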
8071 /* Output the assembly for a store multiple. */
8073 const char *
8074 vfp_output_fstmx (rtx * operands)
8076 char pattern[100];
8077 int p;
8078 int base;
8079 int i;
8081 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8082 p = strlen (pattern);
8084 gcc_assert (GET_CODE (operands[1]) == REG);
8086 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8087 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8089 p += sprintf (&pattern[p], ", d%d", base + i);
8091 strcpy (&pattern[p], "}");
8093 output_asm_insn (pattern, operands);
8094 return "";
8098 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
8099 number of bytes pushed. */
8101 static int
8102 vfp_emit_fstmx (int base_reg, int count)
8104 rtx par;
8105 rtx dwarf;
8106 rtx tmp, reg;
8107 int i;
8109 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8110 register pairs are stored by a store multiple insn. We avoid this
8111 by pushing an extra pair. */
8112 if (count == 2 && !arm_arch6)
8114 if (base_reg == LAST_VFP_REGNUM - 3)
8115 base_reg -= 2;
8116 count++;
8119 /* ??? The frame layout is implementation defined. We describe
8120 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8121 We really need some way of representing the whole block so that the
8122 unwinder can figure it out at runtime. */
8123 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8124 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8126 reg = gen_rtx_REG (DFmode, base_reg);
8127 base_reg += 2;
8129 XVECEXP (par, 0, 0)
8130 = gen_rtx_SET (VOIDmode,
8131 gen_rtx_MEM (BLKmode,
8132 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8133 gen_rtx_UNSPEC (BLKmode,
8134 gen_rtvec (1, reg),
8135 UNSPEC_PUSH_MULT));
8137 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8138 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8139 GEN_INT (-(count * 8 + 4))));
8140 RTX_FRAME_RELATED_P (tmp) = 1;
8141 XVECEXP (dwarf, 0, 0) = tmp;
8143 tmp = gen_rtx_SET (VOIDmode,
8144 gen_rtx_MEM (DFmode, stack_pointer_rtx),
8145 reg);
8146 RTX_FRAME_RELATED_P (tmp) = 1;
8147 XVECEXP (dwarf, 0, 1) = tmp;
8149 for (i = 1; i < count; i++)
8151 reg = gen_rtx_REG (DFmode, base_reg);
8152 base_reg += 2;
8153 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8155 tmp = gen_rtx_SET (VOIDmode,
8156 gen_rtx_MEM (DFmode,
8157 gen_rtx_PLUS (SImode,
8158 stack_pointer_rtx,
8159 GEN_INT (i * 8))),
8160 reg);
8161 RTX_FRAME_RELATED_P (tmp) = 1;
8162 XVECEXP (dwarf, 0, i + 1) = tmp;
8165 par = emit_insn (par);
8166 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8167 REG_NOTES (par));
8168 RTX_FRAME_RELATED_P (par) = 1;
8170 return count * 8 + 4;
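/* Editorial sketch (not part of the original source): saving three
   register pairs emits a single fstmfdx and returns 3 * 8 + 4 = 28,
   the extra 4 bytes being the pad word of FSTMX standard format 1
   described above.  A count of 2 on a pre-v6 core is bumped to 3 by
   the erratum workaround first.  */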
8174 /* Output a 'call' insn. */
8175 const char *
8176 output_call (rtx *operands)
8178 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8180 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8181 if (REGNO (operands[0]) == LR_REGNUM)
8183 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8184 output_asm_insn ("mov%?\t%0, %|lr", operands);
8187 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8189 if (TARGET_INTERWORK || arm_arch4t)
8190 output_asm_insn ("bx%?\t%0", operands);
8191 else
8192 output_asm_insn ("mov%?\t%|pc, %0", operands);
8194 return "";
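/* Illustrative example (not part of the original source): on a plain
   ARMv4 target a call through r2 is emitted as

   mov	lr, pc
   mov	pc, r2

   whereas interworking or ARMv4T targets use "bx r2", and a call
   through lr first copies lr into ip as shown above.  */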
8197 /* Output a 'call' insn that is a reference in memory. */
8198 const char *
8199 output_call_mem (rtx *operands)
8201 if (TARGET_INTERWORK && !arm_arch5)
8203 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8204 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8205 output_asm_insn ("bx%?\t%|ip", operands);
8207 else if (regno_use_in (LR_REGNUM, operands[0]))
8209 /* LR is used in the memory address. We load the address in the
8210 first instruction. It's safe to use IP as the target of the
8211 load since the call will kill it anyway. */
8212 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8213 if (arm_arch5)
8214 output_asm_insn ("blx%?\t%|ip", operands);
8215 else
8217 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8218 if (arm_arch4t)
8219 output_asm_insn ("bx%?\t%|ip", operands);
8220 else
8221 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8224 else
8226 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8227 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8230 return "";
8234 /* Output a move from ARM registers to an FPA register.
8235 OPERANDS[0] is an FPA register.
8236 OPERANDS[1] is the first register of an ARM register pair. */
8237 const char *
8238 output_mov_long_double_fpa_from_arm (rtx *operands)
8240 int arm_reg0 = REGNO (operands[1]);
8241 rtx ops[3];
8243 gcc_assert (arm_reg0 != IP_REGNUM);
8245 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8246 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8247 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8249 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8250 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8252 return "";
8255 /* Output a move from an FPA register to ARM registers.
8256 OPERANDS[0] is the first register of an ARM register pair.
8257 OPERANDS[1] is an FPA register. */
8258 const char *
8259 output_mov_long_double_arm_from_fpa (rtx *operands)
8261 int arm_reg0 = REGNO (operands[0]);
8262 rtx ops[3];
8264 gcc_assert (arm_reg0 != IP_REGNUM);
8266 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8267 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8268 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8270 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8271 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8272 return "";
8275 /* Output a move from ARM registers to ARM registers of a long double.
8276 OPERANDS[0] is the destination.
8277 OPERANDS[1] is the source. */
8278 const char *
8279 output_mov_long_double_arm_from_arm (rtx *operands)
8281 /* We have to be careful here because the two might overlap. */
8282 int dest_start = REGNO (operands[0]);
8283 int src_start = REGNO (operands[1]);
8284 rtx ops[2];
8285 int i;
8287 if (dest_start < src_start)
8289 for (i = 0; i < 3; i++)
8291 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8292 ops[1] = gen_rtx_REG (SImode, src_start + i);
8293 output_asm_insn ("mov%?\t%0, %1", ops);
8296 else
8298 for (i = 2; i >= 0; i--)
8300 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8301 ops[1] = gen_rtx_REG (SImode, src_start + i);
8302 output_asm_insn ("mov%?\t%0, %1", ops);
8306 return "";
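/* Editorial example (not part of the original source): copying
   {r1, r2, r3} to {r2, r3, r4} takes the descending branch above
   (mov r4, r3; mov r3, r2; mov r2, r1), since copying r2 first would
   clobber a source register; the ascending loop is safe only when
   dest_start < src_start.  */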
8310 /* Output a move from ARM registers to an FPA register.
8311 OPERANDS[0] is an FPA register.
8312 OPERANDS[1] is the first register of an ARM register pair. */
8313 const char *
8314 output_mov_double_fpa_from_arm (rtx *operands)
8316 int arm_reg0 = REGNO (operands[1]);
8317 rtx ops[2];
8319 gcc_assert (arm_reg0 != IP_REGNUM);
8321 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8322 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8323 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8324 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8325 return "";
8328 /* Output a move from an FPA register to ARM registers.
8329 OPERANDS[0] is the first register of an ARM register pair.
8330 OPERANDS[1] is an FPA register. */
8331 const char *
8332 output_mov_double_arm_from_fpa (rtx *operands)
8334 int arm_reg0 = REGNO (operands[0]);
8335 rtx ops[2];
8337 gcc_assert (arm_reg0 != IP_REGNUM);
8339 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8340 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8341 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8342 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8343 return "";
8346 /* Output a move between double words.
8347 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8348 or MEM<-REG and all MEMs must be offsettable addresses. */
8349 const char *
8350 output_move_double (rtx *operands)
8352 enum rtx_code code0 = GET_CODE (operands[0]);
8353 enum rtx_code code1 = GET_CODE (operands[1]);
8354 rtx otherops[3];
8356 if (code0 == REG)
8358 int reg0 = REGNO (operands[0]);
8360 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8362 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8364 switch (GET_CODE (XEXP (operands[1], 0)))
8366 case REG:
8367 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8368 break;
8370 case PRE_INC:
8371 gcc_assert (TARGET_LDRD);
8372 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8373 break;
8375 case PRE_DEC:
8376 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8377 break;
8379 case POST_INC:
8380 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8381 break;
8383 case POST_DEC:
8384 gcc_assert (TARGET_LDRD);
8385 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8386 break;
8388 case PRE_MODIFY:
8389 case POST_MODIFY:
8390 otherops[0] = operands[0];
8391 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8392 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8394 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8396 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8398 /* Registers overlap so split out the increment. */
8399 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8400 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8402 else
8403 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8405 else
8407 /* We only allow constant increments, so this is safe. */
8408 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8410 break;
8412 case LABEL_REF:
8413 case CONST:
8414 output_asm_insn ("adr%?\t%0, %1", operands);
8415 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8416 break;
8418 default:
8419 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8420 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8422 otherops[0] = operands[0];
8423 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8424 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8426 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8428 if (GET_CODE (otherops[2]) == CONST_INT)
8430 switch ((int) INTVAL (otherops[2]))
8432 case -8:
8433 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8434 return "";
8435 case -4:
8436 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8437 return "";
8438 case 4:
8439 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8440 return "";
8443 if (TARGET_LDRD
8444 && (GET_CODE (otherops[2]) == REG
8445 || (GET_CODE (otherops[2]) == CONST_INT
8446 && INTVAL (otherops[2]) > -256
8447 && INTVAL (otherops[2]) < 256)))
8449 if (reg_overlap_mentioned_p (otherops[0],
8450 otherops[2]))
8452 /* Swap base and index registers over to
8453 avoid a conflict. */
8454 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8455 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8458 /* If both registers conflict, it will usually
8459 have been fixed by a splitter. */
8460 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8462 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8463 output_asm_insn ("ldr%?d\t%0, [%1]",
8464 otherops);
8466 else
8467 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8468 return "";
8471 if (GET_CODE (otherops[2]) == CONST_INT)
8473 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8474 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8475 else
8476 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8478 else
8479 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8481 else
8482 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8484 return "ldm%?ia\t%0, %M0";
8486 else
8488 otherops[1] = adjust_address (operands[1], SImode, 4);
8489 /* Take care of overlapping base/data reg. */
8490 if (reg_mentioned_p (operands[0], operands[1]))
8492 output_asm_insn ("ldr%?\t%0, %1", otherops);
8493 output_asm_insn ("ldr%?\t%0, %1", operands);
8495 else
8497 output_asm_insn ("ldr%?\t%0, %1", operands);
8498 output_asm_insn ("ldr%?\t%0, %1", otherops);
8503 else
8505 /* Constraints should ensure this. */
8506 gcc_assert (code0 == MEM && code1 == REG);
8507 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8509 switch (GET_CODE (XEXP (operands[0], 0)))
8511 case REG:
8512 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8513 break;
8515 case PRE_INC:
8516 gcc_assert (TARGET_LDRD);
8517 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8518 break;
8520 case PRE_DEC:
8521 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8522 break;
8524 case POST_INC:
8525 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8526 break;
8528 case POST_DEC:
8529 gcc_assert (TARGET_LDRD);
8530 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8531 break;
8533 case PRE_MODIFY:
8534 case POST_MODIFY:
8535 otherops[0] = operands[1];
8536 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8537 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8539 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8540 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8541 else
8542 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8543 break;
8545 case PLUS:
8546 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8547 if (GET_CODE (otherops[2]) == CONST_INT)
8549 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8551 case -8:
8552 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8553 return "";
8555 case -4:
8556 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8557 return "";
8559 case 4:
8560 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8561 return "";
8564 if (TARGET_LDRD
8565 && (GET_CODE (otherops[2]) == REG
8566 || (GET_CODE (otherops[2]) == CONST_INT
8567 && INTVAL (otherops[2]) > -256
8568 && INTVAL (otherops[2]) < 256)))
8570 otherops[0] = operands[1];
8571 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8572 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8573 return "";
8575 /* Fall through */
8577 default:
8578 otherops[0] = adjust_address (operands[0], SImode, 4);
8579 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8580 output_asm_insn ("str%?\t%1, %0", operands);
8581 output_asm_insn ("str%?\t%1, %0", otherops);
8585 return "";
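/* Editorial examples (not part of the original source): a DImode load
   of r0/r1 from [r2, #4] takes the PLUS case and emits
   "ldmib r2, {r0, r1}"; with TARGET_LDRD and a small offset such as
   #12 it emits "ldrd r0, [r2, #12]" instead, falling back to an add
   followed by "ldrd r0, [r2]" when the destination overlaps a
   register used in the address.  */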
8588 /* Output an ADD r, s, #n where n may be too big for one instruction.
8589 If adding zero to one register, output nothing. */
8590 const char *
8591 output_add_immediate (rtx *operands)
8593 HOST_WIDE_INT n = INTVAL (operands[2]);
8595 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8597 if (n < 0)
8598 output_multi_immediate (operands,
8599 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8600 -n);
8601 else
8602 output_multi_immediate (operands,
8603 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8607 return "";
8610 /* Output a multiple immediate operation.
8611 OPERANDS is the vector of operands referred to in the output patterns.
8612 INSTR1 is the output pattern to use for the first constant.
8613 INSTR2 is the output pattern to use for subsequent constants.
8614 IMMED_OP is the index of the constant slot in OPERANDS.
8615 N is the constant value. */
8616 static const char *
8617 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8618 int immed_op, HOST_WIDE_INT n)
8620 #if HOST_BITS_PER_WIDE_INT > 32
8621 n &= 0xffffffff;
8622 #endif
8624 if (n == 0)
8626 /* Quick and easy output. */
8627 operands[immed_op] = const0_rtx;
8628 output_asm_insn (instr1, operands);
8630 else
8632 int i;
8633 const char * instr = instr1;
8635 /* Note that n is never zero here (which would give no output). */
8636 for (i = 0; i < 32; i += 2)
8638 if (n & (3 << i))
8640 operands[immed_op] = GEN_INT (n & (255 << i));
8641 output_asm_insn (instr, operands);
8642 instr = instr2;
8643 i += 6;
8648 return "";
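/* Worked example (editorial, not part of the original source): for
   n == 0x10F the loop above finds set bits in the pair at i == 0 and
   emits INSTR1 with #15 (n & 0xFF), then skips ahead, finds the pair
   at i == 8 and emits INSTR2 with #256 (n & 0xFF00).  Every chunk is
   an 8-bit value at an even position, hence a valid ARM immediate.  */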
8651 /* Return the appropriate ARM instruction for the operation code.
8652 The returned result should not be overwritten. OP is the rtx of the
8653 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8654 was shifted. */
8655 const char *
8656 arithmetic_instr (rtx op, int shift_first_arg)
8658 switch (GET_CODE (op))
8660 case PLUS:
8661 return "add";
8663 case MINUS:
8664 return shift_first_arg ? "rsb" : "sub";
8666 case IOR:
8667 return "orr";
8669 case XOR:
8670 return "eor";
8672 case AND:
8673 return "and";
8675 default:
8676 gcc_unreachable ();
8680 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8681 for the operation code. The returned result should not be overwritten.
8682 OP is the rtx of the shift.
8683 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8684 constant amount if the shift is by a constant. */
8685 static const char *
8686 shift_op (rtx op, HOST_WIDE_INT *amountp)
8688 const char * mnem;
8689 enum rtx_code code = GET_CODE (op);
8691 switch (GET_CODE (XEXP (op, 1)))
8693 case REG:
8694 case SUBREG:
8695 *amountp = -1;
8696 break;
8698 case CONST_INT:
8699 *amountp = INTVAL (XEXP (op, 1));
8700 break;
8702 default:
8703 gcc_unreachable ();
8706 switch (code)
8708 case ASHIFT:
8709 mnem = "asl";
8710 break;
8712 case ASHIFTRT:
8713 mnem = "asr";
8714 break;
8716 case LSHIFTRT:
8717 mnem = "lsr";
8718 break;
8720 case ROTATE:
8721 gcc_assert (*amountp != -1);
8722 *amountp = 32 - *amountp;
8724 /* Fall through. */
8726 case ROTATERT:
8727 mnem = "ror";
8728 break;
8730 case MULT:
8731 /* We never have to worry about the amount being other than a
8732 power of 2, since this case can never be reloaded from a reg. */
8733 gcc_assert (*amountp != -1);
8734 *amountp = int_log2 (*amountp);
8735 return "asl";
8737 default:
8738 gcc_unreachable ();
8741 if (*amountp != -1)
8743 /* This is not 100% correct, but follows from the desire to merge
8744 multiplication by a power of 2 with the recognizer for a
8745 shift. >=32 is not a valid shift for "asl", so we must try and
8746 output a shift that produces the correct arithmetical result.
8747 Using lsr #32 is identical except for the fact that the carry bit
8748 is not set correctly if we set the flags; but we never use the
8749 carry bit from such an operation, so we can ignore that. */
8750 if (code == ROTATERT)
8751 /* Rotate is just modulo 32. */
8752 *amountp &= 31;
8753 else if (*amountp != (*amountp & 31))
8755 if (code == ASHIFT)
8756 mnem = "lsr";
8757 *amountp = 32;
8760 /* Shifts of 0 are no-ops. */
8761 if (*amountp == 0)
8762 return NULL;
8765 return mnem;
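/* Editorial examples (not part of the original source): (mult x 8)
   returns "asl" with *AMOUNTP == 3; (ashift x 33) is rewritten above
   to "lsr" #32, which produces the same all-zero result but is
   encodable; a rotate right by 32 becomes a rotate by 0 and returns
   NULL so the caller can drop the shift altogether.  */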
8768 /* Obtain the shift count from a power of two. */
8770 static HOST_WIDE_INT
8771 int_log2 (HOST_WIDE_INT power)
8773 HOST_WIDE_INT shift = 0;
8775 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8777 gcc_assert (shift <= 31);
8778 shift++;
8781 return shift;
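/* Editorial note (not part of the original source): the loop above
   returns the index of the least significant set bit, so for a power
   of two such as 8 the result is int_log2 (8) == 3; the assertion
   catches a zero argument before shifting out of range.  */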
8784 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8785 because /bin/as is horribly restrictive. The judgement about
8786 whether or not each character is 'printable' (and can be output as
8787 is) or not (and must be printed with an octal escape) must be made
8788 with reference to the *host* character set -- the situation is
8789 similar to that discussed in the comments above pp_c_char in
8790 c-pretty-print.c. */
8792 #define MAX_ASCII_LEN 51
8794 void
8795 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8797 int i;
8798 int len_so_far = 0;
8800 fputs ("\t.ascii\t\"", stream);
8802 for (i = 0; i < len; i++)
8804 int c = p[i];
8806 if (len_so_far >= MAX_ASCII_LEN)
8808 fputs ("\"\n\t.ascii\t\"", stream);
8809 len_so_far = 0;
8812 if (ISPRINT (c))
8814 if (c == '\\' || c == '\"')
8816 putc ('\\', stream);
8817 len_so_far++;
8819 putc (c, stream);
8820 len_so_far++;
8822 else
8824 fprintf (stream, "\\%03o", c);
8825 len_so_far += 4;
8829 fputs ("\"\n", stream);
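/* Illustrative output (editorial, not part of the original source):
   for the three bytes 'a', '"' and '\n' the loop above produces

   .ascii	"a\"\012"

   printable characters pass through (with quotes and backslashes
   escaped) and all others become three-digit octal escapes; a fresh
   .ascii directive is started every MAX_ASCII_LEN characters.  */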
8832 /* Compute the register save mask for registers 0 through 12
8833 inclusive. This code is used by arm_compute_save_reg_mask. */
8835 static unsigned long
8836 arm_compute_save_reg0_reg12_mask (void)
8838 unsigned long func_type = arm_current_func_type ();
8839 unsigned long save_reg_mask = 0;
8840 unsigned int reg;
8842 if (IS_INTERRUPT (func_type))
8844 unsigned int max_reg;
8845 /* Interrupt functions must not corrupt any registers,
8846 even call clobbered ones. If this is a leaf function
8847 we can just examine the registers used by the RTL, but
8848 otherwise we have to assume that whatever function is
8849 called might clobber anything, and so we have to save
8850 all the call-clobbered registers as well. */
8851 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8852 /* FIQ handlers have registers r8 - r12 banked, so
8853 we only need to check r0 - r7. Normal ISRs only
8854 bank r14 and r15, so we must check up to r12.
8855 r13 is the stack pointer, which is always preserved,
8856 so we do not need to consider it here. */
8857 max_reg = 7;
8858 else
8859 max_reg = 12;
8861 for (reg = 0; reg <= max_reg; reg++)
8862 if (regs_ever_live[reg]
8863 || (! current_function_is_leaf && call_used_regs [reg]))
8864 save_reg_mask |= (1 << reg);
8866 /* Also save the pic base register if necessary. */
8867 if (flag_pic
8868 && !TARGET_SINGLE_PIC_BASE
8869 && current_function_uses_pic_offset_table)
8870 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8872 else
8874 /* In the normal case we only need to save those registers
8875 which are call saved and which are used by this function. */
8876 for (reg = 0; reg <= 10; reg++)
8877 if (regs_ever_live[reg] && ! call_used_regs [reg])
8878 save_reg_mask |= (1 << reg);
8880 /* Handle the frame pointer as a special case. */
8881 if (! TARGET_APCS_FRAME
8882 && ! frame_pointer_needed
8883 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8884 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8885 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8887 /* If we aren't loading the PIC register,
8888 don't stack it even though it may be live. */
8889 if (flag_pic
8890 && !TARGET_SINGLE_PIC_BASE
8891 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8892 || current_function_uses_pic_offset_table))
8893 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8896 /* Save registers so the exception handler can modify them. */
8897 if (current_function_calls_eh_return)
8899 unsigned int i;
8901 for (i = 0; ; i++)
8903 reg = EH_RETURN_DATA_REGNO (i);
8904 if (reg == INVALID_REGNUM)
8905 break;
8906 save_reg_mask |= 1 << reg;
8910 return save_reg_mask;
8913 /* Compute a bit mask of which registers need to be
8914 saved on the stack for the current function. */
8916 static unsigned long
8917 arm_compute_save_reg_mask (void)
8919 unsigned int save_reg_mask = 0;
8920 unsigned long func_type = arm_current_func_type ();
8922 if (IS_NAKED (func_type))
8923 /* This should never really happen. */
8924 return 0;
8926 /* If we are creating a stack frame, then we must save the frame pointer,
8927 IP (which will hold the old stack pointer), LR and the PC. */
8928 if (frame_pointer_needed)
8929 save_reg_mask |=
8930 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8931 | (1 << IP_REGNUM)
8932 | (1 << LR_REGNUM)
8933 | (1 << PC_REGNUM);
8935 /* Volatile functions do not return, so there
8936 is no need to save any other registers. */
8937 if (IS_VOLATILE (func_type))
8938 return save_reg_mask;
8940 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8942 /* Decide if we need to save the link register.
8943 Interrupt routines have their own banked link register,
8944 so they never need to save it.
8945 Otherwise if we do not use the link register we do not need to save
8946 it. If we are pushing other registers onto the stack however, we
8947 can save an instruction in the epilogue by pushing the link register
8948 now and then popping it back into the PC. This incurs extra memory
8949 accesses though, so we only do it when optimizing for size, and only
8950 if we know that we will not need a fancy return sequence. */
8951 if (regs_ever_live [LR_REGNUM]
8952 || (save_reg_mask
8953 && optimize_size
8954 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8955 && !current_function_calls_eh_return))
8956 save_reg_mask |= 1 << LR_REGNUM;
8958 if (cfun->machine->lr_save_eliminated)
8959 save_reg_mask &= ~ (1 << LR_REGNUM);
8961 if (TARGET_REALLY_IWMMXT
8962 && ((bit_count (save_reg_mask)
8963 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8965 unsigned int reg;
8967 /* The total number of registers that are going to be pushed
8968 onto the stack is odd. We need to ensure that the stack
8969 is 64-bit aligned before we start to save iWMMXt registers,
8970 and also before we start to create locals. (A local variable
8971 might be a double or long long which we will load/store using
8972 an iWMMXt instruction). Therefore we need to push another
8973 ARM register, so that the stack will be 64-bit aligned. We
8974 try to avoid using the arg registers (r0 - r3) as they might be
8975 used to pass values in a tail call. */
8976 for (reg = 4; reg <= 12; reg++)
8977 if ((save_reg_mask & (1 << reg)) == 0)
8978 break;
8980 if (reg <= 12)
8981 save_reg_mask |= (1 << reg);
8982 else
8984 cfun->machine->sibcall_blocked = 1;
8985 save_reg_mask |= (1 << 3);
8989 return save_reg_mask;
8993 /* Compute a bit mask of which registers need to be
8994 saved on the stack for the current function. */
8995 static unsigned long
8996 thumb_compute_save_reg_mask (void)
8998 unsigned long mask;
8999 unsigned reg;
9001 mask = 0;
9002 for (reg = 0; reg < 12; reg ++)
9003 if (regs_ever_live[reg] && !call_used_regs[reg])
9004 mask |= 1 << reg;
9006 if (flag_pic
9007 && !TARGET_SINGLE_PIC_BASE
9008 && current_function_uses_pic_offset_table)
9009 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9011 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
9012 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
9013 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
9015 /* LR will also be pushed if any lo regs are pushed. */
9016 if (mask & 0xff || thumb_force_lr_save ())
9017 mask |= (1 << LR_REGNUM);
9019 /* Make sure we have a low work register if we need one.
9020 We will need one if we are going to push a high register,
9021 but we are not currently intending to push a low register. */
9022 if ((mask & 0xff) == 0
9023 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9025 /* Use thumb_find_work_register to choose which register
9026 we will use. If the register is live then we will
9027 have to push it. Use LAST_LO_REGNUM as our fallback
9028 choice for the register to select. */
9029 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9031 if (! call_used_regs[reg])
9032 mask |= 1 << reg;
9035 return mask;
9039 /* Return the number of bytes required to save VFP registers. */
9040 static int
9041 arm_get_vfp_saved_size (void)
9043 unsigned int regno;
9044 int count;
9045 int saved;
9047 saved = 0;
9048 /* Space for saved VFP registers. */
9049 if (TARGET_HARD_FLOAT && TARGET_VFP)
9051 count = 0;
9052 for (regno = FIRST_VFP_REGNUM;
9053 regno < LAST_VFP_REGNUM;
9054 regno += 2)
9056 if ((!regs_ever_live[regno] || call_used_regs[regno])
9057 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9059 if (count > 0)
9061 /* Workaround ARM10 VFPr1 bug. */
9062 if (count == 2 && !arm_arch6)
9063 count++;
9064 saved += count * 8 + 4;
9066 count = 0;
9068 else
9069 count++;
9071 if (count > 0)
9073 if (count == 2 && !arm_arch6)
9074 count++;
9075 saved += count * 8 + 4;
9078 return saved;
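/* Editorial example (not part of the original source): if exactly the
   pairs d8/d9 and d10/d11 are live and call-saved, the run length is
   2, the pre-v6 erratum workaround above rounds it up to 3, and the
   function returns 3 * 8 + 4 = 28 bytes; on arm_arch6 it would be
   2 * 8 + 4 = 20.  */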
9082 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9083 everything bar the final return instruction. */
9084 const char *
9085 output_return_instruction (rtx operand, int really_return, int reverse)
9087 char conditional[10];
9088 char instr[100];
9089 unsigned reg;
9090 unsigned long live_regs_mask;
9091 unsigned long func_type;
9092 arm_stack_offsets *offsets;
9094 func_type = arm_current_func_type ();
9096 if (IS_NAKED (func_type))
9097 return "";
9099 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9101 /* If this function was declared non-returning, and we have
9102 found a tail call, then we have to trust that the called
9103 function won't return. */
9104 if (really_return)
9106 rtx ops[2];
9108 /* Otherwise, trap an attempted return by aborting. */
9109 ops[0] = operand;
9110 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9111 : "abort");
9112 assemble_external_libcall (ops[1]);
9113 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9116 return "";
9119 gcc_assert (!current_function_calls_alloca || really_return);
9121 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9123 return_used_this_function = 1;
9125 live_regs_mask = arm_compute_save_reg_mask ();
9127 if (live_regs_mask)
9129 const char * return_reg;
9131 /* If we do not have any special requirements for function exit
9132 (e.g. interworking, or ISR) then we can load the return address
9133 directly into the PC. Otherwise we must load it into LR. */
9134 if (really_return
9135 && ! TARGET_INTERWORK)
9136 return_reg = reg_names[PC_REGNUM];
9137 else
9138 return_reg = reg_names[LR_REGNUM];
9140 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9142 /* There are three possible reasons for the IP register
9143 being saved. 1) a stack frame was created, in which case
9144 IP contains the old stack pointer, or 2) an ISR routine
9145 corrupted it, or 3) it was saved to align the stack on
9146 iWMMXt. In case 1, restore IP into SP, otherwise just
9147 restore IP. */
9148 if (frame_pointer_needed)
9150 live_regs_mask &= ~ (1 << IP_REGNUM);
9151 live_regs_mask |= (1 << SP_REGNUM);
9153 else
9154 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9157 /* On some ARM architectures it is faster to use LDR rather than
9158 LDM to load a single register. On other architectures, the
9159 cost is the same. In 26 bit mode, or for exception handlers,
9160 we have to use LDM to load the PC so that the CPSR is also
9161 restored. */
9162 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9163 if (live_regs_mask == (1U << reg))
9164 break;
9166 if (reg <= LAST_ARM_REGNUM
9167 && (reg != LR_REGNUM
9168 || ! really_return
9169 || ! IS_INTERRUPT (func_type)))
9171 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9172 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9174 else
9176 char *p;
9177 int first = 1;
9179 /* Generate the load multiple instruction to restore the
9180 registers. Note we can get here, even if
9181 frame_pointer_needed is true, but only if sp already
9182 points to the base of the saved core registers. */
9183 if (live_regs_mask & (1 << SP_REGNUM))
9185 unsigned HOST_WIDE_INT stack_adjust;
9187 offsets = arm_get_frame_offsets ();
9188 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9189 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9191 if (stack_adjust && arm_arch5)
9192 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9193 else
9195 /* If we can't use ldmib (SA110 bug),
9196 then try to pop r3 instead. */
9197 if (stack_adjust)
9198 live_regs_mask |= 1 << 3;
9199 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9202 else
9203 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9205 p = instr + strlen (instr);
9207 for (reg = 0; reg <= SP_REGNUM; reg++)
9208 if (live_regs_mask & (1 << reg))
9210 int l = strlen (reg_names[reg]);
9212 if (first)
9213 first = 0;
9214 else
9216 memcpy (p, ", ", 2);
9217 p += 2;
9220 memcpy (p, "%|", 2);
9221 memcpy (p + 2, reg_names[reg], l);
9222 p += l + 2;
9225 if (live_regs_mask & (1 << LR_REGNUM))
9227 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9228 /* If returning from an interrupt, restore the CPSR. */
9229 if (IS_INTERRUPT (func_type))
9230 strcat (p, "^");
9232 else
9233 strcpy (p, "}");
9236 output_asm_insn (instr, & operand);
9238 /* See if we need to generate an extra instruction to
9239 perform the actual function return. */
9240 if (really_return
9241 && func_type != ARM_FT_INTERWORKED
9242 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9244 /* The return has already been handled
9245 by loading the LR into the PC. */
9246 really_return = 0;
9250 if (really_return)
9252 switch ((int) ARM_FUNC_TYPE (func_type))
9254 case ARM_FT_ISR:
9255 case ARM_FT_FIQ:
9256 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9257 break;
9259 case ARM_FT_INTERWORKED:
9260 sprintf (instr, "bx%s\t%%|lr", conditional);
9261 break;
9263 case ARM_FT_EXCEPTION:
9264 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9265 break;
9267 default:
9268 /* Use bx if it's available. */
9269 if (arm_arch5 || arm_arch4t)
9270 sprintf (instr, "bx%s\t%%|lr", conditional);
9271 else
9272 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9273 break;
9276 output_asm_insn (instr, & operand);
9279 return "";
9282 /* Write the function name into the code section, directly preceding
9283 the function prologue.
9285 Code will be output similar to this:
9286 t0
9287 .ascii "arm_poke_function_name", 0
9288 .align
9289 t1
9290 .word 0xff000000 + (t1 - t0)
9291 arm_poke_function_name
9292 mov ip, sp
9293 stmfd sp!, {fp, ip, lr, pc}
9294 sub fp, ip, #4
9296 When performing a stack backtrace, code can inspect the value
9297 of 'pc' stored at 'fp' + 0. If the trace function then looks
9298 at location pc - 12 and the top 8 bits are set, then we know
9299 that there is a function name embedded immediately preceding this
9300 location, whose length is given by ((pc[-3]) & ~0xff000000).
9302 We assume that pc is declared as a pointer to an unsigned long.
9304 It is of no benefit to output the function name if we are assembling
9305 a leaf function. These function types will not contain a stack
9306 backtrace structure, so it is not possible to determine the
9307 function name. */
9308 void
9309 arm_poke_function_name (FILE *stream, const char *name)
9311 unsigned long alignlength;
9312 unsigned long length;
9313 rtx x;
9315 length = strlen (name) + 1;
9316 alignlength = ROUND_UP_WORD (length);
9318 ASM_OUTPUT_ASCII (stream, name, length);
9319 ASM_OUTPUT_ALIGN (stream, 2);
9320 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9321 assemble_aligned_integer (UNITS_PER_WORD, x);
9324 /* Place some comments into the assembler stream
9325 describing the current function. */
9326 static void
9327 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9329 unsigned long func_type;
9331 if (!TARGET_ARM)
9333 thumb_output_function_prologue (f, frame_size);
9334 return;
9337 /* Sanity check. */
9338 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9340 func_type = arm_current_func_type ();
9342 switch ((int) ARM_FUNC_TYPE (func_type))
9344 default:
9345 case ARM_FT_NORMAL:
9346 break;
9347 case ARM_FT_INTERWORKED:
9348 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9349 break;
9350 case ARM_FT_ISR:
9351 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9352 break;
9353 case ARM_FT_FIQ:
9354 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9355 break;
9356 case ARM_FT_EXCEPTION:
9357 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9358 break;
9361 if (IS_NAKED (func_type))
9362 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9364 if (IS_VOLATILE (func_type))
9365 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9367 if (IS_NESTED (func_type))
9368 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9370 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9371 current_function_args_size,
9372 current_function_pretend_args_size, frame_size);
9374 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9375 frame_pointer_needed,
9376 cfun->machine->uses_anonymous_args);
9378 if (cfun->machine->lr_save_eliminated)
9379 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9381 if (current_function_calls_eh_return)
9382 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9384 #ifdef AOF_ASSEMBLER
9385 if (flag_pic)
9386 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9387 #endif
9389 return_used_this_function = 0;
9392 const char *
9393 arm_output_epilogue (rtx sibling)
9395 int reg;
9396 unsigned long saved_regs_mask;
9397 unsigned long func_type;
9398 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9399 frame that is $fp + 4 for a non-variadic function. */
9400 int floats_offset = 0;
9401 rtx operands[3];
9402 FILE * f = asm_out_file;
9403 unsigned int lrm_count = 0;
9404 int really_return = (sibling == NULL);
9405 int start_reg;
9406 arm_stack_offsets *offsets;
9408 /* If we have already generated the return instruction
9409 then it is futile to generate anything else. */
9410 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9411 return "";
9413 func_type = arm_current_func_type ();
9415 if (IS_NAKED (func_type))
9416 /* Naked functions don't have epilogues. */
9417 return "";
9419 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9421 rtx op;
9423 /* A volatile function should never return. Call abort. */
9424 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9425 assemble_external_libcall (op);
9426 output_asm_insn ("bl\t%a0", &op);
9428 return "";
9431 /* If we are throwing an exception, then we really must be doing a
9432 return, so we can't tail-call. */
9433 gcc_assert (!current_function_calls_eh_return || really_return);
9435 offsets = arm_get_frame_offsets ();
9436 saved_regs_mask = arm_compute_save_reg_mask ();
9438 if (TARGET_IWMMXT)
9439 lrm_count = bit_count (saved_regs_mask);
9441 floats_offset = offsets->saved_args;
9442 /* Compute how far away the floats will be. */
9443 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9444 if (saved_regs_mask & (1 << reg))
9445 floats_offset += 4;
9447 if (frame_pointer_needed)
9449 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9450 int vfp_offset = offsets->frame;
9452 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9454 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9455 if (regs_ever_live[reg] && !call_used_regs[reg])
9457 floats_offset += 12;
9458 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9459 reg, FP_REGNUM, floats_offset - vfp_offset);
9462 else
9464 start_reg = LAST_FPA_REGNUM;
9466 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9468 if (regs_ever_live[reg] && !call_used_regs[reg])
9470 floats_offset += 12;
9472 /* We can't unstack more than four registers at once. */
9473 if (start_reg - reg == 3)
9475 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9476 reg, FP_REGNUM, floats_offset - vfp_offset);
9477 start_reg = reg - 1;
9480 else
9482 if (reg != start_reg)
9483 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9484 reg + 1, start_reg - reg,
9485 FP_REGNUM, floats_offset - vfp_offset);
9486 start_reg = reg - 1;
9490 /* Just in case the last register checked also needs unstacking. */
9491 if (reg != start_reg)
9492 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9493 reg + 1, start_reg - reg,
9494 FP_REGNUM, floats_offset - vfp_offset);
9497 if (TARGET_HARD_FLOAT && TARGET_VFP)
9499 int saved_size;
9501 /* The fldmx insn does not have base+offset addressing modes,
9502 so we use IP to hold the address. */
9503 saved_size = arm_get_vfp_saved_size ();
9505 if (saved_size > 0)
9507 floats_offset += saved_size;
9508 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9509 FP_REGNUM, floats_offset - vfp_offset);
9511 start_reg = FIRST_VFP_REGNUM;
9512 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9514 if ((!regs_ever_live[reg] || call_used_regs[reg])
9515 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9517 if (start_reg != reg)
9518 arm_output_fldmx (f, IP_REGNUM,
9519 (start_reg - FIRST_VFP_REGNUM) / 2,
9520 (reg - start_reg) / 2);
9521 start_reg = reg + 2;
9524 if (start_reg != reg)
9525 arm_output_fldmx (f, IP_REGNUM,
9526 (start_reg - FIRST_VFP_REGNUM) / 2,
9527 (reg - start_reg) / 2);
9530 if (TARGET_IWMMXT)
9532 /* The frame pointer is guaranteed to be non-double-word aligned.
9533 This is because it is set to (old_stack_pointer - 4) and the
9534 old_stack_pointer was double word aligned. Thus the offset to
9535 the iWMMXt registers to be loaded must also be non-double-word
9536 sized, so that the resultant address *is* double-word aligned.
9537 We can ignore floats_offset since that was already included in
9538 the live_regs_mask. */
9539 lrm_count += (lrm_count % 2 ? 2 : 1);
9541 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9542 if (regs_ever_live[reg] && !call_used_regs[reg])
9544 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9545 reg, FP_REGNUM, lrm_count * 4);
9546 lrm_count += 2;
9550 /* saved_regs_mask should contain the IP, which at the time of stack
9551 frame generation actually contains the old stack pointer. So a
9552 quick way to unwind the stack is just to pop the IP register directly
9553 into the stack pointer. */
9554 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9555 saved_regs_mask &= ~ (1 << IP_REGNUM);
9556 saved_regs_mask |= (1 << SP_REGNUM);
9558 /* There are two registers left in saved_regs_mask - LR and PC. We
9559 only need to restore the LR register (the return address), but to
9560 save time we can load it directly into the PC, unless we need a
9561 special function exit sequence, or we are not really returning. */
9562 if (really_return
9563 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9564 && !current_function_calls_eh_return)
9565 /* Delete the LR from the register mask, so that the LR on
9566 the stack is loaded into the PC in the register mask. */
9567 saved_regs_mask &= ~ (1 << LR_REGNUM);
9568 else
9569 saved_regs_mask &= ~ (1 << PC_REGNUM);
9571 /* We must use SP as the base register, because SP is one of the
9572 registers being restored. If an interrupt or page fault
9573 happens in the ldm instruction, the SP might or might not
9574 have been restored. That would be bad, as then SP will no
9575 longer indicate the safe area of stack, and we can get stack
9576 corruption. Using SP as the base register means that it will
9577 be reset correctly to the original value, should an interrupt
9578 occur. If the stack pointer already points at the right
9579 place, then omit the subtraction. */
9580 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9581 || current_function_calls_alloca)
9582 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9583 4 * bit_count (saved_regs_mask));
9584 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9586 if (IS_INTERRUPT (func_type))
9587 /* Interrupt handlers will have pushed the
9588 IP onto the stack, so restore it now. */
9589 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9591 else
9593 /* Restore stack pointer if necessary. */
9594 if (offsets->outgoing_args != offsets->saved_regs)
9596 operands[0] = operands[1] = stack_pointer_rtx;
9597 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9598 output_add_immediate (operands);
9601 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9603 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9604 if (regs_ever_live[reg] && !call_used_regs[reg])
9605 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9606 reg, SP_REGNUM);
9608 else
9610 start_reg = FIRST_FPA_REGNUM;
9612 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9614 if (regs_ever_live[reg] && !call_used_regs[reg])
9616 if (reg - start_reg == 3)
9618 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9619 start_reg, SP_REGNUM);
9620 start_reg = reg + 1;
9623 else
9625 if (reg != start_reg)
9626 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9627 start_reg, reg - start_reg,
9628 SP_REGNUM);
9630 start_reg = reg + 1;
9634 /* Just in case the last register checked also needs unstacking. */
9635 if (reg != start_reg)
9636 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9637 start_reg, reg - start_reg, SP_REGNUM);
9640 if (TARGET_HARD_FLOAT && TARGET_VFP)
9642 start_reg = FIRST_VFP_REGNUM;
9643 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9645 if ((!regs_ever_live[reg] || call_used_regs[reg])
9646 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9648 if (start_reg != reg)
9649 arm_output_fldmx (f, SP_REGNUM,
9650 (start_reg - FIRST_VFP_REGNUM) / 2,
9651 (reg - start_reg) / 2);
9652 start_reg = reg + 2;
9655 if (start_reg != reg)
9656 arm_output_fldmx (f, SP_REGNUM,
9657 (start_reg - FIRST_VFP_REGNUM) / 2,
9658 (reg - start_reg) / 2);
9660 if (TARGET_IWMMXT)
9661 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9662 if (regs_ever_live[reg] && !call_used_regs[reg])
9663 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9665 /* If we can, restore the LR into the PC. */
9666 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9667 && really_return
9668 && current_function_pretend_args_size == 0
9669 && saved_regs_mask & (1 << LR_REGNUM)
9670 && !current_function_calls_eh_return)
9672 saved_regs_mask &= ~ (1 << LR_REGNUM);
9673 saved_regs_mask |= (1 << PC_REGNUM);
9676 /* Load the registers off the stack. If we only have one register
9677 to load, use the LDR instruction - it is faster. */
9678 if (saved_regs_mask == (1 << LR_REGNUM))
9680 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9682 else if (saved_regs_mask)
9684 if (saved_regs_mask & (1 << SP_REGNUM))
9685 /* Note - write back to the stack register is not enabled
9686 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9687 in the list of registers and if we add writeback the
9688 instruction becomes UNPREDICTABLE. */
9689 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9690 else
9691 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9694 if (current_function_pretend_args_size)
9696 /* Unwind the pre-pushed regs. */
9697 operands[0] = operands[1] = stack_pointer_rtx;
9698 operands[2] = GEN_INT (current_function_pretend_args_size);
9699 output_add_immediate (operands);
9703 /* We may have already restored PC directly from the stack. */
9704 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9705 return "";
9707 /* Stack adjustment for exception handler. */
9708 if (current_function_calls_eh_return)
9709 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9710 ARM_EH_STACKADJ_REGNUM);
9712 /* Generate the return instruction. */
9713 switch ((int) ARM_FUNC_TYPE (func_type))
9715 case ARM_FT_ISR:
9716 case ARM_FT_FIQ:
9717 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9718 break;
9720 case ARM_FT_EXCEPTION:
9721 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9722 break;
9724 case ARM_FT_INTERWORKED:
9725 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9726 break;
9728 default:
9729 if (arm_arch5 || arm_arch4t)
9730 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9731 else
9732 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9733 break;
9736 return "";
9739 static void
9740 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9741 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9743 arm_stack_offsets *offsets;
9745 if (TARGET_THUMB)
9747 int regno;
9749 /* Emit any call-via-reg trampolines that are needed for v4t support
9750 of call_reg and call_value_reg type insns. */
9751 for (regno = 0; regno < LR_REGNUM; regno++)
9753 rtx label = cfun->machine->call_via[regno];
9755 if (label != NULL)
9757 function_section (current_function_decl);
9758 targetm.asm_out.internal_label (asm_out_file, "L",
9759 CODE_LABEL_NUMBER (label));
9760 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9764 /* ??? Probably not safe to set this here, since it assumes that a
9765 function will be emitted as assembly immediately after we generate
9766 RTL for it. This does not happen for inline functions. */
9767 return_used_this_function = 0;
9769 else
9771 /* We need to take into account any stack-frame rounding. */
9772 offsets = arm_get_frame_offsets ();
9774 gcc_assert (!use_return_insn (FALSE, NULL)
9775 || !return_used_this_function
9776 || offsets->saved_regs == offsets->outgoing_args
9777 || frame_pointer_needed);
9779 /* Reset the ARM-specific per-function variables. */
9780 after_arm_reorg = 0;
9784 /* Generate and emit an insn that we will recognize as a push_multi.
9785 Unfortunately, since this insn does not reflect very well the actual
9786 semantics of the operation, we need to annotate the insn for the benefit
9787 of DWARF2 frame unwind information. */
9788 static rtx
9789 emit_multi_reg_push (unsigned long mask)
9791 int num_regs = 0;
9792 int num_dwarf_regs;
9793 int i, j;
9794 rtx par;
9795 rtx dwarf;
9796 int dwarf_par_index;
9797 rtx tmp, reg;
9799 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9800 if (mask & (1 << i))
9801 num_regs++;
9803 gcc_assert (num_regs && num_regs <= 16);
9805 /* We don't record the PC in the dwarf frame information. */
9806 num_dwarf_regs = num_regs;
9807 if (mask & (1 << PC_REGNUM))
9808 num_dwarf_regs--;
9810 /* For the body of the insn we are going to generate an UNSPEC in
9811 parallel with several USEs. This allows the insn to be recognized
9812 by the push_multi pattern in the arm.md file. The insn looks
9813 something like this:
9815 (parallel [
9816 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9817 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9818 (use (reg:SI 11 fp))
9819 (use (reg:SI 12 ip))
9820 (use (reg:SI 14 lr))
9821 (use (reg:SI 15 pc))
9824 For the frame note however, we try to be more explicit and actually
9825 show each register being stored into the stack frame, plus a (single)
9826 decrement of the stack pointer. We do it this way in order to be
9827 friendly to the stack unwinding code, which only wants to see a single
9828 stack decrement per instruction. The RTL we generate for the note looks
9829 something like this:
9831 (sequence [
9832 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9833 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9834 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9835 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9836 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9839 This sequence is used both by the code to support stack unwinding for
9840 exception handlers and the code to generate dwarf2 frame debugging. */
9842 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9843 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9844 dwarf_par_index = 1;
9846 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9848 if (mask & (1 << i))
9850 reg = gen_rtx_REG (SImode, i);
9852 XVECEXP (par, 0, 0)
9853 = gen_rtx_SET (VOIDmode,
9854 gen_rtx_MEM (BLKmode,
9855 gen_rtx_PRE_DEC (BLKmode,
9856 stack_pointer_rtx)),
9857 gen_rtx_UNSPEC (BLKmode,
9858 gen_rtvec (1, reg),
9859 UNSPEC_PUSH_MULT));
9861 if (i != PC_REGNUM)
9863 tmp = gen_rtx_SET (VOIDmode,
9864 gen_rtx_MEM (SImode, stack_pointer_rtx),
9865 reg);
9866 RTX_FRAME_RELATED_P (tmp) = 1;
9867 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9868 dwarf_par_index++;
9871 break;
9875 for (j = 1, i++; j < num_regs; i++)
9877 if (mask & (1 << i))
9879 reg = gen_rtx_REG (SImode, i);
9881 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9883 if (i != PC_REGNUM)
9885 tmp = gen_rtx_SET (VOIDmode,
9886 gen_rtx_MEM (SImode,
9887 plus_constant (stack_pointer_rtx,
9888 4 * j)),
9889 reg);
9890 RTX_FRAME_RELATED_P (tmp) = 1;
9891 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9894 j++;
9898 par = emit_insn (par);
9900 tmp = gen_rtx_SET (SImode,
9901 stack_pointer_rtx,
9902 gen_rtx_PLUS (SImode,
9903 stack_pointer_rtx,
9904 GEN_INT (-4 * num_regs)));
9905 RTX_FRAME_RELATED_P (tmp) = 1;
9906 XVECEXP (dwarf, 0, 0) = tmp;
9908 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9909 REG_NOTES (par));
9910 return par;
9913 /* Calculate the size of the return value that is passed in registers. */
9914 static int
9915 arm_size_return_regs (void)
9917 enum machine_mode mode;
9919 if (current_function_return_rtx != 0)
9920 mode = GET_MODE (current_function_return_rtx);
9921 else
9922 mode = DECL_MODE (DECL_RESULT (current_function_decl));
9924 return GET_MODE_SIZE (mode);
9927 static rtx
9928 emit_sfm (int base_reg, int count)
9930 rtx par;
9931 rtx dwarf;
9932 rtx tmp, reg;
9933 int i;
9935 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9936 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9938 reg = gen_rtx_REG (XFmode, base_reg++);
9940 XVECEXP (par, 0, 0)
9941 = gen_rtx_SET (VOIDmode,
9942 gen_rtx_MEM (BLKmode,
9943 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9944 gen_rtx_UNSPEC (BLKmode,
9945 gen_rtvec (1, reg),
9946 UNSPEC_PUSH_MULT));
9947 tmp = gen_rtx_SET (VOIDmode,
9948 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9949 RTX_FRAME_RELATED_P (tmp) = 1;
9950 XVECEXP (dwarf, 0, 1) = tmp;
9952 for (i = 1; i < count; i++)
9954 reg = gen_rtx_REG (XFmode, base_reg++);
9955 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9957 tmp = gen_rtx_SET (VOIDmode,
9958 gen_rtx_MEM (XFmode,
9959 plus_constant (stack_pointer_rtx,
9960 i * 12)),
9961 reg);
9962 RTX_FRAME_RELATED_P (tmp) = 1;
9963 XVECEXP (dwarf, 0, i + 1) = tmp;
9966 tmp = gen_rtx_SET (VOIDmode,
9967 stack_pointer_rtx,
9968 gen_rtx_PLUS (SImode,
9969 stack_pointer_rtx,
9970 GEN_INT (-12 * count)));
9971 RTX_FRAME_RELATED_P (tmp) = 1;
9972 XVECEXP (dwarf, 0, 0) = tmp;
9974 par = emit_insn (par);
9975 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9976 REG_NOTES (par));
9977 return par;
9981 /* Return true if the current function needs to save/restore LR. */
9983 static bool
9984 thumb_force_lr_save (void)
9986 return !cfun->machine->lr_save_eliminated
9987 && (!leaf_function_p ()
9988 || thumb_far_jump_used_p ()
9989 || regs_ever_live [LR_REGNUM]);
9993 /* Compute the distance from register FROM to register TO.
9994 These can be the arg pointer (26), the soft frame pointer (25),
9995 the stack pointer (13) or the hard frame pointer (11).
9996 In thumb mode r7 is used as the soft frame pointer, if needed.
9997 Typical stack layout looks like this:
9999 old stack pointer -> | |
10000 ----
10001 | | \
10002 | | saved arguments for
10003 | | vararg functions
10004 | | /
10005 --
10006 hard FP & arg pointer -> | | \
10007 | | stack
10008 | | frame
10009 | | /
10010 --
10011 | | \
10012 | | call saved
10013 | | registers
10014 soft frame pointer -> | | /
10015 --
10016 | | \
10017 | | local
10018 | | variables
10019 locals base pointer -> | | /
10020 --
10021 | | \
10022 | | outgoing
10023 | | arguments
10024 current stack pointer -> | | /
10025 --
10027 For a given function some or all of these stack components
10028 may not be needed, giving rise to the possibility of
10029 eliminating some of the registers.
10031 The values returned by this function must reflect the behavior
10032 of arm_expand_prologue() and arm_compute_save_reg_mask().
10034 The sign of the number returned reflects the direction of stack
10035 growth, so the values are positive for all eliminations except
10036 from the soft frame pointer to the hard frame pointer.
10038 SFP may point just inside the local variables block to ensure correct
10039 alignment. */
10042 /* Calculate stack offsets. These are used to calculate register elimination
10043 offsets and in prologue/epilogue code. */
10045 static arm_stack_offsets *
10046 arm_get_frame_offsets (void)
10048 struct arm_stack_offsets *offsets;
10049 unsigned long func_type;
10050 int leaf;
10051 int saved;
10052 HOST_WIDE_INT frame_size;
10054 offsets = &cfun->machine->stack_offsets;
10056 /* We need to know if we are a leaf function. Unfortunately, it
10057 is possible to be called after start_sequence has been called,
10058 which causes get_insns to return the insns for the sequence,
10059 not the function, which will cause leaf_function_p to return
10060 the incorrect result.
10061 However, the offsets are cached, and we only need
10062 to know about leaf functions once reload has completed, and the
10063 frame size cannot be changed after that time, so we can safely
10064 use the cached value. */
10066 if (reload_completed)
10067 return offsets;
10069 /* Initially this is the size of the local variables. It will be translated
10070 into an offset once we have determined the size of preceding data. */
10071 frame_size = ROUND_UP_WORD (get_frame_size ());
10073 leaf = leaf_function_p ();
10075 /* Space for variadic functions. */
10076 offsets->saved_args = current_function_pretend_args_size;
10078 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10080 if (TARGET_ARM)
10082 unsigned int regno;
10084 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10086 /* We know that SP will be doubleword aligned on entry, and we must
10087 preserve that condition at any subroutine call. We also require the
10088 soft frame pointer to be doubleword aligned. */
10090 if (TARGET_REALLY_IWMMXT)
10092 /* Check for the call-saved iWMMXt registers. */
10093 for (regno = FIRST_IWMMXT_REGNUM;
10094 regno <= LAST_IWMMXT_REGNUM;
10095 regno++)
10096 if (regs_ever_live [regno] && ! call_used_regs [regno])
10097 saved += 8;
10100 func_type = arm_current_func_type ();
10101 if (! IS_VOLATILE (func_type))
10103 /* Space for saved FPA registers. */
10104 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10105 if (regs_ever_live[regno] && ! call_used_regs[regno])
10106 saved += 12;
10108 /* Space for saved VFP registers. */
10109 if (TARGET_HARD_FLOAT && TARGET_VFP)
10110 saved += arm_get_vfp_saved_size ();
10113 else /* TARGET_THUMB */
10115 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10116 if (TARGET_BACKTRACE)
10117 saved += 16;
10120 /* Saved registers include the stack frame. */
10121 offsets->saved_regs = offsets->saved_args + saved;
10122 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10123 /* A leaf function does not need any stack alignment if it has nothing
10124 on the stack. */
10125 if (leaf && frame_size == 0)
10127 offsets->outgoing_args = offsets->soft_frame;
10128 return offsets;
10131 /* Ensure SFP has the correct alignment. */
10132 if (ARM_DOUBLEWORD_ALIGN
10133 && (offsets->soft_frame & 7))
10134 offsets->soft_frame += 4;
10136 offsets->locals_base = offsets->soft_frame + frame_size;
10137 offsets->outgoing_args = (offsets->locals_base
10138 + current_function_outgoing_args_size);
10140 if (ARM_DOUBLEWORD_ALIGN)
10142 /* Ensure SP remains doubleword aligned. */
10143 if (offsets->outgoing_args & 7)
10144 offsets->outgoing_args += 4;
10145 gcc_assert (!(offsets->outgoing_args & 7));
10148 return offsets;
10152 /* Calculate the relative offsets for the different stack pointers. Positive
10153 offsets are in the direction of stack growth. */
10155 HOST_WIDE_INT
10156 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10158 arm_stack_offsets *offsets;
10160 offsets = arm_get_frame_offsets ();
10162 /* OK, now we have enough information to compute the distances.
10163 There must be an entry in these switch tables for each pair
10164 of registers in ELIMINABLE_REGS, even if some of the entries
10165 seem to be redundant or useless. */
10166 switch (from)
10168 case ARG_POINTER_REGNUM:
10169 switch (to)
10171 case THUMB_HARD_FRAME_POINTER_REGNUM:
10172 return 0;
10174 case FRAME_POINTER_REGNUM:
10175 /* This is the reverse of the soft frame pointer
10176 to hard frame pointer elimination below. */
10177 return offsets->soft_frame - offsets->saved_args;
10179 case ARM_HARD_FRAME_POINTER_REGNUM:
10180 /* If there is no stack frame then the hard
10181 frame pointer and the arg pointer coincide. */
10182 if (offsets->frame == offsets->saved_regs)
10183 return 0;
10184 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10185 return (frame_pointer_needed
10186 && cfun->static_chain_decl != NULL
10187 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10189 case STACK_POINTER_REGNUM:
10190 /* If nothing has been pushed on the stack at all
10191 then this will return -4. This *is* correct! */
10192 return offsets->outgoing_args - (offsets->saved_args + 4);
10194 default:
10195 gcc_unreachable ();
10197 gcc_unreachable ();
10199 case FRAME_POINTER_REGNUM:
10200 switch (to)
10202 case THUMB_HARD_FRAME_POINTER_REGNUM:
10203 return 0;
10205 case ARM_HARD_FRAME_POINTER_REGNUM:
10206 /* The hard frame pointer points to the top entry in the
10207 stack frame. The soft frame pointer to the bottom entry
10208 in the stack frame. If there is no stack frame at all,
10209 then they are identical. */
10211 return offsets->frame - offsets->soft_frame;
10213 case STACK_POINTER_REGNUM:
10214 return offsets->outgoing_args - offsets->soft_frame;
10216 default:
10217 gcc_unreachable ();
10219 gcc_unreachable ();
10221 default:
10222 /* You cannot eliminate from the stack pointer.
10223 In theory you could eliminate from the hard frame
10224 pointer to the stack pointer, but this will never
10225 happen, since if a stack frame is not needed the
10226 hard frame pointer will never be used. */
10227 gcc_unreachable ();
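/* Editorial worked example, continuing the hypothetical frame above:
   with saved_args = 0, soft_frame = 40 and outgoing_args = 64, the
   switch above yields ARG_POINTER -> FRAME_POINTER = 40 - 0 = 40,
   FRAME_POINTER -> STACK_POINTER = 64 - 40 = 24, and
   ARG_POINTER -> STACK_POINTER = 64 - (0 + 4) = 60.  */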
10232 /* Generate the prologue instructions for entry into an ARM function. */
10233 void
10234 arm_expand_prologue (void)
10236 int reg;
10237 rtx amount;
10238 rtx insn;
10239 rtx ip_rtx;
10240 unsigned long live_regs_mask;
10241 unsigned long func_type;
10242 int fp_offset = 0;
10243 int saved_pretend_args = 0;
10244 int saved_regs = 0;
10245 unsigned HOST_WIDE_INT args_to_push;
10246 arm_stack_offsets *offsets;
10248 func_type = arm_current_func_type ();
10250 /* Naked functions don't have prologues. */
10251 if (IS_NAKED (func_type))
10252 return;
10254 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10255 args_to_push = current_function_pretend_args_size;
10257 /* Compute which registers we will have to save onto the stack. */
10258 live_regs_mask = arm_compute_save_reg_mask ();
10260 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10262 if (frame_pointer_needed)
10264 if (IS_INTERRUPT (func_type))
10266 /* Interrupt functions must not corrupt any registers.
10267 Creating a frame pointer, however, corrupts the IP
10268 register, so we must push it first. */
10269 insn = emit_multi_reg_push (1 << IP_REGNUM);
10271 /* Do not set RTX_FRAME_RELATED_P on this insn.
10272 The dwarf stack unwinding code only wants to see one
10273 stack decrement per function, and this is not it. If
10274 this instruction is labeled as being part of the frame
10275 creation sequence then dwarf2out_frame_debug_expr will
10276 die when it encounters the assignment of IP to FP
10277 later on, since the use of SP here establishes SP as
10278 the CFA register and not IP.
10280 Anyway this instruction is not really part of the stack
10281 frame creation although it is part of the prologue. */
10283 else if (IS_NESTED (func_type))
10285 /* The static chain register is the same as the IP register,
10286 which is used as a scratch register during stack frame creation.
10287 To get around this we need to find somewhere to store IP
10288 whilst the frame is being created. We try the following
10289 places in order:
10291 1. The last argument register.
10292 2. A slot on the stack above the frame. (This only
10293 works if the function is not a varargs function).
10294 3. Register r3, after pushing the argument registers
10295 onto the stack.
10297 Note - we only need to tell the dwarf2 backend about the SP
10298 adjustment in the second variant; the static chain register
10299 doesn't need to be unwound, as it doesn't contain a value
10300 inherited from the caller. */
10302 if (regs_ever_live[3] == 0)
10304 insn = gen_rtx_REG (SImode, 3);
10305 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10306 insn = emit_insn (insn);
10308 else if (args_to_push == 0)
10310 rtx dwarf;
10311 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10312 insn = gen_rtx_MEM (SImode, insn);
10313 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10314 insn = emit_insn (insn);
10316 fp_offset = 4;
10318 /* Just tell the dwarf backend that we adjusted SP. */
10319 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10320 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10321 GEN_INT (-fp_offset)));
10322 RTX_FRAME_RELATED_P (insn) = 1;
10323 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10324 dwarf, REG_NOTES (insn));
10326 else
10328 /* Store the args on the stack. */
10329 if (cfun->machine->uses_anonymous_args)
10330 insn = emit_multi_reg_push
10331 ((0xf0 >> (args_to_push / 4)) & 0xf);
10332 else
10333 insn = emit_insn
10334 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10335 GEN_INT (- args_to_push)));
10337 RTX_FRAME_RELATED_P (insn) = 1;
10339 saved_pretend_args = 1;
10340 fp_offset = args_to_push;
10341 args_to_push = 0;
10343 /* Now reuse r3 to preserve IP. */
10344 insn = gen_rtx_REG (SImode, 3);
10345 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10346 (void) emit_insn (insn);
10350 if (fp_offset)
10352 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10353 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10355 else
10356 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10358 insn = emit_insn (insn);
10359 RTX_FRAME_RELATED_P (insn) = 1;
10362 if (args_to_push)
10364 /* Push the argument registers, or reserve space for them. */
10365 if (cfun->machine->uses_anonymous_args)
10366 insn = emit_multi_reg_push
10367 ((0xf0 >> (args_to_push / 4)) & 0xf);
10368 else
10369 insn = emit_insn
10370 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10371 GEN_INT (- args_to_push)));
10372 RTX_FRAME_RELATED_P (insn) = 1;
10375 /* If this is an interrupt service routine, and the link register
10376 is going to be pushed, and we are not creating a stack frame,
10377 (which would involve an extra push of IP and a pop in the epilogue)
10378 subtracting four from LR now will mean that the function return
10379 can be done with a single instruction. */
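/* Editorial note: on entry to an IRQ or FIQ handler the hardware
   leaves LR four bytes beyond the return address, so such handlers
   normally return with something like "subs pc, lr, #4".  By doing
   the subtraction here, before LR is pushed, the epilogue can return
   with a single "ldmfd sp!, {..., pc}^" instead (instruction spelling
   illustrative).  */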
10380 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10381 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10382 && ! frame_pointer_needed)
10383 emit_insn (gen_rtx_SET (SImode,
10384 gen_rtx_REG (SImode, LR_REGNUM),
10385 gen_rtx_PLUS (SImode,
10386 gen_rtx_REG (SImode, LR_REGNUM),
10387 GEN_INT (-4))));
10389 if (live_regs_mask)
10391 insn = emit_multi_reg_push (live_regs_mask);
10392 saved_regs += bit_count (live_regs_mask) * 4;
10393 RTX_FRAME_RELATED_P (insn) = 1;
10396 if (TARGET_IWMMXT)
10397 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10398 if (regs_ever_live[reg] && ! call_used_regs [reg])
10400 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10401 insn = gen_rtx_MEM (V2SImode, insn);
10402 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10403 gen_rtx_REG (V2SImode, reg)));
10404 RTX_FRAME_RELATED_P (insn) = 1;
10405 saved_regs += 8;
10408 if (! IS_VOLATILE (func_type))
10410 int start_reg;
10412 /* Save any floating point call-saved registers used by this
10413 function. */
10414 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10416 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10417 if (regs_ever_live[reg] && !call_used_regs[reg])
10419 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10420 insn = gen_rtx_MEM (XFmode, insn);
10421 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10422 gen_rtx_REG (XFmode, reg)));
10423 RTX_FRAME_RELATED_P (insn) = 1;
10424 saved_regs += 12;
10427 else
10429 start_reg = LAST_FPA_REGNUM;
10431 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10433 if (regs_ever_live[reg] && !call_used_regs[reg])
10435 if (start_reg - reg == 3)
10437 insn = emit_sfm (reg, 4);
10438 RTX_FRAME_RELATED_P (insn) = 1;
10439 saved_regs += 48;
10440 start_reg = reg - 1;
10443 else
10445 if (start_reg != reg)
10447 insn = emit_sfm (reg + 1, start_reg - reg);
10448 RTX_FRAME_RELATED_P (insn) = 1;
10449 saved_regs += (start_reg - reg) * 12;
10451 start_reg = reg - 1;
10455 if (start_reg != reg)
10457 insn = emit_sfm (reg + 1, start_reg - reg);
10458 saved_regs += (start_reg - reg) * 12;
10459 RTX_FRAME_RELATED_P (insn) = 1;
10462 if (TARGET_HARD_FLOAT && TARGET_VFP)
10464 start_reg = FIRST_VFP_REGNUM;
10466 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10468 if ((!regs_ever_live[reg] || call_used_regs[reg])
10469 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10471 if (start_reg != reg)
10472 saved_regs += vfp_emit_fstmx (start_reg,
10473 (reg - start_reg) / 2);
10474 start_reg = reg + 2;
10477 if (start_reg != reg)
10478 saved_regs += vfp_emit_fstmx (start_reg,
10479 (reg - start_reg) / 2);
10483 if (frame_pointer_needed)
10485 /* Create the new frame pointer. */
10486 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10487 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10488 RTX_FRAME_RELATED_P (insn) = 1;
10490 if (IS_NESTED (func_type))
10492 /* Recover the static chain register. */
10493 if (regs_ever_live [3] == 0
10494 || saved_pretend_args)
10495 insn = gen_rtx_REG (SImode, 3);
10496 else /* if (current_function_pretend_args_size == 0) */
10498 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10499 GEN_INT (4));
10500 insn = gen_rtx_MEM (SImode, insn);
10503 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10504 /* Add a USE to stop propagate_one_insn() from barfing. */
10505 emit_insn (gen_prologue_use (ip_rtx));
10509 offsets = arm_get_frame_offsets ();
10510 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10512 /* This add can produce multiple insns for a large constant, so we
10513 need to get tricky. */
10514 rtx last = get_last_insn ();
10516 amount = GEN_INT (offsets->saved_args + saved_regs
10517 - offsets->outgoing_args);
10519 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10520 amount));
10522 do
10523 last = last ? NEXT_INSN (last) : get_insns ();
10524 RTX_FRAME_RELATED_P (last) = 1;
10526 while (last != insn);
10528 /* If the frame pointer is needed, emit a special barrier that
10529 will prevent the scheduler from moving stores to the frame
10530 before the stack adjustment. */
10531 if (frame_pointer_needed)
10532 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10533 hard_frame_pointer_rtx));
10537 if (flag_pic)
10538 arm_load_pic_register (0UL);
10540 /* If we are profiling, make sure no instructions are scheduled before
10541 the call to mcount. Similarly if the user has requested no
10542 scheduling in the prolog. */
10543 if (current_function_profile || !TARGET_SCHED_PROLOG)
10544 emit_insn (gen_blockage ());
10546 /* If the link register is being kept alive, with the return address in it,
10547 then make sure that it does not get reused by the ce2 pass. */
10548 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10550 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10551 cfun->machine->lr_save_eliminated = 1;
10555 /* If CODE is 'd', then X is a condition operand and the instruction
10556 should only be executed if the condition is true.
10557 If CODE is 'D', then X is a condition operand and the instruction
10558 should only be executed if the condition is false: however, if the mode
10559 of the comparison is CCFPEmode, then always execute the instruction -- we
10560 do this because in these circumstances !GE does not necessarily imply LT;
10561 in these cases the instruction pattern will take care to make sure that
10562 an instruction containing %d will follow, thereby undoing the effects of
10563 doing this instruction unconditionally.
10564 If CODE is 'N' then X is a floating point operand that must be negated
10565 before output.
10566 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10567 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
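/* Editorial example: for a DImode value held starting at r4, the 'M'
   code handled below prints "{r4-r5}", since ARM_NUM_REGS (DImode)
   is 2 -- the operand form expected by ldm/stm.  */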
10568 void
10569 arm_print_operand (FILE *stream, rtx x, int code)
10571 switch (code)
10573 case '@':
10574 fputs (ASM_COMMENT_START, stream);
10575 return;
10577 case '_':
10578 fputs (user_label_prefix, stream);
10579 return;
10581 case '|':
10582 fputs (REGISTER_PREFIX, stream);
10583 return;
10585 case '?':
10586 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10588 if (TARGET_THUMB)
10590 output_operand_lossage ("predicated Thumb instruction");
10591 break;
10593 if (current_insn_predicate != NULL)
10595 output_operand_lossage
10596 ("predicated instruction in conditional sequence");
10597 break;
10600 fputs (arm_condition_codes[arm_current_cc], stream);
10602 else if (current_insn_predicate)
10604 enum arm_cond_code code;
10606 if (TARGET_THUMB)
10608 output_operand_lossage ("predicated Thumb instruction");
10609 break;
10612 code = get_arm_condition_code (current_insn_predicate);
10613 fputs (arm_condition_codes[code], stream);
10615 return;
10617 case 'N':
10619 REAL_VALUE_TYPE r;
10620 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10621 r = REAL_VALUE_NEGATE (r);
10622 fprintf (stream, "%s", fp_const_from_val (&r));
10624 return;
10626 case 'B':
10627 if (GET_CODE (x) == CONST_INT)
10629 HOST_WIDE_INT val;
10630 val = ARM_SIGN_EXTEND (~INTVAL (x));
10631 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10633 else
10635 putc ('~', stream);
10636 output_addr_const (stream, x);
10638 return;
10640 case 'i':
10641 fprintf (stream, "%s", arithmetic_instr (x, 1));
10642 return;
10644 /* Truncate Cirrus shift counts. */
10645 case 's':
10646 if (GET_CODE (x) == CONST_INT)
10648 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10649 return;
10651 arm_print_operand (stream, x, 0);
10652 return;
10654 case 'I':
10655 fprintf (stream, "%s", arithmetic_instr (x, 0));
10656 return;
10658 case 'S':
10660 HOST_WIDE_INT val;
10661 const char * shift = shift_op (x, &val);
10663 if (shift)
10665 fprintf (stream, ", %s ", shift_op (x, &val));
10666 if (val == -1)
10667 arm_print_operand (stream, XEXP (x, 1), 0);
10668 else
10669 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10672 return;
10674 /* An explanation of the 'Q', 'R' and 'H' register operands:
10676 In a pair of registers containing a DI or DF value the 'Q'
10677 operand returns the register number of the register containing
10678 the least significant part of the value. The 'R' operand returns
10679 the register number of the register containing the most
10680 significant part of the value.
10682 The 'H' operand returns the higher of the two register numbers.
10683 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10684 same as the 'Q' operand, since the most significant part of the
10685 value is held in the lower number register. The reverse is true
10686 on systems where WORDS_BIG_ENDIAN is false.
10688 The purpose of these operands is to distinguish between cases
10689 where the endian-ness of the values is important (for example
10690 when they are added together), and cases where the endian-ness
10691 is irrelevant, but the order of register operations is important.
10692 For example when loading a value from memory into a register
10693 pair, the endian-ness does not matter. Provided that the value
10694 from the lower memory address is put into the lower numbered
10695 register, and the value from the higher address is put into the
10696 higher numbered register, the load will work regardless of whether
10697 the value being loaded is big-wordian or little-wordian. The
10698 order of the two register loads can matter however, if the address
10699 of the memory location is actually held in one of the registers
10700 being overwritten by the load. */
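/* Editorial example: for a DImode value in the pair r0/r1, the cases
   below print %Q as r0 and %R as r1 when WORDS_BIG_ENDIAN is false,
   and %Q as r1 and %R as r0 when it is true; %H prints r1 either
   way.  */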
10701 case 'Q':
10702 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10704 output_operand_lossage ("invalid operand for code '%c'", code);
10705 return;
10708 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10709 return;
10711 case 'R':
10712 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10714 output_operand_lossage ("invalid operand for code '%c'", code);
10715 return;
10718 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10719 return;
10721 case 'H':
10722 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10724 output_operand_lossage ("invalid operand for code '%c'", code);
10725 return;
10728 asm_fprintf (stream, "%r", REGNO (x) + 1);
10729 return;
10731 case 'm':
10732 asm_fprintf (stream, "%r",
10733 GET_CODE (XEXP (x, 0)) == REG
10734 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10735 return;
10737 case 'M':
10738 asm_fprintf (stream, "{%r-%r}",
10739 REGNO (x),
10740 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10741 return;
10743 case 'd':
10744 /* CONST_TRUE_RTX means always -- that's the default. */
10745 if (x == const_true_rtx)
10746 return;
10748 if (!COMPARISON_P (x))
10750 output_operand_lossage ("invalid operand for code '%c'", code);
10751 return;
10754 fputs (arm_condition_codes[get_arm_condition_code (x)],
10755 stream);
10756 return;
10758 case 'D':
10759 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10760 want to do that. */
10761 if (x == const_true_rtx)
10763 output_operand_lossage ("instruction never executed");
10764 return;
10766 if (!COMPARISON_P (x))
10768 output_operand_lossage ("invalid operand for code '%c'", code);
10769 return;
10772 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10773 (get_arm_condition_code (x))],
10774 stream);
10775 return;
10777 /* Cirrus registers can be accessed in a variety of ways:
10778 single floating point (f)
10779 double floating point (d)
10780 32bit integer (fx)
10781 64bit integer (dx). */
10782 case 'W': /* Cirrus register in F mode. */
10783 case 'X': /* Cirrus register in D mode. */
10784 case 'Y': /* Cirrus register in FX mode. */
10785 case 'Z': /* Cirrus register in DX mode. */
10786 gcc_assert (GET_CODE (x) == REG
10787 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
10789 fprintf (stream, "mv%s%s",
10790 code == 'W' ? "f"
10791 : code == 'X' ? "d"
10792 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10794 return;
10796 /* Print cirrus register in the mode specified by the register's mode. */
10797 case 'V':
10799 int mode = GET_MODE (x);
10801 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10803 output_operand_lossage ("invalid operand for code '%c'", code);
10804 return;
10807 fprintf (stream, "mv%s%s",
10808 mode == DFmode ? "d"
10809 : mode == SImode ? "fx"
10810 : mode == DImode ? "dx"
10811 : "f", reg_names[REGNO (x)] + 2);
10813 return;
10816 case 'U':
10817 if (GET_CODE (x) != REG
10818 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10819 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10820 /* Bad value for wCG register number. */
10822 output_operand_lossage ("invalid operand for code '%c'", code);
10823 return;
10826 else
10827 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10828 return;
10830 /* Print an iWMMXt control register name. */
10831 case 'w':
10832 if (GET_CODE (x) != CONST_INT
10833 || INTVAL (x) < 0
10834 || INTVAL (x) >= 16)
10835 /* Bad value for wC register number. */
10837 output_operand_lossage ("invalid operand for code '%c'", code);
10838 return;
10841 else
10843 static const char * wc_reg_names [16] =
10845 "wCID", "wCon", "wCSSF", "wCASF",
10846 "wC4", "wC5", "wC6", "wC7",
10847 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10848 "wC12", "wC13", "wC14", "wC15"
10851 fprintf (stream, "%s", wc_reg_names [INTVAL (x)]);
10853 return;
10855 /* Print a VFP double precision register name. */
10856 case 'P':
10858 int mode = GET_MODE (x);
10859 int num;
10861 if (mode != DImode && mode != DFmode)
10863 output_operand_lossage ("invalid operand for code '%c'", code);
10864 return;
10867 if (GET_CODE (x) != REG
10868 || !IS_VFP_REGNUM (REGNO (x)))
10870 output_operand_lossage ("invalid operand for code '%c'", code);
10871 return;
10874 num = REGNO(x) - FIRST_VFP_REGNUM;
10875 if (num & 1)
10877 output_operand_lossage ("invalid operand for code '%c'", code);
10878 return;
10881 fprintf (stream, "d%d", num >> 1);
10883 return;
10885 default:
10886 if (x == 0)
10888 output_operand_lossage ("missing operand");
10889 return;
10892 switch (GET_CODE (x))
10894 case REG:
10895 asm_fprintf (stream, "%r", REGNO (x));
10896 break;
10898 case MEM:
10899 output_memory_reference_mode = GET_MODE (x);
10900 output_address (XEXP (x, 0));
10901 break;
10903 case CONST_DOUBLE:
10904 fprintf (stream, "#%s", fp_immediate_constant (x));
10905 break;
10907 default:
10908 gcc_assert (GET_CODE (x) != NEG);
10909 fputc ('#', stream);
10910 output_addr_const (stream, x);
10911 break;
10916 #ifndef AOF_ASSEMBLER
10917 /* Target hook for assembling integer objects. The ARM version needs to
10918 handle word-sized values specially. */
10919 static bool
10920 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10922 if (size == UNITS_PER_WORD && aligned_p)
10924 fputs ("\t.word\t", asm_out_file);
10925 output_addr_const (asm_out_file, x);
10927 /* Mark symbols as position independent. We only do this in the
10928 .text segment, not in the .data segment. */
10929 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10930 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10932 if (GET_CODE (x) == SYMBOL_REF
10933 && (CONSTANT_POOL_ADDRESS_P (x)
10934 || SYMBOL_REF_LOCAL_P (x)))
10935 fputs ("(GOTOFF)", asm_out_file);
10936 else if (GET_CODE (x) == LABEL_REF)
10937 fputs ("(GOTOFF)", asm_out_file);
10938 else
10939 fputs ("(GOT)", asm_out_file);
10941 fputc ('\n', asm_out_file);
10942 return true;
10945 if (arm_vector_mode_supported_p (GET_MODE (x)))
10947 int i, units;
10949 gcc_assert (GET_CODE (x) == CONST_VECTOR);
10951 units = CONST_VECTOR_NUNITS (x);
10953 switch (GET_MODE (x))
10955 case V2SImode: size = 4; break;
10956 case V4HImode: size = 2; break;
10957 case V8QImode: size = 1; break;
10958 default:
10959 gcc_unreachable ();
10962 for (i = 0; i < units; i++)
10964 rtx elt;
10966 elt = CONST_VECTOR_ELT (x, i);
10967 assemble_integer
10968 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10971 return true;
10974 return default_assemble_integer (x, size, aligned_p);
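/* Editorial example of the PIC output above: constant-pool references
   to a local symbol "foo" and a global symbol "bar" (names
   hypothetical) would be emitted as

	.word	foo(GOTOFF)
	.word	bar(GOT)
*/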
10978 /* Add a function to the list of static constructors. */
10980 static void
10981 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10983 if (!TARGET_AAPCS_BASED)
10985 default_named_section_asm_out_constructor (symbol, priority);
10986 return;
10989 /* Put these in the .init_array section, using a special relocation. */
10990 ctors_section ();
10991 assemble_align (POINTER_SIZE);
10992 fputs ("\t.word\t", asm_out_file);
10993 output_addr_const (asm_out_file, symbol);
10994 fputs ("(target1)\n", asm_out_file);
10996 #endif
10998 /* A finite state machine takes care of noticing whether or not instructions
10999 can be conditionally executed, and thus decrease execution time and code
11000 size by deleting branch instructions. The fsm is controlled by
11001 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
11003 /* The states of the fsm controlling condition codes are:
11004 0: normal, do nothing special
11005 1: make ASM_OUTPUT_OPCODE not output this instruction
11006 2: make ASM_OUTPUT_OPCODE not output this instruction
11007 3: make instructions conditional
11008 4: make instructions conditional
11010 State transitions (state->state by whom under condition):
11011 0 -> 1 final_prescan_insn if the `target' is a label
11012 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
11013 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
11014 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
11015 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
11016 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
11017 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
11018 (the target insn is arm_target_insn).
11020 If the jump clobbers the conditions then we use states 2 and 4.
11022 A similar thing can be done with conditional return insns.
11024 XXX In case the `target' is an unconditional branch, this conditionalising
11025 of the instructions always reduces code size, but not always execution
11026 time. But then, I want to reduce the code size to somewhere near what
11027 /bin/cc produces. */
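/* Editorial example of the transformation this fsm performs (register
   names and label illustrative):

	cmp	r0, #0			cmp	r0, #0
	beq	.L1		   =>	addne	r1, r1, #1
	add	r1, r1, #1
   .L1:

   The conditional branch is deleted and the instruction it skipped is
   predicated on the inverse condition.  */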
11029 /* Returns the index of the ARM condition code string in
11030 `arm_condition_codes'. COMPARISON should be an rtx like
11031 `(eq (...) (...))'. */
11032 static enum arm_cond_code
11033 get_arm_condition_code (rtx comparison)
11035 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11036 int code;
11037 enum rtx_code comp_code = GET_CODE (comparison);
11039 if (GET_MODE_CLASS (mode) != MODE_CC)
11040 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11041 XEXP (comparison, 1));
11043 switch (mode)
11045 case CC_DNEmode: code = ARM_NE; goto dominance;
11046 case CC_DEQmode: code = ARM_EQ; goto dominance;
11047 case CC_DGEmode: code = ARM_GE; goto dominance;
11048 case CC_DGTmode: code = ARM_GT; goto dominance;
11049 case CC_DLEmode: code = ARM_LE; goto dominance;
11050 case CC_DLTmode: code = ARM_LT; goto dominance;
11051 case CC_DGEUmode: code = ARM_CS; goto dominance;
11052 case CC_DGTUmode: code = ARM_HI; goto dominance;
11053 case CC_DLEUmode: code = ARM_LS; goto dominance;
11054 case CC_DLTUmode: code = ARM_CC;
11056 dominance:
11057 gcc_assert (comp_code == EQ || comp_code == NE);
11059 if (comp_code == EQ)
11060 return ARM_INVERSE_CONDITION_CODE (code);
11061 return code;
11063 case CC_NOOVmode:
11064 switch (comp_code)
11066 case NE: return ARM_NE;
11067 case EQ: return ARM_EQ;
11068 case GE: return ARM_PL;
11069 case LT: return ARM_MI;
11070 default: gcc_unreachable ();
11073 case CC_Zmode:
11074 switch (comp_code)
11076 case NE: return ARM_NE;
11077 case EQ: return ARM_EQ;
11078 default: gcc_unreachable ();
11081 case CC_Nmode:
11082 switch (comp_code)
11084 case NE: return ARM_MI;
11085 case EQ: return ARM_PL;
11086 default: gcc_unreachable ();
11089 case CCFPEmode:
11090 case CCFPmode:
11091 /* These encodings assume that AC=1 in the FPA system control
11092 byte. This allows us to handle all cases except UNEQ and
11093 LTGT. */
11094 switch (comp_code)
11096 case GE: return ARM_GE;
11097 case GT: return ARM_GT;
11098 case LE: return ARM_LS;
11099 case LT: return ARM_MI;
11100 case NE: return ARM_NE;
11101 case EQ: return ARM_EQ;
11102 case ORDERED: return ARM_VC;
11103 case UNORDERED: return ARM_VS;
11104 case UNLT: return ARM_LT;
11105 case UNLE: return ARM_LE;
11106 case UNGT: return ARM_HI;
11107 case UNGE: return ARM_PL;
11108 /* UNEQ and LTGT do not have a representation. */
11109 case UNEQ: /* Fall through. */
11110 case LTGT: /* Fall through. */
11111 default: gcc_unreachable ();
11114 case CC_SWPmode:
11115 switch (comp_code)
11117 case NE: return ARM_NE;
11118 case EQ: return ARM_EQ;
11119 case GE: return ARM_LE;
11120 case GT: return ARM_LT;
11121 case LE: return ARM_GE;
11122 case LT: return ARM_GT;
11123 case GEU: return ARM_LS;
11124 case GTU: return ARM_CC;
11125 case LEU: return ARM_CS;
11126 case LTU: return ARM_HI;
11127 default: gcc_unreachable ();
11130 case CC_Cmode:
11131 switch (comp_code)
11133 case LTU: return ARM_CS;
11134 case GEU: return ARM_CC;
11135 default: gcc_unreachable ();
11138 case CCmode:
11139 switch (comp_code)
11141 case NE: return ARM_NE;
11142 case EQ: return ARM_EQ;
11143 case GE: return ARM_GE;
11144 case GT: return ARM_GT;
11145 case LE: return ARM_LE;
11146 case LT: return ARM_LT;
11147 case GEU: return ARM_CS;
11148 case GTU: return ARM_HI;
11149 case LEU: return ARM_LS;
11150 case LTU: return ARM_CC;
11151 default: gcc_unreachable ();
11154 default: gcc_unreachable ();
11158 void
11159 arm_final_prescan_insn (rtx insn)
11161 /* BODY will hold the body of INSN. */
11162 rtx body = PATTERN (insn);
11164 /* This will be 1 if trying to repeat the trick, and things need to be
11165 reversed if it appears to fail. */
11166 int reverse = 0;
11168 /* JUMP_CLOBBERS being one implies that the condition codes are clobbered
11169 if a branch is taken, even if the rtl suggests otherwise. It also
11170 means that we have to grub around within the jump expression to find
11171 out what the conditions are when the jump isn't taken. */
11172 int jump_clobbers = 0;
11174 /* If we start with a return insn, we only succeed if we find another one. */
11175 int seeking_return = 0;
11177 /* START_INSN will hold the insn from where we start looking. This is the
11178 first insn after the following code_label if REVERSE is true. */
11179 rtx start_insn = insn;
11181 /* If in state 4, check if the target branch is reached, in order to
11182 change back to state 0. */
11183 if (arm_ccfsm_state == 4)
11185 if (insn == arm_target_insn)
11187 arm_target_insn = NULL;
11188 arm_ccfsm_state = 0;
11190 return;
11193 /* If in state 3, it is possible to repeat the trick, if this insn is an
11194 unconditional branch to a label, and immediately following this branch
11195 is the previous target label which is only used once, and the label this
11196 branch jumps to is not too far off. */
11197 if (arm_ccfsm_state == 3)
11199 if (simplejump_p (insn))
11201 start_insn = next_nonnote_insn (start_insn);
11202 if (GET_CODE (start_insn) == BARRIER)
11204 /* XXX Isn't this always a barrier? */
11205 start_insn = next_nonnote_insn (start_insn);
11207 if (GET_CODE (start_insn) == CODE_LABEL
11208 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11209 && LABEL_NUSES (start_insn) == 1)
11210 reverse = TRUE;
11211 else
11212 return;
11214 else if (GET_CODE (body) == RETURN)
11216 start_insn = next_nonnote_insn (start_insn);
11217 if (GET_CODE (start_insn) == BARRIER)
11218 start_insn = next_nonnote_insn (start_insn);
11219 if (GET_CODE (start_insn) == CODE_LABEL
11220 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11221 && LABEL_NUSES (start_insn) == 1)
11223 reverse = TRUE;
11224 seeking_return = 1;
11226 else
11227 return;
11229 else
11230 return;
11233 gcc_assert (!arm_ccfsm_state || reverse);
11234 if (GET_CODE (insn) != JUMP_INSN)
11235 return;
11237 /* This jump might be paralleled with a clobber of the condition codes;
11238 the jump should always come first. */
11239 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11240 body = XVECEXP (body, 0, 0);
11242 if (reverse
11243 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11244 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11246 int insns_skipped;
11247 int fail = FALSE, succeed = FALSE;
11248 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11249 int then_not_else = TRUE;
11250 rtx this_insn = start_insn, label = 0;
11252 /* If the jump cannot be done with one instruction, we cannot
11253 conditionally execute the instruction in the inverse case. */
11254 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11256 jump_clobbers = 1;
11257 return;
11260 /* Register the insn jumped to. */
11261 if (reverse)
11263 if (!seeking_return)
11264 label = XEXP (SET_SRC (body), 0);
11266 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11267 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11268 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11270 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11271 then_not_else = FALSE;
11273 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11274 seeking_return = 1;
11275 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11277 seeking_return = 1;
11278 then_not_else = FALSE;
11280 else
11281 gcc_unreachable ();
11283 /* See how many insns this branch skips, and what kind of insns. If all
11284 insns are okay, and the label or unconditional branch to the same
11285 label is not too far away, succeed. */
11286 for (insns_skipped = 0;
11287 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11289 rtx scanbody;
11291 this_insn = next_nonnote_insn (this_insn);
11292 if (!this_insn)
11293 break;
11295 switch (GET_CODE (this_insn))
11297 case CODE_LABEL:
11298 /* Succeed if it is the target label, otherwise fail since
11299 control falls in from somewhere else. */
11300 if (this_insn == label)
11302 if (jump_clobbers)
11304 arm_ccfsm_state = 2;
11305 this_insn = next_nonnote_insn (this_insn);
11307 else
11308 arm_ccfsm_state = 1;
11309 succeed = TRUE;
11311 else
11312 fail = TRUE;
11313 break;
11315 case BARRIER:
11316 /* Succeed if the following insn is the target label.
11317 Otherwise fail.
11318 If return insns are used then the last insn in a function
11319 will be a barrier. */
11320 this_insn = next_nonnote_insn (this_insn);
11321 if (this_insn && this_insn == label)
11323 if (jump_clobbers)
11325 arm_ccfsm_state = 2;
11326 this_insn = next_nonnote_insn (this_insn);
11328 else
11329 arm_ccfsm_state = 1;
11330 succeed = TRUE;
11332 else
11333 fail = TRUE;
11334 break;
11336 case CALL_INSN:
11337 /* The AAPCS says that conditional calls should not be
11338 used since they make interworking inefficient (the
11339 linker can't transform BL<cond> into BLX). That's
11340 only a problem if the machine has BLX. */
11341 if (arm_arch5)
11343 fail = TRUE;
11344 break;
11347 /* Succeed if the following insn is the target label, or
11348 if the following two insns are a barrier and the
11349 target label. */
11350 this_insn = next_nonnote_insn (this_insn);
11351 if (this_insn && GET_CODE (this_insn) == BARRIER)
11352 this_insn = next_nonnote_insn (this_insn);
11354 if (this_insn && this_insn == label
11355 && insns_skipped < max_insns_skipped)
11357 if (jump_clobbers)
11359 arm_ccfsm_state = 2;
11360 this_insn = next_nonnote_insn (this_insn);
11362 else
11363 arm_ccfsm_state = 1;
11364 succeed = TRUE;
11366 else
11367 fail = TRUE;
11368 break;
11370 case JUMP_INSN:
11371 /* If this is an unconditional branch to the same label, succeed.
11372 If it is to another label, do nothing. If it is conditional,
11373 fail. */
11374 /* XXX Probably, the tests for SET and the PC are
11375 unnecessary. */
11377 scanbody = PATTERN (this_insn);
11378 if (GET_CODE (scanbody) == SET
11379 && GET_CODE (SET_DEST (scanbody)) == PC)
11381 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11382 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11384 arm_ccfsm_state = 2;
11385 succeed = TRUE;
11387 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11388 fail = TRUE;
11390 /* Fail if a conditional return is undesirable (e.g. on a
11391 StrongARM), but still allow this if optimizing for size. */
11392 else if (GET_CODE (scanbody) == RETURN
11393 && !use_return_insn (TRUE, NULL)
11394 && !optimize_size)
11395 fail = TRUE;
11396 else if (GET_CODE (scanbody) == RETURN
11397 && seeking_return)
11399 arm_ccfsm_state = 2;
11400 succeed = TRUE;
11402 else if (GET_CODE (scanbody) == PARALLEL)
11404 switch (get_attr_conds (this_insn))
11406 case CONDS_NOCOND:
11407 break;
11408 default:
11409 fail = TRUE;
11410 break;
11413 else
11414 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11416 break;
11418 case INSN:
11419 /* Instructions using or affecting the condition codes make it
11420 fail. */
11421 scanbody = PATTERN (this_insn);
11422 if (!(GET_CODE (scanbody) == SET
11423 || GET_CODE (scanbody) == PARALLEL)
11424 || get_attr_conds (this_insn) != CONDS_NOCOND)
11425 fail = TRUE;
11427 /* A conditional Cirrus instruction must be followed by
11428 a non-Cirrus instruction. However, since we
11429 conditionalize instructions in this function, and
11430 since by the time we get here we can no longer add
11431 instructions (nops), because shorten_branches() has
11432 already been called, we simply disable
11433 conditionalizing Cirrus instructions to be safe. */
11434 if (GET_CODE (scanbody) != USE
11435 && GET_CODE (scanbody) != CLOBBER
11436 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11437 fail = TRUE;
11438 break;
11440 default:
11441 break;
11444 if (succeed)
11446 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11447 arm_target_label = CODE_LABEL_NUMBER (label);
11448 else
11450 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11452 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11454 this_insn = next_nonnote_insn (this_insn);
11455 gcc_assert (!this_insn
11456 || (GET_CODE (this_insn) != BARRIER
11457 && GET_CODE (this_insn) != CODE_LABEL));
11459 if (!this_insn)
11461 /* Oh, dear! We ran off the end... give up. */
11462 recog (PATTERN (insn), insn, NULL);
11463 arm_ccfsm_state = 0;
11464 arm_target_insn = NULL;
11465 return;
11467 arm_target_insn = this_insn;
11469 if (jump_clobbers)
11471 gcc_assert (!reverse);
11472 arm_current_cc =
11473 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11474 0), 0), 1));
11475 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11476 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11477 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11478 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11480 else
11482 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11483 what it was. */
11484 if (!reverse)
11485 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11486 0));
11489 if (reverse || then_not_else)
11490 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11493 /* Restore recog_data (getting the attributes of other insns can
11494 destroy this array, but final.c assumes that it remains intact
11495 across this call; since the insn has been recognized already we
11496 call recog directly). */
11497 recog (PATTERN (insn), insn, NULL);
11501 /* Returns true if REGNO is a valid register
11502 for holding a quantity of type MODE. */
11503 int
11504 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11506 if (GET_MODE_CLASS (mode) == MODE_CC)
11507 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11509 if (TARGET_THUMB)
11510 /* For the Thumb we only allow values bigger than SImode in
11511 registers 0 - 6, so that there is always a second low
11512 register available to hold the upper part of the value.
11513 We probably ought to ensure that the register is the
11514 start of an even numbered register pair. */
11515 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11517 if (IS_CIRRUS_REGNUM (regno))
11518 /* We have outlawed SI values in Cirrus registers because they
11519 reside in the lower 32 bits, but SF values reside in the
11520 upper 32 bits. This causes gcc all sorts of grief. We can't
11521 even split the registers into pairs because Cirrus SI values
11522 get sign extended to 64 bits -- aldyh. */
11523 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11525 if (IS_VFP_REGNUM (regno))
11527 if (mode == SFmode || mode == SImode)
11528 return TRUE;
11530 /* DFmode values are only valid in even register pairs. */
11531 if (mode == DFmode)
11532 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11533 return FALSE;
11536 if (IS_IWMMXT_GR_REGNUM (regno))
11537 return mode == SImode;
11539 if (IS_IWMMXT_REGNUM (regno))
11540 return VALID_IWMMXT_REG_MODE (mode);
11542 /* We allow any value to be stored in the general registers.
11543 Restrict doubleword quantities to even register pairs so that we can
11544 use ldrd. */
11545 if (regno <= LAST_ARM_REGNUM)
11546 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11548 if ( regno == FRAME_POINTER_REGNUM
11549 || regno == ARG_POINTER_REGNUM)
11550 /* We only allow integers in the fake hard registers. */
11551 return GET_MODE_CLASS (mode) == MODE_INT;
11553 /* The only registers left are the FPA registers
11554 which we only allow to hold FP values. */
11555 return GET_MODE_CLASS (mode) == MODE_FLOAT
11556 && regno >= FIRST_FPA_REGNUM
11557 && regno <= LAST_FPA_REGNUM;
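/* Editorial examples of the rules above: DFmode is accepted in the
   first register of an even-numbered VFP pair but rejected at an odd
   offset from FIRST_VFP_REGNUM, and with TARGET_LDRD a DImode value
   is rejected in an odd-numbered core register so that ldrd/strd can
   be used.  */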
11560 enum reg_class
11561 arm_regno_class (int regno)
11563 if (TARGET_THUMB)
11565 if (regno == STACK_POINTER_REGNUM)
11566 return STACK_REG;
11567 if (regno == CC_REGNUM)
11568 return CC_REG;
11569 if (regno < 8)
11570 return LO_REGS;
11571 return HI_REGS;
11574 if ( regno <= LAST_ARM_REGNUM
11575 || regno == FRAME_POINTER_REGNUM
11576 || regno == ARG_POINTER_REGNUM)
11577 return GENERAL_REGS;
11579 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11580 return NO_REGS;
11582 if (IS_CIRRUS_REGNUM (regno))
11583 return CIRRUS_REGS;
11585 if (IS_VFP_REGNUM (regno))
11586 return VFP_REGS;
11588 if (IS_IWMMXT_REGNUM (regno))
11589 return IWMMXT_REGS;
11591 if (IS_IWMMXT_GR_REGNUM (regno))
11592 return IWMMXT_GR_REGS;
11594 return FPA_REGS;
11597 /* Handle a special case when computing the offset
11598 of an argument from the frame pointer. */
11599 int
11600 arm_debugger_arg_offset (int value, rtx addr)
11602 rtx insn;
11604 /* We are only interested if dbxout_parms() failed to compute the offset. */
11605 if (value != 0)
11606 return 0;
11608 /* We can only cope with the case where the address is held in a register. */
11609 if (GET_CODE (addr) != REG)
11610 return 0;
11612 /* If we are using the frame pointer to point at the argument, then
11613 an offset of 0 is correct. */
11614 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11615 return 0;
11617 /* If we are using the stack pointer to point at the
11618 argument, then an offset of 0 is correct. */
11619 if ((TARGET_THUMB || !frame_pointer_needed)
11620 && REGNO (addr) == SP_REGNUM)
11621 return 0;
11623 /* Oh dear. The argument is pointed to by a register rather
11624 than being held in a register, or being stored at a known
11625 offset from the frame pointer. Since GDB only understands
11626 those two kinds of argument we must translate the address
11627 held in the register into an offset from the frame pointer.
11628 We do this by searching through the insns for the function
11629 looking to see where this register gets its value. If the
11630 register is initialized from the frame pointer plus an offset
11631 then we are in luck and we can continue, otherwise we give up.
11633 This code is exercised by producing debugging information
11634 for a function with arguments like this:
11636 double func (double a, double b, int c, double d) {return d;}
11638 Without this code the stab for parameter 'd' will be set to
11639 an offset of 0 from the frame pointer, rather than 8. */
11641 /* The if() statement says:
11643 If the insn is a normal instruction
11644 and if the insn is setting the value in a register
11645 and if the register being set is the register holding the address of the argument
11646 and if the address is computed by an addition
11647 that involves adding to a register
11648 which is the frame pointer
11649 a constant integer
11651 then... */
11653 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11655 if ( GET_CODE (insn) == INSN
11656 && GET_CODE (PATTERN (insn)) == SET
11657 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11658 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11659 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11660 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11661 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11664 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11666 break;
11670 if (value == 0)
11672 debug_rtx (addr);
11673 warning (0, "unable to compute real location of stacked parameter");
11674 value = 8; /* XXX magic hack */
11677 return value;
11680 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11681 do \
11683 if ((MASK) & insn_flags) \
11684 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11685 BUILT_IN_MD, NULL, NULL_TREE); \
11687 while (0)
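/* Editorial sketch of how def_mbuiltin is used further down in this
   file (the exact arguments here are illustrative):

     def_mbuiltin (FL_IWMMXT, "__builtin_arm_waddb",
		   v8qi_ftype_v8qi_v8qi, ARM_BUILTIN_WADDB);

   The builtin is registered only when the corresponding FL_* bit is
   set in insn_flags for the selected architecture.  */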
11689 struct builtin_description
11691 const unsigned int mask;
11692 const enum insn_code icode;
11693 const char * const name;
11694 const enum arm_builtins code;
11695 const enum rtx_code comparison;
11696 const unsigned int flag;
11699 static const struct builtin_description bdesc_2arg[] =
11701 #define IWMMXT_BUILTIN(code, string, builtin) \
11702 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11703 ARM_BUILTIN_##builtin, 0, 0 },
11705 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11706 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11707 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11708 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11709 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11710 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11711 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11712 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11713 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11714 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11715 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11716 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11717 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11718 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11719 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11720 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11721 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11722 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11723 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11724 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11725 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11726 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11727 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11728 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11729 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11730 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11731 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11732 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11733 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11734 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11735 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11736 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11737 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11738 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11739 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11740 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11741 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11742 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11743 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11744 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11745 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11746 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11747 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11748 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11749 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11750 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11751 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11752 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11753 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11754 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11755 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11756 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11757 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11758 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11759 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11760 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11761 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11762 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11764 #define IWMMXT_BUILTIN2(code, builtin) \
11765 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11767 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11768 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11769 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11770 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11771 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11772 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11773 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11774 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11775 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11776 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11777 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11778 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11779 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11780 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11781 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11782 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11783 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11784 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11785 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11786 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11787 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11788 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11789 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11790 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11791 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11792 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11793 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11794 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11795 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11796 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11797 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11798 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11801 static const struct builtin_description bdesc_1arg[] =
11803 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11804 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11805 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11806 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11807 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11808 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11809 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11810 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11811 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11812 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11813 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11814 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11815 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11816 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11817 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11818 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11819 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11820 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11823 /* Set up all the iWMMXt builtins. This is
11824 not called if TARGET_IWMMXT is zero. */
11826 static void
11827 arm_init_iwmmxt_builtins (void)
11829 const struct builtin_description * d;
11830 size_t i;
11831 tree endlink = void_list_node;
11833 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11834 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11835 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11837 tree int_ftype_int
11838 = build_function_type (integer_type_node,
11839 tree_cons (NULL_TREE, integer_type_node, endlink));
11840 tree v8qi_ftype_v8qi_v8qi_int
11841 = build_function_type (V8QI_type_node,
11842 tree_cons (NULL_TREE, V8QI_type_node,
11843 tree_cons (NULL_TREE, V8QI_type_node,
11844 tree_cons (NULL_TREE,
11845 integer_type_node,
11846 endlink))));
11847 tree v4hi_ftype_v4hi_int
11848 = build_function_type (V4HI_type_node,
11849 tree_cons (NULL_TREE, V4HI_type_node,
11850 tree_cons (NULL_TREE, integer_type_node,
11851 endlink)));
11852 tree v2si_ftype_v2si_int
11853 = build_function_type (V2SI_type_node,
11854 tree_cons (NULL_TREE, V2SI_type_node,
11855 tree_cons (NULL_TREE, integer_type_node,
11856 endlink)));
11857 tree v2si_ftype_di_di
11858 = build_function_type (V2SI_type_node,
11859 tree_cons (NULL_TREE, long_long_integer_type_node,
11860 tree_cons (NULL_TREE, long_long_integer_type_node,
11861 endlink)));
11862 tree di_ftype_di_int
11863 = build_function_type (long_long_integer_type_node,
11864 tree_cons (NULL_TREE, long_long_integer_type_node,
11865 tree_cons (NULL_TREE, integer_type_node,
11866 endlink)));
11867 tree di_ftype_di_int_int
11868 = build_function_type (long_long_integer_type_node,
11869 tree_cons (NULL_TREE, long_long_integer_type_node,
11870 tree_cons (NULL_TREE, integer_type_node,
11871 tree_cons (NULL_TREE,
11872 integer_type_node,
11873 endlink))));
11874 tree int_ftype_v8qi
11875 = build_function_type (integer_type_node,
11876 tree_cons (NULL_TREE, V8QI_type_node,
11877 endlink));
11878 tree int_ftype_v4hi
11879 = build_function_type (integer_type_node,
11880 tree_cons (NULL_TREE, V4HI_type_node,
11881 endlink));
11882 tree int_ftype_v2si
11883 = build_function_type (integer_type_node,
11884 tree_cons (NULL_TREE, V2SI_type_node,
11885 endlink));
11886 tree int_ftype_v8qi_int
11887 = build_function_type (integer_type_node,
11888 tree_cons (NULL_TREE, V8QI_type_node,
11889 tree_cons (NULL_TREE, integer_type_node,
11890 endlink)));
11891 tree int_ftype_v4hi_int
11892 = build_function_type (integer_type_node,
11893 tree_cons (NULL_TREE, V4HI_type_node,
11894 tree_cons (NULL_TREE, integer_type_node,
11895 endlink)));
11896 tree int_ftype_v2si_int
11897 = build_function_type (integer_type_node,
11898 tree_cons (NULL_TREE, V2SI_type_node,
11899 tree_cons (NULL_TREE, integer_type_node,
11900 endlink)));
11901 tree v8qi_ftype_v8qi_int_int
11902 = build_function_type (V8QI_type_node,
11903 tree_cons (NULL_TREE, V8QI_type_node,
11904 tree_cons (NULL_TREE, integer_type_node,
11905 tree_cons (NULL_TREE,
11906 integer_type_node,
11907 endlink))));
11908 tree v4hi_ftype_v4hi_int_int
11909 = build_function_type (V4HI_type_node,
11910 tree_cons (NULL_TREE, V4HI_type_node,
11911 tree_cons (NULL_TREE, integer_type_node,
11912 tree_cons (NULL_TREE,
11913 integer_type_node,
11914 endlink))));
11915 tree v2si_ftype_v2si_int_int
11916 = build_function_type (V2SI_type_node,
11917 tree_cons (NULL_TREE, V2SI_type_node,
11918 tree_cons (NULL_TREE, integer_type_node,
11919 tree_cons (NULL_TREE,
11920 integer_type_node,
11921 endlink))));
11922 /* Miscellaneous. */
11923 tree v8qi_ftype_v4hi_v4hi
11924 = build_function_type (V8QI_type_node,
11925 tree_cons (NULL_TREE, V4HI_type_node,
11926 tree_cons (NULL_TREE, V4HI_type_node,
11927 endlink)));
11928 tree v4hi_ftype_v2si_v2si
11929 = build_function_type (V4HI_type_node,
11930 tree_cons (NULL_TREE, V2SI_type_node,
11931 tree_cons (NULL_TREE, V2SI_type_node,
11932 endlink)));
11933 tree v2si_ftype_v4hi_v4hi
11934 = build_function_type (V2SI_type_node,
11935 tree_cons (NULL_TREE, V4HI_type_node,
11936 tree_cons (NULL_TREE, V4HI_type_node,
11937 endlink)));
11938 tree v2si_ftype_v8qi_v8qi
11939 = build_function_type (V2SI_type_node,
11940 tree_cons (NULL_TREE, V8QI_type_node,
11941 tree_cons (NULL_TREE, V8QI_type_node,
11942 endlink)));
11943 tree v4hi_ftype_v4hi_di
11944 = build_function_type (V4HI_type_node,
11945 tree_cons (NULL_TREE, V4HI_type_node,
11946 tree_cons (NULL_TREE,
11947 long_long_integer_type_node,
11948 endlink)));
11949 tree v2si_ftype_v2si_di
11950 = build_function_type (V2SI_type_node,
11951 tree_cons (NULL_TREE, V2SI_type_node,
11952 tree_cons (NULL_TREE,
11953 long_long_integer_type_node,
11954 endlink)));
11955 tree void_ftype_int_int
11956 = build_function_type (void_type_node,
11957 tree_cons (NULL_TREE, integer_type_node,
11958 tree_cons (NULL_TREE, integer_type_node,
11959 endlink)));
11960 tree di_ftype_void
11961 = build_function_type (long_long_unsigned_type_node, endlink);
11962 tree di_ftype_v8qi
11963 = build_function_type (long_long_integer_type_node,
11964 tree_cons (NULL_TREE, V8QI_type_node,
11965 endlink));
11966 tree di_ftype_v4hi
11967 = build_function_type (long_long_integer_type_node,
11968 tree_cons (NULL_TREE, V4HI_type_node,
11969 endlink));
11970 tree di_ftype_v2si
11971 = build_function_type (long_long_integer_type_node,
11972 tree_cons (NULL_TREE, V2SI_type_node,
11973 endlink));
11974 tree v2si_ftype_v4hi
11975 = build_function_type (V2SI_type_node,
11976 tree_cons (NULL_TREE, V4HI_type_node,
11977 endlink));
11978 tree v4hi_ftype_v8qi
11979 = build_function_type (V4HI_type_node,
11980 tree_cons (NULL_TREE, V8QI_type_node,
11981 endlink));
11983 tree di_ftype_di_v4hi_v4hi
11984 = build_function_type (long_long_unsigned_type_node,
11985 tree_cons (NULL_TREE,
11986 long_long_unsigned_type_node,
11987 tree_cons (NULL_TREE, V4HI_type_node,
11988 tree_cons (NULL_TREE,
11989 V4HI_type_node,
11990 endlink))));
11992 tree di_ftype_v4hi_v4hi
11993 = build_function_type (long_long_unsigned_type_node,
11994 tree_cons (NULL_TREE, V4HI_type_node,
11995 tree_cons (NULL_TREE, V4HI_type_node,
11996 endlink)));
11998 /* Normal vector binops. */
11999 tree v8qi_ftype_v8qi_v8qi
12000 = build_function_type (V8QI_type_node,
12001 tree_cons (NULL_TREE, V8QI_type_node,
12002 tree_cons (NULL_TREE, V8QI_type_node,
12003 endlink)));
12004 tree v4hi_ftype_v4hi_v4hi
12005 = build_function_type (V4HI_type_node,
12006 tree_cons (NULL_TREE, V4HI_type_node,
12007 tree_cons (NULL_TREE, V4HI_type_node,
12008 endlink)));
12009 tree v2si_ftype_v2si_v2si
12010 = build_function_type (V2SI_type_node,
12011 tree_cons (NULL_TREE, V2SI_type_node,
12012 tree_cons (NULL_TREE, V2SI_type_node,
12013 endlink)));
12014 tree di_ftype_di_di
12015 = build_function_type (long_long_unsigned_type_node,
12016 tree_cons (NULL_TREE, long_long_unsigned_type_node,
12017 tree_cons (NULL_TREE,
12018 long_long_unsigned_type_node,
12019 endlink)));
12021 /* Add all builtins that are more or less simple operations on two
12022 operands. */
12023 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12025 /* Use one of the operands; the target can have a different mode for
12026 mask-generating compares. */
12027 enum machine_mode mode;
12028 tree type;
12030 if (d->name == 0)
12031 continue;
12033 mode = insn_data[d->icode].operand[1].mode;
12035 switch (mode)
12037 case V8QImode:
12038 type = v8qi_ftype_v8qi_v8qi;
12039 break;
12040 case V4HImode:
12041 type = v4hi_ftype_v4hi_v4hi;
12042 break;
12043 case V2SImode:
12044 type = v2si_ftype_v2si_v2si;
12045 break;
12046 case DImode:
12047 type = di_ftype_di_di;
12048 break;
12050 default:
12051 gcc_unreachable ();
12054 def_mbuiltin (d->mask, d->name, type, d->code);
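/* For instance, a bdesc_2arg entry whose insn operates on V8QImode
   (a byte-wide add such as "waddb" is assumed here) is registered by
   the loop above with type v8qi_ftype_v8qi_v8qi, i.e. as

     V8QI __builtin_arm_waddb (V8QI, V8QI);  */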
12057 /* Add the remaining MMX insns with somewhat more complicated types. */
12058 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12059 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12060 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12062 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12063 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12064 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12065 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12066 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12067 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12069 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12070 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12071 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12072 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12073 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12074 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12076 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12077 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12078 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12079 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12080 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12081 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12083 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12084 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12085 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12086 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12087 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12088 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12090 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12092 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12093 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12094 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12095 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12097 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12098 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12099 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12100 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12101 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12102 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12103 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12104 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12105 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12107 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12108 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12109 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12111 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12112 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12113 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12115 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12116 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12117 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12118 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12119 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12120 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12122 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12123 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12124 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12125 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12126 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12127 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12128 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12129 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12130 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12131 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12132 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12133 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12135 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12136 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12137 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12138 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12140 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12141 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12142 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12143 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12144 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12145 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12146 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
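/* A usage sketch, for illustration only: once registered, these can be
   called from iWMMXt-aware user code, e.g.

     typedef short v4hi __attribute__ ((vector_size (8)));

     unsigned long long
     mac (unsigned long long acc, v4hi a, v4hi b)
     {
       return __builtin_arm_wmacs (acc, a, b);
     }

   where __builtin_arm_wmacs carries type di_ftype_di_v4hi_v4hi as
   registered above.  This assumes an iWMMXt-capable target, since
   arm_init_iwmmxt_builtins is only reached when TARGET_REALLY_IWMMXT
   holds (see arm_init_builtins below).  */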
12149 static void
12150 arm_init_builtins (void)
12152 if (TARGET_REALLY_IWMMXT)
12153 arm_init_iwmmxt_builtins ();
12156 /* Errors in the source file can cause expand_expr to return const0_rtx
12157 where we expect a vector. To avoid crashing, use one of the vector
12158 clear instructions. */
12160 static rtx
12161 safe_vector_operand (rtx x, enum machine_mode mode)
12163 if (x != const0_rtx)
12164 return x;
12165 x = gen_reg_rtx (mode);
12167 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12168 : gen_rtx_SUBREG (DImode, x, 0)));
12169 return x;
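/* Illustration: if expand_expr hands us const0_rtx where a V8QI operand
   was expected, the code above substitutes a fresh register cleared by
   iwmmxt_clrdi (through a DImode SUBREG, since the clear pattern is
   defined on DImode), and expansion proceeds instead of crashing.  */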
12172 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12174 static rtx
12175 arm_expand_binop_builtin (enum insn_code icode,
12176 tree arglist, rtx target)
12178 rtx pat;
12179 tree arg0 = TREE_VALUE (arglist);
12180 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12181 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12182 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12183 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12184 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12185 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12187 if (VECTOR_MODE_P (mode0))
12188 op0 = safe_vector_operand (op0, mode0);
12189 if (VECTOR_MODE_P (mode1))
12190 op1 = safe_vector_operand (op1, mode1);
12192 if (! target
12193 || GET_MODE (target) != tmode
12194 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12195 target = gen_reg_rtx (tmode);
12197 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12199 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12200 op0 = copy_to_mode_reg (mode0, op0);
12201 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12202 op1 = copy_to_mode_reg (mode1, op1);
12204 pat = GEN_FCN (icode) (target, op0, op1);
12205 if (! pat)
12206 return 0;
12207 emit_insn (pat);
12208 return target;
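/* For example, ARM_BUILTIN_WSADB is routed through this helper as
   arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
   both V8QI operands get copied into registers whenever the insn's
   operand predicates reject the rtx produced by expand_expr.  */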
12211 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12213 static rtx
12214 arm_expand_unop_builtin (enum insn_code icode,
12215 tree arglist, rtx target, int do_load)
12217 rtx pat;
12218 tree arg0 = TREE_VALUE (arglist);
12219 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12220 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12221 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12223 if (! target
12224 || GET_MODE (target) != tmode
12225 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12226 target = gen_reg_rtx (tmode);
12227 if (do_load)
12228 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12229 else
12231 if (VECTOR_MODE_P (mode0))
12232 op0 = safe_vector_operand (op0, mode0);
12234 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12235 op0 = copy_to_mode_reg (mode0, op0);
12238 pat = GEN_FCN (icode) (target, op0);
12239 if (! pat)
12240 return 0;
12241 emit_insn (pat);
12242 return target;
12245 /* Expand an expression EXP that calls a built-in function,
12246 with result going to TARGET if that's convenient
12247 (and in mode MODE if that's convenient).
12248 SUBTARGET may be used as the target for computing one of EXP's operands.
12249 IGNORE is nonzero if the value is to be ignored. */
12251 static rtx
12252 arm_expand_builtin (tree exp,
12253 rtx target,
12254 rtx subtarget ATTRIBUTE_UNUSED,
12255 enum machine_mode mode ATTRIBUTE_UNUSED,
12256 int ignore ATTRIBUTE_UNUSED)
12258 const struct builtin_description * d;
12259 enum insn_code icode;
12260 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12261 tree arglist = TREE_OPERAND (exp, 1);
12262 tree arg0;
12263 tree arg1;
12264 tree arg2;
12265 rtx op0;
12266 rtx op1;
12267 rtx op2;
12268 rtx pat;
12269 int fcode = DECL_FUNCTION_CODE (fndecl);
12270 size_t i;
12271 enum machine_mode tmode;
12272 enum machine_mode mode0;
12273 enum machine_mode mode1;
12274 enum machine_mode mode2;
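/* Dispatch overview: builtins that need special treatment (immediate-only
   selector operands, void results, fixed target registers) are handled by
   the switch below; everything else falls through to the generic
   bdesc_2arg / bdesc_1arg table walks at the end of this function.  */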
12276 switch (fcode)
12278 case ARM_BUILTIN_TEXTRMSB:
12279 case ARM_BUILTIN_TEXTRMUB:
12280 case ARM_BUILTIN_TEXTRMSH:
12281 case ARM_BUILTIN_TEXTRMUH:
12282 case ARM_BUILTIN_TEXTRMSW:
12283 case ARM_BUILTIN_TEXTRMUW:
12284 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12285 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12286 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12287 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12288 : CODE_FOR_iwmmxt_textrmw);
12290 arg0 = TREE_VALUE (arglist);
12291 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12292 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12293 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12294 tmode = insn_data[icode].operand[0].mode;
12295 mode0 = insn_data[icode].operand[1].mode;
12296 mode1 = insn_data[icode].operand[2].mode;
12298 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12299 op0 = copy_to_mode_reg (mode0, op0);
12300 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12302 /* @@@ better error message */
12303 error ("selector must be an immediate");
12304 return gen_reg_rtx (tmode);
12306 if (target == 0
12307 || GET_MODE (target) != tmode
12308 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12309 target = gen_reg_rtx (tmode);
12310 pat = GEN_FCN (icode) (target, op0, op1);
12311 if (! pat)
12312 return 0;
12313 emit_insn (pat);
12314 return target;
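/* E.g. __builtin_arm_textrmub (v, 3) extracts byte 3 of V zero-extended;
   operand 2's predicate only accepts immediates, so a non-constant
   selector takes the error path above.  */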
12316 case ARM_BUILTIN_TINSRB:
12317 case ARM_BUILTIN_TINSRH:
12318 case ARM_BUILTIN_TINSRW:
12319 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12320 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12321 : CODE_FOR_iwmmxt_tinsrw);
12322 arg0 = TREE_VALUE (arglist);
12323 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12324 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12325 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12326 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12327 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12328 tmode = insn_data[icode].operand[0].mode;
12329 mode0 = insn_data[icode].operand[1].mode;
12330 mode1 = insn_data[icode].operand[2].mode;
12331 mode2 = insn_data[icode].operand[3].mode;
12333 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12334 op0 = copy_to_mode_reg (mode0, op0);
12335 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12336 op1 = copy_to_mode_reg (mode1, op1);
12337 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12339 /* @@@ better error message */
12340 error ("selector must be an immediate");
12341 return const0_rtx;
12343 if (target == 0
12344 || GET_MODE (target) != tmode
12345 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12346 target = gen_reg_rtx (tmode);
12347 pat = GEN_FCN (icode) (target, op0, op1, op2);
12348 if (! pat)
12349 return 0;
12350 emit_insn (pat);
12351 return target;
12353 case ARM_BUILTIN_SETWCX:
12354 arg0 = TREE_VALUE (arglist);
12355 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12356 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12357 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12358 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12359 return 0;
12361 case ARM_BUILTIN_GETWCX:
12362 arg0 = TREE_VALUE (arglist);
12363 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12364 target = gen_reg_rtx (SImode);
12365 emit_insn (gen_iwmmxt_tmrc (target, op0));
12366 return target;
12368 case ARM_BUILTIN_WSHUFH:
12369 icode = CODE_FOR_iwmmxt_wshufh;
12370 arg0 = TREE_VALUE (arglist);
12371 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12372 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12373 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12374 tmode = insn_data[icode].operand[0].mode;
12375 mode1 = insn_data[icode].operand[1].mode;
12376 mode2 = insn_data[icode].operand[2].mode;
12378 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12379 op0 = copy_to_mode_reg (mode1, op0);
12380 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12382 /* @@@ better error message */
12383 error ("mask must be an immediate");
12384 return const0_rtx;
12386 if (target == 0
12387 || GET_MODE (target) != tmode
12388 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12389 target = gen_reg_rtx (tmode);
12390 pat = GEN_FCN (icode) (target, op0, op1);
12391 if (! pat)
12392 return 0;
12393 emit_insn (pat);
12394 return target;
12396 case ARM_BUILTIN_WSADB:
12397 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12398 case ARM_BUILTIN_WSADH:
12399 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12400 case ARM_BUILTIN_WSADBZ:
12401 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12402 case ARM_BUILTIN_WSADHZ:
12403 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12405 /* Several three-argument builtins. */
12406 case ARM_BUILTIN_WMACS:
12407 case ARM_BUILTIN_WMACU:
12408 case ARM_BUILTIN_WALIGN:
12409 case ARM_BUILTIN_TMIA:
12410 case ARM_BUILTIN_TMIAPH:
12411 case ARM_BUILTIN_TMIATT:
12412 case ARM_BUILTIN_TMIATB:
12413 case ARM_BUILTIN_TMIABT:
12414 case ARM_BUILTIN_TMIABB:
12415 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12416 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12417 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12418 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12419 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12420 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12421 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12422 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12423 : CODE_FOR_iwmmxt_walign);
12424 arg0 = TREE_VALUE (arglist);
12425 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12426 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12427 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12428 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12429 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12430 tmode = insn_data[icode].operand[0].mode;
12431 mode0 = insn_data[icode].operand[1].mode;
12432 mode1 = insn_data[icode].operand[2].mode;
12433 mode2 = insn_data[icode].operand[3].mode;
12435 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12436 op0 = copy_to_mode_reg (mode0, op0);
12437 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12438 op1 = copy_to_mode_reg (mode1, op1);
12439 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12440 op2 = copy_to_mode_reg (mode2, op2);
12441 if (target == 0
12442 || GET_MODE (target) != tmode
12443 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12444 target = gen_reg_rtx (tmode);
12445 pat = GEN_FCN (icode) (target, op0, op1, op2);
12446 if (! pat)
12447 return 0;
12448 emit_insn (pat);
12449 return target;
12451 case ARM_BUILTIN_WZERO:
12452 target = gen_reg_rtx (DImode);
12453 emit_insn (gen_iwmmxt_clrdi (target));
12454 return target;
12456 default:
12457 break;
12460 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12461 if (d->code == (const enum arm_builtins) fcode)
12462 return arm_expand_binop_builtin (d->icode, arglist, target);
12464 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12465 if (d->code == (const enum arm_builtins) fcode)
12466 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12468 /* @@@ Should really do something sensible here. */
12469 return NULL_RTX;
12472 /* Return the number (counting from 0) of
12473 the least significant set bit in MASK. */
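/* For example, number_of_first_bit_set (0x18) returns 3: 0x18 is binary
   11000 and bit 3 is its lowest set bit.  MASK must be nonzero, or the
   loop below does not terminate.  */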
12475 inline static int
12476 number_of_first_bit_set (unsigned mask)
12478 int bit;
12480 for (bit = 0;
12481 (mask & (1 << bit)) == 0;
12482 ++bit)
12483 continue;
12485 return bit;
12488 /* Emit code to push or pop registers to or from the stack. F is the
12489 assembly file. MASK is the registers to push or pop. PUSH is
12490 nonzero if we should push, and zero if we should pop. For debugging
12491 output, if pushing, adjust CFA_OFFSET by the amount of space added
12492 to the stack. REAL_REGS should have the same number of bits set as
12493 MASK, and will be used instead (in the same order) to describe which
12494 registers were saved - this is used to mark the save slots when we
12495 push high registers after moving them to low registers. */
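/* A worked example: with PUSH == 1 and MASK == REAL_REGS == 0x40f0
   (r4-r7 plus lr), the code below emits

     push {r4, r5, r6, r7, lr}

   preceded by a matching ".save {r4, r5, r6, r7, lr}" directive when
   ARM_EABI_UNWIND_TABLES is set.  */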
12496 static void
12497 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12498 unsigned long real_regs)
12500 int regno;
12501 int lo_mask = mask & 0xFF;
12502 int pushed_words = 0;
12504 gcc_assert (mask);
12506 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12508 /* Special case. Do not generate a POP PC statement here; do it in
12509 thumb_exit (). */
12510 thumb_exit (f, -1);
12511 return;
12514 if (ARM_EABI_UNWIND_TABLES && push)
12516 fprintf (f, "\t.save\t{");
12517 for (regno = 0; regno < 15; regno++)
12519 if (real_regs & (1 << regno))
12521 if (real_regs & ((1 << regno) - 1))
12522 fprintf (f, ", ");
12523 asm_fprintf (f, "%r", regno);
12526 fprintf (f, "}\n");
12529 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12531 /* Look at the low registers first. */
12532 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12534 if (lo_mask & 1)
12536 asm_fprintf (f, "%r", regno);
12538 if ((lo_mask & ~1) != 0)
12539 fprintf (f, ", ");
12541 pushed_words++;
12545 if (push && (mask & (1 << LR_REGNUM)))
12547 /* Catch pushing the LR. */
12548 if (mask & 0xFF)
12549 fprintf (f, ", ");
12551 asm_fprintf (f, "%r", LR_REGNUM);
12553 pushed_words++;
12555 else if (!push && (mask & (1 << PC_REGNUM)))
12557 /* Catch popping the PC. */
12558 if (TARGET_INTERWORK || TARGET_BACKTRACE
12559 || current_function_calls_eh_return)
12561 /* The PC is never popped directly; instead
12562 it is popped into r3 and then BX is used. */
12563 fprintf (f, "}\n");
12565 thumb_exit (f, -1);
12567 return;
12569 else
12571 if (mask & 0xFF)
12572 fprintf (f, ", ");
12574 asm_fprintf (f, "%r", PC_REGNUM);
12578 fprintf (f, "}\n");
12580 if (push && pushed_words && dwarf2out_do_frame ())
12582 char *l = dwarf2out_cfi_label ();
12583 int pushed_mask = real_regs;
12585 *cfa_offset += pushed_words * 4;
12586 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12588 pushed_words = 0;
12589 pushed_mask = real_regs;
12590 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12592 if (pushed_mask & 1)
12593 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
12598 /* Generate code to return from a thumb function.
12599 If 'reg_containing_return_addr' is -1, then the return address is
12600 actually on the stack, at the stack pointer. */
12601 static void
12602 thumb_exit (FILE *f, int reg_containing_return_addr)
12604 unsigned regs_available_for_popping;
12605 unsigned regs_to_pop;
12606 int pops_needed;
12607 unsigned available;
12608 unsigned required;
12609 int mode;
12610 int size;
12611 int restore_a4 = FALSE;
12613 /* Compute the registers we need to pop. */
12614 regs_to_pop = 0;
12615 pops_needed = 0;
12617 if (reg_containing_return_addr == -1)
12619 regs_to_pop |= 1 << LR_REGNUM;
12620 ++pops_needed;
12623 if (TARGET_BACKTRACE)
12625 /* Restore the (ARM) frame pointer and stack pointer. */
12626 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12627 pops_needed += 2;
12630 /* If there is nothing to pop then just emit the BX instruction and
12631 return. */
12632 if (pops_needed == 0)
12634 if (current_function_calls_eh_return)
12635 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12637 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12638 return;
12640 /* Otherwise if we are not supporting interworking and we have not created
12641 a backtrace structure and the function was not entered in ARM mode then
12642 just pop the return address straight into the PC. */
12643 else if (!TARGET_INTERWORK
12644 && !TARGET_BACKTRACE
12645 && !is_called_in_ARM_mode (current_function_decl)
12646 && !current_function_calls_eh_return)
12648 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12649 return;
12652 /* Find out how many of the (return) argument registers we can corrupt. */
12653 regs_available_for_popping = 0;
12655 /* If returning via __builtin_eh_return, the bottom three registers
12656 all contain information needed for the return. */
12657 if (current_function_calls_eh_return)
12658 size = 12;
12659 else
12661 /* Try to deduce the registers used from the function's
12662 return value. This is more reliable than examining
12663 regs_ever_live[] because that will be set if the register is
12664 ever used in the function, not just if the register is used
12665 to hold a return value. */
12667 if (current_function_return_rtx != 0)
12668 mode = GET_MODE (current_function_return_rtx);
12669 else
12670 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12672 size = GET_MODE_SIZE (mode);
12674 if (size == 0)
12676 /* In a void function we can use any argument register.
12677 In a function that returns a structure on the stack
12678 we can use the second and third argument registers. */
12679 if (mode == VOIDmode)
12680 regs_available_for_popping =
12681 (1 << ARG_REGISTER (1))
12682 | (1 << ARG_REGISTER (2))
12683 | (1 << ARG_REGISTER (3));
12684 else
12685 regs_available_for_popping =
12686 (1 << ARG_REGISTER (2))
12687 | (1 << ARG_REGISTER (3));
12689 else if (size <= 4)
12690 regs_available_for_popping =
12691 (1 << ARG_REGISTER (2))
12692 | (1 << ARG_REGISTER (3));
12693 else if (size <= 8)
12694 regs_available_for_popping =
12695 (1 << ARG_REGISTER (3));
12698 /* Match registers to be popped with registers into which we pop them. */
12699 for (available = regs_available_for_popping,
12700 required = regs_to_pop;
12701 required != 0 && available != 0;
12702 available &= ~(available & - available),
12703 required &= ~(required & - required))
12704 -- pops_needed;
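/* Note: AVAILABLE & - AVAILABLE isolates the lowest set bit, so each
   trip around the loop above retires one required register against one
   available register; e.g. available == binary 1100 becomes 1000 after
   a single step.  */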
12706 /* If we have any popping registers left over, remove them. */
12707 if (available > 0)
12708 regs_available_for_popping &= ~available;
12710 /* Otherwise if we need another popping register we can use
12711 the fourth argument register. */
12712 else if (pops_needed)
12714 /* If we have not found any free argument registers and
12715 reg a4 contains the return address, we must move it. */
12716 if (regs_available_for_popping == 0
12717 && reg_containing_return_addr == LAST_ARG_REGNUM)
12719 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12720 reg_containing_return_addr = LR_REGNUM;
12722 else if (size > 12)
12724 /* Register a4 is being used to hold part of the return value,
12725 but we have dire need of a free, low register. */
12726 restore_a4 = TRUE;
12728 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12731 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12733 /* The fourth argument register is available. */
12734 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12736 --pops_needed;
12740 /* Pop as many registers as we can. */
12741 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12742 regs_available_for_popping);
12744 /* Process the registers we popped. */
12745 if (reg_containing_return_addr == -1)
12747 /* The return address was popped into the lowest numbered register. */
12748 regs_to_pop &= ~(1 << LR_REGNUM);
12750 reg_containing_return_addr =
12751 number_of_first_bit_set (regs_available_for_popping);
12753 /* Remove this register from the mask of available registers, so that
12754 the return address will not be corrupted by further pops. */
12755 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12758 /* If we popped other registers then handle them here. */
12759 if (regs_available_for_popping)
12761 int frame_pointer;
12763 /* Work out which register currently contains the frame pointer. */
12764 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12766 /* Move it into the correct place. */
12767 asm_fprintf (f, "\tmov\t%r, %r\n",
12768 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12770 /* (Temporarily) remove it from the mask of popped registers. */
12771 regs_available_for_popping &= ~(1 << frame_pointer);
12772 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12774 if (regs_available_for_popping)
12776 int stack_pointer;
12778 /* We popped the stack pointer as well;
12779 find the register that contains it. */
12780 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12782 /* Move it into the stack register. */
12783 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12785 /* At this point we have popped all necessary registers, so
12786 do not worry about restoring regs_available_for_popping
12787 to its correct value:
12789 assert (pops_needed == 0)
12790 assert (regs_available_for_popping == (1 << frame_pointer))
12791 assert (regs_to_pop == (1 << STACK_POINTER)) */
12793 else
12795 /* Since we have just moved the popped value into the frame
12796 pointer, the popping register is available for reuse, and
12797 we know that we still have the stack pointer left to pop. */
12798 regs_available_for_popping |= (1 << frame_pointer);
12802 /* If we still have registers left on the stack, but we no longer have
12803 any registers into which we can pop them, then we must move the return
12804 address into the link register and make available the register that
12805 contained it. */
12806 if (regs_available_for_popping == 0 && pops_needed > 0)
12808 regs_available_for_popping |= 1 << reg_containing_return_addr;
12810 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12811 reg_containing_return_addr);
12813 reg_containing_return_addr = LR_REGNUM;
12816 /* If we have registers left on the stack then pop some more.
12817 We know that at most we will want to pop FP and SP. */
12818 if (pops_needed > 0)
12820 int popped_into;
12821 int move_to;
12823 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12824 regs_available_for_popping);
12826 /* We have popped either FP or SP.
12827 Move whichever one it is into the correct register. */
12828 popped_into = number_of_first_bit_set (regs_available_for_popping);
12829 move_to = number_of_first_bit_set (regs_to_pop);
12831 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12833 regs_to_pop &= ~(1 << move_to);
12835 --pops_needed;
12838 /* If we still have not popped everything then we must have only
12839 had one register available to us and we are now popping the SP. */
12840 if (pops_needed > 0)
12842 int popped_into;
12844 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12845 regs_available_for_popping);
12847 popped_into = number_of_first_bit_set (regs_available_for_popping);
12849 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12850 /*
12851 assert (regs_to_pop == (1 << STACK_POINTER))
12852 assert (pops_needed == 1)
12853 */
12856 /* If necessary restore the a4 register. */
12857 if (restore_a4)
12859 if (reg_containing_return_addr != LR_REGNUM)
12861 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12862 reg_containing_return_addr = LR_REGNUM;
12865 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12868 if (current_function_calls_eh_return)
12869 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12871 /* Return to caller. */
12872 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12876 void
12877 thumb_final_prescan_insn (rtx insn)
12879 if (flag_print_asm_name)
12880 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12881 INSN_ADDRESSES (INSN_UID (insn)));
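/* Return 1 if all the set bits of VAL fit within a single 8-bit field
   shifted left by 0 to 24 places, so that the constant is reachable with
   a byte move plus a shift; e.g. 0x0ff00000 (0xff << 20) qualifies,
   0x101 does not.  */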
12884 int
12885 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12887 unsigned HOST_WIDE_INT mask = 0xff;
12888 int i;
12890 if (val == 0) /* XXX */
12891 return 0;
12893 for (i = 0; i < 25; i++)
12894 if ((val & (mask << i)) == val)
12895 return 1;
12897 return 0;
12900 /* Returns nonzero if the current function contains,
12901 or might contain a far jump. */
12902 static int
12903 thumb_far_jump_used_p (void)
12905 rtx insn;
12907 /* This test is only important for leaf functions. */
12908 /* assert (!leaf_function_p ()); */
12910 /* If we have already decided that far jumps may be used,
12911 do not bother checking again, and always return true even if
12912 it turns out that they are not being used. Once we have made
12913 the decision that far jumps are present (and that hence the link
12914 register will be pushed onto the stack) we cannot go back on it. */
12915 if (cfun->machine->far_jump_used)
12916 return 1;
12918 /* If this function is not being called from the prologue/epilogue
12919 generation code then it must be being called from the
12920 INITIAL_ELIMINATION_OFFSET macro. */
12921 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12923 /* In this case we know that we are being asked about the elimination
12924 of the arg pointer register. If that register is not being used,
12925 then there are no arguments on the stack, and we do not have to
12926 worry that a far jump might force the prologue to push the link
12927 register, changing the stack offsets. In this case we can just
12928 return false, since the presence of far jumps in the function will
12929 not affect stack offsets.
12931 If the arg pointer is live (or if it was live, but has now been
12932 eliminated and so set to dead) then we do have to test to see if
12933 the function might contain a far jump. This test can lead to some
12934 false negatives, since before reload is completed, the length of
12935 branch instructions is not known, so gcc defaults to returning their
12936 longest length, which in turn sets the far jump attribute to true.
12938 A false negative will not result in bad code being generated, but it
12939 will result in a needless push and pop of the link register. We
12940 hope that this does not occur too often.
12942 If we need doubleword stack alignment this could affect the other
12943 elimination offsets so we can't risk getting it wrong. */
12944 if (regs_ever_live [ARG_POINTER_REGNUM])
12945 cfun->machine->arg_pointer_live = 1;
12946 else if (!cfun->machine->arg_pointer_live)
12947 return 0;
12950 /* Check to see if the function contains a branch
12951 insn with the far jump attribute set. */
12952 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12954 if (GET_CODE (insn) == JUMP_INSN
12955 /* Ignore tablejump patterns. */
12956 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12957 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12958 && get_attr_far_jump (insn) == FAR_JUMP_YES
12961 /* Record the fact that we have decided that
12962 the function does use far jumps. */
12963 cfun->machine->far_jump_used = 1;
12964 return 1;
12968 return 0;
12971 /* Return nonzero if FUNC must be entered in ARM mode. */
12972 int
12973 is_called_in_ARM_mode (tree func)
12975 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
12977 /* Ignore the problem about functions whose address is taken. */
12978 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12979 return TRUE;
12981 #ifdef ARM_PE
12982 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12983 #else
12984 return FALSE;
12985 #endif
12988 /* The bits which aren't usefully expanded as rtl. */
12989 const char *
12990 thumb_unexpanded_epilogue (void)
12992 int regno;
12993 unsigned long live_regs_mask = 0;
12994 int high_regs_pushed = 0;
12995 int had_to_push_lr;
12996 int size;
12998 if (return_used_this_function)
12999 return "";
13001 if (IS_NAKED (arm_current_func_type ()))
13002 return "";
13004 live_regs_mask = thumb_compute_save_reg_mask ();
13005 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13007 /* Try to deduce the registers used from the function's return value.
13008 This is more reliable than examining regs_ever_live[] because that
13009 will be set if the register is ever used in the function, not just if
13010 the register is used to hold a return value. */
13011 size = arm_size_return_regs ();
13013 /* The prologue may have pushed some high registers to use as
13014 work registers, e.g. the testsuite file:
13015 gcc/testsuite/gcc.c-torture/execute/complex-2.c
13016 compiles to produce:
13017 push {r4, r5, r6, r7, lr}
13018 mov r7, r9
13019 mov r6, r8
13020 push {r6, r7}
13021 as part of the prologue. We have to undo that pushing here. */
13023 if (high_regs_pushed)
13025 unsigned long mask = live_regs_mask & 0xff;
13026 int next_hi_reg;
13028 /* The available low registers depend on the size of the value we are
13029 returning. */
13030 if (size <= 12)
13031 mask |= 1 << 3;
13032 if (size <= 8)
13033 mask |= 1 << 2;
13035 if (mask == 0)
13036 /* Oh dear! We have no low registers into which we can pop
13037 high registers! */
13038 internal_error
13039 ("no low registers available for popping high registers");
13041 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13042 if (live_regs_mask & (1 << next_hi_reg))
13043 break;
13045 while (high_regs_pushed)
13047 /* Find lo register(s) into which the high register(s) can
13048 be popped. */
13049 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13051 if (mask & (1 << regno))
13052 high_regs_pushed--;
13053 if (high_regs_pushed == 0)
13054 break;
13057 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13059 /* Pop the values into the low register(s). */
13060 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13062 /* Move the value(s) into the high registers. */
13063 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13065 if (mask & (1 << regno))
13067 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13068 regno);
13070 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13071 if (live_regs_mask & (1 << next_hi_reg))
13072 break;
13076 live_regs_mask &= ~0x0f00;
13079 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13080 live_regs_mask &= 0xff;
13082 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13084 /* Pop the return address into the PC. */
13085 if (had_to_push_lr)
13086 live_regs_mask |= 1 << PC_REGNUM;
13088 /* Either no argument registers were pushed or a backtrace
13089 structure was created which includes an adjusted stack
13090 pointer, so just pop everything. */
13091 if (live_regs_mask)
13092 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13093 live_regs_mask);
13095 /* We have either just popped the return address into the
13096 PC or it was kept in LR for the entire function. */
13097 if (!had_to_push_lr)
13098 thumb_exit (asm_out_file, LR_REGNUM);
13100 else
13102 /* Pop everything but the return address. */
13103 if (live_regs_mask)
13104 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13105 live_regs_mask);
13107 if (had_to_push_lr)
13109 if (size > 12)
13111 /* We have no free low regs, so save one. */
13112 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13113 LAST_ARG_REGNUM);
13116 /* Get the return address into a temporary register. */
13117 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13118 1 << LAST_ARG_REGNUM);
13120 if (size > 12)
13122 /* Move the return address to lr. */
13123 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13124 LAST_ARG_REGNUM);
13125 /* Restore the low register. */
13126 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13127 IP_REGNUM);
13128 regno = LR_REGNUM;
13130 else
13131 regno = LAST_ARG_REGNUM;
13133 else
13134 regno = LR_REGNUM;
13136 /* Remove the argument registers that were pushed onto the stack. */
13137 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13138 SP_REGNUM, SP_REGNUM,
13139 current_function_pretend_args_size);
13141 thumb_exit (asm_out_file, regno);
13144 return "";
13147 /* Functions to save and restore machine-specific function data. */
13148 static struct machine_function *
13149 arm_init_machine_status (void)
13151 struct machine_function *machine;
13152 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13154 #if ARM_FT_UNKNOWN != 0
13155 machine->func_type = ARM_FT_UNKNOWN;
13156 #endif
13157 return machine;
13160 /* Return an RTX indicating where the return address to the
13161 calling function can be found. */
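/* For example, __builtin_return_address (0) resolves here and yields the
   value LR held on entry to the function; counts other than zero are not
   supported and produce NULL_RTX.  */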
13162 rtx
13163 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13165 if (count != 0)
13166 return NULL_RTX;
13168 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13171 /* Do anything needed before RTL is emitted for each function. */
13172 void
13173 arm_init_expanders (void)
13175 /* Arrange to initialize and mark the machine per-function status. */
13176 init_machine_status = arm_init_machine_status;
13178 /* This is to stop the combine pass optimizing away the alignment
13179 adjustment of va_arg. */
13180 /* ??? It is claimed that this should not be necessary. */
13181 if (cfun)
13182 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13186 /* Like arm_compute_initial_elimination_offset. Simpler because there
13187 isn't an ABI specified frame pointer for Thumb. Instead, we set it
13188 to point at the base of the local variables after static stack
13189 space for a function has been allocated. */
13191 HOST_WIDE_INT
13192 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13194 arm_stack_offsets *offsets;
13196 offsets = arm_get_frame_offsets ();
13198 switch (from)
13200 case ARG_POINTER_REGNUM:
13201 switch (to)
13203 case STACK_POINTER_REGNUM:
13204 return offsets->outgoing_args - offsets->saved_args;
13206 case FRAME_POINTER_REGNUM:
13207 return offsets->soft_frame - offsets->saved_args;
13209 case ARM_HARD_FRAME_POINTER_REGNUM:
13210 return offsets->saved_regs - offsets->saved_args;
13212 case THUMB_HARD_FRAME_POINTER_REGNUM:
13213 return offsets->locals_base - offsets->saved_args;
13215 default:
13216 gcc_unreachable ();
13218 break;
13220 case FRAME_POINTER_REGNUM:
13221 switch (to)
13223 case STACK_POINTER_REGNUM:
13224 return offsets->outgoing_args - offsets->soft_frame;
13226 case ARM_HARD_FRAME_POINTER_REGNUM:
13227 return offsets->saved_regs - offsets->soft_frame;
13229 case THUMB_HARD_FRAME_POINTER_REGNUM:
13230 return offsets->locals_base - offsets->soft_frame;
13232 default:
13233 gcc_unreachable ();
13235 break;
13237 default:
13238 gcc_unreachable ();
13243 /* Generate the rest of a function's prologue. */
13244 void
13245 thumb_expand_prologue (void)
13247 rtx insn, dwarf;
13249 HOST_WIDE_INT amount;
13250 arm_stack_offsets *offsets;
13251 unsigned long func_type;
13252 int regno;
13253 unsigned long live_regs_mask;
13255 func_type = arm_current_func_type ();
13257 /* Naked functions don't have prologues. */
13258 if (IS_NAKED (func_type))
13259 return;
13261 if (IS_INTERRUPT (func_type))
13263 error ("interrupt Service Routines cannot be coded in Thumb mode");
13264 return;
13267 live_regs_mask = thumb_compute_save_reg_mask ();
13268 /* Load the pic register before setting the frame pointer,
13269 so we can use r7 as a temporary work register. */
13270 if (flag_pic)
13271 arm_load_pic_register (live_regs_mask);
13273 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
13274 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13275 stack_pointer_rtx);
13277 offsets = arm_get_frame_offsets ();
13278 amount = offsets->outgoing_args - offsets->saved_regs;
13279 if (amount)
13281 if (amount < 512)
13283 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13284 GEN_INT (- amount)));
13285 RTX_FRAME_RELATED_P (insn) = 1;
13287 else
13289 rtx reg;
13291 /* The stack decrement is too big for an immediate value in a single
13292 insn. In theory we could issue multiple subtracts, but after
13293 three of them it becomes more space efficient to place the full
13294 value in the constant pool and load into a register. (Also the
13295 ARM debugger really likes to see only one stack decrement per
13296 function). So instead we look for a scratch register into which
13297 we can load the decrement, and then we subtract this from the
13298 stack pointer. Unfortunately on the thumb the only available
13299 scratch registers are the argument registers, and we cannot use
13300 these as they may hold arguments to the function. Instead we
13301 attempt to locate a call preserved register which is used by this
13302 function. If we can find one, then we know that it will have
13303 been pushed at the start of the prologue and so we can corrupt
13304 it now. */
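/* As a rough sketch, with r4 live and AMOUNT == 2048 the RTL emitted
   below amounts to

     ldr r4, =-2048   @ loaded from the constant pool
     add sp, sp, r4

   together with a REG_FRAME_RELATED_EXPR note describing the decrement
   for the unwinder.  */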
13305 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13306 if (live_regs_mask & (1 << regno)
13307 && !(frame_pointer_needed
13308 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13309 break;
13311 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13313 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13315 /* Choose an arbitrary, non-argument low register. */
13316 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13318 /* Save it by copying it into a high scratch register. */
13319 emit_insn (gen_movsi (spare, reg));
13320 /* Add a USE to stop propagate_one_insn() from barfing. */
13321 emit_insn (gen_prologue_use (spare));
13323 /* Decrement the stack. */
13324 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13325 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13326 stack_pointer_rtx, reg));
13327 RTX_FRAME_RELATED_P (insn) = 1;
13328 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13329 plus_constant (stack_pointer_rtx,
13330 -amount));
13331 RTX_FRAME_RELATED_P (dwarf) = 1;
13332 REG_NOTES (insn)
13333 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13334 REG_NOTES (insn));
13336 /* Restore the low register's original value. */
13337 emit_insn (gen_movsi (reg, spare));
13339 /* Emit a USE of the restored scratch register, so that flow
13340 analysis will not consider the restore redundant. The
13341 register won't be used again in this function and isn't
13342 restored by the epilogue. */
13343 emit_insn (gen_prologue_use (reg));
13345 else
13347 reg = gen_rtx_REG (SImode, regno);
13349 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13351 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13352 stack_pointer_rtx, reg));
13353 RTX_FRAME_RELATED_P (insn) = 1;
13354 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13355 plus_constant (stack_pointer_rtx,
13356 -amount));
13357 RTX_FRAME_RELATED_P (dwarf) = 1;
13358 REG_NOTES (insn)
13359 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13360 REG_NOTES (insn));
13365 if (frame_pointer_needed)
13367 amount = offsets->outgoing_args - offsets->locals_base;
13369 if (amount < 1024)
13370 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13371 stack_pointer_rtx, GEN_INT (amount)));
13372 else
13374 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
13375 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13376 hard_frame_pointer_rtx,
13377 stack_pointer_rtx));
13378 dwarf = gen_rtx_SET (SImode, hard_frame_pointer_rtx,
13379 plus_constant (stack_pointer_rtx, amount));
13380 RTX_FRAME_RELATED_P (dwarf) = 1;
13381 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13382 REG_NOTES (insn));
13385 RTX_FRAME_RELATED_P (insn) = 1;
13388 if (current_function_profile || !TARGET_SCHED_PROLOG)
13389 emit_insn (gen_blockage ());
13391 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13392 if (live_regs_mask & 0xff)
13393 cfun->machine->lr_save_eliminated = 0;
13395 /* If the link register is being kept alive, with the return address in it,
13396 then make sure that it does not get reused by the ce2 pass. */
13397 if (cfun->machine->lr_save_eliminated)
13398 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13402 void
13403 thumb_expand_epilogue (void)
13405 HOST_WIDE_INT amount;
13406 arm_stack_offsets *offsets;
13407 int regno;
13409 /* Naked functions don't have epilogues. */
13410 if (IS_NAKED (arm_current_func_type ()))
13411 return;
13413 offsets = arm_get_frame_offsets ();
13414 amount = offsets->outgoing_args - offsets->saved_regs;
13416 if (frame_pointer_needed)
13418 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13419 amount = offsets->locals_base - offsets->saved_regs;
13422 if (amount)
13424 if (amount < 512)
13425 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13426 GEN_INT (amount)));
13427 else
13429 /* r3 is always free in the epilogue. */
13430 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13432 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13433 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13437 /* Emit a USE (stack_pointer_rtx), so that
13438 the stack adjustment will not be deleted. */
13439 emit_insn (gen_prologue_use (stack_pointer_rtx));
13441 if (current_function_profile || !TARGET_SCHED_PROLOG)
13442 emit_insn (gen_blockage ());
13444 /* Emit a clobber for each insn that will be restored in the epilogue,
13445 so that flow2 will get register lifetimes correct. */
13446 for (regno = 0; regno < 13; regno++)
13447 if (regs_ever_live[regno] && !call_used_regs[regno])
13448 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13450 if (! regs_ever_live[LR_REGNUM])
13451 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13454 static void
13455 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13457 unsigned long live_regs_mask = 0;
13458 unsigned long l_mask;
13459 unsigned high_regs_pushed = 0;
13460 int cfa_offset = 0;
13461 int regno;
13463 if (IS_NAKED (arm_current_func_type ()))
13464 return;
13466 if (is_called_in_ARM_mode (current_function_decl))
13468 const char * name;
13470 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13471 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13472 == SYMBOL_REF);
13473 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13475 /* Generate code sequence to switch us into Thumb mode. */
13476 /* The .code 32 directive has already been emitted by
13477 ASM_DECLARE_FUNCTION_NAME. */
13478 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13479 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13481 /* Generate a label, so that the debugger will notice the
13482 change in instruction sets. This label is also used by
13483 the assembler to bypass the ARM code when this function
13484 is called from a Thumb encoded function elsewhere in the
13485 same file. Hence the definition of STUB_NAME here must
13486 agree with the definition in gas/config/tc-arm.c. */
13488 #define STUB_NAME ".real_start_of"
13490 fprintf (f, "\t.code\t16\n");
13491 #ifdef ARM_PE
13492 if (arm_dllexport_name_p (name))
13493 name = arm_strip_name_encoding (name);
13494 #endif
13495 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13496 fprintf (f, "\t.thumb_func\n");
13497 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13500 if (current_function_pretend_args_size)
13502 /* Output unwind directive for the stack adjustment. */
13503 if (ARM_EABI_UNWIND_TABLES)
13504 fprintf (f, "\t.pad #%d\n",
13505 current_function_pretend_args_size);
13507 if (cfun->machine->uses_anonymous_args)
13509 int num_pushes;
13511 fprintf (f, "\tpush\t{");
13513 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13515 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13516 regno <= LAST_ARG_REGNUM;
13517 regno++)
13518 asm_fprintf (f, "%r%s", regno,
13519 regno == LAST_ARG_REGNUM ? "" : ", ");
13521 fprintf (f, "}\n");
13523 else
13524 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13525 SP_REGNUM, SP_REGNUM,
13526 current_function_pretend_args_size);
13528 /* We don't need to record the stores for unwinding (would it
13529 help the debugger any if we did?), but record the change in
13530 the stack pointer. */
13531 if (dwarf2out_do_frame ())
13533 char *l = dwarf2out_cfi_label ();
13535 cfa_offset = cfa_offset + current_function_pretend_args_size;
13536 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13540 /* Get the registers we are going to push. */
13541 live_regs_mask = thumb_compute_save_reg_mask ();
13542 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13543 l_mask = live_regs_mask & 0x40ff;
13544 /* Then count how many other high registers will need to be pushed. */
13545 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
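/* 0x40ff covers r0-r7 plus lr (bit 14), the registers a Thumb push
   instruction can name directly; 0x0f00 selects the high registers
   r8-r11, which must first be moved into low registers below.  */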
13547 if (TARGET_BACKTRACE)
13549 unsigned offset;
13550 unsigned work_register;
13552 /* We have been asked to create a stack backtrace structure.
13553 The code looks like this:
13555 0 .align 2
13556 0 func:
13557 0 sub SP, #16 Reserve space for 4 registers.
13558 2 push {R7} Push low registers.
13559 4 add R7, SP, #20 Get the stack pointer before the push.
13560 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13561 8 mov R7, PC Get hold of the start of this code plus 12.
13562 10 str R7, [SP, #16] Store it.
13563 12 mov R7, FP Get hold of the current frame pointer.
13564 14 str R7, [SP, #4] Store it.
13565 16 mov R7, LR Get hold of the current return address.
13566 18 str R7, [SP, #12] Store it.
13567 20 add R7, SP, #16 Point at the start of the backtrace structure.
13568 22 mov FP, R7 Put this value into the frame pointer. */
13570 work_register = thumb_find_work_register (live_regs_mask);
13572 if (ARM_EABI_UNWIND_TABLES)
13573 asm_fprintf (f, "\t.pad #16\n");
13575 asm_fprintf
13576 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13577 SP_REGNUM, SP_REGNUM);
13579 if (dwarf2out_do_frame ())
13581 char *l = dwarf2out_cfi_label ();
13583 cfa_offset = cfa_offset + 16;
13584 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13587 if (l_mask)
13589 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13590 offset = bit_count (l_mask) * UNITS_PER_WORD;
13592 else
13593 offset = 0;
13595 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13596 offset + 16 + current_function_pretend_args_size);
13598 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13599 offset + 4);
13601 /* Make sure that the instruction fetching the PC is in the right place
13602 to calculate "start of backtrace creation code + 12". */
13603 if (l_mask)
13605 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13606 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13607 offset + 12);
13608 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13609 ARM_HARD_FRAME_POINTER_REGNUM);
13610 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13611 offset);
13613 else
13615 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13616 ARM_HARD_FRAME_POINTER_REGNUM);
13617 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13618 offset);
13619 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13620 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13621 offset + 12);
13624 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13625 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13626 offset + 8);
13627 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13628 offset + 12);
13629 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13630 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13632 /* Optimization: If we are not pushing any low registers but we are going
13633 to push some high registers then delay our first push. This will just
13634 be a push of LR and we can combine it with the push of the first high
13635 register. */
13636 else if ((l_mask & 0xff) != 0
13637 || (high_regs_pushed == 0 && l_mask))
13638 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13640 if (high_regs_pushed)
13642 unsigned pushable_regs;
13643 unsigned next_hi_reg;
13645 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13646 if (live_regs_mask & (1 << next_hi_reg))
13647 break;
13649 pushable_regs = l_mask & 0xff;
13651 if (pushable_regs == 0)
13652 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13654 while (high_regs_pushed > 0)
13656 unsigned long real_regs_mask = 0;
13658 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13660 if (pushable_regs & (1 << regno))
13662 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13664 high_regs_pushed --;
13665 real_regs_mask |= (1 << next_hi_reg);
13667 if (high_regs_pushed)
13669 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13670 next_hi_reg --)
13671 if (live_regs_mask & (1 << next_hi_reg))
13672 break;
13674 else
13676 pushable_regs &= ~((1 << regno) - 1);
13677 break;
13682 /* If we had to find a work register and we have not yet
13683 saved the LR then add it to the list of regs to push. */
13684 if (l_mask == (1 << LR_REGNUM))
13686 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13687 1, &cfa_offset,
13688 real_regs_mask | (1 << LR_REGNUM));
13689 l_mask = 0;
13691 else
13692 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
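/* A worked example (illustrative): if r8 and r9 must be saved and r2
   and r3 are among the low registers already pushed above, the loop
   emits roughly

	mov	r3, r9
	mov	r2, r8
	push	{r2, r3}

   while real_regs_mask records r8 and r9 as the registers actually
   saved, keeping the unwind annotations correct.  */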
13697 /* Handle the case of a double word load into a low register from
13698 a computed memory address. The computed address may involve a
13699 register which is overwritten by the load. */
13700 const char *
13701 thumb_load_double_from_address (rtx *operands)
13703 rtx addr;
13704 rtx base;
13705 rtx offset;
13706 rtx arg1;
13707 rtx arg2;
13709 gcc_assert (GET_CODE (operands[0]) == REG);
13710 gcc_assert (GET_CODE (operands[1]) == MEM);
13712 /* Get the memory address. */
13713 addr = XEXP (operands[1], 0);
13715 /* Work out how the memory address is computed. */
13716 switch (GET_CODE (addr))
13718 case REG:
13719 operands[2] = gen_rtx_MEM (SImode,
13720 plus_constant (XEXP (operands[1], 0), 4));
13722 if (REGNO (operands[0]) == REGNO (addr))
13724 output_asm_insn ("ldr\t%H0, %2", operands);
13725 output_asm_insn ("ldr\t%0, %1", operands);
13727 else
13729 output_asm_insn ("ldr\t%0, %1", operands);
13730 output_asm_insn ("ldr\t%H0, %2", operands);
13732 break;
13734 case CONST:
13735 /* Compute <address> + 4 for the high order load. */
13736 operands[2] = gen_rtx_MEM (SImode,
13737 plus_constant (XEXP (operands[1], 0), 4));
13739 output_asm_insn ("ldr\t%0, %1", operands);
13740 output_asm_insn ("ldr\t%H0, %2", operands);
13741 break;
13743 case PLUS:
13744 arg1 = XEXP (addr, 0);
13745 arg2 = XEXP (addr, 1);
13747 if (CONSTANT_P (arg1))
13748 base = arg2, offset = arg1;
13749 else
13750 base = arg1, offset = arg2;
13752 gcc_assert (GET_CODE (base) == REG);
13754 /* Catch the case of <address> = <reg> + <reg> */
13755 if (GET_CODE (offset) == REG)
13757 int reg_offset = REGNO (offset);
13758 int reg_base = REGNO (base);
13759 int reg_dest = REGNO (operands[0]);
13761 /* Add the base and offset registers together into the
13762 higher destination register. */
13763 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r\n",
13764 reg_dest + 1, reg_base, reg_offset);
13766 /* Load the lower destination register from the address in
13767 the higher destination register. */
13768 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]\n",
13769 reg_dest, reg_dest + 1);
13771 /* Load the higher destination register from its own address
13772 plus 4. */
13773 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]\n",
13774 reg_dest + 1, reg_dest + 1);
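/* For example, loading r0/r1 from [r2 + r3] gives

	add	r1, r2, r3
	ldr	r0, [r1, #0]
	ldr	r1, [r1, #4]

   which is safe even when r2 or r3 overlaps the destination pair,
   because the address is moved into the not-yet-loaded high half
   first.  */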
13776 else
13778 /* Compute <address> + 4 for the high order load. */
13779 operands[2] = gen_rtx_MEM (SImode,
13780 plus_constant (XEXP (operands[1], 0), 4));
13782 /* If the computed address is held in the low order register
13783 then load the high order register first, otherwise always
13784 load the low order register first. */
13785 if (REGNO (operands[0]) == REGNO (base))
13787 output_asm_insn ("ldr\t%H0, %2", operands);
13788 output_asm_insn ("ldr\t%0, %1", operands);
13790 else
13792 output_asm_insn ("ldr\t%0, %1", operands);
13793 output_asm_insn ("ldr\t%H0, %2", operands);
13796 break;
13798 case LABEL_REF:
13799 /* With no registers to worry about we can just load the value
13800 directly. */
13801 operands[2] = gen_rtx_MEM (SImode,
13802 plus_constant (XEXP (operands[1], 0), 4));
13804 output_asm_insn ("ldr\t%H0, %2", operands);
13805 output_asm_insn ("ldr\t%0, %1", operands);
13806 break;
13808 default:
13809 gcc_unreachable ();
13812 return "";
13815 const char *
13816 thumb_output_move_mem_multiple (int n, rtx *operands)
13818 rtx tmp;
13820 switch (n)
13822 case 2:
13823 if (REGNO (operands[4]) > REGNO (operands[5]))
13825 tmp = operands[4];
13826 operands[4] = operands[5];
13827 operands[5] = tmp;
13829 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13830 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13831 break;
13833 case 3:
13834 if (REGNO (operands[4]) > REGNO (operands[5]))
13836 tmp = operands[4];
13837 operands[4] = operands[5];
13838 operands[5] = tmp;
13840 if (REGNO (operands[5]) > REGNO (operands[6]))
13842 tmp = operands[5];
13843 operands[5] = operands[6];
13844 operands[6] = tmp;
13846 if (REGNO (operands[4]) > REGNO (operands[5]))
13848 tmp = operands[4];
13849 operands[4] = operands[5];
13850 operands[5] = tmp;
13853 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13854 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13855 break;
13857 default:
13858 gcc_unreachable ();
13861 return "";
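/* The compare-and-swap steps above are a small sorting network:
   LDMIA/STMIA require their register lists in ascending order.  For
   n == 3 with registers r4, r5, r6 the output is simply

	ldmia	r1!, {r4, r5, r6}
	stmia	r0!, {r4, r5, r6}
 */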
13864 /* Output a call-via instruction for thumb state. */
13865 const char *
13866 thumb_call_via_reg (rtx reg)
13868 int regno = REGNO (reg);
13869 rtx *labelp;
13871 gcc_assert (regno < LR_REGNUM);
13873 /* If we are in the normal text section we can use a single instance
13874 per compilation unit. If we are doing function sections, then we need
13875 an entry per section, since we can't rely on reachability. */
13876 if (in_text_section ())
13878 thumb_call_reg_needed = 1;
13880 if (thumb_call_via_label[regno] == NULL)
13881 thumb_call_via_label[regno] = gen_label_rtx ();
13882 labelp = thumb_call_via_label + regno;
13884 else
13886 if (cfun->machine->call_via[regno] == NULL)
13887 cfun->machine->call_via[regno] = gen_label_rtx ();
13888 labelp = cfun->machine->call_via + regno;
13891 output_asm_insn ("bl\t%a0", labelp);
13892 return "";
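/* Illustrative only: a call through r4 is emitted as `bl .Lvia_r4'
   (label name invented here; GCC really uses a numbered internal
   label), with the shared trampoline

   .Lvia_r4:
	bx	r4

   emitted once per compilation unit by arm_file_end below, or once
   per function when function sections are in use.  */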
13895 /* Routines for generating rtl. */
13896 void
13897 thumb_expand_movmemqi (rtx *operands)
13899 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13900 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13901 HOST_WIDE_INT len = INTVAL (operands[2]);
13902 HOST_WIDE_INT offset = 0;
13904 while (len >= 12)
13906 emit_insn (gen_movmem12b (out, in, out, in));
13907 len -= 12;
13910 if (len >= 8)
13912 emit_insn (gen_movmem8b (out, in, out, in));
13913 len -= 8;
13916 if (len >= 4)
13918 rtx reg = gen_reg_rtx (SImode);
13919 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13920 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13921 len -= 4;
13922 offset += 4;
13925 if (len >= 2)
13927 rtx reg = gen_reg_rtx (HImode);
13928 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13929 plus_constant (in, offset))));
13930 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13931 reg));
13932 len -= 2;
13933 offset += 2;
13936 if (len)
13938 rtx reg = gen_reg_rtx (QImode);
13939 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13940 plus_constant (in, offset))));
13941 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13942 reg));
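/* For example, a 23-byte copy is decomposed above as 12 + 8 + 2 + 1:
   one movmem12b, one movmem8b, then a halfword and a byte move.  */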
13946 void
13947 thumb_reload_out_hi (rtx *operands)
13949 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13952 /* Handle reading a half-word from memory during reload. */
13953 void
13954 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13956 gcc_unreachable ();
13959 /* Return the length of a function name prefix
13960 that starts with the character 'c'. */
13961 static int
13962 arm_get_strip_length (int c)
13964 switch (c)
13966 ARM_NAME_ENCODING_LENGTHS
13967 default: return 0;
13971 /* Return a pointer to a function's name with any
13972 and all prefix encodings stripped from it. */
13973 const char *
13974 arm_strip_name_encoding (const char *name)
13976 int skip;
13978 while ((skip = arm_get_strip_length (* name)))
13979 name += skip;
13981 return name;
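/* Illustrative only: with PE-style encodings a name such as "*foo",
   where '*' marks a verbatim name, comes back as "foo"; the prefix
   characters actually recognized are whatever the target defines in
   ARM_NAME_ENCODING_LENGTHS.  */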
13984 /* If there is a '*' anywhere in the name's prefix, then
13985 emit the stripped name verbatim, otherwise prepend an
13986 underscore if leading underscores are being used. */
13987 void
13988 arm_asm_output_labelref (FILE *stream, const char *name)
13990 int skip;
13991 int verbatim = 0;
13993 while ((skip = arm_get_strip_length (* name)))
13995 verbatim |= (*name == '*');
13996 name += skip;
13999 if (verbatim)
14000 fputs (name, stream);
14001 else
14002 asm_fprintf (stream, "%U%s", name);
14005 static void
14006 arm_file_end (void)
14008 int regno;
14010 if (! thumb_call_reg_needed)
14011 return;
14013 text_section ();
14014 asm_fprintf (asm_out_file, "\t.code 16\n");
14015 ASM_OUTPUT_ALIGN (asm_out_file, 1);
14017 for (regno = 0; regno < LR_REGNUM; regno++)
14019 rtx label = thumb_call_via_label[regno];
14021 if (label != 0)
14023 targetm.asm_out.internal_label (asm_out_file, "L",
14024 CODE_LABEL_NUMBER (label));
14025 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
14030 rtx aof_pic_label;
14032 #ifdef AOF_ASSEMBLER
14033 /* Special functions only needed when producing AOF syntax assembler. */
14035 struct pic_chain
14037 struct pic_chain * next;
14038 const char * symname;
14041 static struct pic_chain * aof_pic_chain = NULL;
14043 rtx
14044 aof_pic_entry (rtx x)
14046 struct pic_chain ** chainp;
14047 int offset;
14049 if (aof_pic_label == NULL_RTX)
14051 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14054 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14055 offset += 4, chainp = &(*chainp)->next)
14056 if ((*chainp)->symname == XSTR (x, 0))
14057 return plus_constant (aof_pic_label, offset);
14059 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14060 (*chainp)->next = NULL;
14061 (*chainp)->symname = XSTR (x, 0);
14062 return plus_constant (aof_pic_label, offset);
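/* Each distinct symbol occupies the next 4-byte slot, so the first
   symbol referenced yields x$adcons + 0, the second x$adcons + 4, and
   so on; repeated references reuse the existing slot.  */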
14065 void
14066 aof_dump_pic_table (FILE *f)
14068 struct pic_chain * chain;
14070 if (aof_pic_chain == NULL)
14071 return;
14073 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14074 PIC_OFFSET_TABLE_REGNUM,
14075 PIC_OFFSET_TABLE_REGNUM);
14076 fputs ("|x$adcons|\n", f);
14078 for (chain = aof_pic_chain; chain; chain = chain->next)
14080 fputs ("\tDCD\t", f);
14081 assemble_name (f, chain->symname);
14082 fputs ("\n", f);
14086 int arm_text_section_count = 1;
14088 char *
14089 aof_text_section (void)
14091 static char buf[100];
14092 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
14093 arm_text_section_count++);
14094 if (flag_pic)
14095 strcat (buf, ", PIC, REENTRANT");
14096 return buf;
14099 static int arm_data_section_count = 1;
14101 char *
14102 aof_data_section (void)
14104 static char buf[100];
14105 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
14106 return buf;
14109 /* The AOF assembler is religiously strict about declarations of
14110 imported and exported symbols, so that it is impossible to declare
14111 a function as imported near the beginning of the file, and then to
14112 export it later on. It is, however, possible to delay the decision
14113 until all the functions in the file have been compiled. To get
14114 around this, we maintain a list of the imports and exports, and
14115 delete from it any that are subsequently defined. At the end of
14116 compilation we spit the remainder of the list out before the END
14117 directive. */
14119 struct import
14121 struct import * next;
14122 const char * name;
14125 static struct import * imports_list = NULL;
14127 void
14128 aof_add_import (const char *name)
14130 struct import * new;
14132 for (new = imports_list; new; new = new->next)
14133 if (new->name == name)
14134 return;
14136 new = (struct import *) xmalloc (sizeof (struct import));
14137 new->next = imports_list;
14138 imports_list = new;
14139 new->name = name;
14142 void
14143 aof_delete_import (const char *name)
14145 struct import ** old;
14147 for (old = &imports_list; *old; old = & (*old)->next)
14149 if ((*old)->name == name)
14151 *old = (*old)->next;
14152 return;
14157 int arm_main_function = 0;
14159 static void
14160 aof_dump_imports (FILE *f)
14162 /* The AOF assembler needs this to cause the startup code to be extracted
14163 from the library. Bringing in __main causes the whole thing to work
14164 automagically. */
14165 if (arm_main_function)
14167 text_section ();
14168 fputs ("\tIMPORT __main\n", f);
14169 fputs ("\tDCD __main\n", f);
14172 /* Now dump the remaining imports. */
14173 while (imports_list)
14175 fprintf (f, "\tIMPORT\t");
14176 assemble_name (f, imports_list->name);
14177 fputc ('\n', f);
14178 imports_list = imports_list->next;
14182 static void
14183 aof_globalize_label (FILE *stream, const char *name)
14185 default_globalize_label (stream, name);
14186 if (! strcmp (name, "main"))
14187 arm_main_function = 1;
14190 static void
14191 aof_file_start (void)
14193 fputs ("__r0\tRN\t0\n", asm_out_file);
14194 fputs ("__a1\tRN\t0\n", asm_out_file);
14195 fputs ("__a2\tRN\t1\n", asm_out_file);
14196 fputs ("__a3\tRN\t2\n", asm_out_file);
14197 fputs ("__a4\tRN\t3\n", asm_out_file);
14198 fputs ("__v1\tRN\t4\n", asm_out_file);
14199 fputs ("__v2\tRN\t5\n", asm_out_file);
14200 fputs ("__v3\tRN\t6\n", asm_out_file);
14201 fputs ("__v4\tRN\t7\n", asm_out_file);
14202 fputs ("__v5\tRN\t8\n", asm_out_file);
14203 fputs ("__v6\tRN\t9\n", asm_out_file);
14204 fputs ("__sl\tRN\t10\n", asm_out_file);
14205 fputs ("__fp\tRN\t11\n", asm_out_file);
14206 fputs ("__ip\tRN\t12\n", asm_out_file);
14207 fputs ("__sp\tRN\t13\n", asm_out_file);
14208 fputs ("__lr\tRN\t14\n", asm_out_file);
14209 fputs ("__pc\tRN\t15\n", asm_out_file);
14210 fputs ("__f0\tFN\t0\n", asm_out_file);
14211 fputs ("__f1\tFN\t1\n", asm_out_file);
14212 fputs ("__f2\tFN\t2\n", asm_out_file);
14213 fputs ("__f3\tFN\t3\n", asm_out_file);
14214 fputs ("__f4\tFN\t4\n", asm_out_file);
14215 fputs ("__f5\tFN\t5\n", asm_out_file);
14216 fputs ("__f6\tFN\t6\n", asm_out_file);
14217 fputs ("__f7\tFN\t7\n", asm_out_file);
14218 text_section ();
14221 static void
14222 aof_file_end (void)
14224 if (flag_pic)
14225 aof_dump_pic_table (asm_out_file);
14226 arm_file_end ();
14227 aof_dump_imports (asm_out_file);
14228 fputs ("\tEND\n", asm_out_file);
14230 #endif /* AOF_ASSEMBLER */
14232 #ifndef ARM_PE
14233 /* Symbols in the text segment can be accessed without indirecting via the
14234 constant pool; it may take an extra binary operation, but this is still
14235 faster than indirecting via memory. Don't do this when not optimizing,
14236 since we won't be calculating all of the offsets necessary to do this
14237 simplification. */
14239 static void
14240 arm_encode_section_info (tree decl, rtx rtl, int first)
14242 /* This doesn't work with AOF syntax, since the string table may be in
14243 a different AREA. */
14244 #ifndef AOF_ASSEMBLER
14245 if (optimize > 0 && TREE_CONSTANT (decl))
14246 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14247 #endif
14249 /* If we are referencing a function that is weak then encode a long call
14250 flag in the function name, otherwise if the function is static or
14251 known to be defined in this file then encode a short call flag. */
14252 if (first && DECL_P (decl))
14254 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14255 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14256 else if (! TREE_PUBLIC (decl))
14257 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14260 #endif /* !ARM_PE */
14262 static void
14263 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14265 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14266 && !strcmp (prefix, "L"))
14268 arm_ccfsm_state = 0;
14269 arm_target_insn = NULL;
14271 default_internal_label (stream, prefix, labelno);
14274 /* Output code to add DELTA to the first argument, and then jump
14275 to FUNCTION. Used for C++ multiple inheritance. */
14276 static void
14277 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14278 HOST_WIDE_INT delta,
14279 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14280 tree function)
14282 static int thunk_label = 0;
14283 char label[256];
14284 int mi_delta = delta;
14285 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14286 int shift = 0;
14287 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14288 ? 1 : 0);
14289 if (mi_delta < 0)
14290 mi_delta = - mi_delta;
14291 if (TARGET_THUMB)
14293 int labelno = thunk_label++;
14294 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14295 fputs ("\tldr\tr12, ", file);
14296 assemble_name (file, label);
14297 fputc ('\n', file);
14299 while (mi_delta != 0)
14301 if ((mi_delta & (3 << shift)) == 0)
14302 shift += 2;
14303 else
14305 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14306 mi_op, this_regno, this_regno,
14307 mi_delta & (0xff << shift));
14308 mi_delta &= ~(0xff << shift);
14309 shift += 8;
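/* A worked example (illustrative, assuming a non-aggregate return so
   `this' is in r0): delta == 1028 (0x404) is split into two valid
   8-bit-rotated immediates:

	add	r0, r0, #4
	add	r0, r0, #1024
 */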
14312 if (TARGET_THUMB)
14314 fprintf (file, "\tbx\tr12\n");
14315 ASM_OUTPUT_ALIGN (file, 2);
14316 assemble_name (file, label);
14317 fputs (":\n", file);
14318 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14320 else
14322 fputs ("\tb\t", file);
14323 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14324 if (NEED_PLT_RELOC)
14325 fputs ("(PLT)", file);
14326 fputc ('\n', file);
14330 int
14331 arm_emit_vector_const (FILE *file, rtx x)
14333 int i;
14334 const char * pattern;
14336 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14338 switch (GET_MODE (x))
14340 case V2SImode: pattern = "%08x"; break;
14341 case V4HImode: pattern = "%04x"; break;
14342 case V8QImode: pattern = "%02x"; break;
14343 default: gcc_unreachable ();
14346 fprintf (file, "0x");
14347 for (i = CONST_VECTOR_NUNITS (x); i--;)
14349 rtx element;
14351 element = CONST_VECTOR_ELT (x, i);
14352 fprintf (file, pattern, INTVAL (element));
14355 return 1;
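/* Illustrative only: a V4HImode vector whose elements 0..3 are
   1, 2, 3, 4 prints as 0x0004000300020001, i.e. element 0 lands in
   the least significant halfword.  */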
14358 const char *
14359 arm_output_load_gr (rtx *operands)
14361 rtx reg;
14362 rtx offset;
14363 rtx wcgr;
14364 rtx sum;
14366 if (GET_CODE (operands [1]) != MEM
14367 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14368 || GET_CODE (reg = XEXP (sum, 0)) != REG
14369 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14370 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14371 return "wldrw%?\t%0, %1";
14373 /* Fix up an out-of-range load of a GR register. */
14374 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14375 wcgr = operands[0];
14376 operands[0] = reg;
14377 output_asm_insn ("ldr%?\t%0, %1", operands);
14379 operands[0] = wcgr;
14380 operands[1] = reg;
14381 output_asm_insn ("tmcr%?\t%0, %1", operands);
14382 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14384 return "";
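/* Illustratively, for a destination of wCGR0 and an address based on
   r1, the expansion is

	str	r1, [sp, #-4]!	@ Start of GR load expansion
	ldr	r1, <address>
	tmcr	wcgr0, r1
	ldr	r1, [sp], #4	@ End of GR load expansion

   bouncing the value through the (saved and restored) base register.  */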
14387 static rtx
14388 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14389 int incoming ATTRIBUTE_UNUSED)
14391 #if 0
14392 /* FIXME: The ARM backend has special code to handle structure
14393 returns, and will reserve its own hidden first argument. So
14394 if this macro is enabled a *second* hidden argument will be
14395 reserved, which will break binary compatibility with old
14396 toolchains and also thunk handling. One day this should be
14397 fixed. */
14398 return 0;
14399 #else
14400 /* Register in which address to store a structure value
14401 is passed to a function. */
14402 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14403 #endif
14406 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14408 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14409 named arg and all anonymous args onto the stack.
14410 XXX I know the prologue shouldn't be pushing registers, but it is faster
14411 that way. */
14413 static void
14414 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14415 enum machine_mode mode ATTRIBUTE_UNUSED,
14416 tree type ATTRIBUTE_UNUSED,
14417 int *pretend_size,
14418 int second_time ATTRIBUTE_UNUSED)
14420 cfun->machine->uses_anonymous_args = 1;
14421 if (cum->nregs < NUM_ARG_REGS)
14422 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
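/* For example, a varargs function whose named arguments consume r0
   and r1 gets *pretend_size == 8, making the prologue push r2 and r3
   so that the anonymous arguments are contiguous with the stack-based
   ones.  */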
14425 /* Return nonzero if the CONSUMER instruction (a store) does not need
14426 PRODUCER's value to calculate the address. */
14428 int
14429 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14431 rtx value = PATTERN (producer);
14432 rtx addr = PATTERN (consumer);
14434 if (GET_CODE (value) == COND_EXEC)
14435 value = COND_EXEC_CODE (value);
14436 if (GET_CODE (value) == PARALLEL)
14437 value = XVECEXP (value, 0, 0);
14438 value = XEXP (value, 0);
14439 if (GET_CODE (addr) == COND_EXEC)
14440 addr = COND_EXEC_CODE (addr);
14441 if (GET_CODE (addr) == PARALLEL)
14442 addr = XVECEXP (addr, 0, 0);
14443 addr = XEXP (addr, 0);
14445 return !reg_overlap_mentioned_p (value, addr);
14448 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14449 have an early register shift value or amount dependency on the
14450 result of PRODUCER. */
14452 int
14453 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14455 rtx value = PATTERN (producer);
14456 rtx op = PATTERN (consumer);
14457 rtx early_op;
14459 if (GET_CODE (value) == COND_EXEC)
14460 value = COND_EXEC_CODE (value);
14461 if (GET_CODE (value) == PARALLEL)
14462 value = XVECEXP (value, 0, 0);
14463 value = XEXP (value, 0);
14464 if (GET_CODE (op) == COND_EXEC)
14465 op = COND_EXEC_CODE (op);
14466 if (GET_CODE (op) == PARALLEL)
14467 op = XVECEXP (op, 0, 0);
14468 op = XEXP (op, 1);
14470 early_op = XEXP (op, 0);
14471 /* This is either an actual independent shift, or a shift applied to
14472 the first operand of another operation. We want the whole shift
14473 operation. */
14474 if (GET_CODE (early_op) == REG)
14475 early_op = op;
14477 return !reg_overlap_mentioned_p (value, early_op);
14480 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14481 have an early register shift value dependency on the result of
14482 PRODUCER. */
14484 int
14485 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14487 rtx value = PATTERN (producer);
14488 rtx op = PATTERN (consumer);
14489 rtx early_op;
14491 if (GET_CODE (value) == COND_EXEC)
14492 value = COND_EXEC_CODE (value);
14493 if (GET_CODE (value) == PARALLEL)
14494 value = XVECEXP (value, 0, 0);
14495 value = XEXP (value, 0);
14496 if (GET_CODE (op) == COND_EXEC)
14497 op = COND_EXEC_CODE (op);
14498 if (GET_CODE (op) == PARALLEL)
14499 op = XVECEXP (op, 0, 0);
14500 op = XEXP (op, 1);
14502 early_op = XEXP (op, 0);
14504 /* This is either an actual independent shift, or a shift applied to
14505 the first operand of another operation. We want the value being
14506 shifted, in either case. */
14507 if (GET_CODE (early_op) != REG)
14508 early_op = XEXP (early_op, 0);
14510 return !reg_overlap_mentioned_p (value, early_op);
14513 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14514 have an early register mult dependency on the result of
14515 PRODUCER. */
14517 int
14518 arm_no_early_mul_dep (rtx producer, rtx consumer)
14520 rtx value = PATTERN (producer);
14521 rtx op = PATTERN (consumer);
14523 if (GET_CODE (value) == COND_EXEC)
14524 value = COND_EXEC_CODE (value);
14525 if (GET_CODE (value) == PARALLEL)
14526 value = XVECEXP (value, 0, 0);
14527 value = XEXP (value, 0);
14528 if (GET_CODE (op) == COND_EXEC)
14529 op = COND_EXEC_CODE (op);
14530 if (GET_CODE (op) == PARALLEL)
14531 op = XVECEXP (op, 0, 0);
14532 op = XEXP (op, 1);
14534 return (GET_CODE (op) == PLUS
14535 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14539 /* We can't rely on the caller doing the proper promotion when
14540 using APCS or ATPCS. */
14542 static bool
14543 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14545 return !TARGET_AAPCS_BASED;
14549 /* AAPCS based ABIs use short enums by default. */
14551 static bool
14552 arm_default_short_enums (void)
14554 return TARGET_AAPCS_BASED;
14558 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14560 static bool
14561 arm_align_anon_bitfield (void)
14563 return TARGET_AAPCS_BASED;
14567 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14569 static tree
14570 arm_cxx_guard_type (void)
14572 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14576 /* The EABI says test the least significant bit of a guard variable. */
14578 static bool
14579 arm_cxx_guard_mask_bit (void)
14581 return TARGET_AAPCS_BASED;
14585 /* The EABI specifies that all array cookies are 8 bytes long. */
14587 static tree
14588 arm_get_cookie_size (tree type)
14590 tree size;
14592 if (!TARGET_AAPCS_BASED)
14593 return default_cxx_get_cookie_size (type);
14595 size = build_int_cst (sizetype, 8);
14596 return size;
14600 /* The EABI says that array cookies should also contain the element size. */
14602 static bool
14603 arm_cookie_has_size (void)
14605 return TARGET_AAPCS_BASED;
14609 /* The EABI says constructors and destructors should return a pointer to
14610 the object constructed/destroyed. */
14612 static bool
14613 arm_cxx_cdtor_returns_this (void)
14615 return TARGET_AAPCS_BASED;
14618 /* The EABI says that an inline function may never be the key
14619 method. */
14621 static bool
14622 arm_cxx_key_method_may_be_inline (void)
14624 return !TARGET_AAPCS_BASED;
14627 static void
14628 arm_cxx_determine_class_data_visibility (tree decl)
14630 if (!TARGET_AAPCS_BASED)
14631 return;
14633 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
14634 is exported. However, on systems without dynamic vague linkage,
14635 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
14636 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
14637 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
14638 else
14639 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
14640 DECL_VISIBILITY_SPECIFIED (decl) = 1;
14643 static bool
14644 arm_cxx_class_data_always_comdat (void)
14646 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
14647 vague linkage if the class has no key function. */
14648 return !TARGET_AAPCS_BASED;
14652 /* The EABI says __aeabi_atexit should be used to register static
14653 destructors. */
14655 static bool
14656 arm_cxx_use_aeabi_atexit (void)
14658 return TARGET_AAPCS_BASED;
14662 void
14663 arm_set_return_address (rtx source, rtx scratch)
14665 arm_stack_offsets *offsets;
14666 HOST_WIDE_INT delta;
14667 rtx addr;
14668 unsigned long saved_regs;
14670 saved_regs = arm_compute_save_reg_mask ();
14672 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14673 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14674 else
14676 if (frame_pointer_needed)
14677 addr = plus_constant(hard_frame_pointer_rtx, -4);
14678 else
14680 /* LR will be the first saved register. */
14681 offsets = arm_get_frame_offsets ();
14682 delta = offsets->outgoing_args - (offsets->frame + 4);
14685 if (delta >= 4096)
14687 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14688 GEN_INT (delta & ~4095)));
14689 addr = scratch;
14690 delta &= 4095;
14692 else
14693 addr = stack_pointer_rtx;
14695 addr = plus_constant (addr, delta);
14697 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14702 void
14703 thumb_set_return_address (rtx source, rtx scratch)
14705 arm_stack_offsets *offsets;
14706 HOST_WIDE_INT delta;
14707 int reg;
14708 rtx addr;
14709 unsigned long mask;
14711 emit_insn (gen_rtx_USE (VOIDmode, source));
14713 mask = thumb_compute_save_reg_mask ();
14714 if (mask & (1 << LR_REGNUM))
14716 offsets = arm_get_frame_offsets ();
14718 /* Find the saved regs. */
14719 if (frame_pointer_needed)
14721 delta = offsets->soft_frame - offsets->saved_args;
14722 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14724 else
14726 delta = offsets->outgoing_args - offsets->saved_args;
14727 reg = SP_REGNUM;
14729 /* Allow for the stack frame. */
14730 if (TARGET_BACKTRACE)
14731 delta -= 16;
14732 /* The link register is always the first saved register. */
14733 delta -= 4;
14735 /* Construct the address. */
14736 addr = gen_rtx_REG (SImode, reg);
14737 if ((reg != SP_REGNUM && delta >= 128)
14738 || delta >= 1024)
14740 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14741 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14742 addr = scratch;
14744 else
14745 addr = plus_constant (addr, delta);
14747 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14749 else
14750 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14753 /* Implements target hook vector_mode_supported_p. */
14754 bool
14755 arm_vector_mode_supported_p (enum machine_mode mode)
14757 if ((mode == V2SImode)
14758 || (mode == V4HImode)
14759 || (mode == V8QImode))
14760 return true;
14762 return false;
14765 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14766 ARM insns and therefore guarantee that the shift count is modulo 256.
14767 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14768 guarantee no particular behavior for out-of-range counts. */
14770 static unsigned HOST_WIDE_INT
14771 arm_shift_truncation_mask (enum machine_mode mode)
14773 return mode == SImode ? 255 : 0;
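/* Consequently an explicit (count & 255) applied to an SImode shift
   amount can be optimized away, while DImode shift counts must be
   left untouched.  */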
14777 /* Map internal gcc register numbers to DWARF2 register numbers. */
14779 unsigned int
14780 arm_dbx_register_number (unsigned int regno)
14782 if (regno < 16)
14783 return regno;
14785 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14786 compatibility. The EABI defines them as registers 96-103. */
14787 if (IS_FPA_REGNUM (regno))
14788 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14790 if (IS_VFP_REGNUM (regno))
14791 return 64 + regno - FIRST_VFP_REGNUM;
14793 if (IS_IWMMXT_GR_REGNUM (regno))
14794 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14796 if (IS_IWMMXT_REGNUM (regno))
14797 return 112 + regno - FIRST_IWMMXT_REGNUM;
14799 gcc_unreachable ();
14803 #ifdef TARGET_UNWIND_INFO
14804 /* Emit unwind directives for a store-multiple instruction. This should
14805 only ever be generated by the function prologue code, so we expect it
14806 to have a particular form. */
14808 static void
14809 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
14811 int i;
14812 HOST_WIDE_INT offset;
14813 HOST_WIDE_INT nregs;
14814 int reg_size;
14815 unsigned reg;
14816 unsigned lastreg;
14817 rtx e;
14819 /* First insn will adjust the stack pointer. */
14820 e = XVECEXP (p, 0, 0);
14821 if (GET_CODE (e) != SET
14822 || GET_CODE (XEXP (e, 0)) != REG
14823 || REGNO (XEXP (e, 0)) != SP_REGNUM
14824 || GET_CODE (XEXP (e, 1)) != PLUS)
14825 abort ();
14827 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
14828 nregs = XVECLEN (p, 0) - 1;
14830 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
14831 if (reg < 16)
14833 /* The function prologue may also push pc, but does not annotate it, as
14834 it is never restored. We turn this into a stack pointer adjustment. */
14835 if (nregs * 4 == offset - 4)
14837 fprintf (asm_out_file, "\t.pad #4\n");
14838 offset -= 4;
14840 reg_size = 4;
14842 else if (IS_VFP_REGNUM (reg))
14844 /* VFP register saves (FSTMX format) use an additional word. */
14845 offset -= 4;
14846 reg_size = 8;
14848 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
14850 /* FPA registers are done differently. */
14851 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
14852 return;
14854 else
14855 /* Unknown register type. */
14856 abort ();
14858 /* If the stack increment doesn't match the size of the saved registers,
14859 something has gone horribly wrong. */
14860 if (offset != nregs * reg_size)
14861 abort ();
14863 fprintf (asm_out_file, "\t.save {");
14865 offset = 0;
14866 lastreg = 0;
14867 /* The remaining insns will describe the stores. */
14868 for (i = 1; i <= nregs; i++)
14870 /* Expect (set (mem <addr>) (reg)).
14871 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
14872 e = XVECEXP (p, 0, i);
14873 if (GET_CODE (e) != SET
14874 || GET_CODE (XEXP (e, 0)) != MEM
14875 || GET_CODE (XEXP (e, 1)) != REG)
14876 abort ();
14878 reg = REGNO (XEXP (e, 1));
14879 if (reg < lastreg)
14880 abort ();
14882 if (i != 1)
14883 fprintf (asm_out_file, ", ");
14884 /* We can't use %r for vfp because we need to use the
14885 double precision register names. */
14886 if (IS_VFP_REGNUM (reg))
14887 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
14888 else
14889 asm_fprintf (asm_out_file, "%r", reg);
14891 #ifdef ENABLE_CHECKING
14892 /* Check that the addresses are consecutive. */
14893 e = XEXP (XEXP (e, 0), 0);
14894 if (GET_CODE (e) == PLUS)
14896 offset += reg_size;
14897 if (GET_CODE (XEXP (e, 0)) != REG
14898 || REGNO (XEXP (e, 0)) != SP_REGNUM
14899 || GET_CODE (XEXP (e, 1)) != CONST_INT
14900 || offset != INTVAL (XEXP (e, 1)))
14901 abort ();
14903 else if (i != 1
14904 || GET_CODE (e) != REG
14905 || REGNO (e) != SP_REGNUM)
14906 abort ();
14907 #endif
14909 fprintf (asm_out_file, "}\n");
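/* Illustrative only: a frame-related `push {r4, r5, lr}' arrives here
   as a 3-register store multiple and produces

	.save {r4, r5, lr}

   while a prologue that also pushes pc (never restored, so never
   annotated) gets an extra `.pad #4' for the unclaimed slot.  */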
14912 /* Emit unwind directives for a SET. */
14914 static void
14915 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
14917 rtx e0;
14918 rtx e1;
14920 e0 = XEXP (p, 0);
14921 e1 = XEXP (p, 1);
14922 switch (GET_CODE (e0))
14924 case MEM:
14925 /* Pushing a single register. */
14926 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
14927 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
14928 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
14929 abort ();
14931 asm_fprintf (asm_out_file, "\t.save ");
14932 if (IS_VFP_REGNUM (REGNO (e1)))
14933 asm_fprintf (asm_out_file, "{d%d}\n",
14934 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
14935 else
14936 asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
14937 break;
14939 case REG:
14940 if (REGNO (e0) == SP_REGNUM)
14942 /* A stack increment. */
14943 if (GET_CODE (e1) != PLUS
14944 || GET_CODE (XEXP (e1, 0)) != REG
14945 || REGNO (XEXP (e1, 0)) != SP_REGNUM
14946 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
14947 abort ();
14949 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
14950 -INTVAL (XEXP (e1, 1)));
14952 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
14954 HOST_WIDE_INT offset;
14955 unsigned reg;
14957 if (GET_CODE (e1) == PLUS)
14959 if (GET_CODE (XEXP (e1, 0)) != REG
14960 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
14961 abort ();
14962 reg = REGNO (XEXP (e1, 0));
14963 offset = INTVAL (XEXP (e1, 1));
14964 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
14965 HARD_FRAME_POINTER_REGNUM, reg,
14966 INTVAL (XEXP (e1, 1)));
14968 else if (GET_CODE (e1) == REG)
14970 reg = REGNO (e1);
14971 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
14972 HARD_FRAME_POINTER_REGNUM, reg);
14974 else
14975 abort ();
14977 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
14979 /* Move from sp to reg. */
14980 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
14982 else
14983 abort ();
14984 break;
14986 default:
14987 abort ();
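/* Illustrative only: a frame-related `sub sp, sp, #16' arrives as
   (set (reg sp) (plus (reg sp) (const_int -16))) and produces
   `.pad #16'; establishing the frame pointer with `add r7, sp, #8'
   (r7 assuming a Thumb frame pointer) produces `.setfp r7, sp, #8'.  */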
14992 /* Emit unwind directives for the given insn. */
14994 static void
14995 arm_unwind_emit (FILE * asm_out_file, rtx insn)
14997 rtx pat;
14999 if (!ARM_EABI_UNWIND_TABLES)
15000 return;
15002 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
15003 return;
15005 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
15006 if (pat)
15007 pat = XEXP (pat, 0);
15008 else
15009 pat = PATTERN (insn);
15011 switch (GET_CODE (pat))
15013 case SET:
15014 arm_unwind_emit_set (asm_out_file, pat);
15015 break;
15017 case SEQUENCE:
15018 /* Store multiple. */
15019 arm_unwind_emit_stm (asm_out_file, pat);
15020 break;
15022 default:
15023 abort ();
15028 /* Output a reference from a function exception table to the type_info
15029 object X. The EABI specifies that the symbol should be relocated by
15030 an R_ARM_TARGET2 relocation. */
15032 static bool
15033 arm_output_ttype (rtx x)
15035 fputs ("\t.word\t", asm_out_file);
15036 output_addr_const (asm_out_file, x);
15037 /* Use special relocations for symbol references. */
15038 if (GET_CODE (x) != CONST_INT)
15039 fputs ("(TARGET2)", asm_out_file);
15040 fputc ('\n', asm_out_file);
15042 return TRUE;
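/* Illustrative only: for a handler matching `int' this emits
   something like

	.word	_ZTIi(TARGET2)

   leaving the R_ARM_TARGET2 relocation to supply the address of the
   typeinfo object at link/load time.  */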
15044 #endif /* TARGET_UNWIND_INFO */
15047 /* Output unwind directives for the start/end of a function. */
15049 void
15050 arm_output_fn_unwind (FILE * f, bool prologue)
15052 if (!ARM_EABI_UNWIND_TABLES)
15053 return;
15055 if (prologue)
15056 fputs ("\t.fnstart\n", f);
15057 else
15058 fputs ("\t.fnend\n", f);