/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static rtx is_jump_table (rtx);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);

/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef  TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef  TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef  TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef  TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef  TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef  TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef  TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus.  */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply.  */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support.  */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support.  */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4.  */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5.  */
#define FL_THUMB      (1 << 6)        /* Thumb aware.  */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary.  */
#define FL_STRONG     (1 << 8)        /* StrongARM.  */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5.  */
#define FL_XSCALE     (1 << 10)       /* XScale.  */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   FL_FOR_ARCH6
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6
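
/* As a rough worked example: FL_FOR_ARCH5TE expands transitively to
   FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB,
   so selecting -march=armv5te permits Thumb code and the v5/v5E
   instruction set additions while leaving 26-bit mode disabled.  */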

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};
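
/* As a rough worked example (shown schematically; see arm-cores.def
   for the real rows), an entry of the form

     ARM_CORE("arm7tdmi", arm7tdmi, 4T, FL_CO_PROC, slowmul)

   expands under the macro above to

     {"arm7tdmi", arm_none, "4T", FL_CO_PROC | FL_FOR_ARCH4T,
      arm_slowmul_rtx_costs},

   so each core row supplies its name, architecture string, extra
   capability flags and rtx cost model in one place.  */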

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",      FPUTYPE_FPA},
  {"fpe2",     FPUTYPE_FPA_EMU2},
  {"fpe3",     FPUTYPE_FPA_EMU3},
  {"maverick", FPUTYPE_MAVERICK},
  {"vfp",      FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,   /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,       /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,       /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,       /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,  /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP        /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",   ARM_FLOAT_ABI_SOFT},
  {"softfp", ARM_FLOAT_ABI_SOFTFP},
  {"hard",   ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu", ARM_ABI_APCS},
  {"atpcs",    ARM_ABI_ATPCS},
  {"aapcs",    ARM_ABI_AAPCS},
  {"iwmmxt",   ARM_ABI_IWMMXT}
};

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
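
/* Note that value & (value - 1) clears the least-significant set bit,
   so the loop above runs once per set bit rather than once per bit
   position; e.g. bit_count (0x29) iterates three times
   (0x29 -> 0x28 -> 0x20 -> 0) and returns 3.  */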

/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
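
/* As an informal illustration of the divmod mapping above: with these
   libcalls in place, a plain division such as "q = a / b;" compiles to
   a call to __aeabi_idivmod, and the quotient is simply taken from r0
   on return; the remainder the routine also leaves in r1 is ignored
   for plain division.  That is why one routine can back both
   sdiv_optab and sdivmod_optab.  */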

/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}
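
/* As an informal example of the wiring above: "-march=armv5te" just
   stores the string "armv5te" in arm_select[1].string; nothing is
   validated here.  arm_override_options below is what looks the string
   up in all_architectures and reports "bad value" if it is unknown.  */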

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & (sought | insn_flags)) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              gcc_assert (best_fit);
              sel = best_fit;
            }

          insn_flags = sel->flags;
        }

      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking");
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
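
/* In user code these spellings appear as function attributes, e.g.
   (a typical use, not taken from this file):

     void handler (void) __attribute__ ((interrupt ("IRQ")));

   arm_isr_value below maps the string argument through this table to
   an ARM_FT_* value; any unlisted string yields ARM_FT_UNKNOWN.  */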

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ...  */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
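
/* Concretely (an informal note): when this predicate holds, the
   epilogue degenerates to a single instruction such as "mov pc, lr"
   for a leaf function with nothing stacked, or a single load-multiple
   like "ldmfd sp!, {r4, r5, pc}" when call-saved registers and the LR
   were pushed; the cases rejected above are exactly those that would
   need a second instruction or a stack-pointer fix-up first.  */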

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
           && ((i & ~0xc000003f) == 0
               || (i & ~0xf000000f) == 0
               || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}
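
/* Informally: an ARM data-processing immediate is an 8-bit value
   rotated right by an even amount within the 32-bit word.  So
   const_ok_for_arm (0xff) and const_ok_for_arm (0xff000000) both hold
   (rotations of 0 and 8), while 0x1fe fails: its eight set bits fit a
   byte but would need the odd rotation 31.  The 0xc000003f-style masks
   above catch the byte patterns that wrap around the top of the
   word.  */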

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are dyadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
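
/* A hand-worked illustration: 0x0000ff00 is itself a valid immediate,
   so it costs one insn, whereas something like 0x00012345 has no 8-bit
   rotated form and must be built piecewise (roughly a "mov" of one
   byte-sized chunk plus "orr"s for the rest, three insns here).
   Before arm_reorg has run, a constant whose synthesis would exceed
   arm_constant_limit is instead emitted as a plain SET so that it can
   later be placed in a minipool and fetched with a single load.  */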

static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
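
/* Rough example: for remainder == 0x00ff00ff the scan above finds two
   byte-aligned 8-bit groups, so it returns 2, matching the mov + orr
   pair that arm_gen_constant would emit for that value.  */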

/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}

/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }

      /* We don't know how to handle other cases yet.  */
      gcc_assert (remainder == 0xffffffff);

      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         gen_rtx_NOT (mode, source)));
      return 1;

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_MINUS (mode, GEN_INT (val),
                                                            source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      gcc_unreachable ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         (source
                                          ? gen_rtx_fmt_ee (code, mode, source,
                                                            GEN_INT (val))
                                          : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
        clear_sign_bit_copies++;
      else
        break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
        set_sign_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
        clear_zero_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
        set_zero_bit_copies++;
      else
        break;
    }

  switch (code)
    {
    case SET:
      /* See if we can do this by sign_extending a constant that is known
1863 to be negative. This is a good way of doing it, since the shift
1864 may well merge into a subsequent insn. */
1865 if (set_sign_bit_copies > 1)
1867 if (const_ok_for_arm
1868 (temp1 = ARM_SIGN_EXTEND (remainder
1869 << (set_sign_bit_copies - 1))))
1871 if (generate)
1873 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1874 emit_constant_insn (cond,
1875 gen_rtx_SET (VOIDmode, new_src,
1876 GEN_INT (temp1)));
1877 emit_constant_insn (cond,
1878 gen_ashrsi3 (target, new_src,
1879 GEN_INT (set_sign_bit_copies - 1)));
1881 return 2;
1883 /* For an inverted constant, we will need to set the low bits;
1884 these will be shifted out of harm's way. */
1885 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1886 if (const_ok_for_arm (~temp1))
1888 if (generate)
1890 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1891 emit_constant_insn (cond,
1892 gen_rtx_SET (VOIDmode, new_src,
1893 GEN_INT (temp1)));
1894 emit_constant_insn (cond,
1895 gen_ashrsi3 (target, new_src,
1896 GEN_INT (set_sign_bit_copies - 1)));
1898 return 2;
1902 /* See if we can calculate the value as the difference between two
1903 valid immediates. */
1904 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1906 int topshift = clear_sign_bit_copies & ~1;
1908 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1909 & (0xff000000 >> topshift));
1911 /* If temp1 is zero, then that means the 9 most significant
1912 bits of remainder were 1 and we've caused it to overflow.
1913 When topshift is 0 we don't need to do anything since we
1914 can borrow from 'bit 32'. */
1915 if (temp1 == 0 && topshift != 0)
1916 temp1 = 0x80000000 >> (topshift - 1);
1918 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
1920 if (const_ok_for_arm (temp2))
1922 if (generate)
1924 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1925 emit_constant_insn (cond,
1926 gen_rtx_SET (VOIDmode, new_src,
1927 GEN_INT (temp1)));
1928 emit_constant_insn (cond,
1929 gen_addsi3 (target, new_src,
1930 GEN_INT (-temp2)));
1933 return 2;
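/* A worked example of the difference trick (illustrative): 0x00ffff00
   is not a valid immediate, but it is the difference of two that are,
   so it can be built in two insns:

	mov	rD, #0x01000000
	sub	rD, rD, #0x00000100	@ rD = 0x00ffff00  */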
1937 /* See if we can generate this by setting the bottom (or the top)
1938 16 bits, and then shifting these into the other half of the
1939 word. We only look for the simplest cases; to do more would cost
1940 too much. Be careful, however, not to generate this when the
1941 alternative would take fewer insns. */
1942 if (val & 0xffff0000)
1944 temp1 = remainder & 0xffff0000;
1945 temp2 = remainder & 0x0000ffff;
1947 /* Overlaps outside this range are best done using other methods. */
1948 for (i = 9; i < 24; i++)
1950 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1951 && !const_ok_for_arm (temp2))
1953 rtx new_src = (subtargets
1954 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1955 : target);
1956 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1957 source, subtargets, generate);
1958 source = new_src;
1959 if (generate)
1960 emit_constant_insn
1961 (cond,
1962 gen_rtx_SET
1963 (VOIDmode, target,
1964 gen_rtx_IOR (mode,
1965 gen_rtx_ASHIFT (mode, source,
1966 GEN_INT (i)),
1967 source)));
1968 return insns + 1;
1972 /* Don't duplicate cases already considered. */
1973 for (i = 17; i < 24; i++)
1975 if (((temp1 | (temp1 >> i)) == remainder)
1976 && !const_ok_for_arm (temp1))
1978 rtx new_src = (subtargets
1979 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1980 : target);
1981 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1982 source, subtargets, generate);
1983 source = new_src;
1984 if (generate)
1985 emit_constant_insn
1986 (cond,
1987 gen_rtx_SET (VOIDmode, target,
1988 gen_rtx_IOR
1989 (mode,
1990 gen_rtx_LSHIFTRT (mode, source,
1991 GEN_INT (i)),
1992 source)));
1993 return insns + 1;
1997 break;
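/* A worked example of the shift-and-IOR trick in the SET case above
   (illustrative): 0x01010101 would need four immediates directly, but
   synthesizing the low half and ORing in a shifted copy takes three:

	mov	rT, #0x00000100
	orr	rT, rT, #0x00000001	@ rT = 0x00000101
	orr	rD, rT, rT, asl #16	@ rD = 0x01010101  */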
1999 case IOR:
2000 case XOR:
2001 /* If we have IOR or XOR, and the constant can be loaded in a
2002 single instruction, and we can find a temporary to put it in,
2003 then this can be done in two instructions instead of 3-4. */
2004 if (subtargets
2005 /* TARGET can't be NULL if SUBTARGETS is 0. */
2006 || (reload_completed && !reg_mentioned_p (target, source)))
2008 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2010 if (generate)
2012 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2014 emit_constant_insn (cond,
2015 gen_rtx_SET (VOIDmode, sub,
2016 GEN_INT (val)));
2017 emit_constant_insn (cond,
2018 gen_rtx_SET (VOIDmode, target,
2019 gen_rtx_fmt_ee (code, mode,
2020 source, sub)));
2022 return 2;
2026 if (code == XOR)
2027 break;
2029 if (set_sign_bit_copies > 8
2030 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2032 if (generate)
2034 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2035 rtx shift = GEN_INT (set_sign_bit_copies);
2037 emit_constant_insn
2038 (cond,
2039 gen_rtx_SET (VOIDmode, sub,
2040 gen_rtx_NOT (mode,
2041 gen_rtx_ASHIFT (mode,
2042 source,
2043 shift))));
2044 emit_constant_insn
2045 (cond,
2046 gen_rtx_SET (VOIDmode, target,
2047 gen_rtx_NOT (mode,
2048 gen_rtx_LSHIFTRT (mode, sub,
2049 shift))));
2051 return 2;
2054 if (set_zero_bit_copies > 8
2055 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2057 if (generate)
2059 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2060 rtx shift = GEN_INT (set_zero_bit_copies);
2062 emit_constant_insn
2063 (cond,
2064 gen_rtx_SET (VOIDmode, sub,
2065 gen_rtx_NOT (mode,
2066 gen_rtx_LSHIFTRT (mode,
2067 source,
2068 shift))));
2069 emit_constant_insn
2070 (cond,
2071 gen_rtx_SET (VOIDmode, target,
2072 gen_rtx_NOT (mode,
2073 gen_rtx_ASHIFT (mode, sub,
2074 shift))));
2076 return 2;
2079 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2081 if (generate)
2083 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2084 emit_constant_insn (cond,
2085 gen_rtx_SET (VOIDmode, sub,
2086 gen_rtx_NOT (mode, source)));
2087 source = sub;
2088 if (subtargets)
2089 sub = gen_reg_rtx (mode);
2090 emit_constant_insn (cond,
2091 gen_rtx_SET (VOIDmode, sub,
2092 gen_rtx_AND (mode, source,
2093 GEN_INT (temp1))));
2094 emit_constant_insn (cond,
2095 gen_rtx_SET (VOIDmode, target,
2096 gen_rtx_NOT (mode, sub)));
2098 return 3;
2100 break;
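/* A worked example (illustrative): IOR with 0xfffffff0 is not a valid
   immediate, but its complement 0x0f is, so the first branch of the
   IOR/XOR case above loads the constant with a single MVN and needs
   only two insns:

	mvn	rT, #0x0000000f		@ rT = 0xfffffff0
	orr	rD, rS, rT  */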
2102 case AND:
2103 /* See if two shifts will do two or more insns' worth of work. */
2104 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2106 HOST_WIDE_INT shift_mask = ((0xffffffff
2107 << (32 - clear_sign_bit_copies))
2108 & 0xffffffff);
2110 if ((remainder | shift_mask) != 0xffffffff)
2112 if (generate)
2114 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2115 insns = arm_gen_constant (AND, mode, cond,
2116 remainder | shift_mask,
2117 new_src, source, subtargets, 1);
2118 source = new_src;
2120 else
2122 rtx targ = subtargets ? NULL_RTX : target;
2123 insns = arm_gen_constant (AND, mode, cond,
2124 remainder | shift_mask,
2125 targ, source, subtargets, 0);
2129 if (generate)
2131 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2132 rtx shift = GEN_INT (clear_sign_bit_copies);
2134 emit_insn (gen_ashlsi3 (new_src, source, shift));
2135 emit_insn (gen_lshrsi3 (target, new_src, shift));
2138 return insns + 2;
2141 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2143 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2145 if ((remainder | shift_mask) != 0xffffffff)
2147 if (generate)
2149 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2151 insns = arm_gen_constant (AND, mode, cond,
2152 remainder | shift_mask,
2153 new_src, source, subtargets, 1);
2154 source = new_src;
2156 else
2158 rtx targ = subtargets ? NULL_RTX : target;
2160 insns = arm_gen_constant (AND, mode, cond,
2161 remainder | shift_mask,
2162 targ, source, subtargets, 0);
2166 if (generate)
2168 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2169 rtx shift = GEN_INT (clear_zero_bit_copies);
2171 emit_insn (gen_lshrsi3 (new_src, source, shift));
2172 emit_insn (gen_ashlsi3 (target, new_src, shift));
2175 return insns + 2;
2178 break;
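/* A worked example of the two-shift trick (illustrative): AND with
   0x0000ffff cannot be done as a BIC either (0xffff0000 is not a
   valid immediate), but two shifts clear the top half in two insns:

	mov	rT, rS, asl #16
	mov	rD, rT, lsr #16		@ rD = rS & 0x0000ffff  */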
2180 default:
2181 break;
2184 for (i = 0; i < 32; i++)
2185 if (remainder & (1 << i))
2186 num_bits_set++;
2188 if (code == AND || (can_invert && num_bits_set > 16))
2189 remainder = (~remainder) & 0xffffffff;
2190 else if (code == PLUS && num_bits_set > 16)
2191 remainder = (-remainder) & 0xffffffff;
2192 else
2194 can_invert = 0;
2195 can_negate = 0;
2198 /* Now try to find a way of doing the job in either two or three
2199 instructions.
2200 We start by looking for the largest block of zeros that is aligned on
2201 a 2-bit boundary; we then fill up the temps, wrapping around to the
2202 top of the word when we drop off the bottom.
2203 In the worst case this code should produce no more than four insns. */
2205 int best_start = 0;
2206 int best_consecutive_zeros = 0;
2208 for (i = 0; i < 32; i += 2)
2210 int consecutive_zeros = 0;
2212 if (!(remainder & (3 << i)))
2214 while ((i < 32) && !(remainder & (3 << i)))
2216 consecutive_zeros += 2;
2217 i += 2;
2219 if (consecutive_zeros > best_consecutive_zeros)
2221 best_consecutive_zeros = consecutive_zeros;
2222 best_start = i - consecutive_zeros;
2224 i -= 2;
2228 /* So long as it won't require any more insns to do so, it's
2229 desirable to emit a small constant (in bits 0...9) in the last
2230 insn. This way there is more chance that it can be combined with
2231 a later addressing insn to form a pre-indexed load or store
2232 operation. Consider:
2234 *((volatile int *)0xe0000100) = 1;
2235 *((volatile int *)0xe0000110) = 2;
2237 We want this to wind up as:
2239 mov rA, #0xe0000000
2240 mov rB, #1
2241 str rB, [rA, #0x100]
2242 mov rB, #2
2243 str rB, [rA, #0x110]
2245 rather than having to synthesize both large constants from scratch.
2247 Therefore, we calculate how many insns would be required to emit
2248 the constant starting from `best_start', and also starting from
2249 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2250 yield a shorter sequence, we may as well use zero. */
2251 if (best_start != 0
2252 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2253 && (count_insns_for_constant (remainder, 0) <=
2254 count_insns_for_constant (remainder, best_start)))
2255 best_start = 0;
2257 /* Now start emitting the insns. */
2258 i = best_start;
2259 do
2260 {
2261 int end;
2263 if (i <= 0)
2264 i += 32;
2265 if (remainder & (3 << (i - 2)))
2267 end = i - 8;
2268 if (end < 0)
2269 end += 32;
2270 temp1 = remainder & ((0x0ff << end)
2271 | ((i < end) ? (0xff >> (32 - end)) : 0));
2272 remainder &= ~temp1;
2274 if (generate)
2276 rtx new_src, temp1_rtx;
2278 if (code == SET || code == MINUS)
2280 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2281 if (can_invert && code != MINUS)
2282 temp1 = ~temp1;
2284 else
2286 if (remainder && subtargets)
2287 new_src = gen_reg_rtx (mode);
2288 else
2289 new_src = target;
2290 if (can_invert)
2291 temp1 = ~temp1;
2292 else if (can_negate)
2293 temp1 = -temp1;
2296 temp1 = trunc_int_for_mode (temp1, mode);
2297 temp1_rtx = GEN_INT (temp1);
2299 if (code == SET)
2301 else if (code == MINUS)
2302 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2303 else
2304 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2306 emit_constant_insn (cond,
2307 gen_rtx_SET (VOIDmode, new_src,
2308 temp1_rtx));
2309 source = new_src;
2312 if (code == SET)
2314 can_invert = 0;
2315 code = PLUS;
2317 else if (code == MINUS)
2318 code = PLUS;
2320 insns++;
2321 i -= 6;
2323 i -= 2;
2324 }
2325 while (remainder);
2328 return insns;
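/* All of the synthesis logic above leans on the rule that an ARM
   data-processing immediate is an 8-bit value rotated right by an
   even amount.  A minimal, self-contained sketch of that test
   (illustrative only; const_ok_for_arm is the real implementation):  */
#if 0
#include <stdint.h>

static int
example_arm_immediate_p (uint32_t i)
{
  int rot;

  /* Try every even rotation; rotating I left by ROT undoes a
     rotate-right of an 8-bit payload by ROT.  */
  for (rot = 0; rot < 32; rot += 2)
    {
      uint32_t r = rot ? ((i << rot) | (i >> (32 - rot))) : i;

      if ((r & ~(uint32_t) 0xff) == 0)
	return 1;
    }
  return 0;
}
#endif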
2331 /* Canonicalize a comparison so that we are more likely to recognize it.
2332 This can be done for a few constant compares, where we can make the
2333 immediate value easier to load. */
2335 enum rtx_code
2336 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2337 rtx * op1)
2339 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2340 unsigned HOST_WIDE_INT maxval;
2341 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2343 switch (code)
2345 case EQ:
2346 case NE:
2347 return code;
2349 case GT:
2350 case LE:
2351 if (i != maxval
2352 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2354 *op1 = GEN_INT (i + 1);
2355 return code == GT ? GE : LT;
2357 break;
2359 case GE:
2360 case LT:
2361 if (i != ~maxval
2362 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2364 *op1 = GEN_INT (i - 1);
2365 return code == GE ? GT : LE;
2367 break;
2369 case GTU:
2370 case LEU:
2371 if (i != ~((unsigned HOST_WIDE_INT) 0)
2372 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2374 *op1 = GEN_INT (i + 1);
2375 return code == GTU ? GEU : LTU;
2377 break;
2379 case GEU:
2380 case LTU:
2381 if (i != 0
2382 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2384 *op1 = GEN_INT (i - 1);
2385 return code == GEU ? GTU : LEU;
2387 break;
2389 default:
2390 gcc_unreachable ();
2393 return code;
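/* A worked example (illustrative): "x > 0xfff" would need the invalid
   immediate 0xfff, but the equivalent "x >= 0x1000" needs only

	cmp	rX, #0x1000		@ 0x1000 = 0x01 rotated right by 20

   so GT is rewritten as GE with *op1 bumped to 0x1000.  */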
2397 /* Define how to find the value returned by a function. */
2399 rtx
2400 arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2402 enum machine_mode mode;
2403 int unsignedp ATTRIBUTE_UNUSED;
2404 rtx r ATTRIBUTE_UNUSED;
2406 mode = TYPE_MODE (type);
2407 /* Promote integer types. */
2408 if (INTEGRAL_TYPE_P (type))
2409 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2411 /* Promote small structs returned in a register to full-word size
2412 for big-endian AAPCS. */
2413 if (arm_return_in_msb (type))
2415 HOST_WIDE_INT size = int_size_in_bytes (type);
2416 if (size % UNITS_PER_WORD != 0)
2418 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2419 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2423 return LIBCALL_VALUE(mode);
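/* For example (illustrative, big-endian AAPCS): a 6-byte structure is
   padded to 8 bytes here and returned in DImode, so its data sits in
   the most significant end of the r0/r1 register pair.  */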
2426 /* Determine the amount of memory needed to store the possible return
2427 registers of an untyped call. */
2428 int
2429 arm_apply_result_size (void)
2431 int size = 16;
2433 if (TARGET_ARM)
2435 if (TARGET_HARD_FLOAT_ABI)
2437 if (TARGET_FPA)
2438 size += 12;
2439 if (TARGET_MAVERICK)
2440 size += 8;
2442 if (TARGET_IWMMXT_ABI)
2443 size += 8;
2446 return size;
2449 /* Decide whether a type should be returned in memory (true)
2450 or in a register (false). This is called by the macro
2451 RETURN_IN_MEMORY. */
2452 int
2453 arm_return_in_memory (tree type)
2455 HOST_WIDE_INT size;
2457 if (!AGGREGATE_TYPE_P (type) &&
2458 (TREE_CODE (type) != VECTOR_TYPE) &&
2459 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2460 /* All simple types are returned in registers.
2461 For AAPCS, complex types are treated the same as aggregates. */
2462 return 0;
2464 size = int_size_in_bytes (type);
2466 if (arm_abi != ARM_ABI_APCS)
2468 /* ATPCS and later return aggregate types in memory only if they are
2469 larger than a word (or are variable size). */
2470 return (size < 0 || size > UNITS_PER_WORD);
2473 /* To maximize backwards compatibility with previous versions of gcc,
2474 return vectors up to 4 words in registers. */
2475 if (TREE_CODE (type) == VECTOR_TYPE)
2476 return (size < 0 || size > (4 * UNITS_PER_WORD));
2478 /* For the arm-wince targets we choose to be compatible with Microsoft's
2479 ARM and Thumb compilers, which always return aggregates in memory. */
2480 #ifndef ARM_WINCE
2481 /* All structures/unions bigger than one word are returned in memory.
2482 Also catch the case where int_size_in_bytes returns -1. In this case
2483 the aggregate is either huge or of variable size, and in either case
2484 we will want to return it via memory and not in a register. */
2485 if (size < 0 || size > UNITS_PER_WORD)
2486 return 1;
2488 if (TREE_CODE (type) == RECORD_TYPE)
2490 tree field;
2492 /* For a struct the APCS says that we only return in a register
2493 if the type is 'integer like' and every addressable element
2494 has an offset of zero. For practical purposes this means
2495 that the structure can have at most one non bit-field element
2496 and that this element must be the first one in the structure. */
2498 /* Find the first field, ignoring non FIELD_DECL things which will
2499 have been created by C++. */
2500 for (field = TYPE_FIELDS (type);
2501 field && TREE_CODE (field) != FIELD_DECL;
2502 field = TREE_CHAIN (field))
2503 continue;
2505 if (field == NULL)
2506 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2508 /* Check that the first field is valid for returning in a register. */
2510 /* ... Floats are not allowed */
2511 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2512 return 1;
2514 /* ... Aggregates that are not themselves valid for returning in
2515 a register are not allowed. */
2516 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2517 return 1;
2519 /* Now check the remaining fields, if any. Only bitfields are allowed,
2520 since they are not addressable. */
2521 for (field = TREE_CHAIN (field);
2522 field;
2523 field = TREE_CHAIN (field))
2525 if (TREE_CODE (field) != FIELD_DECL)
2526 continue;
2528 if (!DECL_BIT_FIELD_TYPE (field))
2529 return 1;
2532 return 0;
2535 if (TREE_CODE (type) == UNION_TYPE)
2537 tree field;
2539 /* Unions can be returned in registers if every element is
2540 integral, or can be returned in an integer register. */
2541 for (field = TYPE_FIELDS (type);
2542 field;
2543 field = TREE_CHAIN (field))
2545 if (TREE_CODE (field) != FIELD_DECL)
2546 continue;
2548 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2549 return 1;
2551 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2552 return 1;
2555 return 0;
2557 #endif /* not ARM_WINCE */
2559 /* Return all other types in memory. */
2560 return 1;
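/* Illustrative examples under the APCS rules above:

     struct s1 { int i; };			r0 (integer-like)
     struct s2 { int i; int j; };		memory (two words)
     struct s3 { float f; };			memory (FP first field)
     struct s4 { unsigned a : 8, b : 24; };	r0 (only bit-fields)  */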
2563 /* Indicate whether or not words of a double are in big-endian order. */
2565 int
2566 arm_float_words_big_endian (void)
2568 if (TARGET_MAVERICK)
2569 return 0;
2571 /* For FPA, float words are always big-endian. For VFP, float words
2572 follow the memory system mode. */
2574 if (TARGET_FPA)
2576 return 1;
2579 if (TARGET_VFP)
2580 return (TARGET_BIG_END ? 1 : 0);
2582 return 1;
2585 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2586 for a call to a function whose data type is FNTYPE.
2587 For a library call, FNTYPE is NULL. */
2588 void
2589 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2590 rtx libname ATTRIBUTE_UNUSED,
2591 tree fndecl ATTRIBUTE_UNUSED)
2593 /* On the ARM, the offset starts at 0. */
2594 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2595 pcum->iwmmxt_nregs = 0;
2596 pcum->can_split = true;
2598 pcum->call_cookie = CALL_NORMAL;
2600 if (TARGET_LONG_CALLS)
2601 pcum->call_cookie = CALL_LONG;
2603 /* Check for long call/short call attributes. The attributes
2604 override any command line option. */
2605 if (fntype)
2607 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2608 pcum->call_cookie = CALL_SHORT;
2609 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2610 pcum->call_cookie = CALL_LONG;
2613 /* Varargs vectors are treated the same as long long.
2614 named_count avoids having to change the way arm handles 'named'. */
2615 pcum->named_count = 0;
2616 pcum->nargs = 0;
2618 if (TARGET_REALLY_IWMMXT && fntype)
2620 tree fn_arg;
2622 for (fn_arg = TYPE_ARG_TYPES (fntype);
2623 fn_arg;
2624 fn_arg = TREE_CHAIN (fn_arg))
2625 pcum->named_count += 1;
2627 if (! pcum->named_count)
2628 pcum->named_count = INT_MAX;
2633 /* Return true if mode/type need doubleword alignment. */
2634 bool
2635 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2637 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2638 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2642 /* Determine where to put an argument to a function.
2643 Value is zero to push the argument on the stack,
2644 or a hard register in which to store the argument.
2646 MODE is the argument's machine mode.
2647 TYPE is the data type of the argument (as a tree).
2648 This is null for libcalls where that information may
2649 not be available.
2650 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2651 the preceding args and about the function being called.
2652 NAMED is nonzero if this argument is a named parameter
2653 (otherwise it is an extra parameter matching an ellipsis). */
2655 rtx
2656 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2657 tree type, int named)
2659 int nregs;
2661 /* Varargs vectors are treated the same as long long.
2662 named_count avoids having to change the way arm handles 'named'. */
2663 if (TARGET_IWMMXT_ABI
2664 && arm_vector_mode_supported_p (mode)
2665 && pcum->named_count > pcum->nargs + 1)
2667 if (pcum->iwmmxt_nregs <= 9)
2668 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2669 else
2671 pcum->can_split = false;
2672 return NULL_RTX;
2676 /* Put doubleword aligned quantities in even register pairs. */
2677 if (pcum->nregs & 1
2678 && ARM_DOUBLEWORD_ALIGN
2679 && arm_needs_doubleword_align (mode, type))
2680 pcum->nregs++;
2682 if (mode == VOIDmode)
2683 /* Compute operand 2 of the call insn. */
2684 return GEN_INT (pcum->call_cookie);
2686 /* Only allow splitting an arg between regs and memory if all preceding
2687 args were allocated to regs. For args passed by reference we only count
2688 the reference pointer. */
2689 if (pcum->can_split)
2690 nregs = 1;
2691 else
2692 nregs = ARM_NUM_REGS2 (mode, type);
2694 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2695 return NULL_RTX;
2697 return gen_rtx_REG (mode, pcum->nregs);
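/* For example (illustrative), with ARM_DOUBLEWORD_ALIGN (as on AAPCS
   targets) the call f (int a, long long b) puts A in r0 and skips r1
   so that B occupies the even/odd pair r2/r3.  */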
2700 static int
2701 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2702 tree type, bool named ATTRIBUTE_UNUSED)
2704 int nregs = pcum->nregs;
2706 if (arm_vector_mode_supported_p (mode))
2707 return 0;
2709 if (NUM_ARG_REGS > nregs
2710 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2711 && pcum->can_split)
2712 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2714 return 0;
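/* For example (illustrative): for f (int a, int b, int c, long long d)
   the first three arguments fill r0-r2 and D is split, so this
   function reports (4 - 3) * UNITS_PER_WORD = 4 bytes of D in r3,
   with the rest on the stack.  */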
2717 /* Variable sized types are passed by reference. This is a GCC
2718 extension to the ARM ABI. */
2720 static bool
2721 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2722 enum machine_mode mode ATTRIBUTE_UNUSED,
2723 tree type, bool named ATTRIBUTE_UNUSED)
2725 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2728 /* Encode the current state of the #pragma [no_]long_calls. */
2729 typedef enum
2731 OFF, /* No #pragma [no_]long_calls is in effect. */
2732 LONG, /* #pragma long_calls is in effect. */
2733 SHORT /* #pragma no_long_calls is in effect. */
2734 } arm_pragma_enum;
2736 static arm_pragma_enum arm_pragma_long_calls = OFF;
2738 void
2739 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2741 arm_pragma_long_calls = LONG;
2744 void
2745 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2747 arm_pragma_long_calls = SHORT;
2750 void
2751 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2753 arm_pragma_long_calls = OFF;
2756 /* Table of machine attributes. */
2757 const struct attribute_spec arm_attribute_table[] =
2759 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2760 /* Function calls made to this symbol must be done indirectly, because
2761 it may lie outside of the 26 bit addressing range of a normal function
2762 call. */
2763 { "long_call", 0, 0, false, true, true, NULL },
2764 /* Whereas these functions are always known to reside within the 26 bit
2765 addressing range. */
2766 { "short_call", 0, 0, false, true, true, NULL },
2767 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2768 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2769 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2770 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2771 #ifdef ARM_PE
2772 /* ARM/PE has three new attributes:
2773 interfacearm - ?
2774 dllexport - for exporting a function/variable that will live in a dll
2775 dllimport - for importing a function/variable from a dll
2777 Microsoft allows multiple declspecs in one __declspec, separating
2778 them with spaces. We do NOT support this. Instead, use __declspec
2779 multiple times. */
2781 { "dllimport", 0, 0, true, false, false, NULL },
2782 { "dllexport", 0, 0, true, false, false, NULL },
2783 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2784 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2785 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2786 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2787 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2788 #endif
2789 { NULL, 0, 0, false, false, false, NULL }
2790 };
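/* Example uses of the attributes above (illustrative):

     void far_away (void) __attribute__ ((long_call));
     void handler (void) __attribute__ ((interrupt ("IRQ")));  */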
2792 /* Handle an attribute requiring a FUNCTION_DECL;
2793 arguments as in struct attribute_spec.handler. */
2794 static tree
2795 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2796 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2798 if (TREE_CODE (*node) != FUNCTION_DECL)
2800 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2801 IDENTIFIER_POINTER (name));
2802 *no_add_attrs = true;
2805 return NULL_TREE;
2808 /* Handle an "interrupt" or "isr" attribute;
2809 arguments as in struct attribute_spec.handler. */
2810 static tree
2811 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2812 bool *no_add_attrs)
2814 if (DECL_P (*node))
2816 if (TREE_CODE (*node) != FUNCTION_DECL)
2818 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2819 IDENTIFIER_POINTER (name));
2820 *no_add_attrs = true;
2822 /* FIXME: the argument, if any, is checked for type attributes;
2823 should it be checked for decl ones too? */
2825 else
2827 if (TREE_CODE (*node) == FUNCTION_TYPE
2828 || TREE_CODE (*node) == METHOD_TYPE)
2830 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2832 warning (OPT_Wattributes, "%qs attribute ignored",
2833 IDENTIFIER_POINTER (name));
2834 *no_add_attrs = true;
2837 else if (TREE_CODE (*node) == POINTER_TYPE
2838 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2839 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2840 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2842 *node = build_variant_type_copy (*node);
2843 TREE_TYPE (*node) = build_type_attribute_variant
2844 (TREE_TYPE (*node),
2845 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2846 *no_add_attrs = true;
2848 else
2850 /* Possibly pass this attribute on from the type to a decl. */
2851 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2852 | (int) ATTR_FLAG_FUNCTION_NEXT
2853 | (int) ATTR_FLAG_ARRAY_NEXT))
2855 *no_add_attrs = true;
2856 return tree_cons (name, args, NULL_TREE);
2858 else
2860 warning (OPT_Wattributes, "%qs attribute ignored",
2861 IDENTIFIER_POINTER (name));
2866 return NULL_TREE;
2869 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2870 /* Handle the "notshared" attribute. This attribute is another way of
2871 requesting hidden visibility. ARM's compiler supports
2872 "__declspec(notshared)"; we support the same thing via an
2873 attribute. */
2875 static tree
2876 arm_handle_notshared_attribute (tree *node,
2877 tree name ATTRIBUTE_UNUSED,
2878 tree args ATTRIBUTE_UNUSED,
2879 int flags ATTRIBUTE_UNUSED,
2880 bool *no_add_attrs)
2882 tree decl = TYPE_NAME (*node);
2884 if (decl)
2886 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2887 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2888 *no_add_attrs = false;
2890 return NULL_TREE;
2892 #endif
2894 /* Return 0 if the attributes for two types are incompatible, 1 if they
2895 are compatible, and 2 if they are nearly compatible (which causes a
2896 warning to be generated). */
2897 static int
2898 arm_comp_type_attributes (tree type1, tree type2)
2900 int l1, l2, s1, s2;
2902 /* Check for mismatch of non-default calling convention. */
2903 if (TREE_CODE (type1) != FUNCTION_TYPE)
2904 return 1;
2906 /* Check for mismatched call attributes. */
2907 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2908 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2909 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2910 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2912 /* Only bother to check if an attribute is defined. */
2913 if (l1 | l2 | s1 | s2)
2915 /* If one type has an attribute, the other must have the same attribute. */
2916 if ((l1 != l2) || (s1 != s2))
2917 return 0;
2919 /* Disallow mixed attributes. */
2920 if ((l1 & s2) || (l2 & s1))
2921 return 0;
2924 /* Check for mismatched ISR attribute. */
2925 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2926 if (! l1)
2927 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2928 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2929 if (! l2)
2930 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2931 if (l1 != l2)
2932 return 0;
2934 return 1;
2937 /* Encode long_call or short_call attribute by prefixing
2938 symbol name in DECL with a special character FLAG. */
2939 void
2940 arm_encode_call_attribute (tree decl, int flag)
2942 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2943 int len = strlen (str);
2944 char * newstr;
2946 /* Do not allow weak functions to be treated as short call. */
2947 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2948 return;
2950 newstr = alloca (len + 2);
2951 newstr[0] = flag;
2952 strcpy (newstr + 1, str);
2954 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2955 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2958 /* Assign default attributes to a newly defined type. This is used to
2959 set short_call/long_call attributes for function types of
2960 functions defined inside corresponding #pragma scopes. */
2961 static void
2962 arm_set_default_type_attributes (tree type)
2964 /* Add __attribute__ ((long_call)) to all functions when inside
2965 #pragma long_calls, or __attribute__ ((short_call)) when inside
2966 #pragma no_long_calls. */
2967 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2969 tree type_attr_list, attr_name;
2970 type_attr_list = TYPE_ATTRIBUTES (type);
2972 if (arm_pragma_long_calls == LONG)
2973 attr_name = get_identifier ("long_call");
2974 else if (arm_pragma_long_calls == SHORT)
2975 attr_name = get_identifier ("short_call");
2976 else
2977 return;
2979 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2980 TYPE_ATTRIBUTES (type) = type_attr_list;
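/* For example (illustrative):

     #pragma long_calls
     void far_away (void);	-- gets __attribute__ ((long_call))
     #pragma no_long_calls
     void nearby (void);	-- gets __attribute__ ((short_call))  */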
2984 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2985 defined within the current compilation unit. If this cannot be
2986 determined, then 0 is returned. */
2987 static int
2988 current_file_function_operand (rtx sym_ref)
2990 /* This is a bit of a fib. A function will have a short call flag
2991 applied to its name if it has the short call attribute, or it has
2992 already been defined within the current compilation unit. */
2993 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2994 return 1;
2996 /* The current function is always defined within the current compilation
2997 unit. If it is a weak definition, however, then this may not be the real
2998 definition of the function, and so we have to say no. */
2999 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
3000 && !DECL_WEAK (current_function_decl))
3001 return 1;
3003 /* We cannot make the determination - default to returning 0. */
3004 return 0;
3007 /* Return nonzero if a 32 bit "long_call" should be generated for
3008 this call. We generate a long_call if the function:
3010 a. has an __attribute__ ((long_call))
3011 or b. is within the scope of a #pragma long_calls
3012 or c. the -mlong-calls command line switch has been specified
3013 and either:
3014 1. -ffunction-sections is in effect
3015 or 2. the current function has __attribute__ ((section))
3016 or 3. the target function has __attribute__ ((section))
3018 However we do not generate a long call if the function:
3020 d. has an __attribute__ ((short_call))
3021 or e. is inside the scope of a #pragma no_long_calls
3022 or f. is defined within the current compilation unit.
3024 This function will be called by C fragments contained in the machine
3025 description file. SYM_REF and CALL_COOKIE correspond to the matched
3026 rtl operands. CALL_SYMBOL is used to distinguish between
3027 two different callers of the function. It is set to 1 in the
3028 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3029 and "call_value" patterns. This is because of the difference in the
3030 SYM_REFs passed by these patterns. */
3031 int
3032 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3034 if (!call_symbol)
3036 if (GET_CODE (sym_ref) != MEM)
3037 return 0;
3039 sym_ref = XEXP (sym_ref, 0);
3042 if (GET_CODE (sym_ref) != SYMBOL_REF)
3043 return 0;
3045 if (call_cookie & CALL_SHORT)
3046 return 0;
3048 if (TARGET_LONG_CALLS)
3050 if (flag_function_sections
3051 || DECL_SECTION_NAME (current_function_decl))
3052 /* c.3 is handled by the definition of the
3053 ARM_DECLARE_FUNCTION_SIZE macro. */
3054 return 1;
3057 if (current_file_function_operand (sym_ref))
3058 return 0;
3060 return (call_cookie & CALL_LONG)
3061 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3062 || TARGET_LONG_CALLS;
3065 /* Return nonzero if it is ok to make a tail-call to DECL. */
3066 static bool
3067 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3069 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3071 if (cfun->machine->sibcall_blocked)
3072 return false;
3074 /* Never tailcall something for which we have no decl, or if we
3075 are in Thumb mode. */
3076 if (decl == NULL || TARGET_THUMB)
3077 return false;
3079 /* Get the calling method. */
3080 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3081 call_type = CALL_SHORT;
3082 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3083 call_type = CALL_LONG;
3085 /* Cannot tail-call to long calls, since these are out of range of
3086 a branch instruction. However, if not compiling PIC, we know
3087 we can reach the symbol if it is in this compilation unit. */
3088 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3089 return false;
3091 /* If we are interworking and the function is not declared static
3092 then we can't tail-call it unless we know that it exists in this
3093 compilation unit (since it might be a Thumb routine). */
3094 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3095 return false;
3097 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3098 if (IS_INTERRUPT (arm_current_func_type ()))
3099 return false;
3101 /* Everything else is ok. */
3102 return true;
3106 /* Addressing mode support functions. */
3108 /* Return nonzero if X is a legitimate immediate operand when compiling
3109 for PIC. */
3110 int
3111 legitimate_pic_operand_p (rtx x)
3113 if (CONSTANT_P (x)
3114 && flag_pic
3115 && (GET_CODE (x) == SYMBOL_REF
3116 || (GET_CODE (x) == CONST
3117 && GET_CODE (XEXP (x, 0)) == PLUS
3118 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
3119 return 0;
3121 return 1;
3124 rtx
3125 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3127 if (GET_CODE (orig) == SYMBOL_REF
3128 || GET_CODE (orig) == LABEL_REF)
3130 #ifndef AOF_ASSEMBLER
3131 rtx pic_ref, address;
3132 #endif
3133 rtx insn;
3134 int subregs = 0;
3136 if (reg == 0)
3138 gcc_assert (!no_new_pseudos);
3139 reg = gen_reg_rtx (Pmode);
3141 subregs = 1;
3144 #ifdef AOF_ASSEMBLER
3145 /* The AOF assembler can generate relocations for these directly, and
3146 understands that the PIC register has to be added into the offset. */
3147 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3148 #else
3149 if (subregs)
3150 address = gen_reg_rtx (Pmode);
3151 else
3152 address = reg;
3154 if (TARGET_ARM)
3155 emit_insn (gen_pic_load_addr_arm (address, orig));
3156 else
3157 emit_insn (gen_pic_load_addr_thumb (address, orig));
3159 if ((GET_CODE (orig) == LABEL_REF
3160 || (GET_CODE (orig) == SYMBOL_REF &&
3161 SYMBOL_REF_LOCAL_P (orig)))
3162 && NEED_GOT_RELOC)
3163 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3164 else
3166 pic_ref = gen_const_mem (Pmode,
3167 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3168 address));
3171 insn = emit_move_insn (reg, pic_ref);
3172 #endif
3173 current_function_uses_pic_offset_table = 1;
3174 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3175 by the loop optimizer. */
3176 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3177 REG_NOTES (insn));
3178 return reg;
3180 else if (GET_CODE (orig) == CONST)
3182 rtx base, offset;
3184 if (GET_CODE (XEXP (orig, 0)) == PLUS
3185 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3186 return orig;
3188 if (reg == 0)
3190 gcc_assert (!no_new_pseudos);
3191 reg = gen_reg_rtx (Pmode);
3194 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3196 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3197 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3198 base == reg ? 0 : reg);
3200 if (GET_CODE (offset) == CONST_INT)
3202 /* The base register doesn't really matter; we only want to
3203 test the index for the appropriate mode. */
3204 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3206 gcc_assert (!no_new_pseudos);
3207 offset = force_reg (Pmode, offset);
3210 if (GET_CODE (offset) == CONST_INT)
3211 return plus_constant (base, INTVAL (offset));
3214 if (GET_MODE_SIZE (mode) > 4
3215 && (GET_MODE_CLASS (mode) == MODE_INT
3216 || TARGET_SOFT_FLOAT))
3218 emit_insn (gen_addsi3 (reg, base, offset));
3219 return reg;
3222 return gen_rtx_PLUS (Pmode, base, offset);
3225 return orig;
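/* The usual shape of the code emitted above for a global symbol is,
   schematically (illustrative):

	ldr	rT, .Lsym		@ GOT offset of the symbol
	ldr	rD, [rPIC, rT]		@ fetch the address from the GOT  */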
3229 /* Find a spare low register to use during the prologue of a function. */
3231 static int
3232 thumb_find_work_register (unsigned long pushed_regs_mask)
3234 int reg;
3236 /* Check the argument registers first as these are call-used. The
3237 register allocation order means that sometimes r3 might be used
3238 but earlier argument registers might not, so check them all. */
3239 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3240 if (!regs_ever_live[reg])
3241 return reg;
3243 /* Before going on to check the call-saved registers we can try a couple
3244 more ways of deducing that r3 is available. The first is when we are
3245 pushing anonymous arguments onto the stack and we have less than 4
3246 registers worth of fixed arguments (*). In this case r3 will be part of
3247 the variable argument list and so we can be sure that it will be
3248 pushed right at the start of the function. Hence it will be available
3249 for the rest of the prologue.
3250 (*): i.e. current_function_pretend_args_size is greater than 0. */
3251 if (cfun->machine->uses_anonymous_args
3252 && current_function_pretend_args_size > 0)
3253 return LAST_ARG_REGNUM;
3255 /* The other case is when we have fixed arguments but less than 4 registers
3256 worth. In this case r3 might be used in the body of the function, but
3257 it is not being used to convey an argument into the function. In theory
3258 we could just check current_function_args_size to see how many bytes are
3259 being passed in argument registers, but it seems that it is unreliable.
3260 Sometimes it will have the value 0 when in fact arguments are being
3261 passed. (See testcase execute/20021111-1.c for an example). So we also
3262 check the args_info.nregs field as well. The problem with this field is
3263 that it makes no allowances for arguments that are passed to the
3264 function but which are not used. Hence we could miss an opportunity
3265 when a function has an unused argument in r3. But it is better to be
3266 safe than to be sorry. */
3267 if (! cfun->machine->uses_anonymous_args
3268 && current_function_args_size >= 0
3269 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3270 && cfun->args_info.nregs < 4)
3271 return LAST_ARG_REGNUM;
3273 /* Otherwise look for a call-saved register that is going to be pushed. */
3274 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3275 if (pushed_regs_mask & (1 << reg))
3276 return reg;
3278 /* Something went wrong - thumb_compute_save_reg_mask()
3279 should have arranged for a suitable register to be pushed. */
3280 gcc_unreachable ();
3284 /* Generate code to load the PIC register. In Thumb mode a spare
3285 low register is picked from SAVED_REGS when one is needed. */
3287 void
3288 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3290 #ifndef AOF_ASSEMBLER
3291 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3292 rtx global_offset_table;
3294 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3295 return;
3297 gcc_assert (flag_pic);
3299 l1 = gen_label_rtx ();
3301 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3302 /* On the ARM the PC register contains 'dot + 8' at the time of the
3303 addition, on the Thumb it is 'dot + 4'. */
3304 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3305 if (GOT_PCREL)
3306 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3307 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3308 else
3309 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3311 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3313 if (TARGET_ARM)
3315 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3316 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3318 else
3320 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3322 /* We will have pushed the pic register, so we should always be
3323 able to find a work register. */
3324 pic_tmp = gen_rtx_REG (SImode,
3325 thumb_find_work_register (saved_regs));
3326 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3327 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3329 else
3330 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3331 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3334 /* Need to emit this whether or not we obey regdecls,
3335 since setjmp/longjmp can cause life info to screw up. */
3336 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3337 #endif /* AOF_ASSEMBLER */
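/* In ARM state the sequence emitted above is, schematically
   (illustrative):

	ldr	rPIC, .Lgotoff		@ &GOT - (.LPIC0 + 8)
   .LPIC0:
	add	rPIC, pc, rPIC		@ pc reads as .LPIC0 + 8 here  */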
3341 /* Return nonzero if X is valid as an ARM state addressing register. */
3342 static int
3343 arm_address_register_rtx_p (rtx x, int strict_p)
3345 int regno;
3347 if (GET_CODE (x) != REG)
3348 return 0;
3350 regno = REGNO (x);
3352 if (strict_p)
3353 return ARM_REGNO_OK_FOR_BASE_P (regno);
3355 return (regno <= LAST_ARM_REGNUM
3356 || regno >= FIRST_PSEUDO_REGISTER
3357 || regno == FRAME_POINTER_REGNUM
3358 || regno == ARG_POINTER_REGNUM);
3361 /* Return nonzero if X is a valid ARM state address operand. */
3362 int
3363 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3364 int strict_p)
3366 bool use_ldrd;
3367 enum rtx_code code = GET_CODE (x);
3369 if (arm_address_register_rtx_p (x, strict_p))
3370 return 1;
3372 use_ldrd = (TARGET_LDRD
3373 && (mode == DImode
3374 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3376 if (code == POST_INC || code == PRE_DEC
3377 || ((code == PRE_INC || code == POST_DEC)
3378 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3379 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3381 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3382 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3383 && GET_CODE (XEXP (x, 1)) == PLUS
3384 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3386 rtx addend = XEXP (XEXP (x, 1), 1);
3388 /* Don't allow ldrd post increment by register because it's hard
3389 to fix up invalid register choices. */
3390 if (use_ldrd
3391 && GET_CODE (x) == POST_MODIFY
3392 && GET_CODE (addend) == REG)
3393 return 0;
3395 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3396 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3399 /* After reload constants split into minipools will have addresses
3400 from a LABEL_REF. */
3401 else if (reload_completed
3402 && (code == LABEL_REF
3403 || (code == CONST
3404 && GET_CODE (XEXP (x, 0)) == PLUS
3405 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3406 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3407 return 1;
3409 else if (mode == TImode)
3410 return 0;
3412 else if (code == PLUS)
3414 rtx xop0 = XEXP (x, 0);
3415 rtx xop1 = XEXP (x, 1);
3417 return ((arm_address_register_rtx_p (xop0, strict_p)
3418 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3419 || (arm_address_register_rtx_p (xop1, strict_p)
3420 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3423 #if 0
3424 /* Reload currently can't handle MINUS, so disable this for now */
3425 else if (GET_CODE (x) == MINUS)
3427 rtx xop0 = XEXP (x, 0);
3428 rtx xop1 = XEXP (x, 1);
3430 return (arm_address_register_rtx_p (xop0, strict_p)
3431 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3433 #endif
3435 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3436 && code == SYMBOL_REF
3437 && CONSTANT_POOL_ADDRESS_P (x)
3438 && ! (flag_pic
3439 && symbol_mentioned_p (get_pool_constant (x))))
3440 return 1;
3442 return 0;
3445 /* Return nonzero if INDEX is valid for an address index operand in
3446 ARM state. */
3447 static int
3448 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3449 int strict_p)
3451 HOST_WIDE_INT range;
3452 enum rtx_code code = GET_CODE (index);
3454 /* Standard coprocessor addressing modes. */
3455 if (TARGET_HARD_FLOAT
3456 && (TARGET_FPA || TARGET_MAVERICK)
3457 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3458 || (TARGET_MAVERICK && mode == DImode)))
3459 return (code == CONST_INT && INTVAL (index) < 1024
3460 && INTVAL (index) > -1024
3461 && (INTVAL (index) & 3) == 0);
3463 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3464 return (code == CONST_INT
3465 && INTVAL (index) < 1024
3466 && INTVAL (index) > -1024
3467 && (INTVAL (index) & 3) == 0);
3469 if (arm_address_register_rtx_p (index, strict_p)
3470 && (GET_MODE_SIZE (mode) <= 4))
3471 return 1;
3473 if (mode == DImode || mode == DFmode)
3475 if (code == CONST_INT)
3477 HOST_WIDE_INT val = INTVAL (index);
3479 if (TARGET_LDRD)
3480 return val > -256 && val < 256;
3481 else
3482 return val > -4096 && val < 4092;
3485 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3488 if (GET_MODE_SIZE (mode) <= 4
3489 && ! (arm_arch4
3490 && (mode == HImode
3491 || (mode == QImode && outer == SIGN_EXTEND))))
3493 if (code == MULT)
3495 rtx xiop0 = XEXP (index, 0);
3496 rtx xiop1 = XEXP (index, 1);
3498 return ((arm_address_register_rtx_p (xiop0, strict_p)
3499 && power_of_two_operand (xiop1, SImode))
3500 || (arm_address_register_rtx_p (xiop1, strict_p)
3501 && power_of_two_operand (xiop0, SImode)));
3503 else if (code == LSHIFTRT || code == ASHIFTRT
3504 || code == ASHIFT || code == ROTATERT)
3506 rtx op = XEXP (index, 1);
3508 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3509 && GET_CODE (op) == CONST_INT
3510 && INTVAL (op) > 0
3511 && INTVAL (op) <= 31);
3515 /* For ARM v4 we may be doing a sign-extend operation during the
3516 load. */
3517 if (arm_arch4)
3519 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3520 range = 256;
3521 else
3522 range = 4096;
3524 else
3525 range = (mode == HImode) ? 4095 : 4096;
3527 return (code == CONST_INT
3528 && INTVAL (index) < range
3529 && INTVAL (index) > -range);
3532 /* Return nonzero if X is valid as a Thumb state base register. */
3533 static int
3534 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3536 int regno;
3538 if (GET_CODE (x) != REG)
3539 return 0;
3541 regno = REGNO (x);
3543 if (strict_p)
3544 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3546 return (regno <= LAST_LO_REGNUM
3547 || regno > LAST_VIRTUAL_REGISTER
3548 || regno == FRAME_POINTER_REGNUM
3549 || (GET_MODE_SIZE (mode) >= 4
3550 && (regno == STACK_POINTER_REGNUM
3551 || regno >= FIRST_PSEUDO_REGISTER
3552 || x == hard_frame_pointer_rtx
3553 || x == arg_pointer_rtx)));
3556 /* Return nonzero if x is a legitimate index register. This is the case
3557 for any base register that can access a QImode object. */
3558 inline static int
3559 thumb_index_register_rtx_p (rtx x, int strict_p)
3561 return thumb_base_register_rtx_p (x, QImode, strict_p);
3564 /* Return nonzero if x is a legitimate Thumb-state address.
3566 The AP may be eliminated to either the SP or the FP, so we use the
3567 least common denominator, e.g. SImode, and offsets from 0 to 64.
3569 ??? Verify whether the above is the right approach.
3571 ??? Also, the FP may be eliminated to the SP, so perhaps that
3572 needs special handling also.
3574 ??? Look at how the mips16 port solves this problem. It probably uses
3575 better ways to solve some of these problems.
3577 Although it is not incorrect, we don't accept QImode and HImode
3578 addresses based on the frame pointer or arg pointer until the
3579 reload pass starts. This is so that eliminating such addresses
3580 into stack based ones won't produce impossible code. */
3581 int
3582 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3584 /* ??? Not clear if this is right. Experiment. */
3585 if (GET_MODE_SIZE (mode) < 4
3586 && !(reload_in_progress || reload_completed)
3587 && (reg_mentioned_p (frame_pointer_rtx, x)
3588 || reg_mentioned_p (arg_pointer_rtx, x)
3589 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3590 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3591 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3592 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3593 return 0;
3595 /* Accept any base register. SP only in SImode or larger. */
3596 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3597 return 1;
3599 /* This is PC relative data before arm_reorg runs. */
3600 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3601 && GET_CODE (x) == SYMBOL_REF
3602 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3603 return 1;
3605 /* This is PC relative data after arm_reorg runs. */
3606 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3607 && (GET_CODE (x) == LABEL_REF
3608 || (GET_CODE (x) == CONST
3609 && GET_CODE (XEXP (x, 0)) == PLUS
3610 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3611 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3612 return 1;
3614 /* Post-inc indexing only supported for SImode and larger. */
3615 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3616 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3617 return 1;
3619 else if (GET_CODE (x) == PLUS)
3621 /* REG+REG address can be any two index registers. */
3622 /* We disallow FRAME+REG addressing since we know that FRAME
3623 will be replaced with STACK, and SP relative addressing only
3624 permits SP+OFFSET. */
3625 if (GET_MODE_SIZE (mode) <= 4
3626 && XEXP (x, 0) != frame_pointer_rtx
3627 && XEXP (x, 1) != frame_pointer_rtx
3628 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3629 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3630 return 1;
3632 /* REG+const has 5-7 bit offset for non-SP registers. */
3633 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3634 || XEXP (x, 0) == arg_pointer_rtx)
3635 && GET_CODE (XEXP (x, 1)) == CONST_INT
3636 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3637 return 1;
3639 /* REG+const has 10 bit offset for SP, but only SImode and
3640 larger is supported. */
3641 /* ??? Should probably check for DI/DFmode overflow here
3642 just like GO_IF_LEGITIMATE_OFFSET does. */
3643 else if (GET_CODE (XEXP (x, 0)) == REG
3644 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3645 && GET_MODE_SIZE (mode) >= 4
3646 && GET_CODE (XEXP (x, 1)) == CONST_INT
3647 && INTVAL (XEXP (x, 1)) >= 0
3648 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3649 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3650 return 1;
3652 else if (GET_CODE (XEXP (x, 0)) == REG
3653 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3654 && GET_MODE_SIZE (mode) >= 4
3655 && GET_CODE (XEXP (x, 1)) == CONST_INT
3656 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3657 return 1;
3660 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3661 && GET_MODE_SIZE (mode) == 4
3662 && GET_CODE (x) == SYMBOL_REF
3663 && CONSTANT_POOL_ADDRESS_P (x)
3664 && !(flag_pic
3665 && symbol_mentioned_p (get_pool_constant (x))))
3666 return 1;
3668 return 0;
3671 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3672 instruction of mode MODE. */
3673 int
3674 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3676 switch (GET_MODE_SIZE (mode))
3678 case 1:
3679 return val >= 0 && val < 32;
3681 case 2:
3682 return val >= 0 && val < 64 && (val & 1) == 0;
3684 default:
3685 return (val >= 0
3686 && (val + GET_MODE_SIZE (mode)) <= 128
3687 && (val & 3) == 0);
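/* So, for example, [rN, #31] is a legitimate QImode address, while
   HImode allows only even offsets up to #62 and SImode only
   word-aligned offsets up to #124.  */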
3691 /* Try machine-dependent ways of modifying an illegitimate address
3692 to be legitimate. If we find one, return the new, valid address. */
3693 rtx
3694 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3696 if (GET_CODE (x) == PLUS)
3698 rtx xop0 = XEXP (x, 0);
3699 rtx xop1 = XEXP (x, 1);
3701 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3702 xop0 = force_reg (SImode, xop0);
3704 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3705 xop1 = force_reg (SImode, xop1);
3707 if (ARM_BASE_REGISTER_RTX_P (xop0)
3708 && GET_CODE (xop1) == CONST_INT)
3710 HOST_WIDE_INT n, low_n;
3711 rtx base_reg, val;
3712 n = INTVAL (xop1);
3714 /* VFP addressing modes actually allow greater offsets, but for
3715 now we just stick with the lowest common denominator. */
3716 if (mode == DImode
3717 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3719 low_n = n & 0x0f;
3720 n &= ~0x0f;
3721 if (low_n > 4)
3723 n += 16;
3724 low_n -= 16;
3727 else
3729 low_n = ((mode) == TImode ? 0
3730 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3731 n -= low_n;
3734 base_reg = gen_reg_rtx (SImode);
3735 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3736 GEN_INT (n)), NULL_RTX);
3737 emit_move_insn (base_reg, val);
3738 x = (low_n == 0 ? base_reg
3739 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
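/* Worked example: for a DImode access at xop0 + 0x126, low_n is
   initially 6 and n is 0x120; since low_n > 4 the pair is rebiased to
   n = 0x130, low_n = -10, and we emit base_reg = xop0 + 0x130
   followed by an access at base_reg - 10.  The rebiasing keeps the
   residual offset within [-11, 4].  */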
3741 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3742 x = gen_rtx_PLUS (SImode, xop0, xop1);
3745 /* XXX We don't allow MINUS any more -- see comment in
3746 arm_legitimate_address_p (). */
3747 else if (GET_CODE (x) == MINUS)
3749 rtx xop0 = XEXP (x, 0);
3750 rtx xop1 = XEXP (x, 1);
3752 if (CONSTANT_P (xop0))
3753 xop0 = force_reg (SImode, xop0);
3755 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3756 xop1 = force_reg (SImode, xop1);
3758 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3759 x = gen_rtx_MINUS (SImode, xop0, xop1);
3762 if (flag_pic)
3764 /* We need to find and carefully transform any SYMBOL and LABEL
3765 references; so go back to the original address expression. */
3766 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3768 if (new_x != orig_x)
3769 x = new_x;
3772 return x;
3776 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3777 to be legitimate. If we find one, return the new, valid address. */
3778 rtx
3779 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3781 if (GET_CODE (x) == PLUS
3782 && GET_CODE (XEXP (x, 1)) == CONST_INT
3783 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3784 || INTVAL (XEXP (x, 1)) < 0))
3786 rtx xop0 = XEXP (x, 0);
3787 rtx xop1 = XEXP (x, 1);
3788 HOST_WIDE_INT offset = INTVAL (xop1);
3790 /* Try and fold the offset into a biasing of the base register and
3791 then offsetting that. Don't do this when optimizing for space
3792 since it can cause too many CSEs. */
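/* Worked example: SImode, offset 300, under -Os: since 300 >= 256,
   delta = 300 - (256 - 4) = 48; the base is biased by 252 (a single
   add) and the final address is (base + 252) + 48, where 48 is a
   legal word-aligned offset.  */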
3793 if (optimize_size && offset >= 0
3794 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3796 HOST_WIDE_INT delta;
3798 if (offset >= 256)
3799 delta = offset - (256 - GET_MODE_SIZE (mode));
3800 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3801 delta = 31 * GET_MODE_SIZE (mode);
3802 else
3803 delta = offset & (~31 * GET_MODE_SIZE (mode));
3805 xop0 = force_operand (plus_constant (xop0, offset - delta),
3806 NULL_RTX);
3807 x = plus_constant (xop0, delta);
3809 else if (offset < 0 && offset > -256)
3810 /* Small negative offsets are best done with a subtract before the
3811 	 dereference; forcing these into a register normally takes two
3812 instructions. */
3813 x = force_operand (x, NULL_RTX);
3814 else
3816 /* For the remaining cases, force the constant into a register. */
3817 xop1 = force_reg (SImode, xop1);
3818 x = gen_rtx_PLUS (SImode, xop0, xop1);
3821 else if (GET_CODE (x) == PLUS
3822 && s_register_operand (XEXP (x, 1), SImode)
3823 && !s_register_operand (XEXP (x, 0), SImode))
3825 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3827 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3830 if (flag_pic)
3832 /* We need to find and carefully transform any SYMBOL and LABEL
3833 references; so go back to the original address expression. */
3834 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3836 if (new_x != orig_x)
3837 x = new_x;
3840 return x;
3843 rtx
3844 thumb_legitimize_reload_address (rtx *x_p,
3845 enum machine_mode mode,
3846 int opnum, int type,
3847 int ind_levels ATTRIBUTE_UNUSED)
3849 rtx x = *x_p;
3851 if (GET_CODE (x) == PLUS
3852 && GET_MODE_SIZE (mode) < 4
3853 && REG_P (XEXP (x, 0))
3854 && XEXP (x, 0) == stack_pointer_rtx
3855 && GET_CODE (XEXP (x, 1)) == CONST_INT
3856 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3858 rtx orig_x = x;
3860 x = copy_rtx (x);
3861 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
3862 Pmode, VOIDmode, 0, 0, opnum, type);
3863 return x;
3866 /* If both registers are hi-regs, then it's better to reload the
3867 entire expression rather than each register individually. That
3868 only requires one reload register rather than two. */
3869 if (GET_CODE (x) == PLUS
3870 && REG_P (XEXP (x, 0))
3871 && REG_P (XEXP (x, 1))
3872 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
3873 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
3875 rtx orig_x = x;
3877 x = copy_rtx (x);
3878 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
3879 Pmode, VOIDmode, 0, 0, opnum, type);
3880 return x;
3883 return NULL;
3888 #define REG_OR_SUBREG_REG(X) \
3889 (GET_CODE (X) == REG \
3890 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3892 #define REG_OR_SUBREG_RTX(X) \
3893 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3895 #ifndef COSTS_N_INSNS
3896 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3897 #endif
3898 static inline int
3899 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3901 enum machine_mode mode = GET_MODE (x);
3903 switch (code)
3905 case ASHIFT:
3906 case ASHIFTRT:
3907 case LSHIFTRT:
3908 case ROTATERT:
3909 case PLUS:
3910 case MINUS:
3911 case COMPARE:
3912 case NEG:
3913 case NOT:
3914 return COSTS_N_INSNS (1);
3916 case MULT:
3917 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3919 int cycles = 0;
3920 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3922 while (i)
3924 i >>= 2;
3925 cycles++;
3927 return COSTS_N_INSNS (2) + cycles;
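/* For example, a constant multiplier of 100 (seven significant bits)
   takes four iterations of the two-bit shift loop above, giving
   COSTS_N_INSNS (2) + 4.  */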
3929 return COSTS_N_INSNS (1) + 16;
3931 case SET:
3932 return (COSTS_N_INSNS (1)
3933 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3934 	    + (GET_CODE (SET_DEST (x)) == MEM)));
3936 case CONST_INT:
3937 if (outer == SET)
3939 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3940 return 0;
3941 if (thumb_shiftable_const (INTVAL (x)))
3942 return COSTS_N_INSNS (2);
3943 return COSTS_N_INSNS (3);
3945 else if ((outer == PLUS || outer == COMPARE)
3946 && INTVAL (x) < 256 && INTVAL (x) > -256)
3947 return 0;
3948 else if (outer == AND
3949 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3950 return COSTS_N_INSNS (1);
3951 else if (outer == ASHIFT || outer == ASHIFTRT
3952 || outer == LSHIFTRT)
3953 return 0;
3954 return COSTS_N_INSNS (2);
3956 case CONST:
3957 case CONST_DOUBLE:
3958 case LABEL_REF:
3959 case SYMBOL_REF:
3960 return COSTS_N_INSNS (3);
3962 case UDIV:
3963 case UMOD:
3964 case DIV:
3965 case MOD:
3966 return 100;
3968 case TRUNCATE:
3969 return 99;
3971 case AND:
3972 case XOR:
3973 case IOR:
3974 /* XXX guess. */
3975 return 8;
3977 case MEM:
3978 /* XXX another guess. */
3979 /* Memory costs quite a lot for the first word, but subsequent words
3980 load at the equivalent of a single insn each. */
3981 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3982 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3983 ? 4 : 0));
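/* Assuming UNITS_PER_WORD == 4, an SImode load costs 10, a DImode
   load costs 14, and a load from the constant pool costs a
   further 4.  */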
3985 case IF_THEN_ELSE:
3986 /* XXX a guess. */
3987 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3988 return 14;
3989 return 2;
3991 case ZERO_EXTEND:
3992 /* XXX still guessing. */
3993 switch (GET_MODE (XEXP (x, 0)))
3995 case QImode:
3996 return (1 + (mode == DImode ? 4 : 0)
3997 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3999 case HImode:
4000 return (4 + (mode == DImode ? 4 : 0)
4001 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4003 case SImode:
4004 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4006 default:
4007 return 99;
4010 default:
4011 return 99;
4016 /* Worker routine for arm_rtx_costs. */
4017 static inline int
4018 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4020 enum machine_mode mode = GET_MODE (x);
4021 enum rtx_code subcode;
4022 int extra_cost;
4024 switch (code)
4026 case MEM:
4027 /* Memory costs quite a lot for the first word, but subsequent words
4028 load at the equivalent of a single insn each. */
4029 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4030 + (GET_CODE (x) == SYMBOL_REF
4031 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4033 case DIV:
4034 case MOD:
4035 case UDIV:
4036 case UMOD:
4037 return optimize_size ? COSTS_N_INSNS (2) : 100;
4039 case ROTATE:
4040 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4041 return 4;
4042 /* Fall through */
4043 case ROTATERT:
4044 if (mode != SImode)
4045 return 8;
4046 /* Fall through */
4047 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4048 if (mode == DImode)
4049 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4050 + ((GET_CODE (XEXP (x, 0)) == REG
4051 || (GET_CODE (XEXP (x, 0)) == SUBREG
4052 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4053 ? 0 : 8));
4054 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4055 || (GET_CODE (XEXP (x, 0)) == SUBREG
4056 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4057 ? 0 : 4)
4058 + ((GET_CODE (XEXP (x, 1)) == REG
4059 || (GET_CODE (XEXP (x, 1)) == SUBREG
4060 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4061 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4062 ? 0 : 4));
4064 case MINUS:
4065 if (mode == DImode)
4066 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4067 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4068 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4069 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4070 ? 0 : 8));
4072 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4073 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4074 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4075 && arm_const_double_rtx (XEXP (x, 1))))
4076 ? 0 : 8)
4077 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4078 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4079 && arm_const_double_rtx (XEXP (x, 0))))
4080 ? 0 : 8));
4082 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4083 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4084 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4085 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4086 || subcode == ASHIFTRT || subcode == LSHIFTRT
4087 || subcode == ROTATE || subcode == ROTATERT
4088 || (subcode == MULT
4089 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4090 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4091 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4092 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4093 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4094 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4095 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4096 return 1;
4097 /* Fall through */
4099 case PLUS:
4100 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4101 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4102 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4103 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4104 && arm_const_double_rtx (XEXP (x, 1))))
4105 ? 0 : 8));
4107 /* Fall through */
4108 case AND: case XOR: case IOR:
4109 extra_cost = 0;
4111 /* Normally the frame registers will be spilt into reg+const during
4112 reload, so it is a bad idea to combine them with other instructions,
4113 since then they might not be moved outside of loops. As a compromise
4114 we allow integration with ops that have a constant as their second
4115 operand. */
4116 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4117 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4118 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4119 || (REG_OR_SUBREG_REG (XEXP (x, 0))
4120 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
4121 extra_cost = 4;
4123 if (mode == DImode)
4124 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4125 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4126 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4127 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4128 ? 0 : 8));
4130 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4131 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4132 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4133 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4134 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4135 ? 0 : 4));
4137 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4138 return (1 + extra_cost
4139 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4140 || subcode == LSHIFTRT || subcode == ASHIFTRT
4141 || subcode == ROTATE || subcode == ROTATERT
4142 || (subcode == MULT
4143 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4144 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4145 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4146 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4147 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4148 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4149 ? 0 : 4));
4151 return 8;
4153 case MULT:
4154 /* This should have been handled by the CPU specific routines. */
4155 gcc_unreachable ();
4157 case TRUNCATE:
4158 if (arm_arch3m && mode == SImode
4159 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4160 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4161 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4162 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4163 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4164 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4165 return 8;
4166 return 99;
4168 case NEG:
4169 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4170 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4171 /* Fall through */
4172 case NOT:
4173 if (mode == DImode)
4174 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4176 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4178 case IF_THEN_ELSE:
4179 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4180 return 14;
4181 return 2;
4183 case COMPARE:
4184 return 1;
4186 case ABS:
4187 return 4 + (mode == DImode ? 4 : 0);
4189 case SIGN_EXTEND:
4190 if (GET_MODE (XEXP (x, 0)) == QImode)
4191 return (4 + (mode == DImode ? 4 : 0)
4192 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4193 /* Fall through */
4194 case ZERO_EXTEND:
4195 switch (GET_MODE (XEXP (x, 0)))
4197 case QImode:
4198 return (1 + (mode == DImode ? 4 : 0)
4199 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4201 case HImode:
4202 return (4 + (mode == DImode ? 4 : 0)
4203 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4205 case SImode:
4206 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4208 case V8QImode:
4209 case V4HImode:
4210 case V2SImode:
4211 case V4QImode:
4212 case V2HImode:
4213 return 1;
4215 default:
4216 gcc_unreachable ();
4218 gcc_unreachable ();
4220 case CONST_INT:
4221 if (const_ok_for_arm (INTVAL (x)))
4222 return outer == SET ? 2 : -1;
4223 else if (outer == AND
4224 && const_ok_for_arm (~INTVAL (x)))
4225 return -1;
4226 else if ((outer == COMPARE
4227 || outer == PLUS || outer == MINUS)
4228 && const_ok_for_arm (-INTVAL (x)))
4229 return -1;
4230 else
4231 return 5;
4233 case CONST:
4234 case LABEL_REF:
4235 case SYMBOL_REF:
4236 return 6;
4238 case CONST_DOUBLE:
4239 if (arm_const_double_rtx (x))
4240 return outer == SET ? 2 : -1;
4241 else if ((outer == COMPARE || outer == PLUS)
4242 && neg_const_double_rtx_ok_for_fpa (x))
4243 return -1;
4244 return 7;
4246 default:
4247 return 99;
4251 /* RTX costs when optimizing for size. */
4252 static bool
4253 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4255 enum machine_mode mode = GET_MODE (x);
4257 if (TARGET_THUMB)
4259 /* XXX TBD. For now, use the standard costs. */
4260 *total = thumb_rtx_costs (x, code, outer_code);
4261 return true;
4264 switch (code)
4266 case MEM:
4267 /* A memory access costs 1 insn if the mode is small, or the address is
4268 	 a single register; otherwise it costs one insn per word. */
4269 if (REG_P (XEXP (x, 0)))
4270 *total = COSTS_N_INSNS (1);
4271 else
4272 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4273 return true;
4275 case DIV:
4276 case MOD:
4277 case UDIV:
4278 case UMOD:
4279 /* Needs a libcall, so it costs about this. */
4280 *total = COSTS_N_INSNS (2);
4281 return false;
4283 case ROTATE:
4284 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4286 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4287 return true;
4289 /* Fall through */
4290 case ROTATERT:
4291 case ASHIFT:
4292 case LSHIFTRT:
4293 case ASHIFTRT:
4294 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4296 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4297 return true;
4299 else if (mode == SImode)
4301 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4302 /* Slightly disparage register shifts, but not by much. */
4303 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4304 *total += 1 + rtx_cost (XEXP (x, 1), code);
4305 return true;
4308 /* Needs a libcall. */
4309 *total = COSTS_N_INSNS (2);
4310 return false;
4312 case MINUS:
4313 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4315 *total = COSTS_N_INSNS (1);
4316 return false;
4319 if (mode == SImode)
4321 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4322 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4324 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4325 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4326 || subcode1 == ROTATE || subcode1 == ROTATERT
4327 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4328 || subcode1 == ASHIFTRT)
4330 /* It's just the cost of the two operands. */
4331 *total = 0;
4332 return false;
4335 *total = COSTS_N_INSNS (1);
4336 return false;
4339 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4340 return false;
4342 case PLUS:
4343 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4345 *total = COSTS_N_INSNS (1);
4346 return false;
4349 /* Fall through */
4350 case AND: case XOR: case IOR:
4351 if (mode == SImode)
4353 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4355 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4356 || subcode == LSHIFTRT || subcode == ASHIFTRT
4357 || (code == AND && subcode == NOT))
4359 /* It's just the cost of the two operands. */
4360 *total = 0;
4361 return false;
4365 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4366 return false;
4368 case MULT:
4369 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4370 return false;
4372 case NEG:
4373 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4374 	{ *total = COSTS_N_INSNS (1); return false; }
4375 /* Fall through */
4376 case NOT:
4377 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4379 return false;
4381 case IF_THEN_ELSE:
4382 *total = 0;
4383 return false;
4385 case COMPARE:
4386 if (cc_register (XEXP (x, 0), VOIDmode))
4387 	*total = 0;
4388 else
4389 *total = COSTS_N_INSNS (1);
4390 return false;
4392 case ABS:
4393 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4394 *total = COSTS_N_INSNS (1);
4395 else
4396 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4397 return false;
4399 case SIGN_EXTEND:
4400 *total = 0;
4401 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4403 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4404 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4406 if (mode == DImode)
4407 *total += COSTS_N_INSNS (1);
4408 return false;
4410 case ZERO_EXTEND:
4411 *total = 0;
4412 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4414 switch (GET_MODE (XEXP (x, 0)))
4416 case QImode:
4417 *total += COSTS_N_INSNS (1);
4418 break;
4420 case HImode:
4421 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4423 case SImode:
4424 break;
4426 default:
4427 *total += COSTS_N_INSNS (2);
4431 if (mode == DImode)
4432 *total += COSTS_N_INSNS (1);
4434 return false;
4436 case CONST_INT:
4437 if (const_ok_for_arm (INTVAL (x)))
4438 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4439 else if (const_ok_for_arm (~INTVAL (x)))
4440 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4441 else if (const_ok_for_arm (-INTVAL (x)))
4443 if (outer_code == COMPARE || outer_code == PLUS
4444 || outer_code == MINUS)
4445 *total = 0;
4446 else
4447 *total = COSTS_N_INSNS (1);
4449 else
4450 *total = COSTS_N_INSNS (2);
4451 return true;
4453 case CONST:
4454 case LABEL_REF:
4455 case SYMBOL_REF:
4456 *total = COSTS_N_INSNS (2);
4457 return true;
4459 case CONST_DOUBLE:
4460 *total = COSTS_N_INSNS (4);
4461 return true;
4463 default:
4464 if (mode != VOIDmode)
4465 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4466 else
4467 	*total = COSTS_N_INSNS (4); /* Who knows? */
4468 return false;
4472 /* RTX costs for cores with a slow MUL implementation. */
4474 static bool
4475 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4477 enum machine_mode mode = GET_MODE (x);
4479 if (TARGET_THUMB)
4481 *total = thumb_rtx_costs (x, code, outer_code);
4482 return true;
4485 switch (code)
4487 case MULT:
4488 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4489 || mode == DImode)
4491 *total = 30;
4492 return true;
4495 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4497 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4498 & (unsigned HOST_WIDE_INT) 0xffffffff);
4499 int cost, const_ok = const_ok_for_arm (i);
4500 int j, booth_unit_size;
4502 /* Tune as appropriate. */
4503 cost = const_ok ? 4 : 8;
4504 booth_unit_size = 2;
4505 for (j = 0; i && j < 32; j += booth_unit_size)
4507 i >>= booth_unit_size;
4508 cost += 2;
4511 *total = cost;
4512 return true;
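/* Worked example: a multiplier of 0x55 is const_ok (base cost 4) and
   has eight significant bits, so the two-bit Booth loop above runs
   four times: *total = 4 + 4 * 2 = 12.  */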
4515 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4516 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4517 return true;
4519 default:
4520 *total = arm_rtx_costs_1 (x, code, outer_code);
4521 return true;
4526 /* RTX cost for cores with a fast multiply unit (M variants). */
4528 static bool
4529 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4531 enum machine_mode mode = GET_MODE (x);
4533 if (TARGET_THUMB)
4535 *total = thumb_rtx_costs (x, code, outer_code);
4536 return true;
4539 switch (code)
4541 case MULT:
4542 /* There is no point basing this on the tuning, since it is always the
4543 fast variant if it exists at all. */
4544 if (mode == DImode
4545 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4546 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4547 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4549 *total = 8;
4550 return true;
4554 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4555 || mode == DImode)
4557 *total = 30;
4558 return true;
4561 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4563 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4564 & (unsigned HOST_WIDE_INT) 0xffffffff);
4565 int cost, const_ok = const_ok_for_arm (i);
4566 int j, booth_unit_size;
4568 /* Tune as appropriate. */
4569 cost = const_ok ? 4 : 8;
4570 booth_unit_size = 8;
4571 for (j = 0; i && j < 32; j += booth_unit_size)
4573 i >>= booth_unit_size;
4574 cost += 2;
4577 *total = cost;
4578 return true;
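/* Worked example: with the eight-bit Booth unit a multiplier of 255
   (const_ok, base cost 4) needs one iteration, giving *total = 6; a
   full 32-bit constant needs at most four iterations.  */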
4581 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4582 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4583 return true;
4585 default:
4586 *total = arm_rtx_costs_1 (x, code, outer_code);
4587 return true;
4592 /* RTX cost for XScale CPUs. */
4594 static bool
4595 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4597 enum machine_mode mode = GET_MODE (x);
4599 if (TARGET_THUMB)
4601 *total = thumb_rtx_costs (x, code, outer_code);
4602 return true;
4605 switch (code)
4607 case MULT:
4608 /* There is no point basing this on the tuning, since it is always the
4609 fast variant if it exists at all. */
4610 if (mode == DImode
4611 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4612 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4613 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4615 *total = 8;
4616 return true;
4620 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4621 || mode == DImode)
4623 *total = 30;
4624 return true;
4627 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4629 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4630 & (unsigned HOST_WIDE_INT) 0xffffffff);
4631 int cost, const_ok = const_ok_for_arm (i);
4632 unsigned HOST_WIDE_INT masked_const;
4634 /* The cost will be related to two insns.
4635 First a load of the constant (MOV or LDR), then a multiply. */
4636 cost = 2;
4637 if (! const_ok)
4638 cost += 1; /* LDR is probably more expensive because
4639 of longer result latency. */
4640 masked_const = i & 0xffff8000;
4641 if (masked_const != 0 && masked_const != 0xffff8000)
4643 masked_const = i & 0xf8000000;
4644 if (masked_const == 0 || masked_const == 0xf8000000)
4645 cost += 1;
4646 else
4647 cost += 2;
4649 *total = cost;
4650 return true;
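/* Worked example: i = 0x12345678 is not const_ok (cost 3); its bits
   from bit 15 up are neither all zero nor all one, and neither are
   the top five bits, so the extra penalty of 2 applies: *total = 5.  */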
4653 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4654 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4655 return true;
4657 case COMPARE:
4658 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4659 will stall until the multiplication is complete. */
4660 if (GET_CODE (XEXP (x, 0)) == MULT)
4661 *total = 4 + rtx_cost (XEXP (x, 0), code);
4662 else
4663 *total = arm_rtx_costs_1 (x, code, outer_code);
4664 return true;
4666 default:
4667 *total = arm_rtx_costs_1 (x, code, outer_code);
4668 return true;
4673 /* RTX costs for 9e (and later) cores. */
4675 static bool
4676 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4678 enum machine_mode mode = GET_MODE (x);
4679 int nonreg_cost;
4680 int cost;
4682 if (TARGET_THUMB)
4684 switch (code)
4686 case MULT:
4687 *total = COSTS_N_INSNS (3);
4688 return true;
4690 default:
4691 *total = thumb_rtx_costs (x, code, outer_code);
4692 return true;
4696 switch (code)
4698 case MULT:
4699 /* There is no point basing this on the tuning, since it is always the
4700 fast variant if it exists at all. */
4701 if (mode == DImode
4702 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4703 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4704 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4706 *total = 3;
4707 return true;
4711 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4713 *total = 30;
4714 return true;
4716 if (mode == DImode)
4718 cost = 7;
4719 nonreg_cost = 8;
4721 else
4723 cost = 2;
4724 nonreg_cost = 4;
4728 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4729 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4730 return true;
4732 default:
4733 *total = arm_rtx_costs_1 (x, code, outer_code);
4734 return true;
4737 /* All address computations that can be done are free, but rtx cost returns
4738 the same for practically all of them. So we weight the different types
4739 of address here in the order (most pref first):
4740 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4741 static inline int
4742 arm_arm_address_cost (rtx x)
4744 enum rtx_code c = GET_CODE (x);
4746 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4747 return 0;
4748 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4749 return 10;
4751 if (c == PLUS || c == MINUS)
4753 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4754 return 2;
4756 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4757 return 3;
4759 return 4;
4762 return 6;
4765 static inline int
4766 arm_thumb_address_cost (rtx x)
4768 enum rtx_code c = GET_CODE (x);
4770 if (c == REG)
4771 return 1;
4772 if (c == PLUS
4773 && GET_CODE (XEXP (x, 0)) == REG
4774 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4775 return 1;
4777 return 2;
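/* For example, on ARM a post-increment address costs 0, [reg, reg]
   or [reg, #imm] costs 4, a shifted index such as [reg, reg, lsl #2]
   costs 3, and a bare label or symbol costs 10; on Thumb a register
   or register+constant address costs 1 and anything else costs 2.  */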
4780 static int
4781 arm_address_cost (rtx x)
4783 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4786 static int
4787 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4789 rtx i_pat, d_pat;
4791 /* Some true dependencies can have a higher cost depending
4792 on precisely how certain input operands are used. */
4793 if (arm_tune_xscale
4794 && REG_NOTE_KIND (link) == 0
4795 && recog_memoized (insn) >= 0
4796 && recog_memoized (dep) >= 0)
4798 int shift_opnum = get_attr_shift (insn);
4799 enum attr_type attr_type = get_attr_type (dep);
4801 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4802 operand for INSN. If we have a shifted input operand and the
4803 instruction we depend on is another ALU instruction, then we may
4804 have to account for an additional stall. */
4805 if (shift_opnum != 0
4806 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4808 rtx shifted_operand;
4809 int opno;
4811 /* Get the shifted operand. */
4812 extract_insn (insn);
4813 shifted_operand = recog_data.operand[shift_opnum];
4815 /* Iterate over all the operands in DEP. If we write an operand
4816 	 that overlaps with SHIFTED_OPERAND, then we have to increase the
4817 cost of this dependency. */
4818 extract_insn (dep);
4819 preprocess_constraints ();
4820 for (opno = 0; opno < recog_data.n_operands; opno++)
4822 /* We can ignore strict inputs. */
4823 if (recog_data.operand_type[opno] == OP_IN)
4824 continue;
4826 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4827 shifted_operand))
4828 return 2;
4833 /* XXX This is not strictly true for the FPA. */
4834 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4835 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4836 return 0;
4838 /* Call insns don't incur a stall, even if they follow a load. */
4839 if (REG_NOTE_KIND (link) == 0
4840 && GET_CODE (insn) == CALL_INSN)
4841 return 1;
4843 if ((i_pat = single_set (insn)) != NULL
4844 && GET_CODE (SET_SRC (i_pat)) == MEM
4845 && (d_pat = single_set (dep)) != NULL
4846 && GET_CODE (SET_DEST (d_pat)) == MEM)
4848 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4849 /* This is a load after a store, there is no conflict if the load reads
4850 from a cached area. Assume that loads from the stack, and from the
4851 constant pool are cached, and that others will miss. This is a
4852 hack. */
4854 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4855 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4856 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4857 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4858 return 1;
4861 return cost;
4864 static int fp_consts_inited = 0;
4866 /* Only zero (the first entry) is valid for VFP; all eight values are
     valid for FPA. */
4867 static const char * const strings_fp[8] =
4869 "0", "1", "2", "3",
4870 "4", "5", "0.5", "10"
4873 static REAL_VALUE_TYPE values_fp[8];
4875 static void
4876 init_fp_table (void)
4878 int i;
4879 REAL_VALUE_TYPE r;
4881 if (TARGET_VFP)
4882 fp_consts_inited = 1;
4883 else
4884 fp_consts_inited = 8;
4886 for (i = 0; i < fp_consts_inited; i++)
4888 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4889 values_fp[i] = r;
4893 /* Return TRUE if rtx X is a valid immediate FP constant. */
4894 int
4895 arm_const_double_rtx (rtx x)
4897 REAL_VALUE_TYPE r;
4898 int i;
4900 if (!fp_consts_inited)
4901 init_fp_table ();
4903 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4904 if (REAL_VALUE_MINUS_ZERO (r))
4905 return 0;
4907 for (i = 0; i < fp_consts_inited; i++)
4908 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4909 return 1;
4911 return 0;
4914 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4915 int
4916 neg_const_double_rtx_ok_for_fpa (rtx x)
4918 REAL_VALUE_TYPE r;
4919 int i;
4921 if (!fp_consts_inited)
4922 init_fp_table ();
4924 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4925 r = REAL_VALUE_NEGATE (r);
4926 if (REAL_VALUE_MINUS_ZERO (r))
4927 return 0;
4929 for (i = 0; i < 8; i++)
4930 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4931 return 1;
4933 return 0;
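/* For example, -1.0 and -10.0 are accepted here because their
   negations appear in the table above, while 0.0 is rejected since
   its negation is minus zero.  */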
4936 /* Predicates for `match_operand' and `match_operator'. */
4938 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4939 int
4940 cirrus_memory_offset (rtx op)
4942 /* Reject eliminable registers. */
4943 if (! (reload_in_progress || reload_completed)
4944 && ( reg_mentioned_p (frame_pointer_rtx, op)
4945 || reg_mentioned_p (arg_pointer_rtx, op)
4946 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4947 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4948 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4949 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4950 return 0;
4952 if (GET_CODE (op) == MEM)
4954 rtx ind;
4956 ind = XEXP (op, 0);
4958 /* Match: (mem (reg)). */
4959 if (GET_CODE (ind) == REG)
4960 return 1;
4962 /* Match:
4963 (mem (plus (reg)
4964 (const))). */
4965 if (GET_CODE (ind) == PLUS
4966 && GET_CODE (XEXP (ind, 0)) == REG
4967 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4968 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4969 return 1;
4972 return 0;
4975 /* Return TRUE if OP is a valid coprocessor memory address pattern.
4976 	 WB is true if writeback address modes are allowed. */
4978 int
4979 arm_coproc_mem_operand (rtx op, bool wb)
4981 rtx ind;
4983 /* Reject eliminable registers. */
4984 if (! (reload_in_progress || reload_completed)
4985 && ( reg_mentioned_p (frame_pointer_rtx, op)
4986 || reg_mentioned_p (arg_pointer_rtx, op)
4987 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4988 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4989 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4990 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4991 return FALSE;
4993 /* Constants are converted into offsets from labels. */
4994 if (GET_CODE (op) != MEM)
4995 return FALSE;
4997 ind = XEXP (op, 0);
4999 if (reload_completed
5000 && (GET_CODE (ind) == LABEL_REF
5001 || (GET_CODE (ind) == CONST
5002 && GET_CODE (XEXP (ind, 0)) == PLUS
5003 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5004 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5005 return TRUE;
5007 /* Match: (mem (reg)). */
5008 if (GET_CODE (ind) == REG)
5009 return arm_address_register_rtx_p (ind, 0);
5011 	/* Autoincrement addressing modes. */
5012 if (wb
5013 && (GET_CODE (ind) == PRE_INC
5014 || GET_CODE (ind) == POST_INC
5015 || GET_CODE (ind) == PRE_DEC
5016 || GET_CODE (ind) == POST_DEC))
5017 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5019 if (wb
5020 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5021 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5022 && GET_CODE (XEXP (ind, 1)) == PLUS
5023 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5024 ind = XEXP (ind, 1);
5026 /* Match:
5027 (plus (reg)
5028 (const)). */
5029 if (GET_CODE (ind) == PLUS
5030 && GET_CODE (XEXP (ind, 0)) == REG
5031 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5032 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5033 && INTVAL (XEXP (ind, 1)) > -1024
5034 && INTVAL (XEXP (ind, 1)) < 1024
5035 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5036 return TRUE;
5038 return FALSE;
5041 /* Return true if X is a register that will be eliminated later on. */
5042 int
5043 arm_eliminable_register (rtx x)
5045 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5046 || REGNO (x) == ARG_POINTER_REGNUM
5047 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5048 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5051 /* Return GENERAL_REGS if a scratch register required to reload x to/from
5052 VFP registers. Otherwise return NO_REGS. */
5054 enum reg_class
5055 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5057 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5058 return NO_REGS;
5060 return GENERAL_REGS;
5063 /* Values which must be returned in the most-significant end of the return
5064 register. */
5066 static bool
5067 arm_return_in_msb (tree valtype)
5069 return (TARGET_AAPCS_BASED
5070 && BYTES_BIG_ENDIAN
5071 && (AGGREGATE_TYPE_P (valtype)
5072 || TREE_CODE (valtype) == COMPLEX_TYPE));
5075 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5076 	 Used by the Cirrus Maverick code, which has to work around
5077 a hardware bug triggered by such instructions. */
5078 static bool
5079 arm_memory_load_p (rtx insn)
5081 	rtx body, lhs, rhs;
5083 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5084 return false;
5086 body = PATTERN (insn);
5088 if (GET_CODE (body) != SET)
5089 return false;
5091 lhs = XEXP (body, 0);
5092 rhs = XEXP (body, 1);
5094 lhs = REG_OR_SUBREG_RTX (lhs);
5096 /* If the destination is not a general purpose
5097 register we do not have to worry. */
5098 if (GET_CODE (lhs) != REG
5099 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5100 return false;
5102 /* As well as loads from memory we also have to react
5103 to loads of invalid constants which will be turned
5104 into loads from the minipool. */
5105 return (GET_CODE (rhs) == MEM
5106 || GET_CODE (rhs) == SYMBOL_REF
5107 || note_invalid_constants (insn, -1, false));
5110 /* Return TRUE if INSN is a Cirrus instruction. */
5111 static bool
5112 arm_cirrus_insn_p (rtx insn)
5114 enum attr_cirrus attr;
5116 /* get_attr cannot accept USE or CLOBBER. */
5117 if (!insn
5118 || GET_CODE (insn) != INSN
5119 || GET_CODE (PATTERN (insn)) == USE
5120 || GET_CODE (PATTERN (insn)) == CLOBBER)
5121 return 0;
5123 attr = get_attr_cirrus (insn);
5125 return attr != CIRRUS_NOT;
5128 /* Cirrus reorg for invalid instruction combinations. */
5129 static void
5130 cirrus_reorg (rtx first)
5132 enum attr_cirrus attr;
5133 rtx body = PATTERN (first);
5134 rtx t;
5135 int nops;
5137 /* Any branch must be followed by 2 non Cirrus instructions. */
5138 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5140 nops = 0;
5141 t = next_nonnote_insn (first);
5143 if (arm_cirrus_insn_p (t))
5144 ++ nops;
5146 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5147 ++ nops;
5149 while (nops --)
5150 emit_insn_after (gen_nop (), first);
5152 return;
5155 /* (float (blah)) is in parallel with a clobber. */
5156 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5157 body = XVECEXP (body, 0, 0);
5159 if (GET_CODE (body) == SET)
5161 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5163 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5164 be followed by a non Cirrus insn. */
5165 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5167 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5168 emit_insn_after (gen_nop (), first);
5170 return;
5172 else if (arm_memory_load_p (first))
5174 unsigned int arm_regno;
5176 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5177 ldr/cfmv64hr combination where the Rd field is the same
5178 in both instructions must be split with a non Cirrus
5179 insn. Example:
5181 ldr r0, blah
5183 cfmvsr mvf0, r0. */
5185 /* Get Arm register number for ldr insn. */
5186 if (GET_CODE (lhs) == REG)
5187 arm_regno = REGNO (lhs);
5188 else
5190 gcc_assert (GET_CODE (rhs) == REG);
5191 arm_regno = REGNO (rhs);
5194 /* Next insn. */
5195 first = next_nonnote_insn (first);
5197 if (! arm_cirrus_insn_p (first))
5198 return;
5200 body = PATTERN (first);
5202 /* (float (blah)) is in parallel with a clobber. */
5203 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5204 body = XVECEXP (body, 0, 0);
5206 if (GET_CODE (body) == FLOAT)
5207 body = XEXP (body, 0);
5209 if (get_attr_cirrus (first) == CIRRUS_MOVE
5210 && GET_CODE (XEXP (body, 1)) == REG
5211 && arm_regno == REGNO (XEXP (body, 1)))
5212 emit_insn_after (gen_nop (), first);
5214 return;
5218 /* get_attr cannot accept USE or CLOBBER. */
5219 if (!first
5220 || GET_CODE (first) != INSN
5221 || GET_CODE (PATTERN (first)) == USE
5222 || GET_CODE (PATTERN (first)) == CLOBBER)
5223 return;
5225 attr = get_attr_cirrus (first);
5227 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5228 must be followed by a non-coprocessor instruction. */
5229 if (attr == CIRRUS_COMPARE)
5231 nops = 0;
5233 t = next_nonnote_insn (first);
5235 if (arm_cirrus_insn_p (t))
5236 ++ nops;
5238 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5239 ++ nops;
5241 while (nops --)
5242 emit_insn_after (gen_nop (), first);
5244 return;
5248 /* Return TRUE if X references a SYMBOL_REF. */
5249 int
5250 symbol_mentioned_p (rtx x)
5252 const char * fmt;
5253 int i;
5255 if (GET_CODE (x) == SYMBOL_REF)
5256 return 1;
5258 fmt = GET_RTX_FORMAT (GET_CODE (x));
5260 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5262 if (fmt[i] == 'E')
5264 int j;
5266 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5267 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5268 return 1;
5270 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5271 return 1;
5274 return 0;
5277 /* Return TRUE if X references a LABEL_REF. */
5278 int
5279 label_mentioned_p (rtx x)
5281 const char * fmt;
5282 int i;
5284 if (GET_CODE (x) == LABEL_REF)
5285 return 1;
5287 fmt = GET_RTX_FORMAT (GET_CODE (x));
5288 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5290 if (fmt[i] == 'E')
5292 int j;
5294 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5295 if (label_mentioned_p (XVECEXP (x, i, j)))
5296 return 1;
5298 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5299 return 1;
5302 return 0;
5305 enum rtx_code
5306 minmax_code (rtx x)
5308 enum rtx_code code = GET_CODE (x);
5310 switch (code)
5312 case SMAX:
5313 return GE;
5314 case SMIN:
5315 return LE;
5316 case UMIN:
5317 return LEU;
5318 case UMAX:
5319 return GEU;
5320 default:
5321 gcc_unreachable ();
5325 /* Return 1 if memory locations are adjacent. */
5326 int
5327 adjacent_mem_locations (rtx a, rtx b)
5329 /* We don't guarantee to preserve the order of these memory refs. */
5330 if (volatile_refs_p (a) || volatile_refs_p (b))
5331 return 0;
5333 if ((GET_CODE (XEXP (a, 0)) == REG
5334 || (GET_CODE (XEXP (a, 0)) == PLUS
5335 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5336 && (GET_CODE (XEXP (b, 0)) == REG
5337 || (GET_CODE (XEXP (b, 0)) == PLUS
5338 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5340 HOST_WIDE_INT val0 = 0, val1 = 0;
5341 rtx reg0, reg1;
5342 int val_diff;
5344 if (GET_CODE (XEXP (a, 0)) == PLUS)
5346 reg0 = XEXP (XEXP (a, 0), 0);
5347 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5349 else
5350 reg0 = XEXP (a, 0);
5352 if (GET_CODE (XEXP (b, 0)) == PLUS)
5354 reg1 = XEXP (XEXP (b, 0), 0);
5355 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5357 else
5358 reg1 = XEXP (b, 0);
5360 /* Don't accept any offset that will require multiple
5361 instructions to handle, since this would cause the
5362 arith_adjacentmem pattern to output an overlong sequence. */
5363 	if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5364 return 0;
5366 /* Don't allow an eliminable register: register elimination can make
5367 the offset too large. */
5368 if (arm_eliminable_register (reg0))
5369 return 0;
5371 val_diff = val1 - val0;
5373 if (arm_ld_sched)
5375 /* If the target has load delay slots, then there's no benefit
5376 to using an ldm instruction unless the offset is zero and
5377 we are optimizing for size. */
5378 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5379 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5380 && (val_diff == 4 || val_diff == -4));
5383 return ((REGNO (reg0) == REGNO (reg1))
5384 && (val_diff == 4 || val_diff == -4));
5387 return 0;
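/* For example, [r4, #8] and [r4, #12] are adjacent (val_diff == 4).
   On arm_ld_sched cores such a pair is only accepted when optimizing
   for size and one of the offsets is 0 or 4.  */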
5390 int
5391 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5392 HOST_WIDE_INT *load_offset)
5394 int unsorted_regs[4];
5395 HOST_WIDE_INT unsorted_offsets[4];
5396 int order[4];
5397 int base_reg = -1;
5398 int i;
5400 /* Can only handle 2, 3, or 4 insns at present,
5401 though could be easily extended if required. */
5402 gcc_assert (nops >= 2 && nops <= 4);
5404 /* Loop over the operands and check that the memory references are
5405 suitable (i.e. immediate offsets from the same base register). At
5406 the same time, extract the target register, and the memory
5407 offsets. */
5408 for (i = 0; i < nops; i++)
5410 rtx reg;
5411 rtx offset;
5413 /* Convert a subreg of a mem into the mem itself. */
5414 if (GET_CODE (operands[nops + i]) == SUBREG)
5415 operands[nops + i] = alter_subreg (operands + (nops + i));
5417 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5419 /* Don't reorder volatile memory references; it doesn't seem worth
5420 looking for the case where the order is ok anyway. */
5421 if (MEM_VOLATILE_P (operands[nops + i]))
5422 return 0;
5424 offset = const0_rtx;
5426 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5427 || (GET_CODE (reg) == SUBREG
5428 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5429 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5430 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5431 == REG)
5432 || (GET_CODE (reg) == SUBREG
5433 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5434 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5435 == CONST_INT)))
5437 if (i == 0)
5439 base_reg = REGNO (reg);
5440 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5441 ? REGNO (operands[i])
5442 : REGNO (SUBREG_REG (operands[i])));
5443 order[0] = 0;
5445 else
5447 if (base_reg != (int) REGNO (reg))
5448 /* Not addressed from the same base register. */
5449 return 0;
5451 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5452 ? REGNO (operands[i])
5453 : REGNO (SUBREG_REG (operands[i])));
5454 if (unsorted_regs[i] < unsorted_regs[order[0]])
5455 order[0] = i;
5458 /* If it isn't an integer register, or if it overwrites the
5459 base register but isn't the last insn in the list, then
5460 we can't do this. */
5461 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5462 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5463 return 0;
5465 unsorted_offsets[i] = INTVAL (offset);
5467 else
5468 /* Not a suitable memory address. */
5469 return 0;
5472 /* All the useful information has now been extracted from the
5473 operands into unsorted_regs and unsorted_offsets; additionally,
5474 order[0] has been set to the lowest numbered register in the
5475 list. Sort the registers into order, and check that the memory
5476 offsets are ascending and adjacent. */
5478 for (i = 1; i < nops; i++)
5480 int j;
5482 order[i] = order[i - 1];
5483 for (j = 0; j < nops; j++)
5484 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5485 && (order[i] == order[i - 1]
5486 || unsorted_regs[j] < unsorted_regs[order[i]]))
5487 order[i] = j;
5489 	/* Have we found a suitable register? If not, one must be used more
5490 than once. */
5491 if (order[i] == order[i - 1])
5492 return 0;
5494 /* Is the memory address adjacent and ascending? */
5495 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5496 return 0;
5499 if (base)
5501 *base = base_reg;
5503 for (i = 0; i < nops; i++)
5504 regs[i] = unsorted_regs[order[i]];
5506 *load_offset = unsorted_offsets[order[0]];
5509 if (unsorted_offsets[order[0]] == 0)
5510 return 1; /* ldmia */
5512 if (unsorted_offsets[order[0]] == 4)
5513 return 2; /* ldmib */
5515 if (unsorted_offsets[order[nops - 1]] == 0)
5516 return 3; /* ldmda */
5518 if (unsorted_offsets[order[nops - 1]] == -4)
5519 return 4; /* ldmdb */
5521 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5522 if the offset isn't small enough. The reason 2 ldrs are faster
5523 is because these ARMs are able to do more than one cache access
5524 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5525 whilst the ARM8 has a double bandwidth cache. This means that
5526 these cores can do both an instruction fetch and a data fetch in
5527 a single cycle, so the trick of calculating the address into a
5528 scratch register (one of the result regs) and then doing a load
5529 multiple actually becomes slower (and no smaller in code size).
5530 That is the transformation
5532 ldr rd1, [rbase + offset]
5533 ldr rd2, [rbase + offset + 4]
5537 add rd1, rbase, offset
5538 ldmia rd1, {rd1, rd2}
5540 produces worse code -- '3 cycles + any stalls on rd2' instead of
5541 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5542 access per cycle, the first sequence could never complete in less
5543 than 6 cycles, whereas the ldm sequence would only take 5 and
5544 would make better use of sequential accesses if not hitting the
5545 cache.
5547 We cheat here and test 'arm_ld_sched' which we currently know to
5548 only be true for the ARM8, ARM9 and StrongARM. If this ever
5549 changes, then the test below needs to be reworked. */
5550 if (nops == 2 && arm_ld_sched)
5551 return 0;
5553 /* Can't do it without setting up the offset, only do this if it takes
5554 no more than one insn. */
5555 return (const_ok_for_arm (unsorted_offsets[order[0]])
5556 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
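/* The return value encodes the addressing variant: offsets starting
   at 0 give 1 (ldmia), starting at 4 give 2 (ldmib), ending at 0
   give 3 (ldmda), ending at -4 give 4 (ldmdb); 5 means the base must
   first be set up with a single add or sub.  */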
5559 const char *
5560 emit_ldm_seq (rtx *operands, int nops)
5562 int regs[4];
5563 int base_reg;
5564 HOST_WIDE_INT offset;
5565 char buf[100];
5566 int i;
5568 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5570 case 1:
5571 strcpy (buf, "ldm%?ia\t");
5572 break;
5574 case 2:
5575 strcpy (buf, "ldm%?ib\t");
5576 break;
5578 case 3:
5579 strcpy (buf, "ldm%?da\t");
5580 break;
5582 case 4:
5583 strcpy (buf, "ldm%?db\t");
5584 break;
5586 case 5:
5587 if (offset >= 0)
5588 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5589 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5590 (long) offset);
5591 else
5592 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5593 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5594 (long) -offset);
5595 output_asm_insn (buf, operands);
5596 base_reg = regs[0];
5597 strcpy (buf, "ldm%?ia\t");
5598 break;
5600 default:
5601 gcc_unreachable ();
5604 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5605 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5607 for (i = 1; i < nops; i++)
5608 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5609 reg_names[regs[i]]);
5611 strcat (buf, "}\t%@ phole ldm");
5613 output_asm_insn (buf, operands);
5614 return "";
5617 int
5618 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5619 HOST_WIDE_INT * load_offset)
5621 int unsorted_regs[4];
5622 HOST_WIDE_INT unsorted_offsets[4];
5623 int order[4];
5624 int base_reg = -1;
5625 int i;
5627 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5628 extended if required. */
5629 gcc_assert (nops >= 2 && nops <= 4);
5631 /* Loop over the operands and check that the memory references are
5632 suitable (i.e. immediate offsets from the same base register). At
5633 the same time, extract the target register, and the memory
5634 offsets. */
5635 for (i = 0; i < nops; i++)
5637 rtx reg;
5638 rtx offset;
5640 /* Convert a subreg of a mem into the mem itself. */
5641 if (GET_CODE (operands[nops + i]) == SUBREG)
5642 operands[nops + i] = alter_subreg (operands + (nops + i));
5644 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5646 /* Don't reorder volatile memory references; it doesn't seem worth
5647 looking for the case where the order is ok anyway. */
5648 if (MEM_VOLATILE_P (operands[nops + i]))
5649 return 0;
5651 offset = const0_rtx;
5653 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5654 || (GET_CODE (reg) == SUBREG
5655 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5656 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5657 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5658 == REG)
5659 || (GET_CODE (reg) == SUBREG
5660 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5661 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5662 == CONST_INT)))
5664 if (i == 0)
5666 base_reg = REGNO (reg);
5667 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5668 ? REGNO (operands[i])
5669 : REGNO (SUBREG_REG (operands[i])));
5670 order[0] = 0;
5672 else
5674 if (base_reg != (int) REGNO (reg))
5675 /* Not addressed from the same base register. */
5676 return 0;
5678 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5679 ? REGNO (operands[i])
5680 : REGNO (SUBREG_REG (operands[i])));
5681 if (unsorted_regs[i] < unsorted_regs[order[0]])
5682 order[0] = i;
5685 /* If it isn't an integer register, then we can't do this. */
5686 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5687 return 0;
5689 unsorted_offsets[i] = INTVAL (offset);
5691 else
5692 /* Not a suitable memory address. */
5693 return 0;
5696 /* All the useful information has now been extracted from the
5697 operands into unsorted_regs and unsorted_offsets; additionally,
5698 order[0] has been set to the lowest numbered register in the
5699 list. Sort the registers into order, and check that the memory
5700 offsets are ascending and adjacent. */
5702 for (i = 1; i < nops; i++)
5704 int j;
5706 order[i] = order[i - 1];
5707 for (j = 0; j < nops; j++)
5708 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5709 && (order[i] == order[i - 1]
5710 || unsorted_regs[j] < unsorted_regs[order[i]]))
5711 order[i] = j;
5713 	/* Have we found a suitable register? If not, one must be used more
5714 than once. */
5715 if (order[i] == order[i - 1])
5716 return 0;
5718 /* Is the memory address adjacent and ascending? */
5719 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5720 return 0;
5723 if (base)
5725 *base = base_reg;
5727 for (i = 0; i < nops; i++)
5728 regs[i] = unsorted_regs[order[i]];
5730 *load_offset = unsorted_offsets[order[0]];
5733 if (unsorted_offsets[order[0]] == 0)
5734 return 1; /* stmia */
5736 if (unsorted_offsets[order[0]] == 4)
5737 return 2; /* stmib */
5739 if (unsorted_offsets[order[nops - 1]] == 0)
5740 return 3; /* stmda */
5742 if (unsorted_offsets[order[nops - 1]] == -4)
5743 return 4; /* stmdb */
5745 return 0;
5748 const char *
5749 emit_stm_seq (rtx *operands, int nops)
5751 int regs[4];
5752 int base_reg;
5753 HOST_WIDE_INT offset;
5754 char buf[100];
5755 int i;
5757 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5759 case 1:
5760 strcpy (buf, "stm%?ia\t");
5761 break;
5763 case 2:
5764 strcpy (buf, "stm%?ib\t");
5765 break;
5767 case 3:
5768 strcpy (buf, "stm%?da\t");
5769 break;
5771 case 4:
5772 strcpy (buf, "stm%?db\t");
5773 break;
5775 default:
5776 gcc_unreachable ();
5779 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5780 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5782 for (i = 1; i < nops; i++)
5783 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5784 reg_names[regs[i]]);
5786 strcat (buf, "}\t%@ phole stm");
5788 output_asm_insn (buf, operands);
5789 return "";
5793 /* Routines for use in generating RTL. */
5795 rtx
5796 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5797 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5799 HOST_WIDE_INT offset = *offsetp;
5800 int i = 0, j;
5801 rtx result;
5802 int sign = up ? 1 : -1;
5803 rtx mem, addr;
5805 /* XScale has load-store double instructions, but they have stricter
5806 alignment requirements than load-store multiple, so we cannot
5807 use them.
5809 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5810 the pipeline until completion.
5812 	NREGS CYCLES
5813 	  1     3
5814 	  2     4
5815 	  3     5
5816 	  4     6
5818 An ldr instruction takes 1-3 cycles, but does not block the
5819 pipeline.
5821 NREGS CYCLES
5822 1 1-3
5823 2 2-6
5824 3 3-9
5825 4 4-12
5827 Best case ldr will always win. However, the more ldr instructions
5828 we issue, the less likely we are to be able to schedule them well.
5829 Using ldr instructions also increases code size.
5831 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5832 for counts of 3 or 4 regs. */
5833 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5835 rtx seq;
5837 start_sequence ();
5839 for (i = 0; i < count; i++)
5841 addr = plus_constant (from, i * 4 * sign);
5842 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5843 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5844 offset += 4 * sign;
5847 if (write_back)
5849 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5850 *offsetp = offset;
5853 seq = get_insns ();
5854 end_sequence ();
5856 return seq;
5859 result = gen_rtx_PARALLEL (VOIDmode,
5860 rtvec_alloc (count + (write_back ? 1 : 0)));
5861 if (write_back)
5863 XVECEXP (result, 0, 0)
5864 = gen_rtx_SET (GET_MODE (from), from,
5865 plus_constant (from, count * 4 * sign));
5866 i = 1;
5867 count++;
5870 for (j = 0; i < count; i++, j++)
5872 addr = plus_constant (from, j * 4 * sign);
5873 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5874 XVECEXP (result, 0, i)
5875 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5876 offset += 4 * sign;
5879 if (write_back)
5880 *offsetp = offset;
5882 return result;
5885 rtx
5886 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5887 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5889 HOST_WIDE_INT offset = *offsetp;
5890 int i = 0, j;
5891 rtx result;
5892 int sign = up ? 1 : -1;
5893 rtx mem, addr;
5895 /* See arm_gen_load_multiple for discussion of
5896 the pros/cons of ldm/stm usage for XScale. */
5897 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5899 rtx seq;
5901 start_sequence ();
5903 for (i = 0; i < count; i++)
5905 addr = plus_constant (to, i * 4 * sign);
5906 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5907 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5908 offset += 4 * sign;
5911 if (write_back)
5913 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5914 *offsetp = offset;
5917 seq = get_insns ();
5918 end_sequence ();
5920 return seq;
5923 result = gen_rtx_PARALLEL (VOIDmode,
5924 rtvec_alloc (count + (write_back ? 1 : 0)));
5925 if (write_back)
5927 XVECEXP (result, 0, 0)
5928 = gen_rtx_SET (GET_MODE (to), to,
5929 plus_constant (to, count * 4 * sign));
5930 i = 1;
5931 count++;
5934 for (j = 0; i < count; i++, j++)
5936 addr = plus_constant (to, j * 4 * sign);
5937 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5938 XVECEXP (result, 0, i)
5939 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5940 offset += 4 * sign;
5943 if (write_back)
5944 *offsetp = offset;
5946 return result;
5949 int
5950 arm_gen_movmemqi (rtx *operands)
5952 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5953 HOST_WIDE_INT srcoffset, dstoffset;
5954 int i;
5955 rtx src, dst, srcbase, dstbase;
5956 rtx part_bytes_reg = NULL;
5957 rtx mem;
5959 if (GET_CODE (operands[2]) != CONST_INT
5960 || GET_CODE (operands[3]) != CONST_INT
5961 || INTVAL (operands[2]) > 64
5962 || INTVAL (operands[3]) & 3)
5963 return 0;
5965 dstbase = operands[0];
5966 srcbase = operands[1];
5968 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5969 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5971 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5972 out_words_to_go = INTVAL (operands[2]) / 4;
5973 last_bytes = INTVAL (operands[2]) & 3;
5974 dstoffset = srcoffset = 0;
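/* A worked example, assuming a little-endian target: a 14-byte copy
   gives in_words_to_go = 4 (ARM_NUM_INTS rounds up), out_words_to_go = 3
   and last_bytes = 2, so three whole words are stored below and the
   tail is written as a single halfword. */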
5976 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5977 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5979 for (i = 0; in_words_to_go >= 2; i+=4)
5981 if (in_words_to_go > 4)
5982 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5983 srcbase, &srcoffset));
5984 else
5985 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5986 FALSE, srcbase, &srcoffset));
5988 if (out_words_to_go)
5990 if (out_words_to_go > 4)
5991 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5992 dstbase, &dstoffset));
5993 else if (out_words_to_go != 1)
5994 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5995 dst, TRUE,
5996 (last_bytes == 0
5997 ? FALSE : TRUE),
5998 dstbase, &dstoffset));
5999 else
6001 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6002 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6003 if (last_bytes != 0)
6005 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6006 dstoffset += 4;
6011 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6012 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6015 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6016 if (out_words_to_go)
6018 rtx sreg;
6020 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6021 sreg = copy_to_reg (mem);
6023 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6024 emit_move_insn (mem, sreg);
6025 in_words_to_go--;
6027 gcc_assert (!in_words_to_go); /* Sanity check */
6030 if (in_words_to_go)
6032 gcc_assert (in_words_to_go > 0);
6034 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6035 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6038 gcc_assert (!last_bytes || part_bytes_reg);
6040 if (BYTES_BIG_ENDIAN && last_bytes)
6042 rtx tmp = gen_reg_rtx (SImode);
6044 /* The bytes we want are in the top end of the word. */
6045 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6046 GEN_INT (8 * (4 - last_bytes))));
6047 part_bytes_reg = tmp;
6049 while (last_bytes)
6051 mem = adjust_automodify_address (dstbase, QImode,
6052 plus_constant (dst, last_bytes - 1),
6053 dstoffset + last_bytes - 1);
6054 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6056 if (--last_bytes)
6058 tmp = gen_reg_rtx (SImode);
6059 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6060 part_bytes_reg = tmp;
6065 else
6067 if (last_bytes > 1)
6069 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6070 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6071 last_bytes -= 2;
6072 if (last_bytes)
6074 rtx tmp = gen_reg_rtx (SImode);
6075 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6076 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6077 part_bytes_reg = tmp;
6078 dstoffset += 2;
6082 if (last_bytes)
6084 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6085 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6089 return 1;
6092 /* Generate a memory reference for a half word, such that it will be loaded
6093 into the top 16 bits of the word. We can assume that the address is
6094 known to be alignable and of the form reg, or plus (reg, const). */
6096 rtx
6097 arm_gen_rotated_half_load (rtx memref)
6099 HOST_WIDE_INT offset = 0;
6100 rtx base = XEXP (memref, 0);
6102 if (GET_CODE (base) == PLUS)
6104 offset = INTVAL (XEXP (base, 1));
6105 base = XEXP (base, 0);
6108 /* If we aren't allowed to generate unaligned addresses, then fail. */
6109 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
6110 return NULL;
6112 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
6114 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
6115 return base;
6117 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
6120 /* Select a dominance comparison mode if possible for a test of the general
6121 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6122 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6123 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6124 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6125 In all cases OP will be either EQ or NE, but we don't need to know which
6126 here. If we are unable to support a dominance comparison we return
6127 CC mode. This will then fail to match for the RTL expressions that
6128 generate this call. */
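/* For example, a test of (LT x y) || (LE x y) arrives here with
   cond1 == LT and cond2 == LE; LT dominates LE, so the function
   returns CC_DLEmode. */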
6129 enum machine_mode
6130 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6132 enum rtx_code cond1, cond2;
6133 int swapped = 0;
6135 /* Currently we will probably get the wrong result if the individual
6136 comparisons are not simple. This also ensures that it is safe to
6137 reverse a comparison if necessary. */
6138 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6139 != CCmode)
6140 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6141 != CCmode))
6142 return CCmode;
6144 /* The if_then_else variant of this tests the second condition if the
6145 first passes, but is true if the first fails. Reverse the first
6146 condition to get a true "inclusive-or" expression. */
6147 if (cond_or == DOM_CC_NX_OR_Y)
6148 cond1 = reverse_condition (cond1);
6150 /* If the comparisons are not equal, and one doesn't dominate the other,
6151 then we can't do this. */
6152 if (cond1 != cond2
6153 && !comparison_dominates_p (cond1, cond2)
6154 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6155 return CCmode;
6157 if (swapped)
6159 enum rtx_code temp = cond1;
6160 cond1 = cond2;
6161 cond2 = temp;
6164 switch (cond1)
6166 case EQ:
6167 if (cond_or == DOM_CC_X_AND_Y)
6168 return CC_DEQmode;
6170 switch (cond2)
6172 case EQ: return CC_DEQmode;
6173 case LE: return CC_DLEmode;
6174 case LEU: return CC_DLEUmode;
6175 case GE: return CC_DGEmode;
6176 case GEU: return CC_DGEUmode;
6177 default: gcc_unreachable ();
6180 case LT:
6181 if (cond_or == DOM_CC_X_AND_Y)
6182 return CC_DLTmode;
6184 switch (cond2)
6186 case LT:
6187 return CC_DLTmode;
6188 case LE:
6189 return CC_DLEmode;
6190 case NE:
6191 return CC_DNEmode;
6192 default:
6193 gcc_unreachable ();
6196 case GT:
6197 if (cond_or == DOM_CC_X_AND_Y)
6198 return CC_DGTmode;
6200 switch (cond2)
6202 case GT:
6203 return CC_DGTmode;
6204 case GE:
6205 return CC_DGEmode;
6206 case NE:
6207 return CC_DNEmode;
6208 default:
6209 gcc_unreachable ();
6212 case LTU:
6213 if (cond_or == DOM_CC_X_AND_Y)
6214 return CC_DLTUmode;
6216 switch (cond2)
6218 case LTU:
6219 return CC_DLTUmode;
6220 case LEU:
6221 return CC_DLEUmode;
6222 case NE:
6223 return CC_DNEmode;
6224 default:
6225 gcc_unreachable ();
6228 case GTU:
6229 if (cond_or == DOM_CC_X_AND_Y)
6230 return CC_DGTUmode;
6232 switch (cond2)
6234 case GTU:
6235 return CC_DGTUmode;
6236 case GEU:
6237 return CC_DGEUmode;
6238 case NE:
6239 return CC_DNEmode;
6240 default:
6241 gcc_unreachable ();
6244 /* The remaining cases only occur when both comparisons are the
6245 same. */
6246 case NE:
6247 gcc_assert (cond1 == cond2);
6248 return CC_DNEmode;
6250 case LE:
6251 gcc_assert (cond1 == cond2);
6252 return CC_DLEmode;
6254 case GE:
6255 gcc_assert (cond1 == cond2);
6256 return CC_DGEmode;
6258 case LEU:
6259 gcc_assert (cond1 == cond2);
6260 return CC_DLEUmode;
6262 case GEU:
6263 gcc_assert (cond1 == cond2);
6264 return CC_DGEUmode;
6266 default:
6267 gcc_unreachable ();
6271 enum machine_mode
6272 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6274 /* All floating point compares return CCFP if it is an equality
6275 comparison, and CCFPE otherwise. */
6276 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6278 switch (op)
6280 case EQ:
6281 case NE:
6282 case UNORDERED:
6283 case ORDERED:
6284 case UNLT:
6285 case UNLE:
6286 case UNGT:
6287 case UNGE:
6288 case UNEQ:
6289 case LTGT:
6290 return CCFPmode;
6292 case LT:
6293 case LE:
6294 case GT:
6295 case GE:
6296 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6297 return CCFPmode;
6298 return CCFPEmode;
6300 default:
6301 gcc_unreachable ();
6305 /* A compare with a shifted operand. Because of canonicalization, the
6306 comparison will have to be swapped when we emit the assembler. */
6307 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6308 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6309 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6310 || GET_CODE (x) == ROTATERT))
6311 return CC_SWPmode;
6313 /* This operation is performed swapped, but since we only rely on the Z
6314 flag we don't need an additional mode. */
6315 if (GET_MODE (y) == SImode && REG_P (y)
6316 && GET_CODE (x) == NEG
6317 && (op == EQ || op == NE))
6318 return CC_Zmode;
6320 /* This is a special case that is used by combine to allow a
6321 comparison of a shifted byte load to be split into a zero-extend
6322 followed by a comparison of the shifted integer (only valid for
6323 equalities and unsigned inequalities). */
6324 if (GET_MODE (x) == SImode
6325 && GET_CODE (x) == ASHIFT
6326 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6327 && GET_CODE (XEXP (x, 0)) == SUBREG
6328 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6329 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6330 && (op == EQ || op == NE
6331 || op == GEU || op == GTU || op == LTU || op == LEU)
6332 && GET_CODE (y) == CONST_INT)
6333 return CC_Zmode;
6335 /* A construct for a conditional compare: if the false arm contains
6336 0, then both conditions must be true; otherwise either condition
6337 must be true. Not all conditions are possible, so CCmode is
6338 returned if it can't be done. */
6339 if (GET_CODE (x) == IF_THEN_ELSE
6340 && (XEXP (x, 2) == const0_rtx
6341 || XEXP (x, 2) == const1_rtx)
6342 && COMPARISON_P (XEXP (x, 0))
6343 && COMPARISON_P (XEXP (x, 1)))
6344 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6345 INTVAL (XEXP (x, 2)));
6347 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6348 if (GET_CODE (x) == AND
6349 && COMPARISON_P (XEXP (x, 0))
6350 && COMPARISON_P (XEXP (x, 1)))
6351 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6352 DOM_CC_X_AND_Y);
6354 if (GET_CODE (x) == IOR
6355 && COMPARISON_P (XEXP (x, 0))
6356 && COMPARISON_P (XEXP (x, 1)))
6357 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6358 DOM_CC_X_OR_Y);
6360 /* An operation (on Thumb) where we want to test for a single bit.
6361 This is done by shifting that bit up into the top bit of a
6362 scratch register; we can then branch on the sign bit. */
6363 if (TARGET_THUMB
6364 && GET_MODE (x) == SImode
6365 && (op == EQ || op == NE)
6366 && (GET_CODE (x) == ZERO_EXTRACT))
6367 return CC_Nmode;
6369 /* For an operation that sets the condition codes as a side-effect, the
6370 V flag is not set correctly, so we can only use comparisons where
6371 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6372 instead.) */
6373 if (GET_MODE (x) == SImode
6374 && y == const0_rtx
6375 && (op == EQ || op == NE || op == LT || op == GE)
6376 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6377 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6378 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6379 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6380 || GET_CODE (x) == LSHIFTRT
6381 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6382 || GET_CODE (x) == ROTATERT
6383 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6384 return CC_NOOVmode;
6386 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6387 return CC_Zmode;
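/* The next case relies on the identity that, for unsigned values,
   (a + b) < a is true exactly when the addition carried out of the
   top bit; only the C flag needs to be tested. */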
6389 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6390 && GET_CODE (x) == PLUS
6391 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6392 return CC_Cmode;
6394 return CCmode;
6397 /* X and Y are two things to compare using CODE. Emit the compare insn
6398 and return the rtx for the CC register in the proper mode. */
6400 rtx
6401 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6403 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6404 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6406 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6407 gen_rtx_COMPARE (mode, x, y)));
6409 return cc_reg;
6412 /* Generate a sequence of insns that will generate the correct return
6413 address mask depending on the physical architecture that the program
6414 is running on. */
6415 rtx
6416 arm_gen_return_addr_mask (void)
6418 rtx reg = gen_reg_rtx (Pmode);
6420 emit_insn (gen_return_addr_mask (reg));
6421 return reg;
6424 void
6425 arm_reload_in_hi (rtx *operands)
6427 rtx ref = operands[1];
6428 rtx base, scratch;
6429 HOST_WIDE_INT offset = 0;
6431 if (GET_CODE (ref) == SUBREG)
6433 offset = SUBREG_BYTE (ref);
6434 ref = SUBREG_REG (ref);
6437 if (GET_CODE (ref) == REG)
6439 /* We have a pseudo which has been spilt onto the stack; there
6440 are two cases here: the first where there is a simple
6441 stack-slot replacement and a second where the stack-slot is
6442 out of range, or is used as a subreg. */
6443 if (reg_equiv_mem[REGNO (ref)])
6445 ref = reg_equiv_mem[REGNO (ref)];
6446 base = find_replacement (&XEXP (ref, 0));
6448 else
6449 /* The slot is out of range, or was dressed up in a SUBREG. */
6450 base = reg_equiv_address[REGNO (ref)];
6452 else
6453 base = find_replacement (&XEXP (ref, 0));
6455 /* Handle the case where the address is too complex to be offset by 1. */
6456 if (GET_CODE (base) == MINUS
6457 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6459 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6461 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6462 base = base_plus;
6464 else if (GET_CODE (base) == PLUS)
6466 /* The addend must be CONST_INT, or we would have dealt with it above. */
6467 HOST_WIDE_INT hi, lo;
6469 offset += INTVAL (XEXP (base, 1));
6470 base = XEXP (base, 0);
6472 /* Rework the address into a legal sequence of insns. */
6473 /* Valid range for lo is -4095 -> 4095. */
6474 lo = (offset >= 0
6475 ? (offset & 0xfff)
6476 : -((-offset) & 0xfff));
6478 /* Corner case: if lo is the max offset, then we would be out of range
6479 once we have added the additional 1 below, so bump the msb into the
6480 pre-loading insn(s). */
6481 if (lo == 4095)
6482 lo &= 0x7ff;
6484 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6485 ^ (HOST_WIDE_INT) 0x80000000)
6486 - (HOST_WIDE_INT) 0x80000000);
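/* For example, offset 0x1234 splits as lo = 0x234 and hi = 0x1000,
   and offset -0x1234 as lo = -0x234 and hi = -0x1000; the masking
   above just sign-extends HI when HOST_WIDE_INT is wider than 32
   bits. */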
6488 gcc_assert (hi + lo == offset);
6490 if (hi != 0)
6492 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6494 /* Get the base address; addsi3 knows how to handle constants
6495 that require more than one insn. */
6496 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6497 base = base_plus;
6498 offset = lo;
6502 /* Operands[2] may overlap operands[0] (though it won't overlap
6503 operands[1]); that's why we asked for a DImode reg -- so we can
6504 use the half that does not overlap. */
6505 if (REGNO (operands[2]) == REGNO (operands[0]))
6506 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6507 else
6508 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6510 emit_insn (gen_zero_extendqisi2 (scratch,
6511 gen_rtx_MEM (QImode,
6512 plus_constant (base,
6513 offset))));
6514 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6515 gen_rtx_MEM (QImode,
6516 plus_constant (base,
6517 offset + 1))));
6518 if (!BYTES_BIG_ENDIAN)
6519 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6520 gen_rtx_IOR (SImode,
6521 gen_rtx_ASHIFT
6522 (SImode,
6523 gen_rtx_SUBREG (SImode, operands[0], 0),
6524 GEN_INT (8)),
6525 scratch)));
6526 else
6527 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6528 gen_rtx_IOR (SImode,
6529 gen_rtx_ASHIFT (SImode, scratch,
6530 GEN_INT (8)),
6531 gen_rtx_SUBREG (SImode, operands[0],
6532 0))));
6535 /* Handle storing a half-word to memory during reload by synthesizing it as two
6536 byte stores. Take care not to clobber the input values until after we
6537 have moved them somewhere safe. This code assumes that if the DImode
6538 scratch in operands[2] overlaps either the input value or output address
6539 in some way, then that value must die in this insn (we absolutely need
6540 two scratch registers for some corner cases). */
6541 void
6542 arm_reload_out_hi (rtx *operands)
6544 rtx ref = operands[0];
6545 rtx outval = operands[1];
6546 rtx base, scratch;
6547 HOST_WIDE_INT offset = 0;
6549 if (GET_CODE (ref) == SUBREG)
6551 offset = SUBREG_BYTE (ref);
6552 ref = SUBREG_REG (ref);
6555 if (GET_CODE (ref) == REG)
6557 /* We have a pseudo which has been spilt onto the stack; there
6558 are two cases here: the first where there is a simple
6559 stack-slot replacement and a second where the stack-slot is
6560 out of range, or is used as a subreg. */
6561 if (reg_equiv_mem[REGNO (ref)])
6563 ref = reg_equiv_mem[REGNO (ref)];
6564 base = find_replacement (&XEXP (ref, 0));
6566 else
6567 /* The slot is out of range, or was dressed up in a SUBREG. */
6568 base = reg_equiv_address[REGNO (ref)];
6570 else
6571 base = find_replacement (&XEXP (ref, 0));
6573 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6575 /* Handle the case where the address is too complex to be offset by 1. */
6576 if (GET_CODE (base) == MINUS
6577 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6579 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6581 /* Be careful not to destroy OUTVAL. */
6582 if (reg_overlap_mentioned_p (base_plus, outval))
6584 /* Updating base_plus might destroy outval; see if we can
6585 swap the scratch and base_plus. */
6586 if (!reg_overlap_mentioned_p (scratch, outval))
6588 rtx tmp = scratch;
6589 scratch = base_plus;
6590 base_plus = tmp;
6592 else
6594 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6596 /* Be conservative and copy OUTVAL into the scratch now;
6597 this should only be necessary if outval is a subreg
6598 of something larger than a word. */
6599 /* XXX Might this clobber base? I can't see how it can,
6600 since scratch is known to overlap with OUTVAL, and
6601 must be wider than a word. */
6602 emit_insn (gen_movhi (scratch_hi, outval));
6603 outval = scratch_hi;
6607 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6608 base = base_plus;
6610 else if (GET_CODE (base) == PLUS)
6612 /* The addend must be CONST_INT, or we would have dealt with it above. */
6613 HOST_WIDE_INT hi, lo;
6615 offset += INTVAL (XEXP (base, 1));
6616 base = XEXP (base, 0);
6618 /* Rework the address into a legal sequence of insns. */
6619 /* Valid range for lo is -4095 -> 4095. */
6620 lo = (offset >= 0
6621 ? (offset & 0xfff)
6622 : -((-offset) & 0xfff));
6624 /* Corner case: if lo is the max offset, then we would be out of range
6625 once we have added the additional 1 below, so bump the msb into the
6626 pre-loading insn(s). */
6627 if (lo == 4095)
6628 lo &= 0x7ff;
6630 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6631 ^ (HOST_WIDE_INT) 0x80000000)
6632 - (HOST_WIDE_INT) 0x80000000);
6634 gcc_assert (hi + lo == offset);
6636 if (hi != 0)
6638 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6640 /* Be careful not to destroy OUTVAL. */
6641 if (reg_overlap_mentioned_p (base_plus, outval))
6643 /* Updating base_plus might destroy outval; see if we
6644 can swap the scratch and base_plus. */
6645 if (!reg_overlap_mentioned_p (scratch, outval))
6647 rtx tmp = scratch;
6648 scratch = base_plus;
6649 base_plus = tmp;
6651 else
6653 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6655 /* Be conservative and copy outval into the scratch now;
6656 this should only be necessary if outval is a
6657 subreg of something larger than a word. */
6658 /* XXX Might this clobber base? I can't see how it
6659 can, since scratch is known to overlap with
6660 outval. */
6661 emit_insn (gen_movhi (scratch_hi, outval));
6662 outval = scratch_hi;
6666 /* Get the base address; addsi3 knows how to handle constants
6667 that require more than one insn. */
6668 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6669 base = base_plus;
6670 offset = lo;
6674 if (BYTES_BIG_ENDIAN)
6676 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6677 plus_constant (base, offset + 1)),
6678 gen_lowpart (QImode, outval)));
6679 emit_insn (gen_lshrsi3 (scratch,
6680 gen_rtx_SUBREG (SImode, outval, 0),
6681 GEN_INT (8)));
6682 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6683 gen_lowpart (QImode, scratch)));
6685 else
6687 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6688 gen_lowpart (QImode, outval)));
6689 emit_insn (gen_lshrsi3 (scratch,
6690 gen_rtx_SUBREG (SImode, outval, 0),
6691 GEN_INT (8)));
6692 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6693 plus_constant (base, offset + 1)),
6694 gen_lowpart (QImode, scratch)));
6698 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
6699 (padded to the size of a word) should be passed in a register. */
6701 static bool
6702 arm_must_pass_in_stack (enum machine_mode mode, tree type)
6704 if (TARGET_AAPCS_BASED)
6705 return must_pass_in_stack_var_size (mode, type);
6706 else
6707 return must_pass_in_stack_var_size_or_pad (mode, type);
6711 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
6712 Return true if an argument passed on the stack should be padded upwards,
6713 i.e. if the least-significant byte has useful data.
6714 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
6715 aggregate types are placed at the lowest memory address. */
6717 bool
6718 arm_pad_arg_upward (enum machine_mode mode, tree type)
6720 if (!TARGET_AAPCS_BASED)
6721 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
6723 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
6724 return false;
6726 return true;
6730 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
6731 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
6732 byte of the register has useful data, and return the opposite if the
6733 most significant byte does.
6734 For AAPCS, small aggregates and small complex types are always padded
6735 upwards. */
6737 bool
6738 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
6739 tree type, int first ATTRIBUTE_UNUSED)
6741 if (TARGET_AAPCS_BASED
6742 && BYTES_BIG_ENDIAN
6743 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
6744 && int_size_in_bytes (type) <= 4)
6745 return true;
6747 /* Otherwise, use default padding. */
6748 return !BYTES_BIG_ENDIAN;
6753 /* Print a symbolic form of X to the debug file, F. */
6754 static void
6755 arm_print_value (FILE *f, rtx x)
6757 switch (GET_CODE (x))
6759 case CONST_INT:
6760 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6761 return;
6763 case CONST_DOUBLE:
6764 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6765 return;
6767 case CONST_VECTOR:
6769 int i;
6771 fprintf (f, "<");
6772 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6774 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6775 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6776 fputc (',', f);
6778 fprintf (f, ">");
6780 return;
6782 case CONST_STRING:
6783 fprintf (f, "\"%s\"", XSTR (x, 0));
6784 return;
6786 case SYMBOL_REF:
6787 fprintf (f, "`%s'", XSTR (x, 0));
6788 return;
6790 case LABEL_REF:
6791 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6792 return;
6794 case CONST:
6795 arm_print_value (f, XEXP (x, 0));
6796 return;
6798 case PLUS:
6799 arm_print_value (f, XEXP (x, 0));
6800 fprintf (f, "+");
6801 arm_print_value (f, XEXP (x, 1));
6802 return;
6804 case PC:
6805 fprintf (f, "pc");
6806 return;
6808 default:
6809 fprintf (f, "????");
6810 return;
6814 /* Routines for manipulation of the constant pool. */
6816 /* Arm instructions cannot load a large constant directly into a
6817 register; it has to come from a pc relative load. The constant
6818 must therefore be placed in the addressable range of the pc
6819 relative load. Depending on the precise pc relative load
6820 instruction the range is somewhere between 256 bytes and 4k. This
6821 means that we often have to dump a constant inside a function, and
6822 generate code to branch around it.
6824 It is important to minimize this, since the branches will slow
6825 things down and make the code larger.
6827 Normally we can hide the table after an existing unconditional
6828 branch so that there is no interruption of the flow, but in the
6829 worst case the code looks like this:
6831 ldr rn, L1
6833 b L2
6834 align
6835 L1: .long value
6839 ldr rn, L3
6841 b L4
6842 align
6843 L3: .long value
6847 We fix this by performing a scan after scheduling, which notices
6848 which instructions need to have their operands fetched from the
6849 constant table and builds the table.
6851 The algorithm starts by building a table of all the constants that
6852 need fixing up and all the natural barriers in the function (places
6853 where a constant table can be dropped without breaking the flow).
6854 For each fixup we note how far the pc-relative replacement will be
6855 able to reach and the offset of the instruction into the function.
6857 Having built the table we then group the fixes together to form
6858 tables that are as large as possible (subject to addressing
6859 constraints) and emit each table of constants after the last
6860 barrier that is within range of all the instructions in the group.
6861 If a group does not contain a barrier, then we forcibly create one
6862 by inserting a jump instruction into the flow. Once the table has
6863 been inserted, the insns are then modified to reference the
6864 relevant entry in the pool.
6866 Possible enhancements to the algorithm (not implemented) are:
6868 1) For some processors and object formats, there may be benefit in
6869 aligning the pools to the start of cache lines; this alignment
6870 would need to be taken into account when calculating addressability
6871 of a pool. */
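/* The per-insn reach referred to above is taken from the pool_range
   and neg_pool_range insn attributes; see push_minipool_fix below,
   which records them in the forwards and backwards fields of each
   fix. */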
6873 /* These typedefs are located at the start of this file, so that
6874 they can be used in the prototypes there. This comment is to
6875 remind readers of that fact so that the following structures
6876 can be understood more easily.
6878 typedef struct minipool_node Mnode;
6879 typedef struct minipool_fixup Mfix; */
6881 struct minipool_node
6883 /* Doubly linked chain of entries. */
6884 Mnode * next;
6885 Mnode * prev;
6886 /* The maximum offset into the code at which this entry can be placed. While
6887 pushing fixes for forward references, all entries are sorted in order
6888 of increasing max_address. */
6889 HOST_WIDE_INT max_address;
6890 /* Similarly for an entry inserted for a backwards ref. */
6891 HOST_WIDE_INT min_address;
6892 /* The number of fixes referencing this entry. This can become zero
6893 if we "unpush" an entry. In this case we ignore the entry when we
6894 come to emit the code. */
6895 int refcount;
6896 /* The offset from the start of the minipool. */
6897 HOST_WIDE_INT offset;
6898 /* The value in the table. */
6899 rtx value;
6900 /* The mode of value. */
6901 enum machine_mode mode;
6902 /* The size of the value. With iWMMXt enabled,
6903 sizes > 4 also imply an alignment of 8 bytes. */
6904 int fix_size;
6907 struct minipool_fixup
6909 Mfix * next;
6910 rtx insn;
6911 HOST_WIDE_INT address;
6912 rtx * loc;
6913 enum machine_mode mode;
6914 int fix_size;
6915 rtx value;
6916 Mnode * minipool;
6917 HOST_WIDE_INT forwards;
6918 HOST_WIDE_INT backwards;
6921 /* Fixes less than a word need padding out to a word boundary. */
6922 #define MINIPOOL_FIX_SIZE(mode) \
6923 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
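/* For example, MINIPOOL_FIX_SIZE (HImode) == 4, while
   MINIPOOL_FIX_SIZE (DImode) == 8. */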
6925 static Mnode * minipool_vector_head;
6926 static Mnode * minipool_vector_tail;
6927 static rtx minipool_vector_label;
6929 /* The linked list of all minipool fixes required for this function. */
6930 Mfix * minipool_fix_head;
6931 Mfix * minipool_fix_tail;
6932 /* The fix entry for the current minipool, once it has been placed. */
6933 Mfix * minipool_barrier;
6935 /* Determines if INSN is the start of a jump table. Returns the end
6936 of the TABLE or NULL_RTX. */
6937 static rtx
6938 is_jump_table (rtx insn)
6940 rtx table;
6942 if (GET_CODE (insn) == JUMP_INSN
6943 && JUMP_LABEL (insn) != NULL
6944 && ((table = next_real_insn (JUMP_LABEL (insn)))
6945 == next_real_insn (insn))
6946 && table != NULL
6947 && GET_CODE (table) == JUMP_INSN
6948 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6949 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6950 return table;
6952 return NULL_RTX;
6955 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6956 #define JUMP_TABLES_IN_TEXT_SECTION 0
6957 #endif
6959 static HOST_WIDE_INT
6960 get_jump_table_size (rtx insn)
6962 /* ADDR_VECs only take room if read-only data goes into the text
6963 section. */
6964 if (JUMP_TABLES_IN_TEXT_SECTION
6965 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6966 || 1
6967 #endif
6970 rtx body = PATTERN (insn);
6971 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6973 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6976 return 0;
6979 /* Move a minipool fix MP from its current location to before MAX_MP.
6980 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6981 constraints may need updating. */
6982 static Mnode *
6983 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6984 HOST_WIDE_INT max_address)
6986 /* The code below assumes these are different. */
6987 gcc_assert (mp != max_mp);
6989 if (max_mp == NULL)
6991 if (max_address < mp->max_address)
6992 mp->max_address = max_address;
6994 else
6996 if (max_address > max_mp->max_address - mp->fix_size)
6997 mp->max_address = max_mp->max_address - mp->fix_size;
6998 else
6999 mp->max_address = max_address;
7001 /* Unlink MP from its current position. Since max_mp is non-null,
7002 mp->prev must be non-null. */
7003 mp->prev->next = mp->next;
7004 if (mp->next != NULL)
7005 mp->next->prev = mp->prev;
7006 else
7007 minipool_vector_tail = mp->prev;
7009 /* Re-insert it before MAX_MP. */
7010 mp->next = max_mp;
7011 mp->prev = max_mp->prev;
7012 max_mp->prev = mp;
7014 if (mp->prev != NULL)
7015 mp->prev->next = mp;
7016 else
7017 minipool_vector_head = mp;
7020 /* Save the new entry. */
7021 max_mp = mp;
7023 /* Scan over the preceding entries and adjust their addresses as
7024 required. */
7025 while (mp->prev != NULL
7026 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7028 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7029 mp = mp->prev;
7032 return max_mp;
7035 /* Add a constant to the minipool for a forward reference. Returns the
7036 node added or NULL if the constant will not fit in this pool. */
7037 static Mnode *
7038 add_minipool_forward_ref (Mfix *fix)
7040 /* If set, max_mp is the first pool_entry that has a lower
7041 constraint than the one we are trying to add. */
7042 Mnode * max_mp = NULL;
7043 HOST_WIDE_INT max_address = fix->address + fix->forwards;
7044 Mnode * mp;
7046 /* If this fix's address is greater than the address of the first
7047 entry, then we can't put the fix in this pool. We subtract the
7048 size of the current fix to ensure that if the table is fully
7049 packed we still have enough room to insert this value by shuffling
7050 the other fixes forwards. */
7051 if (minipool_vector_head &&
7052 fix->address >= minipool_vector_head->max_address - fix->fix_size)
7053 return NULL;
7055 /* Scan the pool to see if a constant with the same value has
7056 already been added. While we are doing this, also note the
7057 location where we must insert the constant if it doesn't already
7058 exist. */
7059 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7061 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7062 && fix->mode == mp->mode
7063 && (GET_CODE (fix->value) != CODE_LABEL
7064 || (CODE_LABEL_NUMBER (fix->value)
7065 == CODE_LABEL_NUMBER (mp->value)))
7066 && rtx_equal_p (fix->value, mp->value))
7068 /* More than one fix references this entry. */
7069 mp->refcount++;
7070 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7073 /* Note the insertion point if necessary. */
7074 if (max_mp == NULL
7075 && mp->max_address > max_address)
7076 max_mp = mp;
7078 /* If we are inserting an 8-byte aligned quantity and
7079 we have not already found an insertion point, then
7080 make sure that all such 8-byte aligned quantities are
7081 placed at the start of the pool. */
7082 if (ARM_DOUBLEWORD_ALIGN
7083 && max_mp == NULL
7084 && fix->fix_size == 8
7085 && mp->fix_size != 8)
7087 max_mp = mp;
7088 max_address = mp->max_address;
7092 /* The value is not currently in the minipool, so we need to create
7093 a new entry for it. If MAX_MP is NULL, the entry will be put on
7094 the end of the list since the placement is less constrained than
7095 any existing entry. Otherwise, we insert the new fix before
7096 MAX_MP and, if necessary, adjust the constraints on the other
7097 entries. */
7098 mp = xmalloc (sizeof (* mp));
7099 mp->fix_size = fix->fix_size;
7100 mp->mode = fix->mode;
7101 mp->value = fix->value;
7102 mp->refcount = 1;
7103 /* Not yet required for a backwards ref. */
7104 mp->min_address = -65536;
7106 if (max_mp == NULL)
7108 mp->max_address = max_address;
7109 mp->next = NULL;
7110 mp->prev = minipool_vector_tail;
7112 if (mp->prev == NULL)
7114 minipool_vector_head = mp;
7115 minipool_vector_label = gen_label_rtx ();
7117 else
7118 mp->prev->next = mp;
7120 minipool_vector_tail = mp;
7122 else
7124 if (max_address > max_mp->max_address - mp->fix_size)
7125 mp->max_address = max_mp->max_address - mp->fix_size;
7126 else
7127 mp->max_address = max_address;
7129 mp->next = max_mp;
7130 mp->prev = max_mp->prev;
7131 max_mp->prev = mp;
7132 if (mp->prev != NULL)
7133 mp->prev->next = mp;
7134 else
7135 minipool_vector_head = mp;
7138 /* Save the new entry. */
7139 max_mp = mp;
7141 /* Scan over the preceding entries and adjust their addresses as
7142 required. */
7143 while (mp->prev != NULL
7144 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7146 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7147 mp = mp->prev;
7150 return max_mp;
7153 static Mnode *
7154 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7155 HOST_WIDE_INT min_address)
7157 HOST_WIDE_INT offset;
7159 /* The code below assumes these are different. */
7160 gcc_assert (mp != min_mp);
7162 if (min_mp == NULL)
7164 if (min_address > mp->min_address)
7165 mp->min_address = min_address;
7167 else
7169 /* We will adjust this below if it is too loose. */
7170 mp->min_address = min_address;
7172 /* Unlink MP from its current position. Since min_mp is non-null,
7173 mp->next must be non-null. */
7174 mp->next->prev = mp->prev;
7175 if (mp->prev != NULL)
7176 mp->prev->next = mp->next;
7177 else
7178 minipool_vector_head = mp->next;
7180 /* Reinsert it after MIN_MP. */
7181 mp->prev = min_mp;
7182 mp->next = min_mp->next;
7183 min_mp->next = mp;
7184 if (mp->next != NULL)
7185 mp->next->prev = mp;
7186 else
7187 minipool_vector_tail = mp;
7190 min_mp = mp;
7192 offset = 0;
7193 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7195 mp->offset = offset;
7196 if (mp->refcount > 0)
7197 offset += mp->fix_size;
7199 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7200 mp->next->min_address = mp->min_address + mp->fix_size;
7203 return min_mp;
7206 /* Add a constant to the minipool for a backward reference. Returns the
7207 node added or NULL if the constant will not fit in this pool.
7209 Note that the code for insertion for a backwards reference can be
7210 somewhat confusing because the calculated offsets for each fix do
7211 not take into account the size of the pool (which is still under
7212 construction). */
7213 static Mnode *
7214 add_minipool_backward_ref (Mfix *fix)
7216 /* If set, min_mp is the last pool_entry that has a lower constraint
7217 than the one we are trying to add. */
7218 Mnode *min_mp = NULL;
7219 /* This can be negative, since it is only a constraint. */
7220 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7221 Mnode *mp;
7223 /* If we can't reach the current pool from this insn, or if we can't
7224 insert this entry at the end of the pool without pushing other
7225 fixes out of range, then we don't try. This ensures that we
7226 can't fail later on. */
7227 if (min_address >= minipool_barrier->address
7228 || (minipool_vector_tail->min_address + fix->fix_size
7229 >= minipool_barrier->address))
7230 return NULL;
7232 /* Scan the pool to see if a constant with the same value has
7233 already been added. While we are doing this, also note the
7234 location where we must insert the constant if it doesn't already
7235 exist. */
7236 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7238 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7239 && fix->mode == mp->mode
7240 && (GET_CODE (fix->value) != CODE_LABEL
7241 || (CODE_LABEL_NUMBER (fix->value)
7242 == CODE_LABEL_NUMBER (mp->value)))
7243 && rtx_equal_p (fix->value, mp->value)
7244 /* Check that there is enough slack to move this entry to the
7245 end of the table (this is conservative). */
7246 && (mp->max_address
7247 > (minipool_barrier->address
7248 + minipool_vector_tail->offset
7249 + minipool_vector_tail->fix_size)))
7251 mp->refcount++;
7252 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7255 if (min_mp != NULL)
7256 mp->min_address += fix->fix_size;
7257 else
7259 /* Note the insertion point if necessary. */
7260 if (mp->min_address < min_address)
7262 /* For now, we do not allow the insertion of nodes requiring
7263 8-byte alignment anywhere but at the start of the pool. */
7264 if (ARM_DOUBLEWORD_ALIGN
7265 && fix->fix_size == 8 && mp->fix_size != 8)
7266 return NULL;
7267 else
7268 min_mp = mp;
7270 else if (mp->max_address
7271 < minipool_barrier->address + mp->offset + fix->fix_size)
7273 /* Inserting before this entry would push the fix beyond
7274 its maximum address (which can happen if we have
7275 re-located a forwards fix); force the new fix to come
7276 after it. */
7277 min_mp = mp;
7278 min_address = mp->min_address + fix->fix_size;
7280 /* If we are inserting an 8-byte aligned quantity and
7281 we have not already found an insertion point, then
7282 make sure that all such 8-byte aligned quantities are
7283 placed at the start of the pool. */
7284 else if (ARM_DOUBLEWORD_ALIGN
7285 && min_mp == NULL
7286 && fix->fix_size == 8
7287 && mp->fix_size < 8)
7289 min_mp = mp;
7290 min_address = mp->min_address + fix->fix_size;
7295 /* We need to create a new entry. */
7296 mp = xmalloc (sizeof (* mp));
7297 mp->fix_size = fix->fix_size;
7298 mp->mode = fix->mode;
7299 mp->value = fix->value;
7300 mp->refcount = 1;
7301 mp->max_address = minipool_barrier->address + 65536;
7303 mp->min_address = min_address;
7305 if (min_mp == NULL)
7307 mp->prev = NULL;
7308 mp->next = minipool_vector_head;
7310 if (mp->next == NULL)
7312 minipool_vector_tail = mp;
7313 minipool_vector_label = gen_label_rtx ();
7315 else
7316 mp->next->prev = mp;
7318 minipool_vector_head = mp;
7320 else
7322 mp->next = min_mp->next;
7323 mp->prev = min_mp;
7324 min_mp->next = mp;
7326 if (mp->next != NULL)
7327 mp->next->prev = mp;
7328 else
7329 minipool_vector_tail = mp;
7332 /* Save the new entry. */
7333 min_mp = mp;
7335 if (mp->prev)
7336 mp = mp->prev;
7337 else
7338 mp->offset = 0;
7340 /* Scan over the following entries and adjust their offsets. */
7341 while (mp->next != NULL)
7343 if (mp->next->min_address < mp->min_address + mp->fix_size)
7344 mp->next->min_address = mp->min_address + mp->fix_size;
7346 if (mp->refcount)
7347 mp->next->offset = mp->offset + mp->fix_size;
7348 else
7349 mp->next->offset = mp->offset;
7351 mp = mp->next;
7354 return min_mp;
7357 static void
7358 assign_minipool_offsets (Mfix *barrier)
7360 HOST_WIDE_INT offset = 0;
7361 Mnode *mp;
7363 minipool_barrier = barrier;
7365 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7367 mp->offset = offset;
7369 if (mp->refcount > 0)
7370 offset += mp->fix_size;
7374 /* Output the literal table. */
7375 static void
7376 dump_minipool (rtx scan)
7378 Mnode * mp;
7379 Mnode * nmp;
7380 int align64 = 0;
7382 if (ARM_DOUBLEWORD_ALIGN)
7383 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7384 if (mp->refcount > 0 && mp->fix_size == 8)
7386 align64 = 1;
7387 break;
7390 if (dump_file)
7391 fprintf (dump_file,
7392 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7393 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7395 scan = emit_label_after (gen_label_rtx (), scan);
7396 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7397 scan = emit_label_after (minipool_vector_label, scan);
7399 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7401 if (mp->refcount > 0)
7403 if (dump_file)
7405 fprintf (dump_file,
7406 ";; Offset %u, min %ld, max %ld ",
7407 (unsigned) mp->offset, (unsigned long) mp->min_address,
7408 (unsigned long) mp->max_address);
7409 arm_print_value (dump_file, mp->value);
7410 fputc ('\n', dump_file);
7413 switch (mp->fix_size)
7415 #ifdef HAVE_consttable_1
7416 case 1:
7417 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7418 break;
7420 #endif
7421 #ifdef HAVE_consttable_2
7422 case 2:
7423 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7424 break;
7426 #endif
7427 #ifdef HAVE_consttable_4
7428 case 4:
7429 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7430 break;
7432 #endif
7433 #ifdef HAVE_consttable_8
7434 case 8:
7435 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7436 break;
7438 #endif
7439 default:
7440 gcc_unreachable ();
7444 nmp = mp->next;
7445 free (mp);
7448 minipool_vector_head = minipool_vector_tail = NULL;
7449 scan = emit_insn_after (gen_consttable_end (), scan);
7450 scan = emit_barrier_after (scan);
7453 /* Return the cost of forcibly inserting a barrier after INSN. */
7454 static int
7455 arm_barrier_cost (rtx insn)
7457 /* Basing the location of the pool on the loop depth is preferable,
7458 but at the moment, the basic block information seems to be
7459 corrupted by this stage of the compilation. */
7460 int base_cost = 50;
7461 rtx next = next_nonnote_insn (insn);
7463 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7464 base_cost -= 20;
7466 switch (GET_CODE (insn))
7468 case CODE_LABEL:
7469 /* It will always be better to place the table before the label, rather
7470 than after it. */
7471 return 50;
7473 case INSN:
7474 case CALL_INSN:
7475 return base_cost;
7477 case JUMP_INSN:
7478 return base_cost - 10;
7480 default:
7481 return base_cost + 10;
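/* So the cheapest insertion point is after a JUMP_INSN that is
   immediately followed by a CODE_LABEL: 50 - 20 - 10 == 20, since
   the flow of control is already broken there. */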
7485 /* Find the best place in the insn stream in the range
7486 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7487 Create the barrier by inserting a jump and add a new fix entry for
7488 it. */
7489 static Mfix *
7490 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7492 HOST_WIDE_INT count = 0;
7493 rtx barrier;
7494 rtx from = fix->insn;
7495 rtx selected = from;
7496 int selected_cost;
7497 HOST_WIDE_INT selected_address;
7498 Mfix * new_fix;
7499 HOST_WIDE_INT max_count = max_address - fix->address;
7500 rtx label = gen_label_rtx ();
7502 selected_cost = arm_barrier_cost (from);
7503 selected_address = fix->address;
7505 while (from && count < max_count)
7507 rtx tmp;
7508 int new_cost;
7510 /* This code shouldn't have been called if there was a natural barrier
7511 within range. */
7512 gcc_assert (GET_CODE (from) != BARRIER);
7514 /* Count the length of this insn. */
7515 count += get_attr_length (from);
7517 /* If there is a jump table, add its length. */
7518 tmp = is_jump_table (from);
7519 if (tmp != NULL)
7521 count += get_jump_table_size (tmp);
7523 /* Jump tables aren't in a basic block, so base the cost on
7524 the dispatch insn. If we select this location, we will
7525 still put the pool after the table. */
7526 new_cost = arm_barrier_cost (from);
7528 if (count < max_count && new_cost <= selected_cost)
7530 selected = tmp;
7531 selected_cost = new_cost;
7532 selected_address = fix->address + count;
7535 /* Continue after the dispatch table. */
7536 from = NEXT_INSN (tmp);
7537 continue;
7540 new_cost = arm_barrier_cost (from);
7542 if (count < max_count && new_cost <= selected_cost)
7544 selected = from;
7545 selected_cost = new_cost;
7546 selected_address = fix->address + count;
7549 from = NEXT_INSN (from);
7552 /* Create a new JUMP_INSN that branches around a barrier. */
7553 from = emit_jump_insn_after (gen_jump (label), selected);
7554 JUMP_LABEL (from) = label;
7555 barrier = emit_barrier_after (from);
7556 emit_label_after (label, barrier);
7558 /* Create a minipool barrier entry for the new barrier. */
7559 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7560 new_fix->insn = barrier;
7561 new_fix->address = selected_address;
7562 new_fix->next = fix->next;
7563 fix->next = new_fix;
7565 return new_fix;
7568 /* Record that there is a natural barrier in the insn stream at
7569 ADDRESS. */
7570 static void
7571 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7573 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7575 fix->insn = insn;
7576 fix->address = address;
7578 fix->next = NULL;
7579 if (minipool_fix_head != NULL)
7580 minipool_fix_tail->next = fix;
7581 else
7582 minipool_fix_head = fix;
7584 minipool_fix_tail = fix;
7587 /* Record INSN, which will need fixing up to load a value from the
7588 minipool. ADDRESS is the offset of the insn since the start of the
7589 function; LOC is a pointer to the part of the insn which requires
7590 fixing; VALUE is the constant that must be loaded, which is of type
7591 MODE. */
7592 static void
7593 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7594 enum machine_mode mode, rtx value)
7596 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7598 #ifdef AOF_ASSEMBLER
7599 /* PIC symbol references need to be converted into offsets into the
7600 based area. */
7601 /* XXX This shouldn't be done here. */
7602 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7603 value = aof_pic_entry (value);
7604 #endif /* AOF_ASSEMBLER */
7606 fix->insn = insn;
7607 fix->address = address;
7608 fix->loc = loc;
7609 fix->mode = mode;
7610 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7611 fix->value = value;
7612 fix->forwards = get_attr_pool_range (insn);
7613 fix->backwards = get_attr_neg_pool_range (insn);
7614 fix->minipool = NULL;
7616 /* If an insn doesn't have a range defined for it, then it isn't
7617 expecting to be reworked by this code. Better to stop now than
7618 to generate duff assembly code. */
7619 gcc_assert (fix->forwards || fix->backwards);
7621 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7622 So there might be an empty word before the start of the pool.
7623 Hence we reduce the forward range by 4 to allow for this
7624 possibility. */
7625 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7626 fix->forwards -= 4;
7628 if (dump_file)
7630 fprintf (dump_file,
7631 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7632 GET_MODE_NAME (mode),
7633 INSN_UID (insn), (unsigned long) address,
7634 -1 * (long)fix->backwards, (long)fix->forwards);
7635 arm_print_value (dump_file, fix->value);
7636 fprintf (dump_file, "\n");
7639 /* Add it to the chain of fixes. */
7640 fix->next = NULL;
7642 if (minipool_fix_head != NULL)
7643 minipool_fix_tail->next = fix;
7644 else
7645 minipool_fix_head = fix;
7647 minipool_fix_tail = fix;
7650 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7651 Returns the number of insns needed, or 99 if we don't know how to
7652 do it. */
7653 int
7654 arm_const_double_inline_cost (rtx val)
7656 rtx lowpart, highpart;
7657 enum machine_mode mode;
7659 mode = GET_MODE (val);
7661 if (mode == VOIDmode)
7662 mode = DImode;
7664 gcc_assert (GET_MODE_SIZE (mode) == 8);
7666 lowpart = gen_lowpart (SImode, val);
7667 highpart = gen_highpart_mode (SImode, mode, val);
7669 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7670 gcc_assert (GET_CODE (highpart) == CONST_INT);
7672 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7673 NULL_RTX, NULL_RTX, 0, 0)
7674 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7675 NULL_RTX, NULL_RTX, 0, 0));
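/* For example, the DImode constant 0x100000001 costs 2 (one mov per
   32-bit half), while a value such as 0x1234567812345678 needs
   several data-processing insns for each half. */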
7678 /* Return true if it is worthwhile to split a 64-bit constant into two
7679 32-bit operations. This is the case if optimizing for size, or
7680 if we have load delay slots, or if one 32-bit part can be done with
7681 a single data operation. */
7682 bool
7683 arm_const_double_by_parts (rtx val)
7685 enum machine_mode mode = GET_MODE (val);
7686 rtx part;
7688 if (optimize_size || arm_ld_sched)
7689 return true;
7691 if (mode == VOIDmode)
7692 mode = DImode;
7694 part = gen_highpart_mode (SImode, mode, val);
7696 gcc_assert (GET_CODE (part) == CONST_INT);
7698 if (const_ok_for_arm (INTVAL (part))
7699 || const_ok_for_arm (~INTVAL (part)))
7700 return true;
7702 part = gen_lowpart (SImode, val);
7704 gcc_assert (GET_CODE (part) == CONST_INT);
7706 if (const_ok_for_arm (INTVAL (part))
7707 || const_ok_for_arm (~INTVAL (part)))
7708 return true;
7710 return false;
7713 /* Scan INSN and note any of its operands that need fixing.
7714 If DO_PUSHES is false we do not actually push any of the fixups
7715 needed. The function returns TRUE if any fixups were needed/pushed.
7716 This is used by arm_memory_load_p() which needs to know about loads
7717 of constants that will be converted into minipool loads. */
7718 static bool
7719 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7721 bool result = false;
7722 int opno;
7724 extract_insn (insn);
7726 if (!constrain_operands (1))
7727 fatal_insn_not_found (insn);
7729 if (recog_data.n_alternatives == 0)
7730 return false;
7732 /* Fill in recog_op_alt with information about the constraints of
7733 this insn. */
7734 preprocess_constraints ();
7736 for (opno = 0; opno < recog_data.n_operands; opno++)
7738 /* Things we need to fix can only occur in inputs. */
7739 if (recog_data.operand_type[opno] != OP_IN)
7740 continue;
7742 /* If this alternative is a memory reference, then any mention
7743 of constants in this alternative is really to fool reload
7744 into allowing us to accept one there. We need to fix them up
7745 now so that we output the right code. */
7746 if (recog_op_alt[opno][which_alternative].memory_ok)
7748 rtx op = recog_data.operand[opno];
7750 if (CONSTANT_P (op))
7752 if (do_pushes)
7753 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7754 recog_data.operand_mode[opno], op);
7755 result = true;
7757 else if (GET_CODE (op) == MEM
7758 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7759 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7761 if (do_pushes)
7763 rtx cop = avoid_constant_pool_reference (op);
7765 /* Casting the address of something to a mode narrower
7766 than a word can cause avoid_constant_pool_reference()
7767 to return the pool reference itself. That's no good to
7768 us here. Let's just hope that we can use the
7769 constant pool value directly. */
7770 if (op == cop)
7771 cop = get_pool_constant (XEXP (op, 0));
7773 push_minipool_fix (insn, address,
7774 recog_data.operand_loc[opno],
7775 recog_data.operand_mode[opno], cop);
7778 result = true;
7783 return result;
7786 /* Gcc puts the pool in the wrong place for ARM, since we can only
7787 load addresses a limited distance around the pc. We do some
7788 special munging to move the constant pool values to the correct
7789 point in the code. */
7790 static void
7791 arm_reorg (void)
7793 rtx insn;
7794 HOST_WIDE_INT address = 0;
7795 Mfix * fix;
7797 minipool_fix_head = minipool_fix_tail = NULL;
7799 /* The first insn must always be a note, or the code below won't
7800 scan it properly. */
7801 insn = get_insns ();
7802 gcc_assert (GET_CODE (insn) == NOTE);
7804 /* Scan all the insns and record the operands that will need fixing. */
7805 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7807 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7808 && (arm_cirrus_insn_p (insn)
7809 || GET_CODE (insn) == JUMP_INSN
7810 || arm_memory_load_p (insn)))
7811 cirrus_reorg (insn);
7813 if (GET_CODE (insn) == BARRIER)
7814 push_minipool_barrier (insn, address);
7815 else if (INSN_P (insn))
7817 rtx table;
7819 note_invalid_constants (insn, address, true);
7820 address += get_attr_length (insn);
7822 /* If the insn is a vector jump, add the size of the table
7823 and skip the table. */
7824 if ((table = is_jump_table (insn)) != NULL)
7826 address += get_jump_table_size (table);
7827 insn = table;
7832 fix = minipool_fix_head;
7834 /* Now scan the fixups and perform the required changes. */
7835 while (fix)
7837 Mfix * ftmp;
7838 Mfix * fdel;
7839 Mfix * last_added_fix;
7840 Mfix * last_barrier = NULL;
7841 Mfix * this_fix;
7843 /* Skip any further barriers before the next fix. */
7844 while (fix && GET_CODE (fix->insn) == BARRIER)
7845 fix = fix->next;
7847 /* No more fixes. */
7848 if (fix == NULL)
7849 break;
7851 last_added_fix = NULL;
7853 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7855 if (GET_CODE (ftmp->insn) == BARRIER)
7857 if (ftmp->address >= minipool_vector_head->max_address)
7858 break;
7860 last_barrier = ftmp;
7862 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7863 break;
7865 last_added_fix = ftmp; /* Keep track of the last fix added. */
7868 /* If we found a barrier, drop back to that; any fixes that we
7869 could have reached but come after the barrier will now go in
7870 the next mini-pool. */
7871 if (last_barrier != NULL)
7873 /* Reduce the refcount for those fixes that won't go into this
7874 pool after all. */
7875 for (fdel = last_barrier->next;
7876 fdel && fdel != ftmp;
7877 fdel = fdel->next)
7879 fdel->minipool->refcount--;
7880 fdel->minipool = NULL;
7883 ftmp = last_barrier;
7885 else
7887 /* ftmp is the first fix that we can't fit into this pool, and
7888 there are no natural barriers that we could use. Insert a
7889 new barrier in the code somewhere between the previous
7890 fix and this one, and arrange to jump around it. */
7891 HOST_WIDE_INT max_address;
7893 /* The last item on the list of fixes must be a barrier, so
7894 we can never run off the end of the list of fixes without
7895 last_barrier being set. */
7896 gcc_assert (ftmp);
7898 max_address = minipool_vector_head->max_address;
7899 /* Check that there isn't another fix that is in range that
7900 we couldn't fit into this pool because the pool was
7901 already too large: we need to put the pool before such an
7902 instruction. */
7903 if (ftmp->address < max_address)
7904 max_address = ftmp->address;
7906 last_barrier = create_fix_barrier (last_added_fix, max_address);
7909 assign_minipool_offsets (last_barrier);
7911 while (ftmp)
7913 if (GET_CODE (ftmp->insn) != BARRIER
7914 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7915 == NULL))
7916 break;
7918 ftmp = ftmp->next;
7921 /* Scan over the fixes we have identified for this pool, fixing them
7922 up and adding the constants to the pool itself. */
7923 for (this_fix = fix; this_fix && ftmp != this_fix;
7924 this_fix = this_fix->next)
7925 if (GET_CODE (this_fix->insn) != BARRIER)
7927 rtx addr
7928 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7929 minipool_vector_label),
7930 this_fix->minipool->offset);
7931 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7934 dump_minipool (last_barrier->insn);
7935 fix = ftmp;
7938 /* From now on we must synthesize any constants that we can't handle
7939 directly. This can happen if the RTL gets split during final
7940 instruction generation. */
7941 after_arm_reorg = 1;
7943 /* Free the minipool memory. */
7944 obstack_free (&minipool_obstack, minipool_startobj);
7947 /* Routines to output assembly language. */
7949 /* If the rtx is the correct value then return the string of the number.
7950 In this way we can ensure that valid double constants are generated even
7951 when cross compiling. */
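/* The eight values scanned below are the constants representable as
   FPA immediates, set up by init_fp_table; presumably 0.0, 1.0, 2.0,
   3.0, 4.0, 5.0, 0.5 and 10.0. */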
7952 const char *
7953 fp_immediate_constant (rtx x)
7955 REAL_VALUE_TYPE r;
7956 int i;
7958 if (!fp_consts_inited)
7959 init_fp_table ();
7961 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7962 for (i = 0; i < 8; i++)
7963 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7964 return strings_fp[i];
7966 gcc_unreachable ();
7969 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7970 static const char *
7971 fp_const_from_val (REAL_VALUE_TYPE *r)
7973 int i;
7975 if (!fp_consts_inited)
7976 init_fp_table ();
7978 for (i = 0; i < 8; i++)
7979 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7980 return strings_fp[i];
7982 gcc_unreachable ();
7985 /* Output the operands of a LDM/STM instruction to STREAM.
7986 MASK is the ARM register set mask of which only bits 0-15 are important.
7987 REG is the base register, either the frame pointer or the stack pointer.
7988 INSTR is the possibly suffixed load or store instruction. */
7990 static void
7991 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7992 unsigned long mask)
7994 unsigned i;
7995 bool not_first = FALSE;
7997 fputc ('\t', stream);
7998 asm_fprintf (stream, instr, reg);
7999 fputs (", {", stream);
8001 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8002 if (mask & (1 << i))
8004 if (not_first)
8005 fprintf (stream, ", ");
8007 asm_fprintf (stream, "%r", i);
8008 not_first = TRUE;
8011 fprintf (stream, "}\n");
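/* Illustrative example (not from the GCC sources): calling
   print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, (1 << 4) | (1 << LR_REGNUM))
   emits

	ldmfd	sp!, {r4, lr}

   since %r is asm_fprintf's register-name escape and the loop walks
   the mask from r0 upwards.  */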
8015 /* Output a FLDMX instruction to STREAM.
8016 BASE is the register containing the address.
8017 REG and COUNT specify the register range.
8018 Extra registers may be added to avoid hardware bugs. */
8020 static void
8021 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
8023 int i;
8025 /* Workaround ARM10 VFPr1 bug. */
8026 if (count == 2 && !arm_arch6)
8028 if (reg == 15)
8029 reg--;
8030 count++;
8033 fputc ('\t', stream);
8034 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
8036 for (i = reg; i < reg + count; i++)
8038 if (i > reg)
8039 fputs (", ", stream);
8040 asm_fprintf (stream, "d%d", i);
8042 fputs ("}\n", stream);
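/* For example (illustrative): arm_output_fldmx (f, IP_REGNUM, 8, 3)
   emits

	fldmfdx	ip!, {d8, d9, d10}

   while a range of exactly two registers on a pre-v6 core would be
   widened to three by the erratum workaround above.  */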
8047 /* Output the assembly for a VFP store multiple (FSTMX). */
8049 const char *
8050 vfp_output_fstmx (rtx * operands)
8052 char pattern[100];
8053 int p;
8054 int base;
8055 int i;
8057 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8058 p = strlen (pattern);
8060 gcc_assert (GET_CODE (operands[1]) == REG);
8062 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8063 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8065 p += sprintf (&pattern[p], ", d%d", base + i);
8067 strcpy (&pattern[p], "}");
8069 output_asm_insn (pattern, operands);
8070 return "";
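/* Illustrative example: if operands[1] is d8 (so BASE is 8) and the
   PARALLEL in operands[2] has three elements, the buffer assembled
   above becomes "fstmfdx\t%m0!, {%P1, d9, d10}", which the operand
   printer expands to something like

	fstmfdx	sp!, {d8, d9, d10}

   assuming the memory operand is a stack push.  */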
8074 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
8075 number of bytes pushed. */
8077 static int
8078 vfp_emit_fstmx (int base_reg, int count)
8080 rtx par;
8081 rtx dwarf;
8082 rtx tmp, reg;
8083 int i;
8085 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8086 register pairs are stored by a store multiple insn. We avoid this
8087 by pushing an extra pair. */
8088 if (count == 2 && !arm_arch6)
8090 if (base_reg == LAST_VFP_REGNUM - 3)
8091 base_reg -= 2;
8092 count++;
8095 /* ??? The frame layout is implementation defined. We describe
8096 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8097 We really need some way of representing the whole block so that the
8098 unwinder can figure it out at runtime. */
8099 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8100 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8102 reg = gen_rtx_REG (DFmode, base_reg);
8103 base_reg += 2;
8105 XVECEXP (par, 0, 0)
8106 = gen_rtx_SET (VOIDmode,
8107 gen_rtx_MEM (BLKmode,
8108 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8109 gen_rtx_UNSPEC (BLKmode,
8110 gen_rtvec (1, reg),
8111 UNSPEC_PUSH_MULT));
8113 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8114 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8115 GEN_INT (-(count * 8 + 4))));
8116 RTX_FRAME_RELATED_P (tmp) = 1;
8117 XVECEXP (dwarf, 0, 0) = tmp;
8119 tmp = gen_rtx_SET (VOIDmode,
8120 gen_rtx_MEM (DFmode, stack_pointer_rtx),
8121 reg);
8122 RTX_FRAME_RELATED_P (tmp) = 1;
8123 XVECEXP (dwarf, 0, 1) = tmp;
8125 for (i = 1; i < count; i++)
8127 reg = gen_rtx_REG (DFmode, base_reg);
8128 base_reg += 2;
8129 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8131 tmp = gen_rtx_SET (VOIDmode,
8132 gen_rtx_MEM (DFmode,
8133 gen_rtx_PLUS (SImode,
8134 stack_pointer_rtx,
8135 GEN_INT (i * 8))),
8136 reg);
8137 RTX_FRAME_RELATED_P (tmp) = 1;
8138 XVECEXP (dwarf, 0, i + 1) = tmp;
8141 par = emit_insn (par);
8142 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8143 REG_NOTES (par));
8144 RTX_FRAME_RELATED_P (par) = 1;
8146 return count * 8 + 4;
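/* Worked example (illustrative): pushing three register pairs emits a
   single FSTMX and returns 3 * 8 + 4 = 28 bytes: eight bytes per pair
   plus the four-byte FSTMX format word.  A request for exactly two
   pairs on a pre-v6 core is first widened to three, per the erratum
   workaround above, and so also returns 28.  */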
8150 /* Output a 'call' insn. */
8151 const char *
8152 output_call (rtx *operands)
8154 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8156 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8157 if (REGNO (operands[0]) == LR_REGNUM)
8159 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8160 output_asm_insn ("mov%?\t%0, %|lr", operands);
8163 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8165 if (TARGET_INTERWORK || arm_arch4t)
8166 output_asm_insn ("bx%?\t%0", operands);
8167 else
8168 output_asm_insn ("mov%?\t%|pc, %0", operands);
8170 return "";
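/* Illustrative example (not from the GCC sources): on a pre-v5 core
   with interworking enabled, a call through r2 is emitted as

	mov	lr, pc
	bx	r2

   and as "mov pc, r2" without interworking or v4t; a call through lr
   is first moved into ip so that the return address can safely
   overwrite lr.  */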
8173 /* Output a 'call' insn that is a reference in memory. */
8174 const char *
8175 output_call_mem (rtx *operands)
8177 if (TARGET_INTERWORK && !arm_arch5)
8179 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8180 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8181 output_asm_insn ("bx%?\t%|ip", operands);
8183 else if (regno_use_in (LR_REGNUM, operands[0]))
8185 /* LR is used in the memory address. We load the address in the
8186 first instruction. It's safe to use IP as the target of the
8187 load since the call will kill it anyway. */
8188 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8189 if (arm_arch5)
8190 output_asm_insn ("blx%?\t%|ip", operands);
8191 else
8193 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8194 if (arm_arch4t)
8195 output_asm_insn ("bx%?\t%|ip", operands);
8196 else
8197 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8200 else
8202 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8203 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8206 return "";
8210 /* Output a move from arm registers to an fpa register.
8211 OPERANDS[0] is an fpa register.
8212 OPERANDS[1] is the first register of an arm register triple. */
8213 const char *
8214 output_mov_long_double_fpa_from_arm (rtx *operands)
8216 int arm_reg0 = REGNO (operands[1]);
8217 rtx ops[3];
8219 gcc_assert (arm_reg0 != IP_REGNUM);
8221 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8222 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8223 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8225 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8226 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8228 return "";
8231 /* Output a move from an fpa register to arm registers.
8232 OPERANDS[0] is the first register of an arm register triple.
8233 OPERANDS[1] is an fpa register. */
8234 const char *
8235 output_mov_long_double_arm_from_fpa (rtx *operands)
8237 int arm_reg0 = REGNO (operands[0]);
8238 rtx ops[3];
8240 gcc_assert (arm_reg0 != IP_REGNUM);
8242 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8243 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8244 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8246 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8247 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8248 return "";
8251 /* Output a move from arm registers to arm registers of a long double.
8252 OPERANDS[0] is the destination.
8253 OPERANDS[1] is the source. */
8254 const char *
8255 output_mov_long_double_arm_from_arm (rtx *operands)
8257 /* We have to be careful here because the two might overlap. */
8258 int dest_start = REGNO (operands[0]);
8259 int src_start = REGNO (operands[1]);
8260 rtx ops[2];
8261 int i;
8263 if (dest_start < src_start)
8265 for (i = 0; i < 3; i++)
8267 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8268 ops[1] = gen_rtx_REG (SImode, src_start + i);
8269 output_asm_insn ("mov%?\t%0, %1", ops);
8272 else
8274 for (i = 2; i >= 0; i--)
8276 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8277 ops[1] = gen_rtx_REG (SImode, src_start + i);
8278 output_asm_insn ("mov%?\t%0, %1", ops);
8282 return "";
8286 /* Output a move from arm registers to an fpa register.
8287 OPERANDS[0] is an fpa register.
8288 OPERANDS[1] is the first register of an arm register pair. */
8289 const char *
8290 output_mov_double_fpa_from_arm (rtx *operands)
8292 int arm_reg0 = REGNO (operands[1]);
8293 rtx ops[2];
8295 gcc_assert (arm_reg0 != IP_REGNUM);
8297 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8298 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8299 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8300 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8301 return "";
8304 /* Output a move from an fpa register to arm registers.
8305 OPERANDS[0] is the first register of an arm register pair.
8306 OPERANDS[1] is an fpa register. */
8307 const char *
8308 output_mov_double_arm_from_fpa (rtx *operands)
8310 int arm_reg0 = REGNO (operands[0]);
8311 rtx ops[2];
8313 gcc_assert (arm_reg0 != IP_REGNUM);
8315 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8316 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8317 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8318 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8319 return "";
8322 /* Output a move between double words.
8323 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8324 or MEM<-REG and all MEMs must be offsettable addresses. */
8325 const char *
8326 output_move_double (rtx *operands)
8328 enum rtx_code code0 = GET_CODE (operands[0]);
8329 enum rtx_code code1 = GET_CODE (operands[1]);
8330 rtx otherops[3];
8332 if (code0 == REG)
8334 int reg0 = REGNO (operands[0]);
8336 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8338 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8340 switch (GET_CODE (XEXP (operands[1], 0)))
8342 case REG:
8343 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8344 break;
8346 case PRE_INC:
8347 gcc_assert (TARGET_LDRD);
8348 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8349 break;
8351 case PRE_DEC:
8352 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8353 break;
8355 case POST_INC:
8356 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8357 break;
8359 case POST_DEC:
8360 gcc_assert (TARGET_LDRD);
8361 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8362 break;
8364 case PRE_MODIFY:
8365 case POST_MODIFY:
8366 otherops[0] = operands[0];
8367 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8368 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8370 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8372 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8374 /* Registers overlap so split out the increment. */
8375 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8376 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8378 else
8379 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8381 else
8383 /* We only allow constant increments, so this is safe. */
8384 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8386 break;
8388 case LABEL_REF:
8389 case CONST:
8390 output_asm_insn ("adr%?\t%0, %1", operands);
8391 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8392 break;
8394 default:
8395 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8396 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8398 otherops[0] = operands[0];
8399 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8400 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8402 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8404 if (GET_CODE (otherops[2]) == CONST_INT)
8406 switch ((int) INTVAL (otherops[2]))
8408 case -8:
8409 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8410 return "";
8411 case -4:
8412 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8413 return "";
8414 case 4:
8415 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8416 return "";
8419 if (TARGET_LDRD
8420 && (GET_CODE (otherops[2]) == REG
8421 || (GET_CODE (otherops[2]) == CONST_INT
8422 && INTVAL (otherops[2]) > -256
8423 && INTVAL (otherops[2]) < 256)))
8425 if (reg_overlap_mentioned_p (otherops[0],
8426 otherops[2]))
8428 /* Swap base and index registers over to
8429 avoid a conflict. */
8430 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8431 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8434 /* If both registers conflict, it will usually
8435 have been fixed by a splitter. */
8436 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8438 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8439 output_asm_insn ("ldr%?d\t%0, [%1]",
8440 otherops);
8442 else
8443 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8444 return "";
8447 if (GET_CODE (otherops[2]) == CONST_INT)
8449 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8450 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8451 else
8452 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8454 else
8455 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8457 else
8458 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8460 return "ldm%?ia\t%0, %M0";
8462 else
8464 otherops[1] = adjust_address (operands[1], SImode, 4);
8465 /* Take care of overlapping base/data reg. */
8466 if (reg_mentioned_p (operands[0], operands[1]))
8468 output_asm_insn ("ldr%?\t%0, %1", otherops);
8469 output_asm_insn ("ldr%?\t%0, %1", operands);
8471 else
8473 output_asm_insn ("ldr%?\t%0, %1", operands);
8474 output_asm_insn ("ldr%?\t%0, %1", otherops);
8479 else
8481 /* Constraints should ensure this. */
8482 gcc_assert (code0 == MEM && code1 == REG);
8483 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8485 switch (GET_CODE (XEXP (operands[0], 0)))
8487 case REG:
8488 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8489 break;
8491 case PRE_INC:
8492 gcc_assert (TARGET_LDRD);
8493 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8494 break;
8496 case PRE_DEC:
8497 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8498 break;
8500 case POST_INC:
8501 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8502 break;
8504 case POST_DEC:
8505 gcc_assert (TARGET_LDRD);
8506 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8507 break;
8509 case PRE_MODIFY:
8510 case POST_MODIFY:
8511 otherops[0] = operands[1];
8512 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8513 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8515 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8516 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8517 else
8518 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8519 break;
8521 case PLUS:
8522 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8523 if (GET_CODE (otherops[2]) == CONST_INT)
8525 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8527 case -8:
8528 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8529 return "";
8531 case -4:
8532 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8533 return "";
8535 case 4:
8536 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8537 return "";
8540 if (TARGET_LDRD
8541 && (GET_CODE (otherops[2]) == REG
8542 || (GET_CODE (otherops[2]) == CONST_INT
8543 && INTVAL (otherops[2]) > -256
8544 && INTVAL (otherops[2]) < 256)))
8546 otherops[0] = operands[1];
8547 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8548 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8549 return "";
8551 /* Fall through */
8553 default:
8554 otherops[0] = adjust_address (operands[0], SImode, 4);
8555 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8556 output_asm_insn ("str%?\t%1, %0", operands);
8557 output_asm_insn ("str%?\t%1, %0", otherops);
8561 return "";
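/* Illustrative examples (not from the GCC sources): a DImode load
   from a plain register address becomes "ldmia r4, {r0, r1}"; a
   constant offset of 4 selects "ldmib" and -8 selects "ldmdb"; and on
   TARGET_LDRD cores a register or small immediate index is handled by
   a single "ldrd r0, [r4, r5]", with the base and index swapped, or
   the add split out, when the destination overlaps the index.  */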
8564 /* Output an ADD r, s, #n where n may be too big for one instruction.
8565 If the addition is a no-op (adding zero with source equal to destination), output nothing.
8566 const char *
8567 output_add_immediate (rtx *operands)
8569 HOST_WIDE_INT n = INTVAL (operands[2]);
8571 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8573 if (n < 0)
8574 output_multi_immediate (operands,
8575 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8576 -n);
8577 else
8578 output_multi_immediate (operands,
8579 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8580 n);
8583 return "";
8586 /* Output a multiple immediate operation.
8587 OPERANDS is the vector of operands referred to in the output patterns.
8588 INSTR1 is the output pattern to use for the first constant.
8589 INSTR2 is the output pattern to use for subsequent constants.
8590 IMMED_OP is the index of the constant slot in OPERANDS.
8591 N is the constant value. */
8592 static const char *
8593 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8594 int immed_op, HOST_WIDE_INT n)
8596 #if HOST_BITS_PER_WIDE_INT > 32
8597 n &= 0xffffffff;
8598 #endif
8600 if (n == 0)
8602 /* Quick and easy output. */
8603 operands[immed_op] = const0_rtx;
8604 output_asm_insn (instr1, operands);
8606 else
8608 int i;
8609 const char * instr = instr1;
8611 /* Note that n is never zero here (which would give no output). */
8612 for (i = 0; i < 32; i += 2)
8614 if (n & (3 << i))
8616 operands[immed_op] = GEN_INT (n & (255 << i));
8617 output_asm_insn (instr, operands);
8618 instr = instr2;
8619 i += 6;
8624 return "";
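/* Worked example (illustrative): output_add_immediate with
   n = 0x10F00 cannot be encoded as a single shifted 8-bit immediate,
   so the loop above splits it into byte-sized chunks on even bit
   boundaries and emits two instructions:

	add	r0, r1, #3840	@ 0x00000F00
	add	r0, r0, #65536	@ 0x00010000

   assuming operands 0 and 1 are r0 and r1 respectively.  */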
8627 /* Return the appropriate ARM instruction for the operation code.
8628 The returned result should not be overwritten. OP is the rtx of the
8629 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8630 was shifted. */
8631 const char *
8632 arithmetic_instr (rtx op, int shift_first_arg)
8634 switch (GET_CODE (op))
8636 case PLUS:
8637 return "add";
8639 case MINUS:
8640 return shift_first_arg ? "rsb" : "sub";
8642 case IOR:
8643 return "orr";
8645 case XOR:
8646 return "eor";
8648 case AND:
8649 return "and";
8651 default:
8652 gcc_unreachable ();
8656 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8657 for the operation code. The returned result should not be overwritten.
8658 OP is the rtx code of the shift.
8659 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8660 constant shift amount otherwise. */
8661 static const char *
8662 shift_op (rtx op, HOST_WIDE_INT *amountp)
8664 const char * mnem;
8665 enum rtx_code code = GET_CODE (op);
8667 switch (GET_CODE (XEXP (op, 1)))
8669 case REG:
8670 case SUBREG:
8671 *amountp = -1;
8672 break;
8674 case CONST_INT:
8675 *amountp = INTVAL (XEXP (op, 1));
8676 break;
8678 default:
8679 gcc_unreachable ();
8682 switch (code)
8684 case ASHIFT:
8685 mnem = "asl";
8686 break;
8688 case ASHIFTRT:
8689 mnem = "asr";
8690 break;
8692 case LSHIFTRT:
8693 mnem = "lsr";
8694 break;
8696 case ROTATE:
8697 gcc_assert (*amountp != -1);
8698 *amountp = 32 - *amountp;
8700 /* Fall through. */
8702 case ROTATERT:
8703 mnem = "ror";
8704 break;
8706 case MULT:
8707 /* We never have to worry about the amount being other than a
8708 power of 2, since this case can never be reloaded from a reg. */
8709 gcc_assert (*amountp != -1);
8710 *amountp = int_log2 (*amountp);
8711 return "asl";
8713 default:
8714 gcc_unreachable ();
8717 if (*amountp != -1)
8719 /* This is not 100% correct, but follows from the desire to merge
8720 multiplication by a power of 2 with the recognizer for a
8721 shift. >=32 is not a valid shift for "asl", so we must try and
8722 output a shift that produces the correct arithmetical result.
8723 Using lsr #32 is identical except for the fact that the carry bit
8724 is not set correctly if we set the flags; but we never use the
8725 carry bit from such an operation, so we can ignore that. */
8726 if (code == ROTATERT)
8727 /* Rotate is just modulo 32. */
8728 *amountp &= 31;
8729 else if (*amountp != (*amountp & 31))
8731 if (code == ASHIFT)
8732 mnem = "lsr";
8733 *amountp = 32;
8736 /* Shifts of 0 are no-ops. */
8737 if (*amountp == 0)
8738 return NULL;
8741 return mnem;
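/* For example (illustrative): a (mult x 8) operand yields "asl" with
   *amountp = 3; an ASHIFT by 33 is rewritten as "lsr" with
   *amountp = 32, which gives the arithmetically correct zero result;
   a ROTATE by 8 becomes "ror" with *amountp = 24; and a shift by 0
   returns NULL so the caller can omit the shift entirely.  */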
8744 /* Obtain the shift count from POWER, which must be a power of two. */
8746 static HOST_WIDE_INT
8747 int_log2 (HOST_WIDE_INT power)
8749 HOST_WIDE_INT shift = 0;
8751 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8753 gcc_assert (shift <= 31);
8754 shift++;
8757 return shift;
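/* For instance, int_log2 (8) returns 3: the loop shifts a set bit
   left until it coincides with the bit in POWER, asserting if no bit
   is found at or below bit 31.  A value that is not a power of two
   would yield the position of its lowest set bit instead.  */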
8760 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8761 because /bin/as is horribly restrictive. The judgement about
8762 whether or not each character is 'printable' (and can be output as
8763 is) or not (and must be printed with an octal escape) must be made
8764 with reference to the *host* character set -- the situation is
8765 similar to that discussed in the comments above pp_c_char in
8766 c-pretty-print.c. */
8768 #define MAX_ASCII_LEN 51
8770 void
8771 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8773 int i;
8774 int len_so_far = 0;
8776 fputs ("\t.ascii\t\"", stream);
8778 for (i = 0; i < len; i++)
8780 int c = p[i];
8782 if (len_so_far >= MAX_ASCII_LEN)
8784 fputs ("\"\n\t.ascii\t\"", stream);
8785 len_so_far = 0;
8788 if (ISPRINT (c))
8790 if (c == '\\' || c == '\"')
8792 putc ('\\', stream);
8793 len_so_far++;
8795 putc (c, stream);
8796 len_so_far++;
8798 else
8800 fprintf (stream, "\\%03o", c);
8801 len_so_far += 4;
8805 fputs ("\"\n", stream);
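/* Illustrative example: for the input bytes "Hi\"\n" this routine
   emits

	.ascii	"Hi\"\012"

   escaping the quote and printing the non-printable newline as a
   three-digit octal escape; runs longer than MAX_ASCII_LEN characters
   are split across multiple .ascii directives.  */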
8808 /* Compute the register save mask for registers 0 through 12
8809 inclusive. This code is used by arm_compute_save_reg_mask. */
8811 static unsigned long
8812 arm_compute_save_reg0_reg12_mask (void)
8814 unsigned long func_type = arm_current_func_type ();
8815 unsigned long save_reg_mask = 0;
8816 unsigned int reg;
8818 if (IS_INTERRUPT (func_type))
8820 unsigned int max_reg;
8821 /* Interrupt functions must not corrupt any registers,
8822 even call clobbered ones. If this is a leaf function
8823 we can just examine the registers used by the RTL, but
8824 otherwise we have to assume that whatever function is
8825 called might clobber anything, and so we have to save
8826 all the call-clobbered registers as well. */
8827 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8828 /* FIQ handlers have registers r8 - r12 banked, so
8829 we only need to check r0 - r7. Normal ISRs only
8830 bank r14 and r15, so we must check up to r12.
8831 r13 is the stack pointer which is always preserved,
8832 so we do not need to consider it here. */
8833 max_reg = 7;
8834 else
8835 max_reg = 12;
8837 for (reg = 0; reg <= max_reg; reg++)
8838 if (regs_ever_live[reg]
8839 || (! current_function_is_leaf && call_used_regs [reg]))
8840 save_reg_mask |= (1 << reg);
8842 /* Also save the pic base register if necessary. */
8843 if (flag_pic
8844 && !TARGET_SINGLE_PIC_BASE
8845 && current_function_uses_pic_offset_table)
8846 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8848 else
8850 /* In the normal case we only need to save those registers
8851 which are call saved and which are used by this function. */
8852 for (reg = 0; reg <= 10; reg++)
8853 if (regs_ever_live[reg] && ! call_used_regs [reg])
8854 save_reg_mask |= (1 << reg);
8856 /* Handle the frame pointer as a special case. */
8857 if (! TARGET_APCS_FRAME
8858 && ! frame_pointer_needed
8859 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8860 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8861 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8863 /* If we aren't loading the PIC register,
8864 don't stack it even though it may be live. */
8865 if (flag_pic
8866 && !TARGET_SINGLE_PIC_BASE
8867 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8868 || current_function_uses_pic_offset_table))
8869 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8872 /* Save registers so the exception handler can modify them. */
8873 if (current_function_calls_eh_return)
8875 unsigned int i;
8877 for (i = 0; ; i++)
8879 reg = EH_RETURN_DATA_REGNO (i);
8880 if (reg == INVALID_REGNUM)
8881 break;
8882 save_reg_mask |= 1 << reg;
8886 return save_reg_mask;
8889 /* Compute a bit mask of which registers need to be
8890 saved on the stack for the current function. */
8892 static unsigned long
8893 arm_compute_save_reg_mask (void)
8895 unsigned int save_reg_mask = 0;
8896 unsigned long func_type = arm_current_func_type ();
8898 if (IS_NAKED (func_type))
8899 /* This should never really happen. */
8900 return 0;
8902 /* If we are creating a stack frame, then we must save the frame pointer,
8903 IP (which will hold the old stack pointer), LR and the PC. */
8904 if (frame_pointer_needed)
8905 save_reg_mask |=
8906 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8907 | (1 << IP_REGNUM)
8908 | (1 << LR_REGNUM)
8909 | (1 << PC_REGNUM);
8911 /* Volatile functions do not return, so there
8912 is no need to save any other registers. */
8913 if (IS_VOLATILE (func_type))
8914 return save_reg_mask;
8916 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8918 /* Decide if we need to save the link register.
8919 Interrupt routines have their own banked link register,
8920 so they never need to save it.
8921 Otherwise if we do not use the link register we do not need to save
8922 it. If we are pushing other registers onto the stack however, we
8923 can save an instruction in the epilogue by pushing the link register
8924 now and then popping it back into the PC. This incurs extra memory
8925 accesses though, so we only do it when optimizing for size, and only
8926 if we know that we will not need a fancy return sequence. */
8927 if (regs_ever_live [LR_REGNUM]
8928 || (save_reg_mask
8929 && optimize_size
8930 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8931 && !current_function_calls_eh_return))
8932 save_reg_mask |= 1 << LR_REGNUM;
8934 if (cfun->machine->lr_save_eliminated)
8935 save_reg_mask &= ~ (1 << LR_REGNUM);
8937 if (TARGET_REALLY_IWMMXT
8938 && ((bit_count (save_reg_mask)
8939 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8941 unsigned int reg;
8943 /* The total number of registers that are going to be pushed
8944 onto the stack is odd. We need to ensure that the stack
8945 is 64-bit aligned before we start to save iWMMXt registers,
8946 and also before we start to create locals. (A local variable
8947 might be a double or long long which we will load/store using
8948 an iWMMXt instruction). Therefore we need to push another
8949 ARM register, so that the stack will be 64-bit aligned. We
8950 try to avoid using the arg registers (r0 - r3) as they might be
8951 used to pass values in a tail call. */
8952 for (reg = 4; reg <= 12; reg++)
8953 if ((save_reg_mask & (1 << reg)) == 0)
8954 break;
8956 if (reg <= 12)
8957 save_reg_mask |= (1 << reg);
8958 else
8960 cfun->machine->sibcall_blocked = 1;
8961 save_reg_mask |= (1 << 3);
8965 return save_reg_mask;
8969 /* Compute a bit mask of which registers need to be
8970 saved on the stack for the current function. */
8971 static unsigned long
8972 thumb_compute_save_reg_mask (void)
8974 unsigned long mask;
8975 unsigned reg;
8977 mask = 0;
8978 for (reg = 0; reg < 12; reg ++)
8979 if (regs_ever_live[reg] && !call_used_regs[reg])
8980 mask |= 1 << reg;
8982 if (flag_pic
8983 && !TARGET_SINGLE_PIC_BASE
8984 && current_function_uses_pic_offset_table)
8985 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8987 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8988 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8989 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8991 /* LR will also be pushed if any lo regs are pushed. */
8992 if (mask & 0xff || thumb_force_lr_save ())
8993 mask |= (1 << LR_REGNUM);
8995 /* Make sure we have a low work register if we need one.
8996 We will need one if we are going to push a high register,
8997 but we are not currently intending to push a low register. */
8998 if ((mask & 0xff) == 0
8999 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9001 /* Use thumb_find_work_register to choose which register
9002 we will use. If the register is live then we will
9003 have to push it. Use LAST_LO_REGNUM as our fallback
9004 choice for the register to select. */
9005 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9007 if (! call_used_regs[reg])
9008 mask |= 1 << reg;
9011 return mask;
9015 /* Return the number of bytes required to save VFP registers. */
9016 static int
9017 arm_get_vfp_saved_size (void)
9019 unsigned int regno;
9020 int count;
9021 int saved;
9023 saved = 0;
9024 /* Space for saved VFP registers. */
9025 if (TARGET_HARD_FLOAT && TARGET_VFP)
9027 count = 0;
9028 for (regno = FIRST_VFP_REGNUM;
9029 regno < LAST_VFP_REGNUM;
9030 regno += 2)
9032 if ((!regs_ever_live[regno] || call_used_regs[regno])
9033 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9035 if (count > 0)
9037 /* Workaround ARM10 VFPr1 bug. */
9038 if (count == 2 && !arm_arch6)
9039 count++;
9040 saved += count * 8 + 4;
9042 count = 0;
9044 else
9045 count++;
9047 if (count > 0)
9049 if (count == 2 && !arm_arch6)
9050 count++;
9051 saved += count * 8 + 4;
9054 return saved;
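/* Worked example (illustrative): each contiguous block of live VFP
   register pairs costs 8 bytes per pair plus the 4-byte FSTMX format
   word, so three consecutive live pairs cost 3 * 8 + 4 = 28 bytes,
   while two separate single-pair blocks cost 2 * (8 + 4) = 24 bytes.
   On pre-v6 cores a block of exactly two pairs is padded to three for
   the ARM10 VFPr1 erratum, costing 28 bytes rather than 20.  */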
9058 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9059 everything bar the final return instruction. */
9060 const char *
9061 output_return_instruction (rtx operand, int really_return, int reverse)
9063 char conditional[10];
9064 char instr[100];
9065 unsigned reg;
9066 unsigned long live_regs_mask;
9067 unsigned long func_type;
9068 arm_stack_offsets *offsets;
9070 func_type = arm_current_func_type ();
9072 if (IS_NAKED (func_type))
9073 return "";
9075 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9077 /* If this function was declared non-returning, and we have
9078 found a tail call, then we have to trust that the called
9079 function won't return. */
9080 if (really_return)
9082 rtx ops[2];
9084 /* Otherwise, trap an attempted return by aborting. */
9085 ops[0] = operand;
9086 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9087 : "abort");
9088 assemble_external_libcall (ops[1]);
9089 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9092 return "";
9095 gcc_assert (!current_function_calls_alloca || really_return);
9097 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9099 return_used_this_function = 1;
9101 live_regs_mask = arm_compute_save_reg_mask ();
9103 if (live_regs_mask)
9105 const char * return_reg;
9107 /* If we do not have any special requirements for function exit
9108 (e.g. interworking, or ISR) then we can load the return address
9109 directly into the PC. Otherwise we must load it into LR. */
9110 if (really_return
9111 && ! TARGET_INTERWORK)
9112 return_reg = reg_names[PC_REGNUM];
9113 else
9114 return_reg = reg_names[LR_REGNUM];
9116 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9118 /* There are three possible reasons for the IP register
9119 being saved. 1) a stack frame was created, in which case
9120 IP contains the old stack pointer, or 2) an ISR routine
9121 corrupted it, or 3) it was saved to align the stack on
9122 iWMMXt. In case 1, restore IP into SP, otherwise just
9123 restore IP. */
9124 if (frame_pointer_needed)
9126 live_regs_mask &= ~ (1 << IP_REGNUM);
9127 live_regs_mask |= (1 << SP_REGNUM);
9129 else
9130 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9133 /* On some ARM architectures it is faster to use LDR rather than
9134 LDM to load a single register. On other architectures, the
9135 cost is the same. In 26 bit mode, or for exception handlers,
9136 we have to use LDM to load the PC so that the CPSR is also
9137 restored. */
9138 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9139 if (live_regs_mask == (1U << reg))
9140 break;
9142 if (reg <= LAST_ARM_REGNUM
9143 && (reg != LR_REGNUM
9144 || ! really_return
9145 || ! IS_INTERRUPT (func_type)))
9147 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9148 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9150 else
9152 char *p;
9153 int first = 1;
9155 /* Generate the load multiple instruction to restore the
9156 registers. Note we can get here, even if
9157 frame_pointer_needed is true, but only if sp already
9158 points to the base of the saved core registers. */
9159 if (live_regs_mask & (1 << SP_REGNUM))
9161 unsigned HOST_WIDE_INT stack_adjust;
9163 offsets = arm_get_frame_offsets ();
9164 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9165 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9167 if (stack_adjust && arm_arch5)
9168 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9169 else
9171 /* If we can't use ldmib (SA110 bug),
9172 then try to pop r3 instead. */
9173 if (stack_adjust)
9174 live_regs_mask |= 1 << 3;
9175 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9178 else
9179 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9181 p = instr + strlen (instr);
9183 for (reg = 0; reg <= SP_REGNUM; reg++)
9184 if (live_regs_mask & (1 << reg))
9186 int l = strlen (reg_names[reg]);
9188 if (first)
9189 first = 0;
9190 else
9192 memcpy (p, ", ", 2);
9193 p += 2;
9196 memcpy (p, "%|", 2);
9197 memcpy (p + 2, reg_names[reg], l);
9198 p += l + 2;
9201 if (live_regs_mask & (1 << LR_REGNUM))
9203 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9204 /* If returning from an interrupt, restore the CPSR. */
9205 if (IS_INTERRUPT (func_type))
9206 strcat (p, "^");
9208 else
9209 strcpy (p, "}");
9212 output_asm_insn (instr, & operand);
9214 /* See if we need to generate an extra instruction to
9215 perform the actual function return. */
9216 if (really_return
9217 && func_type != ARM_FT_INTERWORKED
9218 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9220 /* The return has already been handled
9221 by loading the LR into the PC. */
9222 really_return = 0;
9226 if (really_return)
9228 switch ((int) ARM_FUNC_TYPE (func_type))
9230 case ARM_FT_ISR:
9231 case ARM_FT_FIQ:
9232 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9233 break;
9235 case ARM_FT_INTERWORKED:
9236 sprintf (instr, "bx%s\t%%|lr", conditional);
9237 break;
9239 case ARM_FT_EXCEPTION:
9240 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9241 break;
9243 default:
9244 /* Use bx if it's available. */
9245 if (arm_arch5 || arm_arch4t)
9246 sprintf (instr, "bx%s\t%%|lr", conditional);
9247 else
9248 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9249 break;
9252 output_asm_insn (instr, & operand);
9255 return "";
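/* Illustrative example (not from the GCC sources): for a normal
   function that saved {r4, lr}, is really returning and needs no
   interworking, the code above collapses epilogue and return into the
   single instruction

	ldmfd	sp!, {r4, pc}

   loading the saved return address straight into the PC.  Interrupt
   returns append "^" to the register list so that the CPSR is
   restored as well.  */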
9258 /* Write the function name into the code section, directly preceding
9259 the function prologue.
9261 Code will be output similar to this:
9263 .ascii "arm_poke_function_name", 0
9264 .align
9266 .word 0xff000000 + (t1 - t0)
9267 arm_poke_function_name
9268 mov ip, sp
9269 stmfd sp!, {fp, ip, lr, pc}
9270 sub fp, ip, #4
9272 When performing a stack backtrace, code can inspect the value
9273 of 'pc' stored at 'fp' + 0. If the trace function then looks
9274 at location pc - 12 and the top 8 bits are set, then we know
9275 that there is a function name embedded immediately preceding this
9276 location, whose length is (pc[-3] & ~0xff000000).
9278 We assume that pc is declared as a pointer to an unsigned long.
9280 It is of no benefit to output the function name if we are assembling
9281 a leaf function. These function types will not contain a stack
9282 backtrace structure, therefore it is not possible to determine the
9283 function name. */
9284 void
9285 arm_poke_function_name (FILE *stream, const char *name)
9287 unsigned long alignlength;
9288 unsigned long length;
9289 rtx x;
9291 length = strlen (name) + 1;
9292 alignlength = ROUND_UP_WORD (length);
9294 ASM_OUTPUT_ASCII (stream, name, length);
9295 ASM_OUTPUT_ALIGN (stream, 2);
9296 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9297 assemble_aligned_integer (UNITS_PER_WORD, x);
9300 /* Place some comments into the assembler stream
9301 describing the current function. */
9302 static void
9303 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9305 unsigned long func_type;
9307 if (!TARGET_ARM)
9309 thumb_output_function_prologue (f, frame_size);
9310 return;
9313 /* Sanity check. */
9314 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9316 func_type = arm_current_func_type ();
9318 switch ((int) ARM_FUNC_TYPE (func_type))
9320 default:
9321 case ARM_FT_NORMAL:
9322 break;
9323 case ARM_FT_INTERWORKED:
9324 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9325 break;
9326 case ARM_FT_ISR:
9327 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9328 break;
9329 case ARM_FT_FIQ:
9330 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9331 break;
9332 case ARM_FT_EXCEPTION:
9333 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9334 break;
9337 if (IS_NAKED (func_type))
9338 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9340 if (IS_VOLATILE (func_type))
9341 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9343 if (IS_NESTED (func_type))
9344 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9346 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9347 current_function_args_size,
9348 current_function_pretend_args_size, frame_size);
9350 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9351 frame_pointer_needed,
9352 cfun->machine->uses_anonymous_args);
9354 if (cfun->machine->lr_save_eliminated)
9355 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9357 if (current_function_calls_eh_return)
9358 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9360 #ifdef AOF_ASSEMBLER
9361 if (flag_pic)
9362 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9363 #endif
9365 return_used_this_function = 0;
9368 const char *
9369 arm_output_epilogue (rtx sibling)
9371 int reg;
9372 unsigned long saved_regs_mask;
9373 unsigned long func_type;
9374 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9375 frame that is $fp + 4 for a non-variadic function. */
9376 int floats_offset = 0;
9377 rtx operands[3];
9378 FILE * f = asm_out_file;
9379 unsigned int lrm_count = 0;
9380 int really_return = (sibling == NULL);
9381 int start_reg;
9382 arm_stack_offsets *offsets;
9384 /* If we have already generated the return instruction
9385 then it is futile to generate anything else. */
9386 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9387 return "";
9389 func_type = arm_current_func_type ();
9391 if (IS_NAKED (func_type))
9392 /* Naked functions don't have epilogues. */
9393 return "";
9395 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9397 rtx op;
9399 /* A volatile function should never return. Call abort. */
9400 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9401 assemble_external_libcall (op);
9402 output_asm_insn ("bl\t%a0", &op);
9404 return "";
9407 /* If we are throwing an exception, then we really must be doing a
9408 return, so we can't tail-call. */
9409 gcc_assert (!current_function_calls_eh_return || really_return);
9411 offsets = arm_get_frame_offsets ();
9412 saved_regs_mask = arm_compute_save_reg_mask ();
9414 if (TARGET_IWMMXT)
9415 lrm_count = bit_count (saved_regs_mask);
9417 floats_offset = offsets->saved_args;
9418 /* Compute how far away the floats will be. */
9419 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9420 if (saved_regs_mask & (1 << reg))
9421 floats_offset += 4;
9423 if (frame_pointer_needed)
9425 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9426 int vfp_offset = offsets->frame;
9428 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9430 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9431 if (regs_ever_live[reg] && !call_used_regs[reg])
9433 floats_offset += 12;
9434 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9435 reg, FP_REGNUM, floats_offset - vfp_offset);
9438 else
9440 start_reg = LAST_FPA_REGNUM;
9442 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9444 if (regs_ever_live[reg] && !call_used_regs[reg])
9446 floats_offset += 12;
9448 /* We can't unstack more than four registers at once. */
9449 if (start_reg - reg == 3)
9451 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9452 reg, FP_REGNUM, floats_offset - vfp_offset);
9453 start_reg = reg - 1;
9456 else
9458 if (reg != start_reg)
9459 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9460 reg + 1, start_reg - reg,
9461 FP_REGNUM, floats_offset - vfp_offset);
9462 start_reg = reg - 1;
9466 /* Just in case the last register checked also needs unstacking. */
9467 if (reg != start_reg)
9468 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9469 reg + 1, start_reg - reg,
9470 FP_REGNUM, floats_offset - vfp_offset);
9473 if (TARGET_HARD_FLOAT && TARGET_VFP)
9475 int saved_size;
9477 /* The fldmx insn does not have base+offset addressing modes,
9478 so we use IP to hold the address. */
9479 saved_size = arm_get_vfp_saved_size ();
9481 if (saved_size > 0)
9483 floats_offset += saved_size;
9484 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9485 FP_REGNUM, floats_offset - vfp_offset);
9487 start_reg = FIRST_VFP_REGNUM;
9488 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9490 if ((!regs_ever_live[reg] || call_used_regs[reg])
9491 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9493 if (start_reg != reg)
9494 arm_output_fldmx (f, IP_REGNUM,
9495 (start_reg - FIRST_VFP_REGNUM) / 2,
9496 (reg - start_reg) / 2);
9497 start_reg = reg + 2;
9500 if (start_reg != reg)
9501 arm_output_fldmx (f, IP_REGNUM,
9502 (start_reg - FIRST_VFP_REGNUM) / 2,
9503 (reg - start_reg) / 2);
9506 if (TARGET_IWMMXT)
9508 /* The frame pointer is guaranteed to be non-double-word aligned.
9509 This is because it is set to (old_stack_pointer - 4) and the
9510 old_stack_pointer was double word aligned. Thus the offset to
9511 the iWMMXt registers to be loaded must also be non-double-word
9512 sized, so that the resultant address *is* double-word aligned.
9513 We can ignore floats_offset since that was already included in
9514 the live_regs_mask. */
9515 lrm_count += (lrm_count % 2 ? 2 : 1);
9517 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9518 if (regs_ever_live[reg] && !call_used_regs[reg])
9520 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9521 reg, FP_REGNUM, lrm_count * 4);
9522 lrm_count += 2;
9526 /* saved_regs_mask should contain the IP, which at the time of stack
9527 frame generation actually contains the old stack pointer. So a
9528 quick way to unwind the stack is just pop the IP register directly
9529 into the stack pointer. */
9530 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9531 saved_regs_mask &= ~ (1 << IP_REGNUM);
9532 saved_regs_mask |= (1 << SP_REGNUM);
9534 /* There are two registers left in saved_regs_mask - LR and PC. We
9535 only need to restore the LR register (the return address), but to
9536 save time we can load it directly into the PC, unless we need a
9537 special function exit sequence, or we are not really returning. */
9538 if (really_return
9539 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9540 && !current_function_calls_eh_return)
9541 /* Delete the LR from the register mask, so that the LR on
9542 the stack is loaded into the PC in the register mask. */
9543 saved_regs_mask &= ~ (1 << LR_REGNUM);
9544 else
9545 saved_regs_mask &= ~ (1 << PC_REGNUM);
9547 /* We must use SP as the base register, because SP is one of the
9548 registers being restored. If an interrupt or page fault
9549 happens in the ldm instruction, the SP might or might not
9550 have been restored. That would be bad, as then SP will no
9551 longer indicate the safe area of stack, and we can get stack
9552 corruption. Using SP as the base register means that it will
9553 be reset correctly to the original value, should an interrupt
9554 occur. If the stack pointer already points at the right
9555 place, then omit the subtraction. */
9556 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9557 || current_function_calls_alloca)
9558 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9559 4 * bit_count (saved_regs_mask));
9560 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9562 if (IS_INTERRUPT (func_type))
9563 /* Interrupt handlers will have pushed the
9564 IP onto the stack, so restore it now. */
9565 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9567 else
9569 /* Restore stack pointer if necessary. */
9570 if (offsets->outgoing_args != offsets->saved_regs)
9572 operands[0] = operands[1] = stack_pointer_rtx;
9573 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9574 output_add_immediate (operands);
9577 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9579 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9580 if (regs_ever_live[reg] && !call_used_regs[reg])
9581 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9582 reg, SP_REGNUM);
9584 else
9586 start_reg = FIRST_FPA_REGNUM;
9588 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9590 if (regs_ever_live[reg] && !call_used_regs[reg])
9592 if (reg - start_reg == 3)
9594 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9595 start_reg, SP_REGNUM);
9596 start_reg = reg + 1;
9599 else
9601 if (reg != start_reg)
9602 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9603 start_reg, reg - start_reg,
9604 SP_REGNUM);
9606 start_reg = reg + 1;
9610 /* Just in case the last register checked also needs unstacking. */
9611 if (reg != start_reg)
9612 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9613 start_reg, reg - start_reg, SP_REGNUM);
9616 if (TARGET_HARD_FLOAT && TARGET_VFP)
9618 start_reg = FIRST_VFP_REGNUM;
9619 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9621 if ((!regs_ever_live[reg] || call_used_regs[reg])
9622 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9624 if (start_reg != reg)
9625 arm_output_fldmx (f, SP_REGNUM,
9626 (start_reg - FIRST_VFP_REGNUM) / 2,
9627 (reg - start_reg) / 2);
9628 start_reg = reg + 2;
9631 if (start_reg != reg)
9632 arm_output_fldmx (f, SP_REGNUM,
9633 (start_reg - FIRST_VFP_REGNUM) / 2,
9634 (reg - start_reg) / 2);
9636 if (TARGET_IWMMXT)
9637 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9638 if (regs_ever_live[reg] && !call_used_regs[reg])
9639 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9641 /* If we can, restore the LR into the PC. */
9642 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9643 && really_return
9644 && current_function_pretend_args_size == 0
9645 && saved_regs_mask & (1 << LR_REGNUM)
9646 && !current_function_calls_eh_return)
9648 saved_regs_mask &= ~ (1 << LR_REGNUM);
9649 saved_regs_mask |= (1 << PC_REGNUM);
9652 /* Load the registers off the stack. If we only have one register
9653 to load use the LDR instruction - it is faster. */
9654 if (saved_regs_mask == (1 << LR_REGNUM))
9656 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9658 else if (saved_regs_mask)
9660 if (saved_regs_mask & (1 << SP_REGNUM))
9661 /* Note - write back to the stack register is not enabled
9662 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9663 in the list of registers and if we add writeback the
9664 instruction becomes UNPREDICTABLE. */
9665 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9666 else
9667 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9670 if (current_function_pretend_args_size)
9672 /* Unwind the pre-pushed regs. */
9673 operands[0] = operands[1] = stack_pointer_rtx;
9674 operands[2] = GEN_INT (current_function_pretend_args_size);
9675 output_add_immediate (operands);
9679 /* We may have already restored PC directly from the stack. */
9680 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9681 return "";
9683 /* Stack adjustment for exception handler. */
9684 if (current_function_calls_eh_return)
9685 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9686 ARM_EH_STACKADJ_REGNUM);
9688 /* Generate the return instruction. */
9689 switch ((int) ARM_FUNC_TYPE (func_type))
9691 case ARM_FT_ISR:
9692 case ARM_FT_FIQ:
9693 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9694 break;
9696 case ARM_FT_EXCEPTION:
9697 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9698 break;
9700 case ARM_FT_INTERWORKED:
9701 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9702 break;
9704 default:
9705 if (arm_arch5 || arm_arch4t)
9706 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9707 else
9708 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9709 break;
9712 return "";
9715 static void
9716 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9717 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9719 arm_stack_offsets *offsets;
9721 if (TARGET_THUMB)
9723 int regno;
9725 /* Emit any call-via-reg trampolines that are needed for v4t support
9726 of call_reg and call_value_reg type insns. */
9727 for (regno = 0; regno < LR_REGNUM; regno++)
9729 rtx label = cfun->machine->call_via[regno];
9731 if (label != NULL)
9733 function_section (current_function_decl);
9734 targetm.asm_out.internal_label (asm_out_file, "L",
9735 CODE_LABEL_NUMBER (label));
9736 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9740 /* ??? Probably not safe to set this here, since it assumes that a
9741 function will be emitted as assembly immediately after we generate
9742 RTL for it. This does not happen for inline functions. */
9743 return_used_this_function = 0;
9745 else
9747 /* We need to take into account any stack-frame rounding. */
9748 offsets = arm_get_frame_offsets ();
9750 gcc_assert (!use_return_insn (FALSE, NULL)
9751 || !return_used_this_function
9752 || offsets->saved_regs == offsets->outgoing_args
9753 || frame_pointer_needed);
9755 /* Reset the ARM-specific per-function variables. */
9756 after_arm_reorg = 0;
9760 /* Generate and emit an insn that we will recognize as a push_multi.
9761 Unfortunately, since this insn does not reflect very well the actual
9762 semantics of the operation, we need to annotate the insn for the benefit
9763 of DWARF2 frame unwind information. */
9764 static rtx
9765 emit_multi_reg_push (unsigned long mask)
9767 int num_regs = 0;
9768 int num_dwarf_regs;
9769 int i, j;
9770 rtx par;
9771 rtx dwarf;
9772 int dwarf_par_index;
9773 rtx tmp, reg;
9775 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9776 if (mask & (1 << i))
9777 num_regs++;
9779 gcc_assert (num_regs && num_regs <= 16);
9781 /* We don't record the PC in the dwarf frame information. */
9782 num_dwarf_regs = num_regs;
9783 if (mask & (1 << PC_REGNUM))
9784 num_dwarf_regs--;
9786 /* For the body of the insn we are going to generate an UNSPEC in
9787 parallel with several USEs. This allows the insn to be recognized
9788 by the push_multi pattern in the arm.md file. The insn looks
9789 something like this:
9791 (parallel [
9792 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9793 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9794 (use (reg:SI 11 fp))
9795 (use (reg:SI 12 ip))
9796 (use (reg:SI 14 lr))
9797 (use (reg:SI 15 pc))
9800 For the frame note however, we try to be more explicit and actually
9801 show each register being stored into the stack frame, plus a (single)
9802 decrement of the stack pointer. We do it this way in order to be
9803 friendly to the stack unwinding code, which only wants to see a single
9804 stack decrement per instruction. The RTL we generate for the note looks
9805 something like this:
9807 (sequence [
9808 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9809 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9810 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9811 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9812 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9815 This sequence is used both by the code to support stack unwinding for
9816 exceptions handlers and the code to generate dwarf2 frame debugging. */
9818 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9819 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9820 dwarf_par_index = 1;
9822 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9824 if (mask & (1 << i))
9826 reg = gen_rtx_REG (SImode, i);
9828 XVECEXP (par, 0, 0)
9829 = gen_rtx_SET (VOIDmode,
9830 gen_rtx_MEM (BLKmode,
9831 gen_rtx_PRE_DEC (BLKmode,
9832 stack_pointer_rtx)),
9833 gen_rtx_UNSPEC (BLKmode,
9834 gen_rtvec (1, reg),
9835 UNSPEC_PUSH_MULT));
9837 if (i != PC_REGNUM)
9839 tmp = gen_rtx_SET (VOIDmode,
9840 gen_rtx_MEM (SImode, stack_pointer_rtx),
9841 reg);
9842 RTX_FRAME_RELATED_P (tmp) = 1;
9843 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9844 dwarf_par_index++;
9847 break;
9851 for (j = 1, i++; j < num_regs; i++)
9853 if (mask & (1 << i))
9855 reg = gen_rtx_REG (SImode, i);
9857 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9859 if (i != PC_REGNUM)
9861 tmp = gen_rtx_SET (VOIDmode,
9862 gen_rtx_MEM (SImode,
9863 plus_constant (stack_pointer_rtx,
9864 4 * j)),
9865 reg);
9866 RTX_FRAME_RELATED_P (tmp) = 1;
9867 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9870 j++;
9874 par = emit_insn (par);
9876 tmp = gen_rtx_SET (SImode,
9877 stack_pointer_rtx,
9878 gen_rtx_PLUS (SImode,
9879 stack_pointer_rtx,
9880 GEN_INT (-4 * num_regs)));
9881 RTX_FRAME_RELATED_P (tmp) = 1;
9882 XVECEXP (dwarf, 0, 0) = tmp;
9884 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9885 REG_NOTES (par));
9886 return par;
9889 /* Calculate the size of the return value that is passed in registers. */
9890 static int
9891 arm_size_return_regs (void)
9893 enum machine_mode mode;
9895 if (current_function_return_rtx != 0)
9896 mode = GET_MODE (current_function_return_rtx);
9897 else
9898 mode = DECL_MODE (DECL_RESULT (current_function_decl));
9900 return GET_MODE_SIZE (mode);
9903 static rtx
9904 emit_sfm (int base_reg, int count)
9906 rtx par;
9907 rtx dwarf;
9908 rtx tmp, reg;
9909 int i;
9911 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9912 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9914 reg = gen_rtx_REG (XFmode, base_reg++);
9916 XVECEXP (par, 0, 0)
9917 = gen_rtx_SET (VOIDmode,
9918 gen_rtx_MEM (BLKmode,
9919 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9920 gen_rtx_UNSPEC (BLKmode,
9921 gen_rtvec (1, reg),
9922 UNSPEC_PUSH_MULT));
9923 tmp = gen_rtx_SET (VOIDmode,
9924 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9925 RTX_FRAME_RELATED_P (tmp) = 1;
9926 XVECEXP (dwarf, 0, 1) = tmp;
9928 for (i = 1; i < count; i++)
9930 reg = gen_rtx_REG (XFmode, base_reg++);
9931 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9933 tmp = gen_rtx_SET (VOIDmode,
9934 gen_rtx_MEM (XFmode,
9935 plus_constant (stack_pointer_rtx,
9936 i * 12)),
9937 reg);
9938 RTX_FRAME_RELATED_P (tmp) = 1;
9939 XVECEXP (dwarf, 0, i + 1) = tmp;
9942 tmp = gen_rtx_SET (VOIDmode,
9943 stack_pointer_rtx,
9944 gen_rtx_PLUS (SImode,
9945 stack_pointer_rtx,
9946 GEN_INT (-12 * count)));
9947 RTX_FRAME_RELATED_P (tmp) = 1;
9948 XVECEXP (dwarf, 0, 0) = tmp;
9950 par = emit_insn (par);
9951 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9952 REG_NOTES (par));
9953 return par;
9957 /* Return true if the current function needs to save/restore LR. */
9959 static bool
9960 thumb_force_lr_save (void)
9962 return !cfun->machine->lr_save_eliminated
9963 && (!leaf_function_p ()
9964 || thumb_far_jump_used_p ()
9965 || regs_ever_live [LR_REGNUM]);
9969 /* Compute the distance from register FROM to register TO.
9970 These can be the arg pointer (26), the soft frame pointer (25),
9971 the stack pointer (13) or the hard frame pointer (11).
9972 In thumb mode r7 is used as the soft frame pointer, if needed.
9973 Typical stack layout looks like this:
9975 old stack pointer -> | |
9976 ----
9977 | | \
9978 | | saved arguments for
9979 | | vararg functions
9980 | | /
9982 hard FP & arg pointer -> | | \
9983 | | stack
9984 | | frame
9985 | | /
9987 | | \
9988 | | call saved
9989 | | registers
9990 soft frame pointer -> | | /
9992 | | \
9993 | | local
9994 | | variables
9995 | | /
9997 | | \
9998 | | outgoing
9999 | | arguments
10000 current stack pointer -> | | /
10003 For a given function some or all of these stack components
10004 may not be needed, giving rise to the possibility of
10005 eliminating some of the registers.
10007 The values returned by this function must reflect the behavior
10008 of arm_expand_prologue() and arm_compute_save_reg_mask().
10010 The sign of the number returned reflects the direction of stack
10011 growth, so the values are positive for all eliminations except
10012 from the soft frame pointer to the hard frame pointer.
10014 SFP may point just inside the local variables block to ensure correct
10015 alignment. */
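/* Worked example (illustrative, assuming ARM_DOUBLEWORD_ALIGN and a
   zero CALLER_INTERWORKING_SLOT_SIZE): a non-leaf ARM function with
   no pretend args that saves {r4, fp, ip, lr, pc} (20 bytes), has 8
   bytes of locals and no outgoing arguments gets saved_args = 0,
   saved_regs = 20, soft_frame = 24 (20 rounded up for doubleword
   alignment) and outgoing_args = 24 + 8 = 32, which is already
   doubleword aligned.  */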
10018 /* Calculate stack offsets. These are used to calculate register elimination
10019 offsets and in prologue/epilogue code. */
10021 static arm_stack_offsets *
10022 arm_get_frame_offsets (void)
10024 struct arm_stack_offsets *offsets;
10025 unsigned long func_type;
10026 int leaf;
10027 int saved;
10028 HOST_WIDE_INT frame_size;
10030 offsets = &cfun->machine->stack_offsets;
10032 /* We need to know if we are a leaf function. Unfortunately, it
10033 is possible to be called after start_sequence has been called,
10034 which causes get_insns to return the insns for the sequence,
10035 not the function, which will cause leaf_function_p to return
10036 the incorrect result.
10038 To work around this the offsets are cached. We only need to know about leaf functions once reload has completed, and the
10039 frame size cannot be changed after that time, so we can safely
10040 use the cached value. */
10042 if (reload_completed)
10043 return offsets;
10045 /* Initially this is the size of the local variables. It will be translated
10046 into an offset once we have determined the size of preceding data. */
10047 frame_size = ROUND_UP_WORD (get_frame_size ());
10049 leaf = leaf_function_p ();
10051 /* Space for variadic functions. */
10052 offsets->saved_args = current_function_pretend_args_size;
10054 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10056 if (TARGET_ARM)
10058 unsigned int regno;
10060 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10062 /* We know that SP will be doubleword aligned on entry, and we must
10063 preserve that condition at any subroutine call. We also require the
10064 soft frame pointer to be doubleword aligned. */
10066 if (TARGET_REALLY_IWMMXT)
10068 /* Check for the call-saved iWMMXt registers. */
10069 for (regno = FIRST_IWMMXT_REGNUM;
10070 regno <= LAST_IWMMXT_REGNUM;
10071 regno++)
10072 if (regs_ever_live [regno] && ! call_used_regs [regno])
10073 saved += 8;
10076 func_type = arm_current_func_type ();
10077 if (! IS_VOLATILE (func_type))
10079 /* Space for saved FPA registers. */
10080 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10081 if (regs_ever_live[regno] && ! call_used_regs[regno])
10082 saved += 12;
10084 /* Space for saved VFP registers. */
10085 if (TARGET_HARD_FLOAT && TARGET_VFP)
10086 saved += arm_get_vfp_saved_size ();
10089 else /* TARGET_THUMB */
10091 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10092 if (TARGET_BACKTRACE)
10093 saved += 16;
10096 /* Saved registers include the stack frame. */
10097 offsets->saved_regs = offsets->saved_args + saved;
10098 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10099 /* A leaf function does not need any stack alignment if it has nothing
10100 on the stack. */
10101 if (leaf && frame_size == 0)
10103 offsets->outgoing_args = offsets->soft_frame;
10104 return offsets;
10107 /* Ensure SFP has the correct alignment. */
10108 if (ARM_DOUBLEWORD_ALIGN
10109 && (offsets->soft_frame & 7))
10110 offsets->soft_frame += 4;
10112 offsets->outgoing_args = offsets->soft_frame + frame_size
10113 + current_function_outgoing_args_size;
10115 if (ARM_DOUBLEWORD_ALIGN)
10117 /* Ensure SP remains doubleword aligned. */
10118 if (offsets->outgoing_args & 7)
10119 offsets->outgoing_args += 4;
10120 gcc_assert (!(offsets->outgoing_args & 7));
10123 return offsets;
10127 /* Calculate the relative offsets for the different stack pointers. Positive
10128 offsets are in the direction of stack growth. */
10130 HOST_WIDE_INT
10131 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10133 arm_stack_offsets *offsets;
10135 offsets = arm_get_frame_offsets ();
10137 /* OK, now we have enough information to compute the distances.
10138 There must be an entry in these switch tables for each pair
10139 of registers in ELIMINABLE_REGS, even if some of the entries
10140 seem to be redundant or useless. */
10141 switch (from)
10143 case ARG_POINTER_REGNUM:
10144 switch (to)
10146 case THUMB_HARD_FRAME_POINTER_REGNUM:
10147 return 0;
10149 case FRAME_POINTER_REGNUM:
10150 /* This is the reverse of the soft frame pointer
10151 to hard frame pointer elimination below. */
10152 return offsets->soft_frame - offsets->saved_args;
10154 case ARM_HARD_FRAME_POINTER_REGNUM:
10155 /* If there is no stack frame then the hard
10156 frame pointer and the arg pointer coincide. */
10157 if (offsets->frame == offsets->saved_regs)
10158 return 0;
10159 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10160 return (frame_pointer_needed
10161 && cfun->static_chain_decl != NULL
10162 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10164 case STACK_POINTER_REGNUM:
10165 /* If nothing has been pushed on the stack at all
10166 then this will return -4. This *is* correct! */
10167 return offsets->outgoing_args - (offsets->saved_args + 4);
10169 default:
10170 gcc_unreachable ();
10172 gcc_unreachable ();
10174 case FRAME_POINTER_REGNUM:
10175 switch (to)
10177 case THUMB_HARD_FRAME_POINTER_REGNUM:
10178 return 0;
10180 case ARM_HARD_FRAME_POINTER_REGNUM:
10181 /* The hard frame pointer points to the top entry in the
10182 stack frame. The soft frame pointer points to the bottom entry
10183 in the stack frame. If there is no stack frame at all,
10184 then they are identical. */
10186 return offsets->frame - offsets->soft_frame;
10188 case STACK_POINTER_REGNUM:
10189 return offsets->outgoing_args - offsets->soft_frame;
10191 default:
10192 gcc_unreachable ();
10194 gcc_unreachable ();
10196 default:
10197 /* You cannot eliminate from the stack pointer.
10198 In theory you could eliminate from the hard frame
10199 pointer to the stack pointer, but this will never
10200 happen, since if a stack frame is not needed the
10201 hard frame pointer will never be used. */
10202 gcc_unreachable ();
10207 /* Generate the prologue instructions for entry into an ARM function. */
10208 void
10209 arm_expand_prologue (void)
10211 int reg;
10212 rtx amount;
10213 rtx insn;
10214 rtx ip_rtx;
10215 unsigned long live_regs_mask;
10216 unsigned long func_type;
10217 int fp_offset = 0;
10218 int saved_pretend_args = 0;
10219 int saved_regs = 0;
10220 unsigned HOST_WIDE_INT args_to_push;
10221 arm_stack_offsets *offsets;
10223 func_type = arm_current_func_type ();
10225 /* Naked functions don't have prologues. */
10226 if (IS_NAKED (func_type))
10227 return;
10229 /* Make a copy of current_function_pretend_args_size, as we may need to modify it locally. */
10230 args_to_push = current_function_pretend_args_size;
10232 /* Compute which registers we will have to save onto the stack. */
10233 live_regs_mask = arm_compute_save_reg_mask ();
10235 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10237 if (frame_pointer_needed)
10239 if (IS_INTERRUPT (func_type))
10241 /* Interrupt functions must not corrupt any registers.
10242 Creating a frame pointer, however, corrupts the IP
10243 register, so we must push it first. */
10244 insn = emit_multi_reg_push (1 << IP_REGNUM);
10246 /* Do not set RTX_FRAME_RELATED_P on this insn.
10247 The dwarf stack unwinding code only wants to see one
10248 stack decrement per function, and this is not it. If
10249 this instruction is labeled as being part of the frame
10250 creation sequence then dwarf2out_frame_debug_expr will
10251 die when it encounters the assignment of IP to FP
10252 later on, since the use of SP here establishes SP as
10253 the CFA register and not IP.
10255 Anyway this instruction is not really part of the stack
10256 frame creation although it is part of the prologue. */
10258 else if (IS_NESTED (func_type))
10260 /* The static chain register is the same as the IP register,
10261 which is used as a scratch register during stack frame creation.
10262 To get around this, we need to find somewhere to store IP
10263 whilst the frame is being created. We try the following
10264 places in order:
10266 1. The last argument register.
10267 2. A slot on the stack above the frame. (This only
10268 works if the function is not a varargs function).
10269 3. Register r3, after pushing the argument registers
10270 onto the stack.
10272 Note - we only need to tell the dwarf2 backend about the SP
10273 adjustment in the second variant; the static chain register
10274 doesn't need to be unwound, as it doesn't contain a value
10275 inherited from the caller. */
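/* As a sketch of the three variants above (illustrative only):
   variant 1 emits "mov r3, ip"; variant 2 emits "str ip, [sp, #-4]!"
   (the PRE_DEC store below); variant 3 first pushes the argument
   registers and only then frees r3 with "mov r3, ip".  */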
10277 if (regs_ever_live[3] == 0)
10279 insn = gen_rtx_REG (SImode, 3);
10280 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10281 insn = emit_insn (insn);
10283 else if (args_to_push == 0)
10285 rtx dwarf;
10286 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10287 insn = gen_rtx_MEM (SImode, insn);
10288 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10289 insn = emit_insn (insn);
10291 fp_offset = 4;
10293 /* Just tell the dwarf backend that we adjusted SP. */
10294 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10295 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10296 GEN_INT (-fp_offset)));
10297 RTX_FRAME_RELATED_P (insn) = 1;
10298 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10299 dwarf, REG_NOTES (insn));
10301 else
10303 /* Store the args on the stack. */
10304 if (cfun->machine->uses_anonymous_args)
10305 insn = emit_multi_reg_push
10306 ((0xf0 >> (args_to_push / 4)) & 0xf);
10307 else
10308 insn = emit_insn
10309 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10310 GEN_INT (- args_to_push)));
10312 RTX_FRAME_RELATED_P (insn) = 1;
10314 saved_pretend_args = 1;
10315 fp_offset = args_to_push;
10316 args_to_push = 0;
10318 /* Now reuse r3 to preserve IP. */
10319 insn = gen_rtx_REG (SImode, 3);
10320 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10321 (void) emit_insn (insn);
10325 if (fp_offset)
10327 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10328 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10330 else
10331 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10333 insn = emit_insn (insn);
10334 RTX_FRAME_RELATED_P (insn) = 1;
10337 if (args_to_push)
10339 /* Push the argument registers, or reserve space for them. */
10340 if (cfun->machine->uses_anonymous_args)
10341 insn = emit_multi_reg_push
10342 ((0xf0 >> (args_to_push / 4)) & 0xf);
10343 else
10344 insn = emit_insn
10345 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10346 GEN_INT (- args_to_push)));
10347 RTX_FRAME_RELATED_P (insn) = 1;
10350 /* If this is an interrupt service routine, and the link register
10351 is going to be pushed, and we are not creating a stack frame,
10352 (which would involve an extra push of IP and a pop in the epilogue)
10353 subtracting four from LR now will mean that the function return
10354 can be done with a single instruction. */
10355 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10356 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10357 && ! frame_pointer_needed)
10358 emit_insn (gen_rtx_SET (SImode,
10359 gen_rtx_REG (SImode, LR_REGNUM),
10360 gen_rtx_PLUS (SImode,
10361 gen_rtx_REG (SImode, LR_REGNUM),
10362 GEN_INT (-4))));
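/* A sketch of the pay-off (illustrative only): with LR already biased
   by -4, the epilogue can return from the ISR with a single
   "ldmfd sp!, {..., pc}^" instead of popping LR and then issuing
   "subs pc, lr, #4".  */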
10364 if (live_regs_mask)
10366 insn = emit_multi_reg_push (live_regs_mask);
10367 saved_regs += bit_count (live_regs_mask) * 4;
10368 RTX_FRAME_RELATED_P (insn) = 1;
10371 if (TARGET_IWMMXT)
10372 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10373 if (regs_ever_live[reg] && ! call_used_regs [reg])
10375 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10376 insn = gen_rtx_MEM (V2SImode, insn);
10377 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10378 gen_rtx_REG (V2SImode, reg)));
10379 RTX_FRAME_RELATED_P (insn) = 1;
10380 saved_regs += 8;
10383 if (! IS_VOLATILE (func_type))
10385 int start_reg;
10387 /* Save any floating point call-saved registers used by this
10388 function. */
10389 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10391 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10392 if (regs_ever_live[reg] && !call_used_regs[reg])
10394 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10395 insn = gen_rtx_MEM (XFmode, insn);
10396 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10397 gen_rtx_REG (XFmode, reg)));
10398 RTX_FRAME_RELATED_P (insn) = 1;
10399 saved_regs += 12;
10402 else
10404 start_reg = LAST_FPA_REGNUM;
10406 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10408 if (regs_ever_live[reg] && !call_used_regs[reg])
10410 if (start_reg - reg == 3)
10412 insn = emit_sfm (reg, 4);
10413 RTX_FRAME_RELATED_P (insn) = 1;
10414 saved_regs += 48;
10415 start_reg = reg - 1;
10418 else
10420 if (start_reg != reg)
10422 insn = emit_sfm (reg + 1, start_reg - reg);
10423 RTX_FRAME_RELATED_P (insn) = 1;
10424 saved_regs += (start_reg - reg) * 12;
10426 start_reg = reg - 1;
10430 if (start_reg != reg)
10432 insn = emit_sfm (reg + 1, start_reg - reg);
10433 saved_regs += (start_reg - reg) * 12;
10434 RTX_FRAME_RELATED_P (insn) = 1;
10437 if (TARGET_HARD_FLOAT && TARGET_VFP)
10439 start_reg = FIRST_VFP_REGNUM;
10441 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10443 if ((!regs_ever_live[reg] || call_used_regs[reg])
10444 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10446 if (start_reg != reg)
10447 saved_regs += vfp_emit_fstmx (start_reg,
10448 (reg - start_reg) / 2);
10449 start_reg = reg + 2;
10452 if (start_reg != reg)
10453 saved_regs += vfp_emit_fstmx (start_reg,
10454 (reg - start_reg) / 2);
10458 if (frame_pointer_needed)
10460 /* Create the new frame pointer. */
10461 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10462 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10463 RTX_FRAME_RELATED_P (insn) = 1;
10465 if (IS_NESTED (func_type))
10467 /* Recover the static chain register. */
10468 if (regs_ever_live [3] == 0
10469 || saved_pretend_args)
10470 insn = gen_rtx_REG (SImode, 3);
10471 else /* if (current_function_pretend_args_size == 0) */
10473 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10474 GEN_INT (4));
10475 insn = gen_rtx_MEM (SImode, insn);
10478 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10479 /* Add a USE to stop propagate_one_insn() from barfing. */
10480 emit_insn (gen_prologue_use (ip_rtx));
10484 offsets = arm_get_frame_offsets ();
10485 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10487 /* This add can produce multiple insns for a large constant, so we
10488 need to get tricky. */
10489 rtx last = get_last_insn ();
10491 amount = GEN_INT (offsets->saved_args + saved_regs
10492 - offsets->outgoing_args);
10494 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10495 amount));
10498 last = last ? NEXT_INSN (last) : get_insns ();
10499 RTX_FRAME_RELATED_P (last) = 1;
10501 while (last != insn);
10503 /* If the frame pointer is needed, emit a special barrier that
10504 will prevent the scheduler from moving stores to the frame
10505 before the stack adjustment. */
10506 if (frame_pointer_needed)
10507 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10508 hard_frame_pointer_rtx));
10512 if (flag_pic)
10513 arm_load_pic_register (0UL);
10515 /* If we are profiling, make sure no instructions are scheduled before
10516 the call to mcount. Similarly if the user has requested no
10517 scheduling in the prolog. */
10518 if (current_function_profile || !TARGET_SCHED_PROLOG)
10519 emit_insn (gen_blockage ());
10521 /* If the link register is being kept alive, with the return address in it,
10522 then make sure that it does not get reused by the ce2 pass. */
10523 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10525 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10526 cfun->machine->lr_save_eliminated = 1;
10530 /* If CODE is 'd', then X is a condition operand and the instruction
10531 should only be executed if the condition is true.
10532 If CODE is 'D', then X is a condition operand and the instruction
10533 should only be executed if the condition is false: however, if the mode
10534 of the comparison is CCFPEmode, then always execute the instruction -- we
10535 do this because in these circumstances !GE does not necessarily imply LT;
10536 in these cases the instruction pattern will take care to make sure that
10537 an instruction containing %d will follow, thereby undoing the effects of
10538 doing this instruction unconditionally.
10539 If CODE is 'N' then X is a floating point operand that must be negated
10540 before output.
10541 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10542 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
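/* Illustrative examples (not from the original source): with
   X = (const_int 5), code 'B' prints -6 (the bitwise inverse, sign
   extended); with X = (reg:DI 0), code 'M' prints {r0-r1}.  */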
10543 void
10544 arm_print_operand (FILE *stream, rtx x, int code)
10546 switch (code)
10548 case '@':
10549 fputs (ASM_COMMENT_START, stream);
10550 return;
10552 case '_':
10553 fputs (user_label_prefix, stream);
10554 return;
10556 case '|':
10557 fputs (REGISTER_PREFIX, stream);
10558 return;
10560 case '?':
10561 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10563 if (TARGET_THUMB)
10565 output_operand_lossage ("predicated Thumb instruction");
10566 break;
10568 if (current_insn_predicate != NULL)
10570 output_operand_lossage
10571 ("predicated instruction in conditional sequence");
10572 break;
10575 fputs (arm_condition_codes[arm_current_cc], stream);
10577 else if (current_insn_predicate)
10579 enum arm_cond_code code;
10581 if (TARGET_THUMB)
10583 output_operand_lossage ("predicated Thumb instruction");
10584 break;
10587 code = get_arm_condition_code (current_insn_predicate);
10588 fputs (arm_condition_codes[code], stream);
10590 return;
10592 case 'N':
10594 REAL_VALUE_TYPE r;
10595 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10596 r = REAL_VALUE_NEGATE (r);
10597 fprintf (stream, "%s", fp_const_from_val (&r));
10599 return;
10601 case 'B':
10602 if (GET_CODE (x) == CONST_INT)
10604 HOST_WIDE_INT val;
10605 val = ARM_SIGN_EXTEND (~INTVAL (x));
10606 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10608 else
10610 putc ('~', stream);
10611 output_addr_const (stream, x);
10613 return;
10615 case 'i':
10616 fprintf (stream, "%s", arithmetic_instr (x, 1));
10617 return;
10619 /* Truncate Cirrus shift counts. */
10620 case 's':
10621 if (GET_CODE (x) == CONST_INT)
10623 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10624 return;
10626 arm_print_operand (stream, x, 0);
10627 return;
10629 case 'I':
10630 fprintf (stream, "%s", arithmetic_instr (x, 0));
10631 return;
10633 case 'S':
10635 HOST_WIDE_INT val;
10636 const char * shift = shift_op (x, &val);
10638 if (shift)
10640 fprintf (stream, ", %s ", shift_op (x, &val));
10641 if (val == -1)
10642 arm_print_operand (stream, XEXP (x, 1), 0);
10643 else
10644 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10647 return;
10649 /* An explanation of the 'Q', 'R' and 'H' register operands:
10651 In a pair of registers containing a DI or DF value the 'Q'
10652 operand returns the register number of the register containing
10653 the least significant part of the value. The 'R' operand returns
10654 the register number of the register containing the most
10655 significant part of the value.
10657 The 'H' operand returns the higher of the two register numbers.
10658 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10659 same as the 'Q' operand, since the most significant part of the
10660 value is held in the lower-numbered register. The reverse is true
10661 on systems where WORDS_BIG_ENDIAN is false.
10663 The purpose of these operands is to distinguish between cases
10664 where the endian-ness of the values is important (for example
10665 when they are added together), and cases where the endian-ness
10666 is irrelevant, but the order of register operations is important.
10667 For example when loading a value from memory into a register
10668 pair, the endian-ness does not matter. Provided that the value
10669 from the lower memory address is put into the lower numbered
10670 register, and the value from the higher address is put into the
10671 higher numbered register, the load will work regardless of whether
10672 the value being loaded is big-wordian or little-wordian. The
10673 order of the two register loads can matter, however, if the address
10674 of the memory location is actually held in one of the registers
10675 being overwritten by the load. */
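/* A sketch of the intended use (illustrative only): a 64-bit add
   pattern can be written as
     "adds\t%Q0, %Q1, %Q2\;adc\t%R0, %R1, %R2"
   so that the carry always flows from the least significant word to
   the most significant word, whichever register holds it.  */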
10676 case 'Q':
10677 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10679 output_operand_lossage ("invalid operand for code '%c'", code);
10680 return;
10683 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10684 return;
10686 case 'R':
10687 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10689 output_operand_lossage ("invalid operand for code '%c'", code);
10690 return;
10693 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10694 return;
10696 case 'H':
10697 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10699 output_operand_lossage ("invalid operand for code '%c'", code);
10700 return;
10703 asm_fprintf (stream, "%r", REGNO (x) + 1);
10704 return;
10706 case 'm':
10707 asm_fprintf (stream, "%r",
10708 GET_CODE (XEXP (x, 0)) == REG
10709 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10710 return;
10712 case 'M':
10713 asm_fprintf (stream, "{%r-%r}",
10714 REGNO (x),
10715 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10716 return;
10718 case 'd':
10719 /* CONST_TRUE_RTX means always -- that's the default. */
10720 if (x == const_true_rtx)
10721 return;
10723 if (!COMPARISON_P (x))
10725 output_operand_lossage ("invalid operand for code '%c'", code);
10726 return;
10729 fputs (arm_condition_codes[get_arm_condition_code (x)],
10730 stream);
10731 return;
10733 case 'D':
10734 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10735 want to do that. */
10736 if (x == const_true_rtx)
10738 output_operand_lossage ("instruction never exectued");
10739 return;
10741 if (!COMPARISON_P (x))
10743 output_operand_lossage ("invalid operand for code '%c'", code);
10744 return;
10747 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10748 (get_arm_condition_code (x))],
10749 stream);
10750 return;
10752 /* Cirrus registers can be accessed in a variety of ways:
10753 single floating point (f)
10754 double floating point (d)
10755 32-bit integer (fx)
10756 64-bit integer (dx). */
10757 case 'W': /* Cirrus register in F mode. */
10758 case 'X': /* Cirrus register in D mode. */
10759 case 'Y': /* Cirrus register in FX mode. */
10760 case 'Z': /* Cirrus register in DX mode. */
10761 gcc_assert (GET_CODE (x) == REG
10762 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
10764 fprintf (stream, "mv%s%s",
10765 code == 'W' ? "f"
10766 : code == 'X' ? "d"
10767 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10769 return;
10771 /* Print a Cirrus register, using the register's own mode to pick the format. */
10772 case 'V':
10774 int mode = GET_MODE (x);
10776 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10778 output_operand_lossage ("invalid operand for code '%c'", code);
10779 return;
10782 fprintf (stream, "mv%s%s",
10783 mode == DFmode ? "d"
10784 : mode == SImode ? "fx"
10785 : mode == DImode ? "dx"
10786 : "f", reg_names[REGNO (x)] + 2);
10788 return;
10791 case 'U':
10792 if (GET_CODE (x) != REG
10793 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10794 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10795 /* Bad value for wCG register number. */
10797 output_operand_lossage ("invalid operand for code '%c'", code);
10798 return;
10801 else
10802 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10803 return;
10805 /* Print an iWMMXt control register name. */
10806 case 'w':
10807 if (GET_CODE (x) != CONST_INT
10808 || INTVAL (x) < 0
10809 || INTVAL (x) >= 16)
10810 /* Bad value for wC register number. */
10812 output_operand_lossage ("invalid operand for code '%c'", code);
10813 return;
10816 else
10818 static const char * wc_reg_names [16] =
10820 "wCID", "wCon", "wCSSF", "wCASF",
10821 "wC4", "wC5", "wC6", "wC7",
10822 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10823 "wC12", "wC13", "wC14", "wC15"
10826 fprintf (stream, "%s", wc_reg_names [INTVAL (x)]);
10828 return;
10830 /* Print a VFP double precision register name. */
10831 case 'P':
10833 int mode = GET_MODE (x);
10834 int num;
10836 if (mode != DImode && mode != DFmode)
10838 output_operand_lossage ("invalid operand for code '%c'", code);
10839 return;
10842 if (GET_CODE (x) != REG
10843 || !IS_VFP_REGNUM (REGNO (x)))
10845 output_operand_lossage ("invalid operand for code '%c'", code);
10846 return;
10849 num = REGNO (x) - FIRST_VFP_REGNUM;
10850 if (num & 1)
10852 output_operand_lossage ("invalid operand for code '%c'", code);
10853 return;
10856 fprintf (stream, "d%d", num >> 1);
10858 return;
10860 default:
10861 if (x == 0)
10863 output_operand_lossage ("missing operand");
10864 return;
10867 switch (GET_CODE (x))
10869 case REG:
10870 asm_fprintf (stream, "%r", REGNO (x));
10871 break;
10873 case MEM:
10874 output_memory_reference_mode = GET_MODE (x);
10875 output_address (XEXP (x, 0));
10876 break;
10878 case CONST_DOUBLE:
10879 fprintf (stream, "#%s", fp_immediate_constant (x));
10880 break;
10882 default:
10883 gcc_assert (GET_CODE (x) != NEG);
10884 fputc ('#', stream);
10885 output_addr_const (stream, x);
10886 break;
10891 #ifndef AOF_ASSEMBLER
10892 /* Target hook for assembling integer objects. The ARM version needs to
10893 handle word-sized values specially. */
10894 static bool
10895 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10897 if (size == UNITS_PER_WORD && aligned_p)
10899 fputs ("\t.word\t", asm_out_file);
10900 output_addr_const (asm_out_file, x);
10902 /* Mark symbols as position independent. We only do this in the
10903 .text segment, not in the .data segment. */
10904 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10905 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10907 if (GET_CODE (x) == SYMBOL_REF
10908 && (CONSTANT_POOL_ADDRESS_P (x)
10909 || SYMBOL_REF_LOCAL_P (x)))
10910 fputs ("(GOTOFF)", asm_out_file);
10911 else if (GET_CODE (x) == LABEL_REF)
10912 fputs ("(GOTOFF)", asm_out_file);
10913 else
10914 fputs ("(GOT)", asm_out_file);
10916 fputc ('\n', asm_out_file);
10917 return true;
10920 if (arm_vector_mode_supported_p (GET_MODE (x)))
10922 int i, units;
10924 gcc_assert (GET_CODE (x) == CONST_VECTOR);
10926 units = CONST_VECTOR_NUNITS (x);
10928 switch (GET_MODE (x))
10930 case V2SImode: size = 4; break;
10931 case V4HImode: size = 2; break;
10932 case V8QImode: size = 1; break;
10933 default:
10934 gcc_unreachable ();
10937 for (i = 0; i < units; i++)
10939 rtx elt;
10941 elt = CONST_VECTOR_ELT (x, i);
10942 assemble_integer
10943 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10946 return true;
10949 return default_assemble_integer (x, size, aligned_p);
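/* For instance (a sketch): with flag_pic set, a word-sized reference
   to a local symbol emitted while building the constant table comes
   out as "\t.word\tsym(GOTOFF)", whereas a non-local one comes out
   as "\t.word\tsym(GOT)".  */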
10953 /* Add a function to the list of static constructors. */
10955 static void
10956 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10958 if (!TARGET_AAPCS_BASED)
10960 default_named_section_asm_out_constructor (symbol, priority);
10961 return;
10964 /* Put these in the .init_array section, using a special relocation. */
10965 ctors_section ();
10966 assemble_align (POINTER_SIZE);
10967 fputs ("\t.word\t", asm_out_file);
10968 output_addr_const (asm_out_file, symbol);
10969 fputs ("(target1)\n", asm_out_file);
10971 #endif
10973 /* A finite state machine takes care of noticing whether or not instructions
10974 can be conditionally executed, thus decreasing execution time and code
10975 size by deleting branch instructions. The fsm is controlled by
10976 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10978 /* The states of the fsm controlling condition codes are:
10979 0: normal, do nothing special
10980 1: make ASM_OUTPUT_OPCODE not output this instruction
10981 2: make ASM_OUTPUT_OPCODE not output this instruction
10982 3: make instructions conditional
10983 4: make instructions conditional
10985 State transitions (state->state by whom under condition):
10986 0 -> 1 final_prescan_insn if the `target' is a label
10987 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10988 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10989 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10990 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10991 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10992 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10993 (the target insn is arm_target_insn).
10995 If the jump clobbers the conditions then we use states 2 and 4.
10997 A similar thing can be done with conditional return insns.
10999 XXX In case the `target' is an unconditional branch, this conditionalising
11000 of the instructions always reduces code size, but not always execution
11001 time. But then, I want to reduce the code size to somewhere near what
11002 /bin/cc produces. */
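/* A sketch of the transformation this fsm enables (illustrative
   only):

	cmp	r0, #0			cmp	r0, #0
	beq	.L1		==>	addne	r1, r1, #1
	add	r1, r1, #1
   .L1:

   The branch and the label disappear, and the skipped insn is
   executed conditionally instead.  */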
11004 /* Returns the index of the ARM condition code string in
11005 `arm_condition_codes'. COMPARISON should be an rtx like
11006 `(eq (...) (...))'. */
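/* For example (a sketch): (eq (reg:CC_Z 24) (const_int 0)) yields
   ARM_EQ, while (ltu ...) on a plain CCmode comparison yields
   ARM_CC.  */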
11007 static enum arm_cond_code
11008 get_arm_condition_code (rtx comparison)
11010 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11011 int code;
11012 enum rtx_code comp_code = GET_CODE (comparison);
11014 if (GET_MODE_CLASS (mode) != MODE_CC)
11015 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11016 XEXP (comparison, 1));
11018 switch (mode)
11020 case CC_DNEmode: code = ARM_NE; goto dominance;
11021 case CC_DEQmode: code = ARM_EQ; goto dominance;
11022 case CC_DGEmode: code = ARM_GE; goto dominance;
11023 case CC_DGTmode: code = ARM_GT; goto dominance;
11024 case CC_DLEmode: code = ARM_LE; goto dominance;
11025 case CC_DLTmode: code = ARM_LT; goto dominance;
11026 case CC_DGEUmode: code = ARM_CS; goto dominance;
11027 case CC_DGTUmode: code = ARM_HI; goto dominance;
11028 case CC_DLEUmode: code = ARM_LS; goto dominance;
11029 case CC_DLTUmode: code = ARM_CC;
11031 dominance:
11032 gcc_assert (comp_code == EQ || comp_code == NE);
11034 if (comp_code == EQ)
11035 return ARM_INVERSE_CONDITION_CODE (code);
11036 return code;
11038 case CC_NOOVmode:
11039 switch (comp_code)
11041 case NE: return ARM_NE;
11042 case EQ: return ARM_EQ;
11043 case GE: return ARM_PL;
11044 case LT: return ARM_MI;
11045 default: gcc_unreachable ();
11048 case CC_Zmode:
11049 switch (comp_code)
11051 case NE: return ARM_NE;
11052 case EQ: return ARM_EQ;
11053 default: gcc_unreachable ();
11056 case CC_Nmode:
11057 switch (comp_code)
11059 case NE: return ARM_MI;
11060 case EQ: return ARM_PL;
11061 default: gcc_unreachable ();
11064 case CCFPEmode:
11065 case CCFPmode:
11066 /* These encodings assume that AC=1 in the FPA system control
11067 byte. This allows us to handle all cases except UNEQ and
11068 LTGT. */
11069 switch (comp_code)
11071 case GE: return ARM_GE;
11072 case GT: return ARM_GT;
11073 case LE: return ARM_LS;
11074 case LT: return ARM_MI;
11075 case NE: return ARM_NE;
11076 case EQ: return ARM_EQ;
11077 case ORDERED: return ARM_VC;
11078 case UNORDERED: return ARM_VS;
11079 case UNLT: return ARM_LT;
11080 case UNLE: return ARM_LE;
11081 case UNGT: return ARM_HI;
11082 case UNGE: return ARM_PL;
11083 /* UNEQ and LTGT do not have a representation. */
11084 case UNEQ: /* Fall through. */
11085 case LTGT: /* Fall through. */
11086 default: gcc_unreachable ();
11089 case CC_SWPmode:
11090 switch (comp_code)
11092 case NE: return ARM_NE;
11093 case EQ: return ARM_EQ;
11094 case GE: return ARM_LE;
11095 case GT: return ARM_LT;
11096 case LE: return ARM_GE;
11097 case LT: return ARM_GT;
11098 case GEU: return ARM_LS;
11099 case GTU: return ARM_CC;
11100 case LEU: return ARM_CS;
11101 case LTU: return ARM_HI;
11102 default: gcc_unreachable ();
11105 case CC_Cmode:
11106 switch (comp_code)
11108 case LTU: return ARM_CS;
11109 case GEU: return ARM_CC;
11110 default: gcc_unreachable ();
11113 case CCmode:
11114 switch (comp_code)
11116 case NE: return ARM_NE;
11117 case EQ: return ARM_EQ;
11118 case GE: return ARM_GE;
11119 case GT: return ARM_GT;
11120 case LE: return ARM_LE;
11121 case LT: return ARM_LT;
11122 case GEU: return ARM_CS;
11123 case GTU: return ARM_HI;
11124 case LEU: return ARM_LS;
11125 case LTU: return ARM_CC;
11126 default: gcc_unreachable ();
11129 default: gcc_unreachable ();
11133 void
11134 arm_final_prescan_insn (rtx insn)
11136 /* BODY will hold the body of INSN. */
11137 rtx body = PATTERN (insn);
11139 /* This will be 1 if trying to repeat the trick, and things need to be
11140 reversed if it appears to fail. */
11141 int reverse = 0;
11143 /* A nonzero JUMP_CLOBBERS means that the condition codes are clobbered
11144 if a branch is taken, even if the rtl suggests otherwise. It also
11145 means that we have to grub around within the jump expression to find
11146 out what the conditions are when the jump isn't taken. */
11147 int jump_clobbers = 0;
11149 /* If we start with a return insn, we only succeed if we find another one. */
11150 int seeking_return = 0;
11152 /* START_INSN will hold the insn from where we start looking. This is the
11153 first insn after the following code_label if REVERSE is true. */
11154 rtx start_insn = insn;
11156 /* If in state 4, check if the target branch is reached, in order to
11157 change back to state 0. */
11158 if (arm_ccfsm_state == 4)
11160 if (insn == arm_target_insn)
11162 arm_target_insn = NULL;
11163 arm_ccfsm_state = 0;
11165 return;
11168 /* If in state 3, it is possible to repeat the trick, if this insn is an
11169 unconditional branch to a label, and immediately following this branch
11170 is the previous target label which is only used once, and the label this
11171 branch jumps to is not too far off. */
11172 if (arm_ccfsm_state == 3)
11174 if (simplejump_p (insn))
11176 start_insn = next_nonnote_insn (start_insn);
11177 if (GET_CODE (start_insn) == BARRIER)
11179 /* XXX Isn't this always a barrier? */
11180 start_insn = next_nonnote_insn (start_insn);
11182 if (GET_CODE (start_insn) == CODE_LABEL
11183 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11184 && LABEL_NUSES (start_insn) == 1)
11185 reverse = TRUE;
11186 else
11187 return;
11189 else if (GET_CODE (body) == RETURN)
11191 start_insn = next_nonnote_insn (start_insn);
11192 if (GET_CODE (start_insn) == BARRIER)
11193 start_insn = next_nonnote_insn (start_insn);
11194 if (GET_CODE (start_insn) == CODE_LABEL
11195 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11196 && LABEL_NUSES (start_insn) == 1)
11198 reverse = TRUE;
11199 seeking_return = 1;
11201 else
11202 return;
11204 else
11205 return;
11208 gcc_assert (!arm_ccfsm_state || reverse);
11209 if (GET_CODE (insn) != JUMP_INSN)
11210 return;
11212 /* This jump might be paralleled with a clobber of the condition codes;
11213 the jump should always come first. */
11214 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11215 body = XVECEXP (body, 0, 0);
11217 if (reverse
11218 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11219 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11221 int insns_skipped;
11222 int fail = FALSE, succeed = FALSE;
11223 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11224 int then_not_else = TRUE;
11225 rtx this_insn = start_insn, label = 0;
11227 /* If the jump cannot be done with one instruction, we cannot
11228 conditionally execute the instruction in the inverse case. */
11229 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11231 jump_clobbers = 1;
11232 return;
11235 /* Register the insn jumped to. */
11236 if (reverse)
11238 if (!seeking_return)
11239 label = XEXP (SET_SRC (body), 0);
11241 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11242 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11243 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11245 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11246 then_not_else = FALSE;
11248 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11249 seeking_return = 1;
11250 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11252 seeking_return = 1;
11253 then_not_else = FALSE;
11255 else
11256 gcc_unreachable ();
11258 /* See how many insns this branch skips, and what kind of insns. If all
11259 insns are okay, and the label or unconditional branch to the same
11260 label is not too far away, succeed. */
11261 for (insns_skipped = 0;
11262 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11264 rtx scanbody;
11266 this_insn = next_nonnote_insn (this_insn);
11267 if (!this_insn)
11268 break;
11270 switch (GET_CODE (this_insn))
11272 case CODE_LABEL:
11273 /* Succeed if it is the target label, otherwise fail since
11274 control falls in from somewhere else. */
11275 if (this_insn == label)
11277 if (jump_clobbers)
11279 arm_ccfsm_state = 2;
11280 this_insn = next_nonnote_insn (this_insn);
11282 else
11283 arm_ccfsm_state = 1;
11284 succeed = TRUE;
11286 else
11287 fail = TRUE;
11288 break;
11290 case BARRIER:
11291 /* Succeed if the following insn is the target label.
11292 Otherwise fail.
11293 If return insns are used then the last insn in a function
11294 will be a barrier. */
11295 this_insn = next_nonnote_insn (this_insn);
11296 if (this_insn && this_insn == label)
11298 if (jump_clobbers)
11300 arm_ccfsm_state = 2;
11301 this_insn = next_nonnote_insn (this_insn);
11303 else
11304 arm_ccfsm_state = 1;
11305 succeed = TRUE;
11307 else
11308 fail = TRUE;
11309 break;
11311 case CALL_INSN:
11312 /* The AAPCS says that conditional calls should not be
11313 used since they make interworking inefficient (the
11314 linker can't transform BL<cond> into BLX). That's
11315 only a problem if the machine has BLX. */
11316 if (arm_arch5)
11318 fail = TRUE;
11319 break;
11322 /* Succeed if the following insn is the target label, or
11323 if the following two insns are a barrier and the
11324 target label. */
11325 this_insn = next_nonnote_insn (this_insn);
11326 if (this_insn && GET_CODE (this_insn) == BARRIER)
11327 this_insn = next_nonnote_insn (this_insn);
11329 if (this_insn && this_insn == label
11330 && insns_skipped < max_insns_skipped)
11332 if (jump_clobbers)
11334 arm_ccfsm_state = 2;
11335 this_insn = next_nonnote_insn (this_insn);
11337 else
11338 arm_ccfsm_state = 1;
11339 succeed = TRUE;
11341 else
11342 fail = TRUE;
11343 break;
11345 case JUMP_INSN:
11346 /* If this is an unconditional branch to the same label, succeed.
11347 If it is to another label, do nothing. If it is conditional,
11348 fail. */
11349 /* XXX Probably, the tests for SET and the PC are
11350 unnecessary. */
11352 scanbody = PATTERN (this_insn);
11353 if (GET_CODE (scanbody) == SET
11354 && GET_CODE (SET_DEST (scanbody)) == PC)
11356 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11357 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11359 arm_ccfsm_state = 2;
11360 succeed = TRUE;
11362 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11363 fail = TRUE;
11365 /* Fail if a conditional return is undesirable (e.g. on a
11366 StrongARM), but still allow this if optimizing for size. */
11367 else if (GET_CODE (scanbody) == RETURN
11368 && !use_return_insn (TRUE, NULL)
11369 && !optimize_size)
11370 fail = TRUE;
11371 else if (GET_CODE (scanbody) == RETURN
11372 && seeking_return)
11374 arm_ccfsm_state = 2;
11375 succeed = TRUE;
11377 else if (GET_CODE (scanbody) == PARALLEL)
11379 switch (get_attr_conds (this_insn))
11381 case CONDS_NOCOND:
11382 break;
11383 default:
11384 fail = TRUE;
11385 break;
11388 else
11389 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11391 break;
11393 case INSN:
11394 /* Instructions using or affecting the condition codes make it
11395 fail. */
11396 scanbody = PATTERN (this_insn);
11397 if (!(GET_CODE (scanbody) == SET
11398 || GET_CODE (scanbody) == PARALLEL)
11399 || get_attr_conds (this_insn) != CONDS_NOCOND)
11400 fail = TRUE;
11402 /* A conditional Cirrus instruction must be followed by
11403 a non-Cirrus instruction. However, since we
11404 conditionalize instructions in this function, and
11405 since by the time we get here we cannot add
11406 instructions (nops), because shorten_branches () has
11407 already been called, we simply disable the
11408 conditionalizing of Cirrus instructions, to be safe. */
11409 if (GET_CODE (scanbody) != USE
11410 && GET_CODE (scanbody) != CLOBBER
11411 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11412 fail = TRUE;
11413 break;
11415 default:
11416 break;
11419 if (succeed)
11421 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11422 arm_target_label = CODE_LABEL_NUMBER (label);
11423 else
11425 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11427 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11429 this_insn = next_nonnote_insn (this_insn);
11430 gcc_assert (!this_insn
11431 || (GET_CODE (this_insn) != BARRIER
11432 && GET_CODE (this_insn) != CODE_LABEL));
11434 if (!this_insn)
11436 /* Oh dear! We ran off the end; give up. */
11437 recog (PATTERN (insn), insn, NULL);
11438 arm_ccfsm_state = 0;
11439 arm_target_insn = NULL;
11440 return;
11442 arm_target_insn = this_insn;
11444 if (jump_clobbers)
11446 gcc_assert (!reverse);
11447 arm_current_cc =
11448 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11449 0), 0), 1));
11450 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11451 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11452 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11453 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11455 else
11457 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11458 what it was. */
11459 if (!reverse)
11460 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11461 0));
11464 if (reverse || then_not_else)
11465 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11468 /* Restore recog_data (getting the attributes of other insns can
11469 destroy this array, but final.c assumes that it remains intact
11470 across this call; since the insn has been recognized already we
11471 call recog directly). */
11472 recog (PATTERN (insn), insn, NULL);
11476 /* Returns true if REGNO is a valid register
11477 for holding a quantity of mode MODE. */
11479 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11481 if (GET_MODE_CLASS (mode) == MODE_CC)
11482 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11484 if (TARGET_THUMB)
11485 /* For the Thumb we only allow values bigger than SImode in
11486 registers 0 - 6, so that there is always a second low
11487 register available to hold the upper part of the value.
11488 We probably ought to ensure that the register is the
11489 start of an even numbered register pair. */
11490 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11492 if (IS_CIRRUS_REGNUM (regno))
11493 /* We have outlawed SI values in Cirrus registers because they
11494 reside in the lower 32 bits, but SF values reside in the
11495 upper 32 bits. This causes gcc all sorts of grief. We can't
11496 even split the registers into pairs because Cirrus SI values
11497 get sign-extended to 64 bits -- aldyh. */
11498 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11500 if (IS_VFP_REGNUM (regno))
11502 if (mode == SFmode || mode == SImode)
11503 return TRUE;
11505 /* DFmode values are only valid in even register pairs. */
11506 if (mode == DFmode)
11507 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11508 return FALSE;
11511 if (IS_IWMMXT_GR_REGNUM (regno))
11512 return mode == SImode;
11514 if (IS_IWMMXT_REGNUM (regno))
11515 return VALID_IWMMXT_REG_MODE (mode);
11517 /* We allow any value to be stored in the general registers.
11518 Restrict doubleword quantities to even register pairs so that we can
11519 use ldrd. */
11520 if (regno <= LAST_ARM_REGNUM)
11521 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
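/* E.g. (a sketch): with TARGET_LDRD, a DImode value may start at r0
   or r2 but not at r1, so doubleword values stay in even/odd pairs
   that ldrd/strd can address.  */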
11523 if ( regno == FRAME_POINTER_REGNUM
11524 || regno == ARG_POINTER_REGNUM)
11525 /* We only allow integers in the fake hard registers. */
11526 return GET_MODE_CLASS (mode) == MODE_INT;
11528 /* The only registers left are the FPA registers
11529 which we only allow to hold FP values. */
11530 return GET_MODE_CLASS (mode) == MODE_FLOAT
11531 && regno >= FIRST_FPA_REGNUM
11532 && regno <= LAST_FPA_REGNUM;
11536 arm_regno_class (int regno)
11538 if (TARGET_THUMB)
11540 if (regno == STACK_POINTER_REGNUM)
11541 return STACK_REG;
11542 if (regno == CC_REGNUM)
11543 return CC_REG;
11544 if (regno < 8)
11545 return LO_REGS;
11546 return HI_REGS;
11549 if ( regno <= LAST_ARM_REGNUM
11550 || regno == FRAME_POINTER_REGNUM
11551 || regno == ARG_POINTER_REGNUM)
11552 return GENERAL_REGS;
11554 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11555 return NO_REGS;
11557 if (IS_CIRRUS_REGNUM (regno))
11558 return CIRRUS_REGS;
11560 if (IS_VFP_REGNUM (regno))
11561 return VFP_REGS;
11563 if (IS_IWMMXT_REGNUM (regno))
11564 return IWMMXT_REGS;
11566 if (IS_IWMMXT_GR_REGNUM (regno))
11567 return IWMMXT_GR_REGS;
11569 return FPA_REGS;
11572 /* Handle a special case when computing the offset
11573 of an argument from the frame pointer. */
11575 arm_debugger_arg_offset (int value, rtx addr)
11577 rtx insn;
11579 /* We are only interested if dbxout_parms() failed to compute the offset. */
11580 if (value != 0)
11581 return 0;
11583 /* We can only cope with the case where the address is held in a register. */
11584 if (GET_CODE (addr) != REG)
11585 return 0;
11587 /* If we are using the frame pointer to point at the argument, then
11588 an offset of 0 is correct. */
11589 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11590 return 0;
11592 /* If we are using the stack pointer to point at the
11593 argument, then an offset of 0 is correct. */
11594 if ((TARGET_THUMB || !frame_pointer_needed)
11595 && REGNO (addr) == SP_REGNUM)
11596 return 0;
11598 /* Oh dear. The argument is pointed to by a register rather
11599 than being held in a register, or being stored at a known
11600 offset from the frame pointer. Since GDB only understands
11601 those two kinds of argument we must translate the address
11602 held in the register into an offset from the frame pointer.
11603 We do this by searching through the insns for the function
11604 looking to see where this register gets its value. If the
11605 register is initialized from the frame pointer plus an offset
11606 then we are in luck and we can continue, otherwise we give up.
11608 This code is exercised by producing debugging information
11609 for a function with arguments like this:
11611 double func (double a, double b, int c, double d) {return d;}
11613 Without this code the stab for parameter 'd' will be set to
11614 an offset of 0 from the frame pointer, rather than 8. */
11616 /* The if() statement says:
11618 If the insn is a normal instruction
11619 and if the insn is setting the value in a register
11620 and if the register being set is the register holding the address of the argument
11621 and if the address is computed by an addition
11622 that involves adding to a register
11623 which is the frame pointer
11624 a constant integer
11626 then... */
11628 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11630 if ( GET_CODE (insn) == INSN
11631 && GET_CODE (PATTERN (insn)) == SET
11632 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11633 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11634 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11635 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11636 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11639 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11641 break;
11645 if (value == 0)
11647 debug_rtx (addr);
11648 warning (0, "unable to compute real location of stacked parameter");
11649 value = 8; /* XXX magic hack */
11652 return value;
11655 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11656 do \
11658 if ((MASK) & insn_flags) \
11659 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11660 BUILT_IN_MD, NULL, NULL_TREE); \
11662 while (0)
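/* Hypothetical usage sketch (the builtin name here is illustrative):
     def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx",
		   int_ftype_int, ARM_BUILTIN_GETWCX);
   registers the builtin only when FL_IWMMXT is set in insn_flags.  */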
11664 struct builtin_description
11666 const unsigned int mask;
11667 const enum insn_code icode;
11668 const char * const name;
11669 const enum arm_builtins code;
11670 const enum rtx_code comparison;
11671 const unsigned int flag;
11674 static const struct builtin_description bdesc_2arg[] =
11676 #define IWMMXT_BUILTIN(code, string, builtin) \
11677 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11678 ARM_BUILTIN_##builtin, 0, 0 },
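/* So the first entry below, for example, expands to
   { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
     ARM_BUILTIN_WADDB, 0, 0 }.  */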
11680 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11681 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11682 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11683 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11684 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11685 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11686 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11687 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11688 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11689 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11690 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11691 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11692 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11693 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11694 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11695 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11696 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11697 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11698 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11699 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11700 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11701 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11702 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11703 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11704 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11705 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11706 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11707 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11708 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11709 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11710 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11711 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11712 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11713 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11714 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11715 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11716 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11717 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11718 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11719 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11720 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11721 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11722 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11723 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11724 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11725 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11726 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11727 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11728 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11729 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11730 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11731 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11732 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11733 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11734 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11735 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11736 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11737 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11739 #define IWMMXT_BUILTIN2(code, builtin) \
11740 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11742 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11743 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11744 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11745 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11746 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11747 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11748 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11749 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11750 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11751 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11752 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11753 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11754 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11755 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11756 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11757 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11758 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11759 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11760 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11761 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11762 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11763 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11764 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11765 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11766 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11767 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11768 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11769 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11770 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11771 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11772 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11773 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11776 static const struct builtin_description bdesc_1arg[] =
11778 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11779 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11780 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11781 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11782 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11783 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11784 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11785 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11786 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11787 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11788 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11789 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11790 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11791 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11792 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11793 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11794 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11795 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11798 /* Set up all the iWMMXt builtins. This is
11799 not called if TARGET_IWMMXT is zero. */
11801 static void
11802 arm_init_iwmmxt_builtins (void)
11804 const struct builtin_description * d;
11805 size_t i;
11806 tree endlink = void_list_node;
11808 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11809 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11810 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11812 tree int_ftype_int
11813 = build_function_type (integer_type_node,
11814 tree_cons (NULL_TREE, integer_type_node, endlink));
11815 tree v8qi_ftype_v8qi_v8qi_int
11816 = build_function_type (V8QI_type_node,
11817 tree_cons (NULL_TREE, V8QI_type_node,
11818 tree_cons (NULL_TREE, V8QI_type_node,
11819 tree_cons (NULL_TREE,
11820 integer_type_node,
11821 endlink))));
11822 tree v4hi_ftype_v4hi_int
11823 = build_function_type (V4HI_type_node,
11824 tree_cons (NULL_TREE, V4HI_type_node,
11825 tree_cons (NULL_TREE, integer_type_node,
11826 endlink)));
11827 tree v2si_ftype_v2si_int
11828 = build_function_type (V2SI_type_node,
11829 tree_cons (NULL_TREE, V2SI_type_node,
11830 tree_cons (NULL_TREE, integer_type_node,
11831 endlink)));
11832 tree v2si_ftype_di_di
11833 = build_function_type (V2SI_type_node,
11834 tree_cons (NULL_TREE, long_long_integer_type_node,
11835 tree_cons (NULL_TREE, long_long_integer_type_node,
11836 endlink)));
11837 tree di_ftype_di_int
11838 = build_function_type (long_long_integer_type_node,
11839 tree_cons (NULL_TREE, long_long_integer_type_node,
11840 tree_cons (NULL_TREE, integer_type_node,
11841 endlink)));
11842 tree di_ftype_di_int_int
11843 = build_function_type (long_long_integer_type_node,
11844 tree_cons (NULL_TREE, long_long_integer_type_node,
11845 tree_cons (NULL_TREE, integer_type_node,
11846 tree_cons (NULL_TREE,
11847 integer_type_node,
11848 endlink))));
11849 tree int_ftype_v8qi
11850 = build_function_type (integer_type_node,
11851 tree_cons (NULL_TREE, V8QI_type_node,
11852 endlink));
11853 tree int_ftype_v4hi
11854 = build_function_type (integer_type_node,
11855 tree_cons (NULL_TREE, V4HI_type_node,
11856 endlink));
11857 tree int_ftype_v2si
11858 = build_function_type (integer_type_node,
11859 tree_cons (NULL_TREE, V2SI_type_node,
11860 endlink));
11861 tree int_ftype_v8qi_int
11862 = build_function_type (integer_type_node,
11863 tree_cons (NULL_TREE, V8QI_type_node,
11864 tree_cons (NULL_TREE, integer_type_node,
11865 endlink)));
11866 tree int_ftype_v4hi_int
11867 = build_function_type (integer_type_node,
11868 tree_cons (NULL_TREE, V4HI_type_node,
11869 tree_cons (NULL_TREE, integer_type_node,
11870 endlink)));
11871 tree int_ftype_v2si_int
11872 = build_function_type (integer_type_node,
11873 tree_cons (NULL_TREE, V2SI_type_node,
11874 tree_cons (NULL_TREE, integer_type_node,
11875 endlink)));
11876 tree v8qi_ftype_v8qi_int_int
11877 = build_function_type (V8QI_type_node,
11878 tree_cons (NULL_TREE, V8QI_type_node,
11879 tree_cons (NULL_TREE, integer_type_node,
11880 tree_cons (NULL_TREE,
11881 integer_type_node,
11882 endlink))));
11883 tree v4hi_ftype_v4hi_int_int
11884 = build_function_type (V4HI_type_node,
11885 tree_cons (NULL_TREE, V4HI_type_node,
11886 tree_cons (NULL_TREE, integer_type_node,
11887 tree_cons (NULL_TREE,
11888 integer_type_node,
11889 endlink))));
11890 tree v2si_ftype_v2si_int_int
11891 = build_function_type (V2SI_type_node,
11892 tree_cons (NULL_TREE, V2SI_type_node,
11893 tree_cons (NULL_TREE, integer_type_node,
11894 tree_cons (NULL_TREE,
11895 integer_type_node,
11896 endlink))));
11897 /* Miscellaneous. */
11898 tree v8qi_ftype_v4hi_v4hi
11899 = build_function_type (V8QI_type_node,
11900 tree_cons (NULL_TREE, V4HI_type_node,
11901 tree_cons (NULL_TREE, V4HI_type_node,
11902 endlink)));
11903 tree v4hi_ftype_v2si_v2si
11904 = build_function_type (V4HI_type_node,
11905 tree_cons (NULL_TREE, V2SI_type_node,
11906 tree_cons (NULL_TREE, V2SI_type_node,
11907 endlink)));
11908 tree v2si_ftype_v4hi_v4hi
11909 = build_function_type (V2SI_type_node,
11910 tree_cons (NULL_TREE, V4HI_type_node,
11911 tree_cons (NULL_TREE, V4HI_type_node,
11912 endlink)));
11913 tree v2si_ftype_v8qi_v8qi
11914 = build_function_type (V2SI_type_node,
11915 tree_cons (NULL_TREE, V8QI_type_node,
11916 tree_cons (NULL_TREE, V8QI_type_node,
11917 endlink)));
11918 tree v4hi_ftype_v4hi_di
11919 = build_function_type (V4HI_type_node,
11920 tree_cons (NULL_TREE, V4HI_type_node,
11921 tree_cons (NULL_TREE,
11922 long_long_integer_type_node,
11923 endlink)));
11924 tree v2si_ftype_v2si_di
11925 = build_function_type (V2SI_type_node,
11926 tree_cons (NULL_TREE, V2SI_type_node,
11927 tree_cons (NULL_TREE,
11928 long_long_integer_type_node,
11929 endlink)));
11930 tree void_ftype_int_int
11931 = build_function_type (void_type_node,
11932 tree_cons (NULL_TREE, integer_type_node,
11933 tree_cons (NULL_TREE, integer_type_node,
11934 endlink)));
11935 tree di_ftype_void
11936 = build_function_type (long_long_unsigned_type_node, endlink);
11937 tree di_ftype_v8qi
11938 = build_function_type (long_long_integer_type_node,
11939 tree_cons (NULL_TREE, V8QI_type_node,
11940 endlink));
11941 tree di_ftype_v4hi
11942 = build_function_type (long_long_integer_type_node,
11943 tree_cons (NULL_TREE, V4HI_type_node,
11944 endlink));
11945 tree di_ftype_v2si
11946 = build_function_type (long_long_integer_type_node,
11947 tree_cons (NULL_TREE, V2SI_type_node,
11948 endlink));
11949 tree v2si_ftype_v4hi
11950 = build_function_type (V2SI_type_node,
11951 tree_cons (NULL_TREE, V4HI_type_node,
11952 endlink));
11953 tree v4hi_ftype_v8qi
11954 = build_function_type (V4HI_type_node,
11955 tree_cons (NULL_TREE, V8QI_type_node,
11956 endlink));
11958 tree di_ftype_di_v4hi_v4hi
11959 = build_function_type (long_long_unsigned_type_node,
11960 tree_cons (NULL_TREE,
11961 long_long_unsigned_type_node,
11962 tree_cons (NULL_TREE, V4HI_type_node,
11963 tree_cons (NULL_TREE,
11964 V4HI_type_node,
11965 endlink))));
11967 tree di_ftype_v4hi_v4hi
11968 = build_function_type (long_long_unsigned_type_node,
11969 tree_cons (NULL_TREE, V4HI_type_node,
11970 tree_cons (NULL_TREE, V4HI_type_node,
11971 endlink)));
11973 /* Normal vector binops. */
11974 tree v8qi_ftype_v8qi_v8qi
11975 = build_function_type (V8QI_type_node,
11976 tree_cons (NULL_TREE, V8QI_type_node,
11977 tree_cons (NULL_TREE, V8QI_type_node,
11978 endlink)));
11979 tree v4hi_ftype_v4hi_v4hi
11980 = build_function_type (V4HI_type_node,
11981 tree_cons (NULL_TREE, V4HI_type_node,
11982 tree_cons (NULL_TREE, V4HI_type_node,
11983 endlink)));
11984 tree v2si_ftype_v2si_v2si
11985 = build_function_type (V2SI_type_node,
11986 tree_cons (NULL_TREE, V2SI_type_node,
11987 tree_cons (NULL_TREE, V2SI_type_node,
11988 endlink)));
11989 tree di_ftype_di_di
11990 = build_function_type (long_long_unsigned_type_node,
11991 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11992 tree_cons (NULL_TREE,
11993 long_long_unsigned_type_node,
11994 endlink)));
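/* Taken together, each of the *_ftype_* trees above is just the
   internal encoding of an ordinary C prototype.  As an illustrative
   sketch (the typedefs below are the usual GCC vector extensions, not
   part of this file):  */
#if 0
typedef char  v8qi __attribute__ ((vector_size (8)));	/* 8 x QImode */
typedef short v4hi __attribute__ ((vector_size (8)));	/* 4 x HImode */
typedef int   v2si __attribute__ ((vector_size (8)));	/* 2 x SImode */

v8qi      f1 (v8qi, v8qi);	/* v8qi_ftype_v8qi_v8qi */
v4hi      f2 (v4hi, int);	/* v4hi_ftype_v4hi_int */
long long f3 (long long, int);	/* di_ftype_di_int */
#endif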
11996 /* Add all builtins that are more or less simple operations on two
11997 operands. */
11998 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12000 /* Use one of the operands; the target can have a different mode for
12001 mask-generating compares. */
12002 enum machine_mode mode;
12003 tree type;
12005 if (d->name == 0)
12006 continue;
12008 mode = insn_data[d->icode].operand[1].mode;
12010 switch (mode)
12012 case V8QImode:
12013 type = v8qi_ftype_v8qi_v8qi;
12014 break;
12015 case V4HImode:
12016 type = v4hi_ftype_v4hi_v4hi;
12017 break;
12018 case V2SImode:
12019 type = v2si_ftype_v2si_v2si;
12020 break;
12021 case DImode:
12022 type = di_ftype_di_di;
12023 break;
12025 default:
12026 gcc_unreachable ();
12029 def_mbuiltin (d->mask, d->name, type, d->code);
12032 /* Add the remaining MMX insns with somewhat more complicated types. */
12033 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12034 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12035 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12037 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12038 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12039 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12040 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12041 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12042 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12044 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12045 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12046 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12047 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12048 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12049 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12051 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12052 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12053 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12054 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12055 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12056 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12058 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12059 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12060 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12061 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12062 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12063 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12065 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12067 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12068 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12069 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12070 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12072 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12073 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12074 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12075 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12076 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12077 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12078 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12079 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12080 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12082 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12083 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12084 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12086 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12087 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12088 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12090 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12091 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12092 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12093 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12094 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12095 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12097 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12098 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12099 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12100 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12101 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12102 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12103 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12104 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12105 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12106 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12107 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12108 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12110 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12111 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12112 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12113 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12115 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12116 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12117 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12118 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12119 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12120 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12121 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
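/* A minimal usage sketch (assumes a target with FL_IWMMXT set, e.g.
   -mcpu=iwmmxt, and the v4hi typedef from the sketch above; the builtin
   names are the ones registered above, but the surrounding code is
   illustrative only):  */
#if 0
typedef short v4hi __attribute__ ((vector_size (8)));

unsigned long long
dot_accumulate (unsigned long long acc, v4hi a, v4hi b)
{
  /* di_ftype_di_v4hi_v4hi: signed multiply-accumulate (wmacs).  */
  return __builtin_arm_wmacs (acc, a, b);
}

unsigned long long
clear_accumulator (void)
{
  /* di_ftype_void: zero a 64-bit accumulator (wzero).  */
  return __builtin_arm_wzero ();
}
#endif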
12124 static void
12125 arm_init_builtins (void)
12127 if (TARGET_REALLY_IWMMXT)
12128 arm_init_iwmmxt_builtins ();
12131 /* Errors in the source file can cause expand_expr to return const0_rtx
12132 where we expect a vector. To avoid crashing, use one of the vector
12133 clear instructions. */
12135 static rtx
12136 safe_vector_operand (rtx x, enum machine_mode mode)
12138 if (x != const0_rtx)
12139 return x;
12140 x = gen_reg_rtx (mode);
12142 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12143 : gen_rtx_SUBREG (DImode, x, 0)));
12144 return x;
12147 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12149 static rtx
12150 arm_expand_binop_builtin (enum insn_code icode,
12151 tree arglist, rtx target)
12153 rtx pat;
12154 tree arg0 = TREE_VALUE (arglist);
12155 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12156 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12157 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12158 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12159 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12160 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12162 if (VECTOR_MODE_P (mode0))
12163 op0 = safe_vector_operand (op0, mode0);
12164 if (VECTOR_MODE_P (mode1))
12165 op1 = safe_vector_operand (op1, mode1);
12167 if (! target
12168 || GET_MODE (target) != tmode
12169 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12170 target = gen_reg_rtx (tmode);
12172 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12174 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12175 op0 = copy_to_mode_reg (mode0, op0);
12176 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12177 op1 = copy_to_mode_reg (mode1, op1);
12179 pat = GEN_FCN (icode) (target, op0, op1);
12180 if (! pat)
12181 return 0;
12182 emit_insn (pat);
12183 return target;
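/* For instance, the ARM_BUILTIN_WSADB case further down simply calls
   this routine with CODE_FOR_iwmmxt_wsadb, so a source-level use looks
   like this sketch (typedefs as in the earlier illustration):  */
#if 0
typedef char v8qi __attribute__ ((vector_size (8)));
typedef int  v2si __attribute__ ((vector_size (8)));

v2si
sum_abs_diff (v8qi a, v8qi b)
{
  return __builtin_arm_wsadb (a, b);	/* v2si_ftype_v8qi_v8qi */
}
#endif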
12186 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12188 static rtx
12189 arm_expand_unop_builtin (enum insn_code icode,
12190 tree arglist, rtx target, int do_load)
12192 rtx pat;
12193 tree arg0 = TREE_VALUE (arglist);
12194 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12195 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12196 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12198 if (! target
12199 || GET_MODE (target) != tmode
12200 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12201 target = gen_reg_rtx (tmode);
12202 if (do_load)
12203 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12204 else
12206 if (VECTOR_MODE_P (mode0))
12207 op0 = safe_vector_operand (op0, mode0);
12209 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12210 op0 = copy_to_mode_reg (mode0, op0);
12213 pat = GEN_FCN (icode) (target, op0);
12214 if (! pat)
12215 return 0;
12216 emit_insn (pat);
12217 return target;
12220 /* Expand an expression EXP that calls a built-in function,
12221 with result going to TARGET if that's convenient
12222 (and in mode MODE if that's convenient).
12223 SUBTARGET may be used as the target for computing one of EXP's operands.
12224 IGNORE is nonzero if the value is to be ignored. */
12226 static rtx
12227 arm_expand_builtin (tree exp,
12228 rtx target,
12229 rtx subtarget ATTRIBUTE_UNUSED,
12230 enum machine_mode mode ATTRIBUTE_UNUSED,
12231 int ignore ATTRIBUTE_UNUSED)
12233 const struct builtin_description * d;
12234 enum insn_code icode;
12235 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12236 tree arglist = TREE_OPERAND (exp, 1);
12237 tree arg0;
12238 tree arg1;
12239 tree arg2;
12240 rtx op0;
12241 rtx op1;
12242 rtx op2;
12243 rtx pat;
12244 int fcode = DECL_FUNCTION_CODE (fndecl);
12245 size_t i;
12246 enum machine_mode tmode;
12247 enum machine_mode mode0;
12248 enum machine_mode mode1;
12249 enum machine_mode mode2;
12251 switch (fcode)
12253 case ARM_BUILTIN_TEXTRMSB:
12254 case ARM_BUILTIN_TEXTRMUB:
12255 case ARM_BUILTIN_TEXTRMSH:
12256 case ARM_BUILTIN_TEXTRMUH:
12257 case ARM_BUILTIN_TEXTRMSW:
12258 case ARM_BUILTIN_TEXTRMUW:
12259 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12260 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12261 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12262 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12263 : CODE_FOR_iwmmxt_textrmw);
12265 arg0 = TREE_VALUE (arglist);
12266 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12267 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12268 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12269 tmode = insn_data[icode].operand[0].mode;
12270 mode0 = insn_data[icode].operand[1].mode;
12271 mode1 = insn_data[icode].operand[2].mode;
12273 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12274 op0 = copy_to_mode_reg (mode0, op0);
12275 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12277 /* @@@ better error message */
12278 error ("selector must be an immediate");
12279 return gen_reg_rtx (tmode);
12281 if (target == 0
12282 || GET_MODE (target) != tmode
12283 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12284 target = gen_reg_rtx (tmode);
12285 pat = GEN_FCN (icode) (target, op0, op1);
12286 if (! pat)
12287 return 0;
12288 emit_insn (pat);
12289 return target;
12291 case ARM_BUILTIN_TINSRB:
12292 case ARM_BUILTIN_TINSRH:
12293 case ARM_BUILTIN_TINSRW:
12294 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12295 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12296 : CODE_FOR_iwmmxt_tinsrw);
12297 arg0 = TREE_VALUE (arglist);
12298 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12299 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12300 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12301 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12302 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12303 tmode = insn_data[icode].operand[0].mode;
12304 mode0 = insn_data[icode].operand[1].mode;
12305 mode1 = insn_data[icode].operand[2].mode;
12306 mode2 = insn_data[icode].operand[3].mode;
12308 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12309 op0 = copy_to_mode_reg (mode0, op0);
12310 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12311 op1 = copy_to_mode_reg (mode1, op1);
12312 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12314 /* @@@ better error message */
12315 error ("selector must be an immediate");
12316 return const0_rtx;
12318 if (target == 0
12319 || GET_MODE (target) != tmode
12320 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12321 target = gen_reg_rtx (tmode);
12322 pat = GEN_FCN (icode) (target, op0, op1, op2);
12323 if (! pat)
12324 return 0;
12325 emit_insn (pat);
12326 return target;
12328 case ARM_BUILTIN_SETWCX:
12329 arg0 = TREE_VALUE (arglist);
12330 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12331 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12332 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12333 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12334 return 0;
12336 case ARM_BUILTIN_GETWCX:
12337 arg0 = TREE_VALUE (arglist);
12338 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12339 target = gen_reg_rtx (SImode);
12340 emit_insn (gen_iwmmxt_tmrc (target, op0));
12341 return target;
12343 case ARM_BUILTIN_WSHUFH:
12344 icode = CODE_FOR_iwmmxt_wshufh;
12345 arg0 = TREE_VALUE (arglist);
12346 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12347 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12348 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12349 tmode = insn_data[icode].operand[0].mode;
12350 mode1 = insn_data[icode].operand[1].mode;
12351 mode2 = insn_data[icode].operand[2].mode;
12353 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12354 op0 = copy_to_mode_reg (mode1, op0);
12355 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12357 /* @@@ better error message */
12358 error ("mask must be an immediate");
12359 return const0_rtx;
12361 if (target == 0
12362 || GET_MODE (target) != tmode
12363 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12364 target = gen_reg_rtx (tmode);
12365 pat = GEN_FCN (icode) (target, op0, op1);
12366 if (! pat)
12367 return 0;
12368 emit_insn (pat);
12369 return target;
12371 case ARM_BUILTIN_WSADB:
12372 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12373 case ARM_BUILTIN_WSADH:
12374 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12375 case ARM_BUILTIN_WSADBZ:
12376 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12377 case ARM_BUILTIN_WSADHZ:
12378 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12380 /* Several three-argument builtins. */
12381 case ARM_BUILTIN_WMACS:
12382 case ARM_BUILTIN_WMACU:
12383 case ARM_BUILTIN_WALIGN:
12384 case ARM_BUILTIN_TMIA:
12385 case ARM_BUILTIN_TMIAPH:
12386 case ARM_BUILTIN_TMIATT:
12387 case ARM_BUILTIN_TMIATB:
12388 case ARM_BUILTIN_TMIABT:
12389 case ARM_BUILTIN_TMIABB:
12390 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12391 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12392 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12393 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12394 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12395 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12396 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12397 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12398 : CODE_FOR_iwmmxt_walign);
12399 arg0 = TREE_VALUE (arglist);
12400 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12401 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12402 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12403 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12404 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12405 tmode = insn_data[icode].operand[0].mode;
12406 mode0 = insn_data[icode].operand[1].mode;
12407 mode1 = insn_data[icode].operand[2].mode;
12408 mode2 = insn_data[icode].operand[3].mode;
12410 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12411 op0 = copy_to_mode_reg (mode0, op0);
12412 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12413 op1 = copy_to_mode_reg (mode1, op1);
12414 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12415 op2 = copy_to_mode_reg (mode2, op2);
12416 if (target == 0
12417 || GET_MODE (target) != tmode
12418 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12419 target = gen_reg_rtx (tmode);
12420 pat = GEN_FCN (icode) (target, op0, op1, op2);
12421 if (! pat)
12422 return 0;
12423 emit_insn (pat);
12424 return target;
12426 case ARM_BUILTIN_WZERO:
12427 target = gen_reg_rtx (DImode);
12428 emit_insn (gen_iwmmxt_clrdi (target));
12429 return target;
12431 default:
12432 break;
12435 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12436 if (d->code == (const enum arm_builtins) fcode)
12437 return arm_expand_binop_builtin (d->icode, arglist, target);
12439 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12440 if (d->code == (const enum arm_builtins) fcode)
12441 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12443 /* @@@ Should really do something sensible here. */
12444 return NULL_RTX;
12447 /* Return the number (counting from 0) of
12448 the least significant set bit in MASK. */
12450 inline static int
12451 number_of_first_bit_set (unsigned mask)
12453 int bit;
12455 for (bit = 0;
12456 (mask & (1 << bit)) == 0;
12457 ++bit)
12458 continue;
12460 return bit;
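/* Equivalent, as a sketch, to GCC's __builtin_ctz, which likewise has
   an undefined result when no bit is set (the loop above would simply
   walk off the top of the mask):  */
#if 0
static int
number_of_first_bit_set_alt (unsigned mask)
{
  return __builtin_ctz (mask);	/* count of trailing zero bits */
}
#endif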
12463 /* Emit code to push or pop registers to or from the stack. F is the
12464 assembly file. MASK is the registers to push or pop. PUSH is
12465 nonzero if we should push, and zero if we should pop. For debugging
12466 output, if pushing, adjust CFA_OFFSET by the amount of space added
12467 to the stack. REAL_REGS should have the same number of bits set as
12468 MASK, and will be used instead (in the same order) to describe which
12469 registers were saved - this is used to mark the save slots when we
12470 push high registers after moving them to low registers. */
12471 static void
12472 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12473 unsigned long real_regs)
12475 int regno;
12476 int lo_mask = mask & 0xFF;
12477 int pushed_words = 0;
12479 gcc_assert (mask);
12481 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12483 /* Special case.  Do not generate a POP PC statement here; do it in
12484 thumb_exit ().  */
12485 thumb_exit (f, -1);
12486 return;
12489 if (ARM_EABI_UNWIND_TABLES && push)
12491 fprintf (f, "\t.save\t{");
12492 for (regno = 0; regno < 15; regno++)
12494 if (real_regs & (1 << regno))
12496 if (real_regs & ((1 << regno) -1))
12497 fprintf (f, ", ");
12498 asm_fprintf (f, "%r", regno);
12501 fprintf (f, "}\n");
12504 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12506 /* Look at the low registers first. */
12507 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12509 if (lo_mask & 1)
12511 asm_fprintf (f, "%r", regno);
12513 if ((lo_mask & ~1) != 0)
12514 fprintf (f, ", ");
12516 pushed_words++;
12520 if (push && (mask & (1 << LR_REGNUM)))
12522 /* Catch pushing the LR. */
12523 if (mask & 0xFF)
12524 fprintf (f, ", ");
12526 asm_fprintf (f, "%r", LR_REGNUM);
12528 pushed_words++;
12530 else if (!push && (mask & (1 << PC_REGNUM)))
12532 /* Catch popping the PC. */
12533 if (TARGET_INTERWORK || TARGET_BACKTRACE
12534 || current_function_calls_eh_return)
12536 /* The PC is never popped directly; instead
12537 it is popped into r3 and then BX is used. */
12538 fprintf (f, "}\n");
12540 thumb_exit (f, -1);
12542 return;
12544 else
12546 if (mask & 0xFF)
12547 fprintf (f, ", ");
12549 asm_fprintf (f, "%r", PC_REGNUM);
12553 fprintf (f, "}\n");
12555 if (push && pushed_words && dwarf2out_do_frame ())
12557 char *l = dwarf2out_cfi_label ();
12558 int pushed_mask = real_regs;
12560 *cfa_offset += pushed_words * 4;
12561 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12563 pushed_words = 0;
12564 pushed_mask = real_regs;
12565 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12567 if (pushed_mask & 1)
12568 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
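/* For example (illustrative), thumb_pushpop (f, 0x40f0, 1, &off, 0x40f0)
   pushes r4-r7 and LR and emits:

	.save	{r4, r5, r6, r7, lr}	@ only if ARM_EABI_UNWIND_TABLES
	push	{r4, r5, r6, r7, lr}

   advancing the CFA offset by 5 * 4 bytes and recording a save slot for
   each register in REAL_REGS.  */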
12573 /* Generate code to return from a thumb function.
12574 If 'reg_containing_return_addr' is -1, then the return address is
12575 actually on the stack, at the stack pointer. */
12576 static void
12577 thumb_exit (FILE *f, int reg_containing_return_addr)
12579 unsigned regs_available_for_popping;
12580 unsigned regs_to_pop;
12581 int pops_needed;
12582 unsigned available;
12583 unsigned required;
12584 int mode;
12585 int size;
12586 int restore_a4 = FALSE;
12588 /* Compute the registers we need to pop. */
12589 regs_to_pop = 0;
12590 pops_needed = 0;
12592 if (reg_containing_return_addr == -1)
12594 regs_to_pop |= 1 << LR_REGNUM;
12595 ++pops_needed;
12598 if (TARGET_BACKTRACE)
12600 /* Restore the (ARM) frame pointer and stack pointer. */
12601 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12602 pops_needed += 2;
12605 /* If there is nothing to pop then just emit the BX instruction and
12606 return. */
12607 if (pops_needed == 0)
12609 if (current_function_calls_eh_return)
12610 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12612 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12613 return;
12615 /* Otherwise, if we are not supporting interworking, have not created
12616 a backtrace structure, and the function was not entered in ARM mode,
12617 just pop the return address straight into the PC. */
12618 else if (!TARGET_INTERWORK
12619 && !TARGET_BACKTRACE
12620 && !is_called_in_ARM_mode (current_function_decl)
12621 && !current_function_calls_eh_return)
12623 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12624 return;
12627 /* Find out how many of the (return) argument registers we can corrupt. */
12628 regs_available_for_popping = 0;
12630 /* If returning via __builtin_eh_return, the bottom three registers
12631 all contain information needed for the return. */
12632 if (current_function_calls_eh_return)
12633 size = 12;
12634 else
12636 /* Deduce the registers used from the function's return value.
12637 This is more reliable than examining regs_ever_live[]
12638 because that will be set if the register is ever used in the
12639 function, not just if the register is used to hold a return
12640 value. */
12642 if (current_function_return_rtx != 0)
12643 mode = GET_MODE (current_function_return_rtx);
12644 else
12645 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12647 size = GET_MODE_SIZE (mode);
12649 if (size == 0)
12651 /* In a void function we can use any argument register.
12652 In a function that returns a structure on the stack
12653 we can use the second and third argument registers. */
12654 if (mode == VOIDmode)
12655 regs_available_for_popping =
12656 (1 << ARG_REGISTER (1))
12657 | (1 << ARG_REGISTER (2))
12658 | (1 << ARG_REGISTER (3));
12659 else
12660 regs_available_for_popping =
12661 (1 << ARG_REGISTER (2))
12662 | (1 << ARG_REGISTER (3));
12664 else if (size <= 4)
12665 regs_available_for_popping =
12666 (1 << ARG_REGISTER (2))
12667 | (1 << ARG_REGISTER (3));
12668 else if (size <= 8)
12669 regs_available_for_popping =
12670 (1 << ARG_REGISTER (3));
12673 /* Match registers to be popped with registers into which we pop them. */
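/* In the loop below, X & -X isolates the lowest set bit of X, so each
   iteration retires one register from each mask; POPS_NEEDED is left
   holding the number of registers that remain unmatched.  */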
12674 for (available = regs_available_for_popping,
12675 required = regs_to_pop;
12676 required != 0 && available != 0;
12677 available &= ~(available & - available),
12678 required &= ~(required & - required))
12679 -- pops_needed;
12681 /* If we have any popping registers left over, remove them. */
12682 if (available > 0)
12683 regs_available_for_popping &= ~available;
12685 /* Otherwise if we need another popping register we can use
12686 the fourth argument register. */
12687 else if (pops_needed)
12689 /* If we have not found any free argument registers and
12690 reg a4 contains the return address, we must move it. */
12691 if (regs_available_for_popping == 0
12692 && reg_containing_return_addr == LAST_ARG_REGNUM)
12694 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12695 reg_containing_return_addr = LR_REGNUM;
12697 else if (size > 12)
12699 /* Register a4 is being used to hold part of the return value,
12700 but we have dire need of a free, low register. */
12701 restore_a4 = TRUE;
12703 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12706 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12708 /* The fourth argument register is available. */
12709 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12711 --pops_needed;
12715 /* Pop as many registers as we can. */
12716 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12717 regs_available_for_popping);
12719 /* Process the registers we popped. */
12720 if (reg_containing_return_addr == -1)
12722 /* The return address was popped into the lowest numbered register. */
12723 regs_to_pop &= ~(1 << LR_REGNUM);
12725 reg_containing_return_addr =
12726 number_of_first_bit_set (regs_available_for_popping);
12728 /* Remove this register from the mask of available registers, so that
12729 the return address will not be corrupted by further pops. */
12730 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12733 /* If we popped other registers then handle them here. */
12734 if (regs_available_for_popping)
12736 int frame_pointer;
12738 /* Work out which register currently contains the frame pointer. */
12739 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12741 /* Move it into the correct place. */
12742 asm_fprintf (f, "\tmov\t%r, %r\n",
12743 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12745 /* (Temporarily) remove it from the mask of popped registers. */
12746 regs_available_for_popping &= ~(1 << frame_pointer);
12747 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12749 if (regs_available_for_popping)
12751 int stack_pointer;
12753 /* We popped the stack pointer as well;
12754 find the register that contains it. */
12755 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12757 /* Move it into the stack register. */
12758 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12760 /* At this point we have popped all necessary registers, so
12761 do not worry about restoring regs_available_for_popping
12762 to its correct value:
12764 assert (pops_needed == 0)
12765 assert (regs_available_for_popping == (1 << frame_pointer))
12766 assert (regs_to_pop == (1 << STACK_POINTER)) */
12768 else
12770 /* Since we have just moved the popped value into the frame
12771 pointer, the popping register is available for reuse, and
12772 we know that we still have the stack pointer left to pop. */
12773 regs_available_for_popping |= (1 << frame_pointer);
12777 /* If we still have registers left on the stack, but we no longer have
12778 any registers into which we can pop them, then we must move the return
12779 address into the link register and make available the register that
12780 contained it. */
12781 if (regs_available_for_popping == 0 && pops_needed > 0)
12783 regs_available_for_popping |= 1 << reg_containing_return_addr;
12785 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12786 reg_containing_return_addr);
12788 reg_containing_return_addr = LR_REGNUM;
12791 /* If we have registers left on the stack then pop some more.
12792 We know that at most we will want to pop FP and SP. */
12793 if (pops_needed > 0)
12795 int popped_into;
12796 int move_to;
12798 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12799 regs_available_for_popping);
12801 /* We have popped either FP or SP.
12802 Move whichever one it is into the correct register. */
12803 popped_into = number_of_first_bit_set (regs_available_for_popping);
12804 move_to = number_of_first_bit_set (regs_to_pop);
12806 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12808 regs_to_pop &= ~(1 << move_to);
12810 --pops_needed;
12813 /* If we still have not popped everything then we must have only
12814 had one register available to us and we are now popping the SP. */
12815 if (pops_needed > 0)
12817 int popped_into;
12819 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12820 regs_available_for_popping);
12822 popped_into = number_of_first_bit_set (regs_available_for_popping);
12824 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12826 /* assert (regs_to_pop == (1 << STACK_POINTER))
12827 assert (pops_needed == 1) */
12831 /* If necessary restore the a4 register. */
12832 if (restore_a4)
12834 if (reg_containing_return_addr != LR_REGNUM)
12836 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12837 reg_containing_return_addr = LR_REGNUM;
12840 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12843 if (current_function_calls_eh_return)
12844 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12846 /* Return to caller. */
12847 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
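/* As a concrete sketch: for a void function compiled for interworking,
   entered here with the return address still on the stack, the logic
   above matches LR against the free argument registers and emits just:

	pop	{r0}
	bx	r0

   r0 being the lowest argument register available for popping.  */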
12851 void
12852 thumb_final_prescan_insn (rtx insn)
12854 if (flag_print_asm_name)
12855 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12856 INSN_ADDRESSES (INSN_UID (insn)));
12859 int
12860 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12862 unsigned HOST_WIDE_INT mask = 0xff;
12863 int i;
12865 if (val == 0) /* XXX */
12866 return 0;
12868 for (i = 0; i < 25; i++)
12869 if ((val & (mask << i)) == val)
12870 return 1;
12872 return 0;
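/* For example (illustrative): 0xff00 succeeds (0xff << 8), 0x0101 fails
   because its set bits span more than eight positions, and 0 is
   rejected explicitly above.  */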
12875 /* Returns nonzero if the current function contains,
12876 or might contain, a far jump. */
12877 static int
12878 thumb_far_jump_used_p (void)
12880 rtx insn;
12882 /* This test is only important for leaf functions. */
12883 /* assert (!leaf_function_p ()); */
12885 /* If we have already decided that far jumps may be used,
12886 do not bother checking again, and always return true even if
12887 it turns out that they are not being used. Once we have made
12888 the decision that far jumps are present (and that hence the link
12889 register will be pushed onto the stack) we cannot go back on it. */
12890 if (cfun->machine->far_jump_used)
12891 return 1;
12893 /* If this function is not being called from the prologue/epilogue
12894 generation code then it must be being called from the
12895 INITIAL_ELIMINATION_OFFSET macro. */
12896 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12898 /* In this case we know that we are being asked about the elimination
12899 of the arg pointer register. If that register is not being used,
12900 then there are no arguments on the stack, and we do not have to
12901 worry that a far jump might force the prologue to push the link
12902 register, changing the stack offsets. In this case we can just
12903 return false, since the presence of far jumps in the function will
12904 not affect stack offsets.
12906 If the arg pointer is live (or if it was live, but has now been
12907 eliminated and so set to dead) then we do have to test to see if
12908 the function might contain a far jump. This test can lead to some
12909 false negatives, since before reload is completed the length of
12910 branch instructions is not known, so gcc defaults to returning their
12911 longest length, which in turn sets the far jump attribute to true.
12913 A false negative will not result in bad code being generated, but it
12914 will result in a needless push and pop of the link register. We
12915 hope that this does not occur too often.
12917 If we need doubleword stack alignment this could affect the other
12918 elimination offsets so we can't risk getting it wrong. */
12919 if (regs_ever_live [ARG_POINTER_REGNUM])
12920 cfun->machine->arg_pointer_live = 1;
12921 else if (!cfun->machine->arg_pointer_live)
12922 return 0;
12925 /* Check to see if the function contains a branch
12926 insn with the far jump attribute set. */
12927 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12929 if (GET_CODE (insn) == JUMP_INSN
12930 /* Ignore tablejump patterns. */
12931 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12932 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12933 && get_attr_far_jump (insn) == FAR_JUMP_YES
12936 /* Record the fact that we have decided that
12937 the function does use far jumps. */
12938 cfun->machine->far_jump_used = 1;
12939 return 1;
12943 return 0;
12946 /* Return nonzero if FUNC must be entered in ARM mode. */
12947 int
12948 is_called_in_ARM_mode (tree func)
12950 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
12952 /* Ignore the problem about functions whose address is taken. */
12953 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12954 return TRUE;
12956 #ifdef ARM_PE
12957 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12958 #else
12959 return FALSE;
12960 #endif
12963 /* The bits which aren't usefully expanded as rtl. */
12964 const char *
12965 thumb_unexpanded_epilogue (void)
12967 int regno;
12968 unsigned long live_regs_mask = 0;
12969 int high_regs_pushed = 0;
12970 int had_to_push_lr;
12971 int size;
12973 if (return_used_this_function)
12974 return "";
12976 if (IS_NAKED (arm_current_func_type ()))
12977 return "";
12979 live_regs_mask = thumb_compute_save_reg_mask ();
12980 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12982 /* Deduce the registers used from the function's return value.
12983 This is more reliable than examining regs_ever_live[] because that
12984 will be set if the register is ever used in the function, not just if
12985 the register is used to hold a return value. */
12986 size = arm_size_return_regs ();
12988 /* The prolog may have pushed some high registers to use as
12989 work registers, e.g. the testsuite file:
12990 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12991 compiles to produce:
12992 push {r4, r5, r6, r7, lr}
12993 mov r7, r9
12994 mov r6, r8
12995 push {r6, r7}
12996 as part of the prolog. We have to undo that pushing here. */
12998 if (high_regs_pushed)
13000 unsigned long mask = live_regs_mask & 0xff;
13001 int next_hi_reg;
13003 /* The available low registers depend on the size of the value we are
13004 returning. */
13005 if (size <= 12)
13006 mask |= 1 << 3;
13007 if (size <= 8)
13008 mask |= 1 << 2;
13010 if (mask == 0)
13011 /* Oh dear! We have no low registers into which we can pop
13012 high registers! */
13013 internal_error
13014 ("no low registers available for popping high registers");
13016 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13017 if (live_regs_mask & (1 << next_hi_reg))
13018 break;
13020 while (high_regs_pushed)
13022 /* Find lo register(s) into which the high register(s) can
13023 be popped. */
13024 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13026 if (mask & (1 << regno))
13027 high_regs_pushed--;
13028 if (high_regs_pushed == 0)
13029 break;
13032 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13034 /* Pop the values into the low register(s). */
13035 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13037 /* Move the value(s) into the high registers. */
13038 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13040 if (mask & (1 << regno))
13042 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13043 regno);
13045 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13046 if (live_regs_mask & (1 << next_hi_reg))
13047 break;
13051 live_regs_mask &= ~0x0f00;
13054 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13055 live_regs_mask &= 0xff;
13057 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13059 /* Pop the return address into the PC. */
13060 if (had_to_push_lr)
13061 live_regs_mask |= 1 << PC_REGNUM;
13063 /* Either no argument registers were pushed or a backtrace
13064 structure was created which includes an adjusted stack
13065 pointer, so just pop everything. */
13066 if (live_regs_mask)
13067 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13068 live_regs_mask);
13070 /* We have either just popped the return address into the
13071 PC or it was kept in LR for the entire function. */
13072 if (!had_to_push_lr)
13073 thumb_exit (asm_out_file, LR_REGNUM);
13075 else
13077 /* Pop everything but the return address. */
13078 if (live_regs_mask)
13079 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13080 live_regs_mask);
13082 if (had_to_push_lr)
13084 if (size > 12)
13086 /* We have no free low regs, so save one. */
13087 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13088 LAST_ARG_REGNUM);
13091 /* Get the return address into a temporary register. */
13092 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13093 1 << LAST_ARG_REGNUM);
13095 if (size > 12)
13097 /* Move the return address to lr. */
13098 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13099 LAST_ARG_REGNUM);
13100 /* Restore the low register. */
13101 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13102 IP_REGNUM);
13103 regno = LR_REGNUM;
13105 else
13106 regno = LAST_ARG_REGNUM;
13108 else
13109 regno = LR_REGNUM;
13111 /* Remove the argument registers that were pushed onto the stack. */
13112 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13113 SP_REGNUM, SP_REGNUM,
13114 current_function_pretend_args_size);
13116 thumb_exit (asm_out_file, regno);
13119 return "";
13122 /* Functions to save and restore machine-specific function data. */
13123 static struct machine_function *
13124 arm_init_machine_status (void)
13126 struct machine_function *machine;
13127 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13129 #if ARM_FT_UNKNOWN != 0
13130 machine->func_type = ARM_FT_UNKNOWN;
13131 #endif
13132 return machine;
13135 /* Return an RTX indicating where the return address to the
13136 calling function can be found. */
13137 rtx
13138 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13140 if (count != 0)
13141 return NULL_RTX;
13143 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13146 /* Do anything needed before RTL is emitted for each function. */
13147 void
13148 arm_init_expanders (void)
13150 /* Arrange to initialize and mark the machine per-function status. */
13151 init_machine_status = arm_init_machine_status;
13153 /* This is to stop the combine pass optimizing away the alignment
13154 adjustment of va_arg. */
13155 /* ??? It is claimed that this should not be necessary. */
13156 if (cfun)
13157 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13161 /* Like arm_compute_initial_elimination_offset.  Simpler because
13162 THUMB_HARD_FRAME_POINTER isn't actually the ABI-specified frame pointer. */
13164 HOST_WIDE_INT
13165 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13167 arm_stack_offsets *offsets;
13169 offsets = arm_get_frame_offsets ();
13171 switch (from)
13173 case ARG_POINTER_REGNUM:
13174 switch (to)
13176 case STACK_POINTER_REGNUM:
13177 return offsets->outgoing_args - offsets->saved_args;
13179 case FRAME_POINTER_REGNUM:
13180 return offsets->soft_frame - offsets->saved_args;
13182 case THUMB_HARD_FRAME_POINTER_REGNUM:
13183 case ARM_HARD_FRAME_POINTER_REGNUM:
13184 return offsets->saved_regs - offsets->saved_args;
13186 default:
13187 gcc_unreachable ();
13189 break;
13191 case FRAME_POINTER_REGNUM:
13192 switch (to)
13194 case STACK_POINTER_REGNUM:
13195 return offsets->outgoing_args - offsets->soft_frame;
13197 case THUMB_HARD_FRAME_POINTER_REGNUM:
13198 case ARM_HARD_FRAME_POINTER_REGNUM:
13199 return offsets->saved_regs - offsets->soft_frame;
13201 default:
13202 gcc_unreachable ();
13204 break;
13206 default:
13207 gcc_unreachable ();
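/* Worked example (the numbers are illustrative, not fixed by the ABI):
   if arm_get_frame_offsets yields saved_args = 0, saved_regs = 16,
   soft_frame = 24 and outgoing_args = 40, then eliminating
   ARG_POINTER_REGNUM to STACK_POINTER_REGNUM returns 40 - 0 = 40,
   and FRAME_POINTER_REGNUM to STACK_POINTER_REGNUM returns
   40 - 24 = 16.  */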
13212 /* Generate the rest of a function's prologue. */
13213 void
13214 thumb_expand_prologue (void)
13216 rtx insn, dwarf;
13218 HOST_WIDE_INT amount;
13219 arm_stack_offsets *offsets;
13220 unsigned long func_type;
13221 int regno;
13222 unsigned long live_regs_mask;
13224 func_type = arm_current_func_type ();
13226 /* Naked functions don't have prologues. */
13227 if (IS_NAKED (func_type))
13228 return;
13230 if (IS_INTERRUPT (func_type))
13232 error ("interrupt Service Routines cannot be coded in Thumb mode");
13233 return;
13236 live_regs_mask = thumb_compute_save_reg_mask ();
13237 /* Load the pic register before setting the frame pointer,
13238 so we can use r7 as a temporary work register. */
13239 if (flag_pic)
13240 arm_load_pic_register (live_regs_mask);
13242 offsets = arm_get_frame_offsets ();
13244 if (frame_pointer_needed)
13246 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13247 stack_pointer_rtx));
13248 RTX_FRAME_RELATED_P (insn) = 1;
13250 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13251 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13252 stack_pointer_rtx);
13254 amount = offsets->outgoing_args - offsets->saved_regs;
13255 if (amount)
13257 if (amount < 512)
13259 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13260 GEN_INT (- amount)));
13261 RTX_FRAME_RELATED_P (insn) = 1;
13263 else
13265 rtx reg;
13267 /* The stack decrement is too big for an immediate value in a single
13268 insn. In theory we could issue multiple subtracts, but after
13269 three of them it becomes more space efficient to place the full
13270 value in the constant pool and load into a register. (Also the
13271 ARM debugger really likes to see only one stack decrement per
13272 function). So instead we look for a scratch register into which
13273 we can load the decrement, and then we subtract this from the
13274 stack pointer. Unfortunately on the thumb the only available
13275 scratch registers are the argument registers, and we cannot use
13276 these as they may hold arguments to the function. Instead we
13277 attempt to locate a call preserved register which is used by this
13278 function. If we can find one, then we know that it will have
13279 been pushed at the start of the prologue and so we can corrupt
13280 it now. */
13281 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13282 if (live_regs_mask & (1 << regno)
13283 && !(frame_pointer_needed
13284 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13285 break;
13287 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13289 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13291 /* Choose an arbitrary, non-argument low register. */
13292 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13294 /* Save it by copying it into a high, scratch register. */
13295 emit_insn (gen_movsi (spare, reg));
13296 /* Add a USE to stop propagate_one_insn() from barfing. */
13297 emit_insn (gen_prologue_use (spare));
13299 /* Decrement the stack. */
13300 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13301 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13302 stack_pointer_rtx, reg));
13303 RTX_FRAME_RELATED_P (insn) = 1;
13304 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13305 plus_constant (stack_pointer_rtx,
13306 -amount));
13307 RTX_FRAME_RELATED_P (dwarf) = 1;
13308 REG_NOTES (insn)
13309 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13310 REG_NOTES (insn));
13312 /* Restore the low register's original value. */
13313 emit_insn (gen_movsi (reg, spare));
13315 /* Emit a USE of the restored scratch register, so that flow
13316 analysis will not consider the restore redundant. The
13317 register won't be used again in this function and isn't
13318 restored by the epilogue. */
13319 emit_insn (gen_prologue_use (reg));
13321 else
13323 reg = gen_rtx_REG (SImode, regno);
13325 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13327 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13328 stack_pointer_rtx, reg));
13329 RTX_FRAME_RELATED_P (insn) = 1;
13330 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13331 plus_constant (stack_pointer_rtx,
13332 -amount));
13333 RTX_FRAME_RELATED_P (dwarf) = 1;
13334 REG_NOTES (insn)
13335 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13336 REG_NOTES (insn));
13339 /* If the frame pointer is needed, emit a special barrier that
13340 will prevent the scheduler from moving stores to the frame
13341 before the stack adjustment. */
13342 if (frame_pointer_needed)
13343 emit_insn (gen_stack_tie (stack_pointer_rtx,
13344 hard_frame_pointer_rtx));
13347 if (current_function_profile || !TARGET_SCHED_PROLOG)
13348 emit_insn (gen_blockage ());
13350 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13351 if (live_regs_mask & 0xff)
13352 cfun->machine->lr_save_eliminated = 0;
13354 /* If the link register is being kept alive, with the return address in it,
13355 then make sure that it does not get reused by the ce2 pass. */
13356 if (cfun->machine->lr_save_eliminated)
13357 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
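/* For reference, the large-decrement path above ends up emitting a
   sequence of this shape (a sketch; r4 stands for whichever pushed
   call-preserved low register the search finds, and the constant is
   loaded from the literal pool):

	ldr	r4, .Ln		@ .Ln: .word -amount
	add	sp, sp, r4  */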
13361 void
13362 thumb_expand_epilogue (void)
13364 HOST_WIDE_INT amount;
13365 arm_stack_offsets *offsets;
13366 int regno;
13368 /* Naked functions don't have epilogues. */
13369 if (IS_NAKED (arm_current_func_type ()))
13370 return;
13372 offsets = arm_get_frame_offsets ();
13373 amount = offsets->outgoing_args - offsets->saved_regs;
13375 if (frame_pointer_needed)
13376 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13377 else if (amount)
13379 if (amount < 512)
13380 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13381 GEN_INT (amount)));
13382 else
13384 /* r3 is always free in the epilogue. */
13385 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13387 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13388 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13392 /* Emit a USE (stack_pointer_rtx), so that
13393 the stack adjustment will not be deleted. */
13394 emit_insn (gen_prologue_use (stack_pointer_rtx));
13396 if (current_function_profile || !TARGET_SCHED_PROLOG)
13397 emit_insn (gen_blockage ());
13399 /* Emit a clobber for each insn that will be restored in the epilogue,
13400 so that flow2 will get register lifetimes correct. */
13401 for (regno = 0; regno < 13; regno++)
13402 if (regs_ever_live[regno] && !call_used_regs[regno])
13403 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13405 if (! regs_ever_live[LR_REGNUM])
13406 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13409 static void
13410 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13412 unsigned long live_regs_mask = 0;
13413 unsigned long l_mask;
13414 unsigned high_regs_pushed = 0;
13415 int cfa_offset = 0;
13416 int regno;
13418 if (IS_NAKED (arm_current_func_type ()))
13419 return;
13421 if (is_called_in_ARM_mode (current_function_decl))
13423 const char * name;
13425 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13426 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13427 == SYMBOL_REF);
13428 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13430 /* Generate code sequence to switch us into Thumb mode. */
13431 /* The .code 32 directive has already been emitted by
13432 ASM_DECLARE_FUNCTION_NAME. */
13433 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13434 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13436 /* Generate a label, so that the debugger will notice the
13437 change in instruction sets. This label is also used by
13438 the assembler to bypass the ARM code when this function
13439 is called from a Thumb encoded function elsewhere in the
13440 same file. Hence the definition of STUB_NAME here must
13441 agree with the definition in gas/config/tc-arm.c. */
13443 #define STUB_NAME ".real_start_of"
13445 fprintf (f, "\t.code\t16\n");
13446 #ifdef ARM_PE
13447 if (arm_dllexport_name_p (name))
13448 name = arm_strip_name_encoding (name);
13449 #endif
13450 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13451 fprintf (f, "\t.thumb_func\n");
13452 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13455 if (current_function_pretend_args_size)
13457 /* Output unwind directive for the stack adjustment. */
13458 if (ARM_EABI_UNWIND_TABLES)
13459 fprintf (f, "\t.pad #%d\n",
13460 current_function_pretend_args_size);
13462 if (cfun->machine->uses_anonymous_args)
13464 int num_pushes;
13466 fprintf (f, "\tpush\t{");
13468 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13470 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13471 regno <= LAST_ARG_REGNUM;
13472 regno++)
13473 asm_fprintf (f, "%r%s", regno,
13474 regno == LAST_ARG_REGNUM ? "" : ", ");
13476 fprintf (f, "}\n");
13478 else
13479 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13480 SP_REGNUM, SP_REGNUM,
13481 current_function_pretend_args_size);
13483 /* We don't need to record the stores for unwinding (would it
13484 help the debugger any if we did?), but record the change in
13485 the stack pointer. */
13486 if (dwarf2out_do_frame ())
13488 char *l = dwarf2out_cfi_label ();
13490 cfa_offset = cfa_offset + current_function_pretend_args_size;
13491 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13495 /* Get the registers we are going to push. */
13496 live_regs_mask = thumb_compute_save_reg_mask ();
13497 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13498 l_mask = live_regs_mask & 0x40ff;
13499 /* Then count how many other high registers will need to be pushed. */
13500 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13502 if (TARGET_BACKTRACE)
13504 unsigned offset;
13505 unsigned work_register;
13507 /* We have been asked to create a stack backtrace structure.
13508 The code looks like this:
13510 0 .align 2
13511 0 func:
13512 0 sub SP, #16 Reserve space for 4 registers.
13513 2 push {R7} Push low registers.
13514 4 add R7, SP, #20 Get the stack pointer before the push.
13515 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13516 8 mov R7, PC Get hold of the start of this code plus 12.
13517 10 str R7, [SP, #16] Store it.
13518 12 mov R7, FP Get hold of the current frame pointer.
13519 14 str R7, [SP, #4] Store it.
13520 16 mov R7, LR Get hold of the current return address.
13521 18 str R7, [SP, #12] Store it.
13522 20 add R7, SP, #16 Point at the start of the backtrace structure.
13523 22 mov FP, R7 Put this value into the frame pointer. */
13525 work_register = thumb_find_work_register (live_regs_mask);
13527 if (ARM_EABI_UNWIND_TABLES)
13528 asm_fprintf (f, "\t.pad #16\n");
13530 asm_fprintf
13531 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13532 SP_REGNUM, SP_REGNUM);
13534 if (dwarf2out_do_frame ())
13536 char *l = dwarf2out_cfi_label ();
13538 cfa_offset = cfa_offset + 16;
13539 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13542 if (l_mask)
13544 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13545 offset = bit_count (l_mask) * UNITS_PER_WORD;
13547 else
13548 offset = 0;
13550 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13551 offset + 16 + current_function_pretend_args_size);
13553 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13554 offset + 4);
13556 /* Make sure that the instruction fetching the PC is in the right place
13557 to calculate "start of backtrace creation code + 12". */
13558 if (l_mask)
13560 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13561 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13562 offset + 12);
13563 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13564 ARM_HARD_FRAME_POINTER_REGNUM);
13565 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13566 offset);
13568 else
13570 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13571 ARM_HARD_FRAME_POINTER_REGNUM);
13572 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13573 offset);
13574 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13575 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13576 offset + 12);
13579 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13580 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13581 offset + 8);
13582 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13583 offset + 12);
13584 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13585 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13587 /* Optimization: If we are not pushing any low registers but we are going
13588 to push some high registers then delay our first push. This will just
13589 be a push of LR and we can combine it with the push of the first high
13590 register. */
13591 else if ((l_mask & 0xff) != 0
13592 || (high_regs_pushed == 0 && l_mask))
13593 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13595 if (high_regs_pushed)
13597 unsigned pushable_regs;
13598 unsigned next_hi_reg;
13600 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13601 if (live_regs_mask & (1 << next_hi_reg))
13602 break;
13604 pushable_regs = l_mask & 0xff;
13606 if (pushable_regs == 0)
13607 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
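/* For example, with r8 and r9 live and r6/r7 pushable, the loop below
   emits "mov r7, r9" and "mov r6, r8", then pushes {r6, r7} while
   recording r8 and r9 as the registers actually saved.  */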
13609 while (high_regs_pushed > 0)
13611 unsigned long real_regs_mask = 0;
13613 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13615 if (pushable_regs & (1 << regno))
13617 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13619 high_regs_pushed --;
13620 real_regs_mask |= (1 << next_hi_reg);
13622 if (high_regs_pushed)
13624 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13625 next_hi_reg --)
13626 if (live_regs_mask & (1 << next_hi_reg))
13627 break;
13629 else
13631 pushable_regs &= ~((1 << regno) - 1);
13632 break;
13637 /* If we had to find a work register and we have not yet
13638 saved the LR then add it to the list of regs to push. */
13639 if (l_mask == (1 << LR_REGNUM))
13641 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13642 1, &cfa_offset,
13643 real_regs_mask | (1 << LR_REGNUM));
13644 l_mask = 0;
13646 else
13647 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13652 /* Handle the case of a double word load into a low register from
13653 a computed memory address. The computed address may involve a
13654 register which is overwritten by the load. */
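/* For example, with operands[0] == r0 and the address held in r0,
   emitting "ldr r0, [r0]" first would destroy the base register, so
   the high word is loaded first: "ldr r1, [r0, #4]" and only then
   "ldr r0, [r0]".  */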
13655 const char *
13656 thumb_load_double_from_address (rtx *operands)
13658 rtx addr;
13659 rtx base;
13660 rtx offset;
13661 rtx arg1;
13662 rtx arg2;
13664 gcc_assert (GET_CODE (operands[0]) == REG);
13665 gcc_assert (GET_CODE (operands[1]) == MEM);
13667 /* Get the memory address. */
13668 addr = XEXP (operands[1], 0);
13670 /* Work out how the memory address is computed. */
13671 switch (GET_CODE (addr))
13673 case REG:
13674 operands[2] = gen_rtx_MEM (SImode,
13675 plus_constant (XEXP (operands[1], 0), 4));
13677 if (REGNO (operands[0]) == REGNO (addr))
13679 output_asm_insn ("ldr\t%H0, %2", operands);
13680 output_asm_insn ("ldr\t%0, %1", operands);
13682 else
13684 output_asm_insn ("ldr\t%0, %1", operands);
13685 output_asm_insn ("ldr\t%H0, %2", operands);
13687 break;
13689 case CONST:
13690 /* Compute <address> + 4 for the high order load. */
13691 operands[2] = gen_rtx_MEM (SImode,
13692 plus_constant (XEXP (operands[1], 0), 4));
13694 output_asm_insn ("ldr\t%0, %1", operands);
13695 output_asm_insn ("ldr\t%H0, %2", operands);
13696 break;
13698 case PLUS:
13699 arg1 = XEXP (addr, 0);
13700 arg2 = XEXP (addr, 1);
13702 if (CONSTANT_P (arg1))
13703 base = arg2, offset = arg1;
13704 else
13705 base = arg1, offset = arg2;
13707 gcc_assert (GET_CODE (base) == REG);
13709 /* Catch the case of <address> = <reg> + <reg> */
13710 if (GET_CODE (offset) == REG)
13712 int reg_offset = REGNO (offset);
13713 int reg_base = REGNO (base);
13714 int reg_dest = REGNO (operands[0]);
13716 /* Add the base and offset registers together into the
13717 higher destination register. */
13718 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13719 reg_dest + 1, reg_base, reg_offset);
13721 /* Load the lower destination register from the address in
13722 the higher destination register. */
13723 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13724 reg_dest, reg_dest + 1);
13726 /* Load the higher destination register from its own address
13727 plus 4. */
13728 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13729 reg_dest + 1, reg_dest + 1);
13731 else
13733 /* Compute <address> + 4 for the high order load. */
13734 operands[2] = gen_rtx_MEM (SImode,
13735 plus_constant (XEXP (operands[1], 0), 4));
13737 /* If the computed address is held in the low order register
13738 then load the high order register first, otherwise always
13739 load the low order register first. */
13740 if (REGNO (operands[0]) == REGNO (base))
13742 output_asm_insn ("ldr\t%H0, %2", operands);
13743 output_asm_insn ("ldr\t%0, %1", operands);
13745 else
13747 output_asm_insn ("ldr\t%0, %1", operands);
13748 output_asm_insn ("ldr\t%H0, %2", operands);
13751 break;
13753 case LABEL_REF:
13754 /* With no registers to worry about we can just load the value
13755 directly. */
13756 operands[2] = gen_rtx_MEM (SImode,
13757 plus_constant (XEXP (operands[1], 0), 4));
13759 output_asm_insn ("ldr\t%H0, %2", operands);
13760 output_asm_insn ("ldr\t%0, %1", operands);
13761 break;
13763 default:
13764 gcc_unreachable ();
13767 return "";
13770 const char *
13771 thumb_output_move_mem_multiple (int n, rtx *operands)
13773 rtx tmp;
13775 switch (n)
13777 case 2:
13778 if (REGNO (operands[4]) > REGNO (operands[5]))
13780 tmp = operands[4];
13781 operands[4] = operands[5];
13782 operands[5] = tmp;
13784 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13785 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13786 break;
13788 case 3:
13789 if (REGNO (operands[4]) > REGNO (operands[5]))
13791 tmp = operands[4];
13792 operands[4] = operands[5];
13793 operands[5] = tmp;
13795 if (REGNO (operands[5]) > REGNO (operands[6]))
13797 tmp = operands[5];
13798 operands[5] = operands[6];
13799 operands[6] = tmp;
13801 if (REGNO (operands[4]) > REGNO (operands[5]))
13803 tmp = operands[4];
13804 operands[4] = operands[5];
13805 operands[5] = tmp;
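/* The three exchanges above bubble-sort the register operands into
   ascending order, as ldmia/stmia register lists require.  */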
13808 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13809 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13810 break;
13812 default:
13813 gcc_unreachable ();
13816 return "";
13819 /* Output a call-via instruction for thumb state. */
13820 const char *
13821 thumb_call_via_reg (rtx reg)
13823 int regno = REGNO (reg);
13824 rtx *labelp;
13826 gcc_assert (regno < LR_REGNUM);
13828 /* If we are in the normal text section we can use a single instance
13829 per compilation unit. If we are doing function sections, then we need
13830 an entry per section, since we can't rely on reachability. */
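/* The "bl" emitted below targets a small "bx rN" stub; for the text
   section those stubs are emitted once per compilation unit by
   arm_file_end.  */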
13831 if (in_text_section ())
13833 thumb_call_reg_needed = 1;
13835 if (thumb_call_via_label[regno] == NULL)
13836 thumb_call_via_label[regno] = gen_label_rtx ();
13837 labelp = thumb_call_via_label + regno;
13839 else
13841 if (cfun->machine->call_via[regno] == NULL)
13842 cfun->machine->call_via[regno] = gen_label_rtx ();
13843 labelp = cfun->machine->call_via + regno;
13846 output_asm_insn ("bl\t%a0", labelp);
13847 return "";
13850 /* Routines for generating rtl. */
13851 void
13852 thumb_expand_movmemqi (rtx *operands)
13854 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13855 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13856 HOST_WIDE_INT len = INTVAL (operands[2]);
13857 HOST_WIDE_INT offset = 0;
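/* Decompose the copy greedily: for instance, a 23-byte copy becomes a
   12-byte block move, an 8-byte block move, then a halfword and a
   final byte moved through scratch registers.  */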
13859 while (len >= 12)
13861 emit_insn (gen_movmem12b (out, in, out, in));
13862 len -= 12;
13865 if (len >= 8)
13867 emit_insn (gen_movmem8b (out, in, out, in));
13868 len -= 8;
13871 if (len >= 4)
13873 rtx reg = gen_reg_rtx (SImode);
13874 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13875 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13876 len -= 4;
13877 offset += 4;
13880 if (len >= 2)
13882 rtx reg = gen_reg_rtx (HImode);
13883 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13884 plus_constant (in, offset))));
13885 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13886 reg));
13887 len -= 2;
13888 offset += 2;
13891 if (len)
13893 rtx reg = gen_reg_rtx (QImode);
13894 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13895 plus_constant (in, offset))));
13896 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13897 reg));
13901 void
13902 thumb_reload_out_hi (rtx *operands)
13904 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13907 /* Handle reading a half-word from memory during reload. */
13908 void
13909 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13911 gcc_unreachable ();
13914 /* Return the length of a function name prefix
13915 that starts with the character C. */
13916 static int
13917 arm_get_strip_length (int c)
13919 switch (c)
13921 ARM_NAME_ENCODING_LENGTHS
13922 default: return 0;
13926 /* Return a pointer to a function's name with any
13927 and all prefix encodings stripped from it. */
13928 const char *
13929 arm_strip_name_encoding (const char *name)
13931 int skip;
13933 while ((skip = arm_get_strip_length (* name)))
13934 name += skip;
13936 return name;
13939 /* If there is a '*' anywhere in the name's prefix, then
13940 emit the stripped name verbatim, otherwise prepend an
13941 underscore if leading underscores are being used. */
13942 void
13943 arm_asm_output_labelref (FILE *stream, const char *name)
13945 int skip;
13946 int verbatim = 0;
13948 while ((skip = arm_get_strip_length (* name)))
13950 verbatim |= (*name == '*');
13951 name += skip;
13954 if (verbatim)
13955 fputs (name, stream);
13956 else
13957 asm_fprintf (stream, "%U%s", name);
13960 static void
13961 arm_file_end (void)
13963 int regno;
13965 if (! thumb_call_reg_needed)
13966 return;
13968 text_section ();
13969 asm_fprintf (asm_out_file, "\t.code 16\n");
13970 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13972 for (regno = 0; regno < LR_REGNUM; regno++)
13974 rtx label = thumb_call_via_label[regno];
13976 if (label != 0)
13978 targetm.asm_out.internal_label (asm_out_file, "L",
13979 CODE_LABEL_NUMBER (label));
13980 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
13985 rtx aof_pic_label;
13987 #ifdef AOF_ASSEMBLER
13988 /* Special functions only needed when producing AOF syntax assembler. */
13990 struct pic_chain
13992 struct pic_chain * next;
13993 const char * symname;
13996 static struct pic_chain * aof_pic_chain = NULL;
13998 rtx
13999 aof_pic_entry (rtx x)
14001 struct pic_chain ** chainp;
14002 int offset;
14004 if (aof_pic_label == NULL_RTX)
14006 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14009 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14010 offset += 4, chainp = &(*chainp)->next)
14011 if ((*chainp)->symname == XSTR (x, 0))
14012 return plus_constant (aof_pic_label, offset);
14014 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14015 (*chainp)->next = NULL;
14016 (*chainp)->symname = XSTR (x, 0);
14017 return plus_constant (aof_pic_label, offset);
14020 void
14021 aof_dump_pic_table (FILE *f)
14023 struct pic_chain * chain;
14025 if (aof_pic_chain == NULL)
14026 return;
14028 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14029 PIC_OFFSET_TABLE_REGNUM,
14030 PIC_OFFSET_TABLE_REGNUM);
14031 fputs ("|x$adcons|\n", f);
14033 for (chain = aof_pic_chain; chain; chain = chain->next)
14035 fputs ("\tDCD\t", f);
14036 assemble_name (f, chain->symname);
14037 fputs ("\n", f);
14041 int arm_text_section_count = 1;
14043 char *
14044 aof_text_section (void)
14046 static char buf[100];
14047 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
14048 arm_text_section_count++);
14049 if (flag_pic)
14050 strcat (buf, ", PIC, REENTRANT");
14051 return buf;
14054 static int arm_data_section_count = 1;
14056 char *
14057 aof_data_section (void)
14059 static char buf[100];
14060 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
14061 return buf;
14064 /* The AOF assembler is religiously strict about declarations of
14065 imported and exported symbols, so that it is impossible to declare
14066 a function as imported near the beginning of the file, and then to
14067 export it later on. It is, however, possible to delay the decision
14068 until all the functions in the file have been compiled. To get
14069 around this, we maintain a list of the imports and exports, and
14070 delete from it any that are subsequently defined. At the end of
14071 compilation we spit the remainder of the list out before the END
14072 directive. */
14074 struct import
14076 struct import * next;
14077 const char * name;
14080 static struct import * imports_list = NULL;
14082 void
14083 aof_add_import (const char *name)
14085 struct import * new;
14087 for (new = imports_list; new; new = new->next)
14088 if (new->name == name)
14089 return;
14091 new = (struct import *) xmalloc (sizeof (struct import));
14092 new->next = imports_list;
14093 imports_list = new;
14094 new->name = name;
14097 void
14098 aof_delete_import (const char *name)
14100 struct import ** old;
14102 for (old = &imports_list; *old; old = & (*old)->next)
14104 if ((*old)->name == name)
14106 *old = (*old)->next;
14107 return;
14112 int arm_main_function = 0;
14114 static void
14115 aof_dump_imports (FILE *f)
14117 /* The AOF assembler needs this to cause the startup code to be extracted
14118 from the library. Bringing in __main causes the whole thing to work
14119 automagically. */
14120 if (arm_main_function)
14122 text_section ();
14123 fputs ("\tIMPORT __main\n", f);
14124 fputs ("\tDCD __main\n", f);
14127 /* Now dump the remaining imports. */
14128 while (imports_list)
14130 fprintf (f, "\tIMPORT\t");
14131 assemble_name (f, imports_list->name);
14132 fputc ('\n', f);
14133 imports_list = imports_list->next;
14137 static void
14138 aof_globalize_label (FILE *stream, const char *name)
14140 default_globalize_label (stream, name);
14141 if (! strcmp (name, "main"))
14142 arm_main_function = 1;
14145 static void
14146 aof_file_start (void)
14148 fputs ("__r0\tRN\t0\n", asm_out_file);
14149 fputs ("__a1\tRN\t0\n", asm_out_file);
14150 fputs ("__a2\tRN\t1\n", asm_out_file);
14151 fputs ("__a3\tRN\t2\n", asm_out_file);
14152 fputs ("__a4\tRN\t3\n", asm_out_file);
14153 fputs ("__v1\tRN\t4\n", asm_out_file);
14154 fputs ("__v2\tRN\t5\n", asm_out_file);
14155 fputs ("__v3\tRN\t6\n", asm_out_file);
14156 fputs ("__v4\tRN\t7\n", asm_out_file);
14157 fputs ("__v5\tRN\t8\n", asm_out_file);
14158 fputs ("__v6\tRN\t9\n", asm_out_file);
14159 fputs ("__sl\tRN\t10\n", asm_out_file);
14160 fputs ("__fp\tRN\t11\n", asm_out_file);
14161 fputs ("__ip\tRN\t12\n", asm_out_file);
14162 fputs ("__sp\tRN\t13\n", asm_out_file);
14163 fputs ("__lr\tRN\t14\n", asm_out_file);
14164 fputs ("__pc\tRN\t15\n", asm_out_file);
14165 fputs ("__f0\tFN\t0\n", asm_out_file);
14166 fputs ("__f1\tFN\t1\n", asm_out_file);
14167 fputs ("__f2\tFN\t2\n", asm_out_file);
14168 fputs ("__f3\tFN\t3\n", asm_out_file);
14169 fputs ("__f4\tFN\t4\n", asm_out_file);
14170 fputs ("__f5\tFN\t5\n", asm_out_file);
14171 fputs ("__f6\tFN\t6\n", asm_out_file);
14172 fputs ("__f7\tFN\t7\n", asm_out_file);
14173 text_section ();
14176 static void
14177 aof_file_end (void)
14179 if (flag_pic)
14180 aof_dump_pic_table (asm_out_file);
14181 arm_file_end ();
14182 aof_dump_imports (asm_out_file);
14183 fputs ("\tEND\n", asm_out_file);
14185 #endif /* AOF_ASSEMBLER */
14187 #ifndef ARM_PE
14188 /* Symbols in the text segment can be accessed without indirecting via the
14189 constant pool; it may take an extra binary operation, but this is still
14190 faster than indirecting via memory. Don't do this when not optimizing,
14191 since we won't be calculating all of the offsets necessary to do this
14192 simplification. */
14194 static void
14195 arm_encode_section_info (tree decl, rtx rtl, int first)
14197 /* This doesn't work with AOF syntax, since the string table may be in
14198 a different AREA. */
14199 #ifndef AOF_ASSEMBLER
14200 if (optimize > 0 && TREE_CONSTANT (decl))
14201 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14202 #endif
14204 /* If we are referencing a function that is weak then encode a long call
14205 flag in the function name, otherwise if the function is static
14206 or known to be defined in this file then encode a short call flag. */
14207 if (first && DECL_P (decl))
14209 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14210 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14211 else if (! TREE_PUBLIC (decl))
14212 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14215 #endif /* !ARM_PE */
14217 static void
14218 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14220 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14221 && !strcmp (prefix, "L"))
14223 arm_ccfsm_state = 0;
14224 arm_target_insn = NULL;
14226 default_internal_label (stream, prefix, labelno);
14229 /* Output code to add DELTA to the first argument, and then jump
14230 to FUNCTION. Used for C++ multiple inheritance. */
14231 static void
14232 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14233 HOST_WIDE_INT delta,
14234 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14235 tree function)
14237 static int thunk_label = 0;
14238 char label[256];
14239 int mi_delta = delta;
14240 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14241 int shift = 0;
14242 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14243 ? 1 : 0);
14244 if (mi_delta < 0)
14245 mi_delta = - mi_delta;
14246 if (TARGET_THUMB)
14248 int labelno = thunk_label++;
14249 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14250 fputs ("\tldr\tr12, ", file);
14251 assemble_name (file, label);
14252 fputc ('\n', file);
14254 while (mi_delta != 0)
14256 if ((mi_delta & (3 << shift)) == 0)
14257 shift += 2;
14258 else
14260 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14261 mi_op, this_regno, this_regno,
14262 mi_delta & (0xff << shift));
14263 mi_delta &= ~(0xff << shift);
14264 shift += 8;
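/* Each iteration above applies one ARM-style immediate (an 8-bit
   value at an even shift) to the `this' register; e.g. a delta of
   0x1004 is applied as "#4" followed by "#4096".  */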
14267 if (TARGET_THUMB)
14269 fprintf (file, "\tbx\tr12\n");
14270 ASM_OUTPUT_ALIGN (file, 2);
14271 assemble_name (file, label);
14272 fputs (":\n", file);
14273 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14275 else
14277 fputs ("\tb\t", file);
14278 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14279 if (NEED_PLT_RELOC)
14280 fputs ("(PLT)", file);
14281 fputc ('\n', file);
14285 int
14286 arm_emit_vector_const (FILE *file, rtx x)
14288 int i;
14289 const char * pattern;
14291 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14293 switch (GET_MODE (x))
14295 case V2SImode: pattern = "%08x"; break;
14296 case V4HImode: pattern = "%04x"; break;
14297 case V8QImode: pattern = "%02x"; break;
14298 default: gcc_unreachable ();
14301 fprintf (file, "0x");
14302 for (i = CONST_VECTOR_NUNITS (x); i--;)
14304 rtx element;
14306 element = CONST_VECTOR_ELT (x, i);
14307 fprintf (file, pattern, INTVAL (element));
14310 return 1;
14313 const char *
14314 arm_output_load_gr (rtx *operands)
14316 rtx reg;
14317 rtx offset;
14318 rtx wcgr;
14319 rtx sum;
14321 if (GET_CODE (operands [1]) != MEM
14322 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14323 || GET_CODE (reg = XEXP (sum, 0)) != REG
14324 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14325 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14326 return "wldrw%?\t%0, %1";
14328 /* Fix up an out-of-range load of a GR register. */
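/* The expansion spills the base register, reuses it to load the value
   with a plain ldr, transfers the result into the wCGR register with
   tmcr, and finally restores the base register from the stack.  */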
14329 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14330 wcgr = operands[0];
14331 operands[0] = reg;
14332 output_asm_insn ("ldr%?\t%0, %1", operands);
14334 operands[0] = wcgr;
14335 operands[1] = reg;
14336 output_asm_insn ("tmcr%?\t%0, %1", operands);
14337 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14339 return "";
14342 static rtx
14343 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14344 int incoming ATTRIBUTE_UNUSED)
14346 #if 0
14347 /* FIXME: The ARM backend has special code to handle structure
14348 returns, and will reserve its own hidden first argument. So
14349 if this macro is enabled a *second* hidden argument will be
14350 reserved, which will break binary compatibility with old
14351 toolchains and also thunk handling. One day this should be
14352 fixed. */
14353 return 0;
14354 #else
14355 /* Register in which address to store a structure value
14356 is passed to a function. */
14357 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14358 #endif
14361 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14363 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14364 named arg and all anonymous args onto the stack.
14365 XXX I know the prologue shouldn't be pushing registers, but it is faster
14366 that way. */
14368 static void
14369 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14370 enum machine_mode mode ATTRIBUTE_UNUSED,
14371 tree type ATTRIBUTE_UNUSED,
14372 int *pretend_size,
14373 int second_time ATTRIBUTE_UNUSED)
14375 cfun->machine->uses_anonymous_args = 1;
14376 if (cum->nregs < NUM_ARG_REGS)
14377 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14380 /* Return nonzero if the CONSUMER instruction (a store) does not need
14381 PRODUCER's value to calculate the address. */
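/* For instance, after "mul r1, r2, r3" the store "str r4, [r1]" needs
   r1 early to form its address, whereas "str r1, [r4]" consumes r1
   only as the data to be stored.  */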
14383 int
14384 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14386 rtx value = PATTERN (producer);
14387 rtx addr = PATTERN (consumer);
14389 if (GET_CODE (value) == COND_EXEC)
14390 value = COND_EXEC_CODE (value);
14391 if (GET_CODE (value) == PARALLEL)
14392 value = XVECEXP (value, 0, 0);
14393 value = XEXP (value, 0);
14394 if (GET_CODE (addr) == COND_EXEC)
14395 addr = COND_EXEC_CODE (addr);
14396 if (GET_CODE (addr) == PARALLEL)
14397 addr = XVECEXP (addr, 0, 0);
14398 addr = XEXP (addr, 0);
14400 return !reg_overlap_mentioned_p (value, addr);
14403 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14404 have an early register shift value or amount dependency on the
14405 result of PRODUCER. */
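/* For instance, after "mul r1, r2, r3" the instruction
   "add r0, r4, r1, lsl #2" feeds r1 into the shifter and therefore
   has an early dependency on the multiply result.  */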
14407 int
14408 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14410 rtx value = PATTERN (producer);
14411 rtx op = PATTERN (consumer);
14412 rtx early_op;
14414 if (GET_CODE (value) == COND_EXEC)
14415 value = COND_EXEC_CODE (value);
14416 if (GET_CODE (value) == PARALLEL)
14417 value = XVECEXP (value, 0, 0);
14418 value = XEXP (value, 0);
14419 if (GET_CODE (op) == COND_EXEC)
14420 op = COND_EXEC_CODE (op);
14421 if (GET_CODE (op) == PARALLEL)
14422 op = XVECEXP (op, 0, 0);
14423 op = XEXP (op, 1);
14425 early_op = XEXP (op, 0);
14426 /* This is either an actual independent shift, or a shift applied to
14427 the first operand of another operation. We want the whole shift
14428 operation. */
14429 if (GET_CODE (early_op) == REG)
14430 early_op = op;
14432 return !reg_overlap_mentioned_p (value, early_op);
14435 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14436 have an early register shift value dependency on the result of
14437 PRODUCER. */
14439 int
14440 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14442 rtx value = PATTERN (producer);
14443 rtx op = PATTERN (consumer);
14444 rtx early_op;
14446 if (GET_CODE (value) == COND_EXEC)
14447 value = COND_EXEC_CODE (value);
14448 if (GET_CODE (value) == PARALLEL)
14449 value = XVECEXP (value, 0, 0);
14450 value = XEXP (value, 0);
14451 if (GET_CODE (op) == COND_EXEC)
14452 op = COND_EXEC_CODE (op);
14453 if (GET_CODE (op) == PARALLEL)
14454 op = XVECEXP (op, 0, 0);
14455 op = XEXP (op, 1);
14457 early_op = XEXP (op, 0);
14459 /* This is either an actual independent shift, or a shift applied to
14460 the first operand of another operation. We want the value being
14461 shifted, in either case. */
14462 if (GET_CODE (early_op) != REG)
14463 early_op = XEXP (early_op, 0);
14465 return !reg_overlap_mentioned_p (value, early_op);
14468 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14469 have an early register mult dependency on the result of
14470 PRODUCER. */
14472 int
14473 arm_no_early_mul_dep (rtx producer, rtx consumer)
14475 rtx value = PATTERN (producer);
14476 rtx op = PATTERN (consumer);
14478 if (GET_CODE (value) == COND_EXEC)
14479 value = COND_EXEC_CODE (value);
14480 if (GET_CODE (value) == PARALLEL)
14481 value = XVECEXP (value, 0, 0);
14482 value = XEXP (value, 0);
14483 if (GET_CODE (op) == COND_EXEC)
14484 op = COND_EXEC_CODE (op);
14485 if (GET_CODE (op) == PARALLEL)
14486 op = XVECEXP (op, 0, 0);
14487 op = XEXP (op, 1);
14489 return (GET_CODE (op) == PLUS
14490 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14494 /* We can't rely on the caller doing the proper promotion when
14495 using APCS or ATPCS. */
14497 static bool
14498 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14500 return !TARGET_AAPCS_BASED;
14504 /* AAPCS based ABIs use short enums by default. */
14506 static bool
14507 arm_default_short_enums (void)
14509 return TARGET_AAPCS_BASED;
14513 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14515 static bool
14516 arm_align_anon_bitfield (void)
14518 return TARGET_AAPCS_BASED;
14522 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14524 static tree
14525 arm_cxx_guard_type (void)
14527 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14531 /* The EABI says test the least significant bit of a guard variable. */
14533 static bool
14534 arm_cxx_guard_mask_bit (void)
14536 return TARGET_AAPCS_BASED;
14540 /* The EABI specifies that all array cookies are 8 bytes long. */
14542 static tree
14543 arm_get_cookie_size (tree type)
14545 tree size;
14547 if (!TARGET_AAPCS_BASED)
14548 return default_cxx_get_cookie_size (type);
14550 size = build_int_cst (sizetype, 8);
14551 return size;
14555 /* The EABI says that array cookies should also contain the element size. */
14557 static bool
14558 arm_cookie_has_size (void)
14560 return TARGET_AAPCS_BASED;
14564 /* The EABI says constructors and destructors should return a pointer to
14565 the object constructed/destroyed. */
14567 static bool
14568 arm_cxx_cdtor_returns_this (void)
14570 return TARGET_AAPCS_BASED;
14573 /* The EABI says that an inline function may never be the key
14574 method. */
14576 static bool
14577 arm_cxx_key_method_may_be_inline (void)
14579 return !TARGET_AAPCS_BASED;
14582 static void
14583 arm_cxx_determine_class_data_visibility (tree decl)
14585 if (!TARGET_AAPCS_BASED)
14586 return;
14588 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
14589 is exported. However, on systems without dynamic vague linkage,
14590 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
14591 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
14592 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
14593 else
14594 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
14595 DECL_VISIBILITY_SPECIFIED (decl) = 1;
14598 static bool
14599 arm_cxx_class_data_always_comdat (void)
14601 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
14602 vague linkage if the class has no key function. */
14603 return !TARGET_AAPCS_BASED;
14607 /* The EABI says __aeabi_atexit should be used to register static
14608 destructors. */
14610 static bool
14611 arm_cxx_use_aeabi_atexit (void)
14613 return TARGET_AAPCS_BASED;
14617 void
14618 arm_set_return_address (rtx source, rtx scratch)
14620 arm_stack_offsets *offsets;
14621 HOST_WIDE_INT delta;
14622 rtx addr;
14623 unsigned long saved_regs;
14625 saved_regs = arm_compute_save_reg_mask ();
14627 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14628 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14629 else
14631 if (frame_pointer_needed)
14632 addr = plus_constant (hard_frame_pointer_rtx, -4);
14633 else
14635 /* LR will be the first saved register. */
14636 offsets = arm_get_frame_offsets ();
14637 delta = offsets->outgoing_args - (offsets->frame + 4);
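/* ARM ldr/str address offsets are limited to +/-4095, so for larger
   deltas the excess is first added into SCRATCH.  */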
14640 if (delta >= 4096)
14642 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14643 GEN_INT (delta & ~4095)));
14644 addr = scratch;
14645 delta &= 4095;
14647 else
14648 addr = stack_pointer_rtx;
14650 addr = plus_constant (addr, delta);
14652 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14657 void
14658 thumb_set_return_address (rtx source, rtx scratch)
14660 arm_stack_offsets *offsets;
14661 HOST_WIDE_INT delta;
14662 int reg;
14663 rtx addr;
14664 unsigned long mask;
14666 emit_insn (gen_rtx_USE (VOIDmode, source));
14668 mask = thumb_compute_save_reg_mask ();
14669 if (mask & (1 << LR_REGNUM))
14671 offsets = arm_get_frame_offsets ();
14673 /* Find the saved regs. */
14674 if (frame_pointer_needed)
14676 delta = offsets->soft_frame - offsets->saved_args;
14677 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14679 else
14681 delta = offsets->outgoing_args - offsets->saved_args;
14682 reg = SP_REGNUM;
14684 /* Allow for the stack frame. */
14685 if (TARGET_BACKTRACE)
14686 delta -= 16;
14687 /* The link register is always the first saved register. */
14688 delta -= 4;
14690 /* Construct the address. */
14691 addr = gen_rtx_REG (SImode, reg);
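/* Thumb str offsets are limited to 124 bytes from a low register but
   1020 bytes from SP, hence the two thresholds checked below.  */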
14692 if ((reg != SP_REGNUM && delta >= 128)
14693 || delta >= 1024)
14695 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14696 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14697 addr = scratch;
14699 else
14700 addr = plus_constant (addr, delta);
14702 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14704 else
14705 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14708 /* Implements target hook vector_mode_supported_p. */
14709 bool
14710 arm_vector_mode_supported_p (enum machine_mode mode)
14712 if ((mode == V2SImode)
14713 || (mode == V4HImode)
14714 || (mode == V8QImode))
14715 return true;
14717 return false;
14720 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14721 ARM insns and therefore guarantee that the shift count is modulo 256.
14722 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14723 guarantee no particular behavior for out-of-range counts. */
14725 static unsigned HOST_WIDE_INT
14726 arm_shift_truncation_mask (enum machine_mode mode)
14728 return mode == SImode ? 255 : 0;
14732 /* Map internal gcc register numbers to DWARF2 register numbers. */
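/* Core registers map one-to-one; VFP registers start at 64, FPA
   registers at 96 (16 on legacy targets), iWMMXt control registers at
   104 and iWMMXt data registers at 112.  */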
14734 unsigned int
14735 arm_dbx_register_number (unsigned int regno)
14737 if (regno < 16)
14738 return regno;
14740 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14741 compatibility. The EABI defines them as registers 96-103. */
14742 if (IS_FPA_REGNUM (regno))
14743 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14745 if (IS_VFP_REGNUM (regno))
14746 return 64 + regno - FIRST_VFP_REGNUM;
14748 if (IS_IWMMXT_GR_REGNUM (regno))
14749 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14751 if (IS_IWMMXT_REGNUM (regno))
14752 return 112 + regno - FIRST_IWMMXT_REGNUM;
14754 gcc_unreachable ();
14758 #ifdef TARGET_UNWIND_INFO
14759 /* Emit unwind directives for a store-multiple instruction. This should
14760 only ever be generated by the function prologue code, so we expect it
14761 to have a particular form. */
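/* For example, the prologue insn for "push {r4, r5, lr}" is annotated
   here as ".save {r4, r5, lr}".  */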
14763 static void
14764 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
14766 int i;
14767 HOST_WIDE_INT offset;
14768 HOST_WIDE_INT nregs;
14769 int reg_size;
14770 unsigned reg;
14771 unsigned lastreg;
14772 rtx e;
14774 /* First insn will adjust the stack pointer. */
14775 e = XVECEXP (p, 0, 0);
14776 if (GET_CODE (e) != SET
14777 || GET_CODE (XEXP (e, 0)) != REG
14778 || REGNO (XEXP (e, 0)) != SP_REGNUM
14779 || GET_CODE (XEXP (e, 1)) != PLUS)
14780 abort ();
14782 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
14783 nregs = XVECLEN (p, 0) - 1;
14785 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
14786 if (reg < 16)
14788 /* The function prologue may also push pc without annotating it, as it is
14789 never restored. We turn this into a stack pointer adjustment. */
14790 if (nregs * 4 == offset - 4)
14792 fprintf (asm_out_file, "\t.pad #4\n");
14793 offset -= 4;
14795 reg_size = 4;
14797 else if (IS_VFP_REGNUM (reg))
14799 /* VFP register saves (in FSTMX format) use an additional word. */
14800 offset -= 4;
14801 reg_size = 8;
14803 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
14805 /* FPA registers are done differently. */
14806 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
14807 return;
14809 else
14810 /* Unknown register type. */
14811 abort ();
14813 /* If the stack increment doesn't match the size of the saved registers,
14814 something has gone horribly wrong. */
14815 if (offset != nregs * reg_size)
14816 abort ();
14818 fprintf (asm_out_file, "\t.save {");
14820 offset = 0;
14821 lastreg = 0;
14822 /* The remaining insns will describe the stores. */
14823 for (i = 1; i <= nregs; i++)
14825 /* Expect (set (mem <addr>) (reg)).
14826 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
14827 e = XVECEXP (p, 0, i);
14828 if (GET_CODE (e) != SET
14829 || GET_CODE (XEXP (e, 0)) != MEM
14830 || GET_CODE (XEXP (e, 1)) != REG)
14831 abort ();
14833 reg = REGNO (XEXP (e, 1));
14834 if (reg < lastreg)
14835 abort ();
14837 if (i != 1)
14838 fprintf (asm_out_file, ", ");
14839 /* We can't use %r for vfp because we need to use the
14840 double precision register names. */
14841 if (IS_VFP_REGNUM (reg))
14842 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
14843 else
14844 asm_fprintf (asm_out_file, "%r", reg);
14846 #ifdef ENABLE_CHECKING
14847 /* Check that the addresses are consecutive. */
14848 e = XEXP (XEXP (e, 0), 0);
14849 if (GET_CODE (e) == PLUS)
14851 offset += reg_size;
14852 if (GET_CODE (XEXP (e, 0)) != REG
14853 || REGNO (XEXP (e, 0)) != SP_REGNUM
14854 || GET_CODE (XEXP (e, 1)) != CONST_INT
14855 || offset != INTVAL (XEXP (e, 1)))
14856 abort ();
14858 else if (i != 1
14859 || GET_CODE (e) != REG
14860 || REGNO (e) != SP_REGNUM)
14861 abort ();
14862 #endif
14864 fprintf (asm_out_file, "}\n");
14867 /* Emit unwind directives for a SET. */
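/* A single-register push becomes ".save {rN}", a stack-pointer
   decrement becomes ".pad #N", a frame-pointer assignment becomes
   ".setfp", and a copy of sp into another register becomes
   ".movsp".  */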
14869 static void
14870 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
14872 rtx e0;
14873 rtx e1;
14875 e0 = XEXP (p, 0);
14876 e1 = XEXP (p, 1);
14877 switch (GET_CODE (e0))
14879 case MEM:
14880 /* Pushing a single register. */
14881 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
14882 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
14883 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
14884 abort ();
14886 asm_fprintf (asm_out_file, "\t.save ");
14887 if (IS_VFP_REGNUM (REGNO (e1)))
14888 asm_fprintf (asm_out_file, "{d%d}\n",
14889 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
14890 else
14891 asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
14892 break;
14894 case REG:
14895 if (REGNO (e0) == SP_REGNUM)
14897 /* A stack increment. */
14898 if (GET_CODE (e1) != PLUS
14899 || GET_CODE (XEXP (e1, 0)) != REG
14900 || REGNO (XEXP (e1, 0)) != SP_REGNUM
14901 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
14902 abort ();
14904 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
14905 -INTVAL (XEXP (e1, 1)));
14907 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
14909 HOST_WIDE_INT offset;
14910 unsigned reg;
14912 if (GET_CODE (e1) == PLUS)
14914 if (GET_CODE (XEXP (e1, 0)) != REG
14915 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
14916 abort ();
14917 reg = REGNO (XEXP (e1, 0));
14918 offset = INTVAL (XEXP (e1, 1));
14919 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
14920 HARD_FRAME_POINTER_REGNUM, reg,
14921 INTVAL (XEXP (e1, 1)));
14923 else if (GET_CODE (e1) == REG)
14925 reg = REGNO (e1);
14926 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
14927 HARD_FRAME_POINTER_REGNUM, reg);
14929 else
14930 abort ();
14932 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
14934 /* Move from sp to reg. */
14935 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
14937 else
14938 abort ();
14939 break;
14941 default:
14942 abort ();
14947 /* Emit unwind directives for the given insn. */
14949 static void
14950 arm_unwind_emit (FILE * asm_out_file, rtx insn)
14952 rtx pat;
14954 if (!ARM_EABI_UNWIND_TABLES)
14955 return;
14957 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
14958 return;
14960 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
14961 if (pat)
14962 pat = XEXP (pat, 0);
14963 else
14964 pat = PATTERN (insn);
14966 switch (GET_CODE (pat))
14968 case SET:
14969 arm_unwind_emit_set (asm_out_file, pat);
14970 break;
14972 case SEQUENCE:
14973 /* Store multiple. */
14974 arm_unwind_emit_stm (asm_out_file, pat);
14975 break;
14977 default:
14978 abort ();
14983 /* Output a reference from a function exception table to the type_info
14984 object X. The EABI specifies that the symbol should be relocated by
14985 an R_ARM_TARGET2 relocation. */
14987 static bool
14988 arm_output_ttype (rtx x)
14990 fputs ("\t.word\t", asm_out_file);
14991 output_addr_const (asm_out_file, x);
14992 /* Use special relocations for symbol references. */
14993 if (GET_CODE (x) != CONST_INT)
14994 fputs ("(TARGET2)", asm_out_file);
14995 fputc ('\n', asm_out_file);
14997 return TRUE;
14999 #endif /* TARGET_UNWIND_INFO */
15002 /* Output unwind directives for the start/end of a function. */
15004 void
15005 arm_output_fn_unwind (FILE * f, bool prologue)
15007 if (!ARM_EABI_UNWIND_TABLES)
15008 return;
15010 if (prologue)
15011 fputs ("\t.fnstart\n", f);
15012 else
15013 fputs ("\t.fnend\n", f);