* config/arm/arm.c (replace_symbols_in_block): Remove static
[official-gcc.git] / gcc / config / arm / arm.c
blob817185673daef42d3ddab55d6ed90219a5d3fe17
1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
75 static rtx emit_sfm (int, int);
76 #ifndef AOF_ASSEMBLER
77 static bool arm_assemble_integer (rtx, unsigned int, int);
78 #endif
79 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
80 static arm_cc get_arm_condition_code (rtx);
81 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
82 static rtx is_jump_table (rtx);
83 static const char *output_multi_immediate (rtx *, const char *, const char *,
84 int, HOST_WIDE_INT);
85 static const char *shift_op (rtx, HOST_WIDE_INT *);
86 static struct machine_function *arm_init_machine_status (void);
87 static void thumb_exit (FILE *, int);
88 static rtx is_jump_table (rtx);
89 static HOST_WIDE_INT get_jump_table_size (rtx);
90 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
91 static Mnode *add_minipool_forward_ref (Mfix *);
92 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
93 static Mnode *add_minipool_backward_ref (Mfix *);
94 static void assign_minipool_offsets (Mfix *);
95 static void arm_print_value (FILE *, rtx);
96 static void dump_minipool (rtx);
97 static int arm_barrier_cost (rtx);
98 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
99 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
100 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
101 rtx);
102 static void arm_reorg (void);
103 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
104 static int current_file_function_operand (rtx);
105 static unsigned long arm_compute_save_reg0_reg12_mask (void);
106 static unsigned long arm_compute_save_reg_mask (void);
107 static unsigned long arm_isr_value (tree);
108 static unsigned long arm_compute_func_type (void);
109 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
110 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
111 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
112 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
113 #endif
114 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
115 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
116 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
117 static int arm_comp_type_attributes (tree, tree);
118 static void arm_set_default_type_attributes (tree);
119 static int arm_adjust_cost (rtx, rtx, rtx, int);
120 static int count_insns_for_constant (HOST_WIDE_INT, int);
121 static int arm_get_strip_length (int);
122 static bool arm_function_ok_for_sibcall (tree, tree);
123 static void arm_internal_label (FILE *, const char *, unsigned long);
124 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
125 tree);
126 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
127 static bool arm_size_rtx_costs (rtx, int, int, int *);
128 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
129 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
130 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
131 static bool arm_9e_rtx_costs (rtx, int, int, int *);
132 static int arm_address_cost (rtx);
133 static bool arm_memory_load_p (rtx);
134 static bool arm_cirrus_insn_p (rtx);
135 static void cirrus_reorg (rtx);
136 static void arm_init_builtins (void);
137 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
138 static void arm_init_iwmmxt_builtins (void);
139 static rtx safe_vector_operand (rtx, enum machine_mode);
140 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
141 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
142 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
143 static void emit_constant_insn (rtx cond, rtx pattern);
144 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
145 tree, bool);
147 #ifndef ARM_PE
148 static void arm_encode_section_info (tree, rtx, int);
149 #endif
151 static void arm_file_end (void);
153 #ifdef AOF_ASSEMBLER
154 static void aof_globalize_label (FILE *, const char *);
155 static void aof_dump_imports (FILE *);
156 static void aof_dump_pic_table (FILE *);
157 static void aof_file_start (void);
158 static void aof_file_end (void);
159 #endif
160 static rtx arm_struct_value_rtx (tree, int);
161 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
162 tree, int *, int);
163 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
164 enum machine_mode, tree, bool);
165 static bool arm_promote_prototypes (tree);
166 static bool arm_default_short_enums (void);
167 static bool arm_align_anon_bitfield (void);
169 static tree arm_cxx_guard_type (void);
170 static bool arm_cxx_guard_mask_bit (void);
171 static tree arm_get_cookie_size (tree);
172 static bool arm_cookie_has_size (void);
173 static bool arm_cxx_cdtor_returns_this (void);
174 static bool arm_cxx_key_method_may_be_inline (void);
175 static bool arm_cxx_export_class_data (void);
176 static void arm_init_libfuncs (void);
177 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
179 /* Initialize the GCC target structure. */
180 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
181 #undef TARGET_MERGE_DECL_ATTRIBUTES
182 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
183 #endif
185 #undef TARGET_ATTRIBUTE_TABLE
186 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
188 #undef TARGET_ASM_FILE_END
189 #define TARGET_ASM_FILE_END arm_file_end
191 #ifdef AOF_ASSEMBLER
192 #undef TARGET_ASM_BYTE_OP
193 #define TARGET_ASM_BYTE_OP "\tDCB\t"
194 #undef TARGET_ASM_ALIGNED_HI_OP
195 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
196 #undef TARGET_ASM_ALIGNED_SI_OP
197 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
198 #undef TARGET_ASM_GLOBALIZE_LABEL
199 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
200 #undef TARGET_ASM_FILE_START
201 #define TARGET_ASM_FILE_START aof_file_start
202 #undef TARGET_ASM_FILE_END
203 #define TARGET_ASM_FILE_END aof_file_end
204 #else
205 #undef TARGET_ASM_ALIGNED_SI_OP
206 #define TARGET_ASM_ALIGNED_SI_OP NULL
207 #undef TARGET_ASM_INTEGER
208 #define TARGET_ASM_INTEGER arm_assemble_integer
209 #endif
211 #undef TARGET_ASM_FUNCTION_PROLOGUE
212 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
214 #undef TARGET_ASM_FUNCTION_EPILOGUE
215 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
217 #undef TARGET_COMP_TYPE_ATTRIBUTES
218 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
220 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
221 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
223 #undef TARGET_SCHED_ADJUST_COST
224 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
226 #undef TARGET_ENCODE_SECTION_INFO
227 #ifdef ARM_PE
228 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
229 #else
230 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
231 #endif
233 #undef TARGET_STRIP_NAME_ENCODING
234 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
236 #undef TARGET_ASM_INTERNAL_LABEL
237 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
239 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
240 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
242 #undef TARGET_ASM_OUTPUT_MI_THUNK
243 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
244 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
245 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
247 /* This will be overridden in arm_override_options. */
248 #undef TARGET_RTX_COSTS
249 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
250 #undef TARGET_ADDRESS_COST
251 #define TARGET_ADDRESS_COST arm_address_cost
253 #undef TARGET_SHIFT_TRUNCATION_MASK
254 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
255 #undef TARGET_VECTOR_MODE_SUPPORTED_P
256 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
258 #undef TARGET_MACHINE_DEPENDENT_REORG
259 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
261 #undef TARGET_INIT_BUILTINS
262 #define TARGET_INIT_BUILTINS arm_init_builtins
263 #undef TARGET_EXPAND_BUILTIN
264 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
266 #undef TARGET_INIT_LIBFUNCS
267 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
269 #undef TARGET_PROMOTE_FUNCTION_ARGS
270 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
271 #undef TARGET_PROMOTE_FUNCTION_RETURN
272 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
273 #undef TARGET_PROMOTE_PROTOTYPES
274 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
275 #undef TARGET_PASS_BY_REFERENCE
276 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
277 #undef TARGET_ARG_PARTIAL_BYTES
278 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
280 #undef TARGET_STRUCT_VALUE_RTX
281 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
283 #undef TARGET_SETUP_INCOMING_VARARGS
284 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
286 #undef TARGET_DEFAULT_SHORT_ENUMS
287 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
289 #undef TARGET_ALIGN_ANON_BITFIELD
290 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
292 #undef TARGET_CXX_GUARD_TYPE
293 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
295 #undef TARGET_CXX_GUARD_MASK_BIT
296 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
298 #undef TARGET_CXX_GET_COOKIE_SIZE
299 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
301 #undef TARGET_CXX_COOKIE_HAS_SIZE
302 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
304 #undef TARGET_CXX_CDTOR_RETURNS_THIS
305 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
307 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
308 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
310 #undef TARGET_CXX_EXPORT_CLASS_DATA
311 #define TARGET_CXX_EXPORT_CLASS_DATA arm_cxx_export_class_data
313 struct gcc_target targetm = TARGET_INITIALIZER;
315 /* Obstack for minipool constant handling. */
316 static struct obstack minipool_obstack;
317 static char * minipool_startobj;
319 /* The maximum number of insns skipped which
320 will be conditionalised if possible. */
321 static int max_insns_skipped = 5;
323 extern FILE * asm_out_file;
325 /* True if we are currently building a constant table. */
326 int making_const_table;
328 /* Define the information needed to generate branch insns. This is
329 stored from the compare operation. */
330 rtx arm_compare_op0, arm_compare_op1;
332 /* The processor for which instructions should be scheduled. */
333 enum processor_type arm_tune = arm_none;
335 /* Which floating point model to use. */
336 enum arm_fp_model arm_fp_model;
338 /* Which floating point hardware is available. */
339 enum fputype arm_fpu_arch;
341 /* Which floating point hardware to schedule for. */
342 enum fputype arm_fpu_tune;
344 /* Whether to use floating point hardware. */
345 enum float_abi_type arm_float_abi;
347 /* Which ABI to use. */
348 enum arm_abi_type arm_abi;
350 /* Set by the -mfpu=... option. */
351 const char * target_fpu_name = NULL;
353 /* Set by the -mfpe=... option. */
354 const char * target_fpe_name = NULL;
356 /* Set by the -mfloat-abi=... option. */
357 const char * target_float_abi_name = NULL;
359 /* Set by the legacy -mhard-float and -msoft-float options. */
360 const char * target_float_switch = NULL;
362 /* Set by the -mabi=... option. */
363 const char * target_abi_name = NULL;
365 /* Used to parse -mstructure_size_boundary command line option. */
366 const char * structure_size_string = NULL;
367 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
369 /* Used for Thumb call_via trampolines. */
370 rtx thumb_call_via_label[14];
371 static int thumb_call_reg_needed;
373 /* Bit values used to identify processor capabilities. */
374 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
375 #define FL_ARCH3M (1 << 1) /* Extended multiply */
376 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
377 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
378 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
379 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
380 #define FL_THUMB (1 << 6) /* Thumb aware */
381 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
382 #define FL_STRONG (1 << 8) /* StrongARM */
383 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
384 #define FL_XSCALE (1 << 10) /* XScale */
385 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
386 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
387 media instructions. */
388 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
390 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
392 #define FL_FOR_ARCH2 0
393 #define FL_FOR_ARCH3 FL_MODE32
394 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
395 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
396 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
397 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
398 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
399 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
400 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
401 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
402 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
403 #define FL_FOR_ARCH6J FL_FOR_ARCH6
404 #define FL_FOR_ARCH6K FL_FOR_ARCH6
405 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
406 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6
408 /* The bits in this mask specify which
409 instructions we are allowed to generate. */
410 static unsigned long insn_flags = 0;
412 /* The bits in this mask specify which instruction scheduling options should
413 be used. */
414 static unsigned long tune_flags = 0;
416 /* The following are used in the arm.md file as equivalents to bits
417 in the above two flag variables. */
419 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
420 int arm_arch3m = 0;
422 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
423 int arm_arch4 = 0;
425 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
426 int arm_arch4t = 0;
428 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
429 int arm_arch5 = 0;
431 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
432 int arm_arch5e = 0;
434 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
435 int arm_arch6 = 0;
437 /* Nonzero if this chip can benefit from load scheduling. */
438 int arm_ld_sched = 0;
440 /* Nonzero if this chip is a StrongARM. */
441 int arm_is_strong = 0;
443 /* Nonzero if this chip is a Cirrus variant. */
444 int arm_arch_cirrus = 0;
446 /* Nonzero if this chip supports Intel Wireless MMX technology. */
447 int arm_arch_iwmmxt = 0;
449 /* Nonzero if this chip is an XScale. */
450 int arm_arch_xscale = 0;
452 /* Nonzero if tuning for XScale */
453 int arm_tune_xscale = 0;
455 /* Nonzero if this chip is an ARM6 or an ARM7. */
456 int arm_is_6_or_7 = 0;
458 /* Nonzero if generating Thumb instructions. */
459 int thumb_code = 0;
461 /* Nonzero if we should define __THUMB_INTERWORK__ in the
462 preprocessor.
463 XXX This is a bit of a hack, it's intended to help work around
464 problems in GLD which doesn't understand that armv5t code is
465 interworking clean. */
466 int arm_cpp_interwork = 0;
468 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
469 must report the mode of the memory reference from PRINT_OPERAND to
470 PRINT_OPERAND_ADDRESS. */
471 enum machine_mode output_memory_reference_mode;
473 /* The register number to be used for the PIC offset register. */
474 const char * arm_pic_register_string = NULL;
475 int arm_pic_register = INVALID_REGNUM;
477 /* Set to 1 when a return insn is output, this means that the epilogue
478 is not needed. */
479 int return_used_this_function;
481 /* Set to 1 after arm_reorg has started. Reset to start at the start of
482 the next function. */
483 static int after_arm_reorg = 0;
485 /* The maximum number of insns to be used when loading a constant. */
486 static int arm_constant_limit = 3;
488 /* For an explanation of these variables, see final_prescan_insn below. */
489 int arm_ccfsm_state;
490 enum arm_cond_code arm_current_cc;
491 rtx arm_target_insn;
492 int arm_target_label;
494 /* The condition codes of the ARM, and the inverse function. */
495 static const char * const arm_condition_codes[] =
497 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
498 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
501 #define streq(string1, string2) (strcmp (string1, string2) == 0)
503 /* Initialization code. */
505 struct processors
507 const char *const name;
508 enum processor_type core;
509 const char *arch;
510 const unsigned long flags;
511 bool (* rtx_costs) (rtx, int, int, int *);
514 /* Not all of these give usefully different compilation alternatives,
515 but there is no simple way of generalizing them. */
516 static const struct processors all_cores[] =
518 /* ARM Cores */
519 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
520 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
521 #include "arm-cores.def"
522 #undef ARM_CORE
523 {NULL, arm_none, NULL, 0, NULL}
526 static const struct processors all_architectures[] =
528 /* ARM Architectures */
529 /* We don't specify rtx_costs here as it will be figured out
530 from the core. */
532 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
533 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
534 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
535 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
536 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
537 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
538 implementations that support it, so we will leave it out for now. */
539 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
540 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
541 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
542 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
543 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
544 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
545 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
546 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
547 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
548 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
549 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
550 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
551 {NULL, arm_none, NULL, 0 , NULL}
554 /* This is a magic structure. The 'string' field is magically filled in
555 with a pointer to the value specified by the user on the command line
556 assuming that the user has specified such a value. */
558 struct arm_cpu_select arm_select[] =
560 /* string name processors */
561 { NULL, "-mcpu=", all_cores },
562 { NULL, "-march=", all_architectures },
563 { NULL, "-mtune=", all_cores }
567 /* The name of the proprocessor macro to define for this architecture. */
569 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
571 struct fpu_desc
573 const char * name;
574 enum fputype fpu;
578 /* Available values for for -mfpu=. */
580 static const struct fpu_desc all_fpus[] =
582 {"fpa", FPUTYPE_FPA},
583 {"fpe2", FPUTYPE_FPA_EMU2},
584 {"fpe3", FPUTYPE_FPA_EMU2},
585 {"maverick", FPUTYPE_MAVERICK},
586 {"vfp", FPUTYPE_VFP}
590 /* Floating point models used by the different hardware.
591 See fputype in arm.h. */
593 static const enum fputype fp_model_for_fpu[] =
595 /* No FP hardware. */
596 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
597 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
598 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
599 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
600 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
601 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
605 struct float_abi
607 const char * name;
608 enum float_abi_type abi_type;
612 /* Available values for -mfloat-abi=. */
614 static const struct float_abi all_float_abis[] =
616 {"soft", ARM_FLOAT_ABI_SOFT},
617 {"softfp", ARM_FLOAT_ABI_SOFTFP},
618 {"hard", ARM_FLOAT_ABI_HARD}
622 struct abi_name
624 const char *name;
625 enum arm_abi_type abi_type;
629 /* Available values for -mabi=. */
631 static const struct abi_name arm_all_abis[] =
633 {"apcs-gnu", ARM_ABI_APCS},
634 {"atpcs", ARM_ABI_ATPCS},
635 {"aapcs", ARM_ABI_AAPCS},
636 {"iwmmxt", ARM_ABI_IWMMXT}
639 /* Return the number of bits set in VALUE. */
640 static unsigned
641 bit_count (unsigned long value)
643 unsigned long count = 0;
645 while (value)
647 count++;
648 value &= value - 1; /* Clear the least-significant set bit. */
651 return count;
654 /* Set up library functions unique to ARM. */
656 static void
657 arm_init_libfuncs (void)
659 /* There are no special library functions unless we are using the
660 ARM BPABI. */
661 if (!TARGET_BPABI)
662 return;
664 /* The functions below are described in Section 4 of the "Run-Time
665 ABI for the ARM architecture", Version 1.0. */
667 /* Double-precision floating-point arithmetic. Table 2. */
668 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
669 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
670 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
671 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
672 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
674 /* Double-precision comparisons. Table 3. */
675 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
676 set_optab_libfunc (ne_optab, DFmode, NULL);
677 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
678 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
679 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
680 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
681 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
683 /* Single-precision floating-point arithmetic. Table 4. */
684 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
685 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
686 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
687 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
688 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
690 /* Single-precision comparisons. Table 5. */
691 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
692 set_optab_libfunc (ne_optab, SFmode, NULL);
693 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
694 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
695 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
696 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
697 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
699 /* Floating-point to integer conversions. Table 6. */
700 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
701 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
702 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
703 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
704 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
705 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
706 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
707 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
709 /* Conversions between floating types. Table 7. */
710 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
711 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
713 /* Integer to floating-point conversions. Table 8. */
714 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
715 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
716 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
717 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
718 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
719 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
720 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
721 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
723 /* Long long. Table 9. */
724 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
725 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
726 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
727 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
728 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
729 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
730 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
731 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
733 /* Integer (32/32->32) division. \S 4.3.1. */
734 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
735 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
737 /* The divmod functions are designed so that they can be used for
738 plain division, even though they return both the quotient and the
739 remainder. The quotient is returned in the usual location (i.e.,
740 r0 for SImode, {r0, r1} for DImode), just as would be expected
741 for an ordinary division routine. Because the AAPCS calling
742 conventions specify that all of { r0, r1, r2, r3 } are
743 callee-saved registers, there is no need to tell the compiler
744 explicitly that those registers are clobbered by these
745 routines. */
746 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
747 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
748 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
749 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
752 /* Fix up any incompatible options that the user has specified.
753 This has now turned into a maze. */
754 void
755 arm_override_options (void)
757 unsigned i;
759 /* Set up the flags based on the cpu/architecture selected by the user. */
760 for (i = ARRAY_SIZE (arm_select); i--;)
762 struct arm_cpu_select * ptr = arm_select + i;
764 if (ptr->string != NULL && ptr->string[0] != '\0')
766 const struct processors * sel;
768 for (sel = ptr->processors; sel->name != NULL; sel++)
769 if (streq (ptr->string, sel->name))
771 /* Set the architecture define. */
772 if (i != 2)
773 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
775 /* Determine the processor core for which we should
776 tune code-generation. */
777 if (/* -mcpu= is a sensible default. */
778 i == 0
779 /* If -march= is used, and -mcpu= has not been used,
780 assume that we should tune for a representative
781 CPU from that architecture. */
782 || i == 1
783 /* -mtune= overrides -mcpu= and -march=. */
784 || i == 2)
785 arm_tune = (enum processor_type) (sel - ptr->processors);
787 if (i != 2)
789 /* If we have been given an architecture and a processor
790 make sure that they are compatible. We only generate
791 a warning though, and we prefer the CPU over the
792 architecture. */
793 if (insn_flags != 0 && (insn_flags ^ sel->flags))
794 warning ("switch -mcpu=%s conflicts with -march= switch",
795 ptr->string);
797 insn_flags = sel->flags;
800 break;
803 if (sel->name == NULL)
804 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
808 /* If the user did not specify a processor, choose one for them. */
809 if (insn_flags == 0)
811 const struct processors * sel;
812 unsigned int sought;
813 enum processor_type cpu;
815 cpu = TARGET_CPU_DEFAULT;
816 if (cpu == arm_none)
818 #ifdef SUBTARGET_CPU_DEFAULT
819 /* Use the subtarget default CPU if none was specified by
820 configure. */
821 cpu = SUBTARGET_CPU_DEFAULT;
822 #endif
823 /* Default to ARM6. */
824 if (cpu == arm_none)
825 cpu = arm6;
827 sel = &all_cores[cpu];
829 insn_flags = sel->flags;
831 /* Now check to see if the user has specified some command line
832 switch that require certain abilities from the cpu. */
833 sought = 0;
835 if (TARGET_INTERWORK || TARGET_THUMB)
837 sought |= (FL_THUMB | FL_MODE32);
839 /* There are no ARM processors that support both APCS-26 and
840 interworking. Therefore we force FL_MODE26 to be removed
841 from insn_flags here (if it was set), so that the search
842 below will always be able to find a compatible processor. */
843 insn_flags &= ~FL_MODE26;
846 if (sought != 0 && ((sought & insn_flags) != sought))
848 /* Try to locate a CPU type that supports all of the abilities
849 of the default CPU, plus the extra abilities requested by
850 the user. */
851 for (sel = all_cores; sel->name != NULL; sel++)
852 if ((sel->flags & sought) == (sought | insn_flags))
853 break;
855 if (sel->name == NULL)
857 unsigned current_bit_count = 0;
858 const struct processors * best_fit = NULL;
860 /* Ideally we would like to issue an error message here
861 saying that it was not possible to find a CPU compatible
862 with the default CPU, but which also supports the command
863 line options specified by the programmer, and so they
864 ought to use the -mcpu=<name> command line option to
865 override the default CPU type.
867 If we cannot find a cpu that has both the
868 characteristics of the default cpu and the given
869 command line options we scan the array again looking
870 for a best match. */
871 for (sel = all_cores; sel->name != NULL; sel++)
872 if ((sel->flags & sought) == sought)
874 unsigned count;
876 count = bit_count (sel->flags & insn_flags);
878 if (count >= current_bit_count)
880 best_fit = sel;
881 current_bit_count = count;
885 if (best_fit == NULL)
886 abort ();
887 else
888 sel = best_fit;
891 insn_flags = sel->flags;
893 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
894 if (arm_tune == arm_none)
895 arm_tune = (enum processor_type) (sel - all_cores);
898 /* The processor for which we should tune should now have been
899 chosen. */
900 if (arm_tune == arm_none)
901 abort ();
903 tune_flags = all_cores[(int)arm_tune].flags;
904 if (optimize_size)
905 targetm.rtx_costs = arm_size_rtx_costs;
906 else
907 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
909 /* Make sure that the processor choice does not conflict with any of the
910 other command line choices. */
911 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
913 warning ("target CPU does not support interworking" );
914 target_flags &= ~ARM_FLAG_INTERWORK;
917 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
919 warning ("target CPU does not support THUMB instructions");
920 target_flags &= ~ARM_FLAG_THUMB;
923 if (TARGET_APCS_FRAME && TARGET_THUMB)
925 /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
926 target_flags &= ~ARM_FLAG_APCS_FRAME;
929 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
930 from here where no function is being compiled currently. */
931 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
932 && TARGET_ARM)
933 warning ("enabling backtrace support is only meaningful when compiling for the Thumb");
935 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
936 warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");
938 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
939 warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");
941 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
943 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
944 target_flags |= ARM_FLAG_APCS_FRAME;
947 if (TARGET_POKE_FUNCTION_NAME)
948 target_flags |= ARM_FLAG_APCS_FRAME;
950 if (TARGET_APCS_REENT && flag_pic)
951 error ("-fpic and -mapcs-reent are incompatible");
953 if (TARGET_APCS_REENT)
954 warning ("APCS reentrant code not supported. Ignored");
956 /* If this target is normally configured to use APCS frames, warn if they
957 are turned off and debugging is turned on. */
958 if (TARGET_ARM
959 && write_symbols != NO_DEBUG
960 && !TARGET_APCS_FRAME
961 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
962 warning ("-g with -mno-apcs-frame may not give sensible debugging");
964 /* If stack checking is disabled, we can use r10 as the PIC register,
965 which keeps r9 available. */
966 if (flag_pic)
967 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
969 if (TARGET_APCS_FLOAT)
970 warning ("passing floating point arguments in fp regs not yet supported");
972 /* Initialize boolean versions of the flags, for use in the arm.md file. */
973 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
974 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
975 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
976 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
977 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
978 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
979 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
980 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
982 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
983 arm_is_strong = (tune_flags & FL_STRONG) != 0;
984 thumb_code = (TARGET_ARM == 0);
985 arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
986 && !(tune_flags & FL_ARCH4))) != 0;
987 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
988 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
990 /* V5 code we generate is completely interworking capable, so we turn off
991 TARGET_INTERWORK here to avoid many tests later on. */
993 /* XXX However, we must pass the right pre-processor defines to CPP
994 or GLD can get confused. This is a hack. */
995 if (TARGET_INTERWORK)
996 arm_cpp_interwork = 1;
998 if (arm_arch5)
999 target_flags &= ~ARM_FLAG_INTERWORK;
1001 if (target_abi_name)
1003 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1005 if (streq (arm_all_abis[i].name, target_abi_name))
1007 arm_abi = arm_all_abis[i].abi_type;
1008 break;
1011 if (i == ARRAY_SIZE (arm_all_abis))
1012 error ("invalid ABI option: -mabi=%s", target_abi_name);
1014 else
1015 arm_abi = ARM_DEFAULT_ABI;
1017 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1018 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1020 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1021 error ("iwmmxt abi requires an iwmmxt capable cpu");
1023 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1024 if (target_fpu_name == NULL && target_fpe_name != NULL)
1026 if (streq (target_fpe_name, "2"))
1027 target_fpu_name = "fpe2";
1028 else if (streq (target_fpe_name, "3"))
1029 target_fpu_name = "fpe3";
1030 else
1031 error ("invalid floating point emulation option: -mfpe=%s",
1032 target_fpe_name);
1034 if (target_fpu_name != NULL)
1036 /* The user specified a FPU. */
1037 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1039 if (streq (all_fpus[i].name, target_fpu_name))
1041 arm_fpu_arch = all_fpus[i].fpu;
1042 arm_fpu_tune = arm_fpu_arch;
1043 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1044 break;
1047 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1048 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1050 else
1052 #ifdef FPUTYPE_DEFAULT
1053 /* Use the default if it is specified for this platform. */
1054 arm_fpu_arch = FPUTYPE_DEFAULT;
1055 arm_fpu_tune = FPUTYPE_DEFAULT;
1056 #else
1057 /* Pick one based on CPU type. */
1058 /* ??? Some targets assume FPA is the default.
1059 if ((insn_flags & FL_VFP) != 0)
1060 arm_fpu_arch = FPUTYPE_VFP;
1061 else
1063 if (arm_arch_cirrus)
1064 arm_fpu_arch = FPUTYPE_MAVERICK;
1065 else
1066 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1067 #endif
1068 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1069 arm_fpu_tune = FPUTYPE_FPA;
1070 else
1071 arm_fpu_tune = arm_fpu_arch;
1072 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1073 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1074 abort ();
1077 if (target_float_abi_name != NULL)
1079 /* The user specified a FP ABI. */
1080 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1082 if (streq (all_float_abis[i].name, target_float_abi_name))
1084 arm_float_abi = all_float_abis[i].abi_type;
1085 break;
1088 if (i == ARRAY_SIZE (all_float_abis))
1089 error ("invalid floating point abi: -mfloat-abi=%s",
1090 target_float_abi_name);
1092 else if (target_float_switch)
1094 /* This is a bit of a hack to avoid needing target flags for these. */
1095 if (target_float_switch[0] == 'h')
1096 arm_float_abi = ARM_FLOAT_ABI_HARD;
1097 else
1098 arm_float_abi = ARM_FLOAT_ABI_SOFT;
1100 else
1101 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1103 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1104 sorry ("-mfloat-abi=hard and VFP");
1106 /* If soft-float is specified then don't use FPU. */
1107 if (TARGET_SOFT_FLOAT)
1108 arm_fpu_arch = FPUTYPE_NONE;
1110 /* For arm2/3 there is no need to do any scheduling if there is only
1111 a floating point emulator, or we are doing software floating-point. */
1112 if ((TARGET_SOFT_FLOAT
1113 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1114 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1115 && (tune_flags & FL_MODE32) == 0)
1116 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1118 /* Override the default structure alignment for AAPCS ABI. */
1119 if (arm_abi == ARM_ABI_AAPCS)
1120 arm_structure_size_boundary = 8;
1122 if (structure_size_string != NULL)
1124 int size = strtol (structure_size_string, NULL, 0);
1126 if (size == 8 || size == 32
1127 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1128 arm_structure_size_boundary = size;
1129 else
1130 warning ("structure size boundary can only be set to %s",
1131 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1134 if (arm_pic_register_string != NULL)
1136 int pic_register = decode_reg_name (arm_pic_register_string);
1138 if (!flag_pic)
1139 warning ("-mpic-register= is useless without -fpic");
1141 /* Prevent the user from choosing an obviously stupid PIC register. */
1142 else if (pic_register < 0 || call_used_regs[pic_register]
1143 || pic_register == HARD_FRAME_POINTER_REGNUM
1144 || pic_register == STACK_POINTER_REGNUM
1145 || pic_register >= PC_REGNUM)
1146 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1147 else
1148 arm_pic_register = pic_register;
1151 if (TARGET_THUMB && flag_schedule_insns)
1153 /* Don't warn since it's on by default in -O2. */
1154 flag_schedule_insns = 0;
1157 if (optimize_size)
1159 /* There's some dispute as to whether this should be 1 or 2. However,
1160 experiments seem to show that in pathological cases a setting of
1161 1 degrades less severely than a setting of 2. This could change if
1162 other parts of the compiler change their behavior. */
1163 arm_constant_limit = 1;
1165 /* If optimizing for size, bump the number of instructions that we
1166 are prepared to conditionally execute (even on a StrongARM). */
1167 max_insns_skipped = 6;
1169 else
1171 /* For processors with load scheduling, it never costs more than
1172 2 cycles to load a constant, and the load scheduler may well
1173 reduce that to 1. */
1174 if (arm_ld_sched)
1175 arm_constant_limit = 1;
1177 /* On XScale the longer latency of a load makes it more difficult
1178 to achieve a good schedule, so it's faster to synthesize
1179 constants that can be done in two insns. */
1180 if (arm_tune_xscale)
1181 arm_constant_limit = 2;
1183 /* StrongARM has early execution of branches, so a sequence
1184 that is worth skipping is shorter. */
1185 if (arm_is_strong)
1186 max_insns_skipped = 3;
1189 /* Register global variables with the garbage collector. */
1190 arm_add_gc_roots ();
1193 static void
1194 arm_add_gc_roots (void)
1196 gcc_obstack_init(&minipool_obstack);
1197 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1200 /* A table of known ARM exception types.
1201 For use with the interrupt function attribute. */
1203 typedef struct
1205 const char *const arg;
1206 const unsigned long return_value;
1208 isr_attribute_arg;
1210 static const isr_attribute_arg isr_attribute_args [] =
1212 { "IRQ", ARM_FT_ISR },
1213 { "irq", ARM_FT_ISR },
1214 { "FIQ", ARM_FT_FIQ },
1215 { "fiq", ARM_FT_FIQ },
1216 { "ABORT", ARM_FT_ISR },
1217 { "abort", ARM_FT_ISR },
1218 { "ABORT", ARM_FT_ISR },
1219 { "abort", ARM_FT_ISR },
1220 { "UNDEF", ARM_FT_EXCEPTION },
1221 { "undef", ARM_FT_EXCEPTION },
1222 { "SWI", ARM_FT_EXCEPTION },
1223 { "swi", ARM_FT_EXCEPTION },
1224 { NULL, ARM_FT_NORMAL }
1227 /* Returns the (interrupt) function type of the current
1228 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1230 static unsigned long
1231 arm_isr_value (tree argument)
1233 const isr_attribute_arg * ptr;
1234 const char * arg;
1236 /* No argument - default to IRQ. */
1237 if (argument == NULL_TREE)
1238 return ARM_FT_ISR;
1240 /* Get the value of the argument. */
1241 if (TREE_VALUE (argument) == NULL_TREE
1242 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1243 return ARM_FT_UNKNOWN;
1245 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1247 /* Check it against the list of known arguments. */
1248 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1249 if (streq (arg, ptr->arg))
1250 return ptr->return_value;
1252 /* An unrecognized interrupt type. */
1253 return ARM_FT_UNKNOWN;
1256 /* Computes the type of the current function. */
1258 static unsigned long
1259 arm_compute_func_type (void)
1261 unsigned long type = ARM_FT_UNKNOWN;
1262 tree a;
1263 tree attr;
1265 if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1266 abort ();
1268 /* Decide if the current function is volatile. Such functions
1269 never return, and many memory cycles can be saved by not storing
1270 register values that will never be needed again. This optimization
1271 was added to speed up context switching in a kernel application. */
1272 if (optimize > 0
1273 && TREE_NOTHROW (current_function_decl)
1274 && TREE_THIS_VOLATILE (current_function_decl))
1275 type |= ARM_FT_VOLATILE;
1277 if (cfun->static_chain_decl != NULL)
1278 type |= ARM_FT_NESTED;
1280 attr = DECL_ATTRIBUTES (current_function_decl);
1282 a = lookup_attribute ("naked", attr);
1283 if (a != NULL_TREE)
1284 type |= ARM_FT_NAKED;
1286 a = lookup_attribute ("isr", attr);
1287 if (a == NULL_TREE)
1288 a = lookup_attribute ("interrupt", attr);
1290 if (a == NULL_TREE)
1291 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1292 else
1293 type |= arm_isr_value (TREE_VALUE (a));
1295 return type;
1298 /* Returns the type of the current function. */
1300 unsigned long
1301 arm_current_func_type (void)
1303 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1304 cfun->machine->func_type = arm_compute_func_type ();
1306 return cfun->machine->func_type;
1309 /* Return 1 if it is possible to return using a single instruction.
1310 If SIBLING is non-null, this is a test for a return before a sibling
1311 call. SIBLING is the call insn, so we can examine its register usage. */
1314 use_return_insn (int iscond, rtx sibling)
1316 int regno;
1317 unsigned int func_type;
1318 unsigned long saved_int_regs;
1319 unsigned HOST_WIDE_INT stack_adjust;
1320 arm_stack_offsets *offsets;
1322 /* Never use a return instruction before reload has run. */
1323 if (!reload_completed)
1324 return 0;
1326 func_type = arm_current_func_type ();
1328 /* Naked functions and volatile functions need special
1329 consideration. */
1330 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1331 return 0;
1333 /* So do interrupt functions that use the frame pointer. */
1334 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1335 return 0;
1337 offsets = arm_get_frame_offsets ();
1338 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1340 /* As do variadic functions. */
1341 if (current_function_pretend_args_size
1342 || cfun->machine->uses_anonymous_args
1343 /* Or if the function calls __builtin_eh_return () */
1344 || current_function_calls_eh_return
1345 /* Or if the function calls alloca */
1346 || current_function_calls_alloca
1347 /* Or if there is a stack adjustment. However, if the stack pointer
1348 is saved on the stack, we can use a pre-incrementing stack load. */
1349 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1350 return 0;
1352 saved_int_regs = arm_compute_save_reg_mask ();
1354 /* Unfortunately, the insn
1356 ldmib sp, {..., sp, ...}
1358 triggers a bug on most SA-110 based devices, such that the stack
1359 pointer won't be correctly restored if the instruction takes a
1360 page fault. We work around this problem by popping r3 along with
1361 the other registers, since that is never slower than executing
1362 another instruction.
1364 We test for !arm_arch5 here, because code for any architecture
1365 less than this could potentially be run on one of the buggy
1366 chips. */
1367 if (stack_adjust == 4 && !arm_arch5)
1369 /* Validate that r3 is a call-clobbered register (always true in
1370 the default abi) ... */
1371 if (!call_used_regs[3])
1372 return 0;
1374 /* ... that it isn't being used for a return value (always true
1375 until we implement return-in-regs), or for a tail-call
1376 argument ... */
1377 if (sibling)
1379 if (GET_CODE (sibling) != CALL_INSN)
1380 abort ();
1382 if (find_regno_fusage (sibling, USE, 3))
1383 return 0;
1386 /* ... and that there are no call-saved registers in r0-r2
1387 (always true in the default ABI). */
1388 if (saved_int_regs & 0x7)
1389 return 0;
1392 /* Can't be done if interworking with Thumb, and any registers have been
1393 stacked. */
1394 if (TARGET_INTERWORK && saved_int_regs != 0)
1395 return 0;
1397 /* On StrongARM, conditional returns are expensive if they aren't
1398 taken and multiple registers have been stacked. */
1399 if (iscond && arm_is_strong)
1401 /* Conditional return when just the LR is stored is a simple
1402 conditional-load instruction, that's not expensive. */
1403 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1404 return 0;
1406 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1407 return 0;
1410 /* If there are saved registers but the LR isn't saved, then we need
1411 two instructions for the return. */
1412 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1413 return 0;
1415 /* Can't be done if any of the FPA regs are pushed,
1416 since this also requires an insn. */
1417 if (TARGET_HARD_FLOAT && TARGET_FPA)
1418 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1419 if (regs_ever_live[regno] && !call_used_regs[regno])
1420 return 0;
1422 /* Likewise VFP regs. */
1423 if (TARGET_HARD_FLOAT && TARGET_VFP)
1424 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1425 if (regs_ever_live[regno] && !call_used_regs[regno])
1426 return 0;
1428 if (TARGET_REALLY_IWMMXT)
1429 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1430 if (regs_ever_live[regno] && ! call_used_regs [regno])
1431 return 0;
1433 return 1;
1436 /* Return TRUE if int I is a valid immediate ARM constant. */
1439 const_ok_for_arm (HOST_WIDE_INT i)
1441 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
1443 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1444 be all zero, or all one. */
1445 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1446 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1447 != ((~(unsigned HOST_WIDE_INT) 0)
1448 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1449 return FALSE;
1451 /* Fast return for 0 and powers of 2 */
1452 if ((i & (i - 1)) == 0)
1453 return TRUE;
1457 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
1458 return TRUE;
1459 mask =
1460 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
1461 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
1463 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
1465 return FALSE;
1468 /* Return true if I is a valid constant for the operation CODE. */
1469 static int
1470 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1472 if (const_ok_for_arm (i))
1473 return 1;
1475 switch (code)
1477 case PLUS:
1478 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1480 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1481 case XOR:
1482 case IOR:
1483 return 0;
1485 case AND:
1486 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1488 default:
1489 abort ();
1493 /* Emit a sequence of insns to handle a large constant.
1494 CODE is the code of the operation required, it can be any of SET, PLUS,
1495 IOR, AND, XOR, MINUS;
1496 MODE is the mode in which the operation is being performed;
1497 VAL is the integer to operate on;
1498 SOURCE is the other operand (a register, or a null-pointer for SET);
1499 SUBTARGETS means it is safe to create scratch registers if that will
1500 either produce a simpler sequence, or we will want to cse the values.
1501 Return value is the number of insns emitted. */
1504 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1505 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1507 rtx cond;
1509 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1510 cond = COND_EXEC_TEST (PATTERN (insn));
1511 else
1512 cond = NULL_RTX;
1514 if (subtargets || code == SET
1515 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1516 && REGNO (target) != REGNO (source)))
1518 /* After arm_reorg has been called, we can't fix up expensive
1519 constants by pushing them into memory so we must synthesize
1520 them in-line, regardless of the cost. This is only likely to
1521 be more costly on chips that have load delay slots and we are
1522 compiling without running the scheduler (so no splitting
1523 occurred before the final instruction emission).
1525 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1527 if (!after_arm_reorg
1528 && !cond
1529 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1530 1, 0)
1531 > arm_constant_limit + (code != SET)))
1533 if (code == SET)
1535 /* Currently SET is the only monadic value for CODE, all
1536 the rest are diadic. */
1537 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1538 return 1;
1540 else
1542 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1544 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1545 /* For MINUS, the value is subtracted from, since we never
1546 have subtraction of a constant. */
1547 if (code == MINUS)
1548 emit_insn (gen_rtx_SET (VOIDmode, target,
1549 gen_rtx_MINUS (mode, temp, source)));
1550 else
1551 emit_insn (gen_rtx_SET (VOIDmode, target,
1552 gen_rtx_fmt_ee (code, mode, source, temp)));
1553 return 2;
1558 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1562 static int
1563 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1565 HOST_WIDE_INT temp1;
1566 int num_insns = 0;
1569 int end;
1571 if (i <= 0)
1572 i += 32;
1573 if (remainder & (3 << (i - 2)))
1575 end = i - 8;
1576 if (end < 0)
1577 end += 32;
1578 temp1 = remainder & ((0x0ff << end)
1579 | ((i < end) ? (0xff >> (32 - end)) : 0));
1580 remainder &= ~temp1;
1581 num_insns++;
1582 i -= 6;
1584 i -= 2;
1585 } while (remainder);
1586 return num_insns;
1589 /* Emit an instruction with the indicated PATTERN. If COND is
1590 non-NULL, conditionalize the execution of the instruction on COND
1591 being true. */
1593 static void
1594 emit_constant_insn (rtx cond, rtx pattern)
1596 if (cond)
1597 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1598 emit_insn (pattern);
1601 /* As above, but extra parameter GENERATE which, if clear, suppresses
1602 RTL generation. */
1604 static int
1605 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1606 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1607 int generate)
1609 int can_invert = 0;
1610 int can_negate = 0;
1611 int can_negate_initial = 0;
1612 int can_shift = 0;
1613 int i;
1614 int num_bits_set = 0;
1615 int set_sign_bit_copies = 0;
1616 int clear_sign_bit_copies = 0;
1617 int clear_zero_bit_copies = 0;
1618 int set_zero_bit_copies = 0;
1619 int insns = 0;
1620 unsigned HOST_WIDE_INT temp1, temp2;
1621 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1623 /* Find out which operations are safe for a given CODE. Also do a quick
1624 check for degenerate cases; these can occur when DImode operations
1625 are split. */
1626 switch (code)
1628 case SET:
1629 can_invert = 1;
1630 can_shift = 1;
1631 can_negate = 1;
1632 break;
1634 case PLUS:
1635 can_negate = 1;
1636 can_negate_initial = 1;
1637 break;
1639 case IOR:
1640 if (remainder == 0xffffffff)
1642 if (generate)
1643 emit_constant_insn (cond,
1644 gen_rtx_SET (VOIDmode, target,
1645 GEN_INT (ARM_SIGN_EXTEND (val))));
1646 return 1;
1648 if (remainder == 0)
1650 if (reload_completed && rtx_equal_p (target, source))
1651 return 0;
1652 if (generate)
1653 emit_constant_insn (cond,
1654 gen_rtx_SET (VOIDmode, target, source));
1655 return 1;
1657 break;
1659 case AND:
1660 if (remainder == 0)
1662 if (generate)
1663 emit_constant_insn (cond,
1664 gen_rtx_SET (VOIDmode, target, const0_rtx));
1665 return 1;
1667 if (remainder == 0xffffffff)
1669 if (reload_completed && rtx_equal_p (target, source))
1670 return 0;
1671 if (generate)
1672 emit_constant_insn (cond,
1673 gen_rtx_SET (VOIDmode, target, source));
1674 return 1;
1676 can_invert = 1;
1677 break;
1679 case XOR:
1680 if (remainder == 0)
1682 if (reload_completed && rtx_equal_p (target, source))
1683 return 0;
1684 if (generate)
1685 emit_constant_insn (cond,
1686 gen_rtx_SET (VOIDmode, target, source));
1687 return 1;
1689 if (remainder == 0xffffffff)
1691 if (generate)
1692 emit_constant_insn (cond,
1693 gen_rtx_SET (VOIDmode, target,
1694 gen_rtx_NOT (mode, source)));
1695 return 1;
1698 /* We don't know how to handle this yet below. */
1699 abort ();
1701 case MINUS:
1702 /* We treat MINUS as (val - source), since (source - val) is always
1703 passed as (source + (-val)). */
1704 if (remainder == 0)
1706 if (generate)
1707 emit_constant_insn (cond,
1708 gen_rtx_SET (VOIDmode, target,
1709 gen_rtx_NEG (mode, source)));
1710 return 1;
1712 if (const_ok_for_arm (val))
1714 if (generate)
1715 emit_constant_insn (cond,
1716 gen_rtx_SET (VOIDmode, target,
1717 gen_rtx_MINUS (mode, GEN_INT (val),
1718 source)));
1719 return 1;
1721 can_negate = 1;
1723 break;
1725 default:
1726 abort ();
1729 /* If we can do it in one insn get out quickly. */
1730 if (const_ok_for_arm (val)
1731 || (can_negate_initial && const_ok_for_arm (-val))
1732 || (can_invert && const_ok_for_arm (~val)))
1734 if (generate)
1735 emit_constant_insn (cond,
1736 gen_rtx_SET (VOIDmode, target,
1737 (source
1738 ? gen_rtx_fmt_ee (code, mode, source,
1739 GEN_INT (val))
1740 : GEN_INT (val))));
1741 return 1;
1744 /* Calculate a few attributes that may be useful for specific
1745 optimizations. */
1746 for (i = 31; i >= 0; i--)
1748 if ((remainder & (1 << i)) == 0)
1749 clear_sign_bit_copies++;
1750 else
1751 break;
1754 for (i = 31; i >= 0; i--)
1756 if ((remainder & (1 << i)) != 0)
1757 set_sign_bit_copies++;
1758 else
1759 break;
1762 for (i = 0; i <= 31; i++)
1764 if ((remainder & (1 << i)) == 0)
1765 clear_zero_bit_copies++;
1766 else
1767 break;
1770 for (i = 0; i <= 31; i++)
1772 if ((remainder & (1 << i)) != 0)
1773 set_zero_bit_copies++;
1774 else
1775 break;
1778 switch (code)
1780 case SET:
1781 /* See if we can do this by sign_extending a constant that is known
1782 to be negative. This is a good, way of doing it, since the shift
1783 may well merge into a subsequent insn. */
1784 if (set_sign_bit_copies > 1)
1786 if (const_ok_for_arm
1787 (temp1 = ARM_SIGN_EXTEND (remainder
1788 << (set_sign_bit_copies - 1))))
1790 if (generate)
1792 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1793 emit_constant_insn (cond,
1794 gen_rtx_SET (VOIDmode, new_src,
1795 GEN_INT (temp1)));
1796 emit_constant_insn (cond,
1797 gen_ashrsi3 (target, new_src,
1798 GEN_INT (set_sign_bit_copies - 1)));
1800 return 2;
1802 /* For an inverted constant, we will need to set the low bits,
1803 these will be shifted out of harm's way. */
1804 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1805 if (const_ok_for_arm (~temp1))
1807 if (generate)
1809 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1810 emit_constant_insn (cond,
1811 gen_rtx_SET (VOIDmode, new_src,
1812 GEN_INT (temp1)));
1813 emit_constant_insn (cond,
1814 gen_ashrsi3 (target, new_src,
1815 GEN_INT (set_sign_bit_copies - 1)));
1817 return 2;
1821 /* See if we can generate this by setting the bottom (or the top)
1822 16 bits, and then shifting these into the other half of the
1823 word. We only look for the simplest cases, to do more would cost
1824 too much. Be careful, however, not to generate this when the
1825 alternative would take fewer insns. */
1826 if (val & 0xffff0000)
1828 temp1 = remainder & 0xffff0000;
1829 temp2 = remainder & 0x0000ffff;
1831 /* Overlaps outside this range are best done using other methods. */
1832 for (i = 9; i < 24; i++)
1834 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1835 && !const_ok_for_arm (temp2))
1837 rtx new_src = (subtargets
1838 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1839 : target);
1840 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1841 source, subtargets, generate);
1842 source = new_src;
1843 if (generate)
1844 emit_constant_insn
1845 (cond,
1846 gen_rtx_SET
1847 (VOIDmode, target,
1848 gen_rtx_IOR (mode,
1849 gen_rtx_ASHIFT (mode, source,
1850 GEN_INT (i)),
1851 source)));
1852 return insns + 1;
1856 /* Don't duplicate cases already considered. */
1857 for (i = 17; i < 24; i++)
1859 if (((temp1 | (temp1 >> i)) == remainder)
1860 && !const_ok_for_arm (temp1))
1862 rtx new_src = (subtargets
1863 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1864 : target);
1865 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1866 source, subtargets, generate);
1867 source = new_src;
1868 if (generate)
1869 emit_constant_insn
1870 (cond,
1871 gen_rtx_SET (VOIDmode, target,
1872 gen_rtx_IOR
1873 (mode,
1874 gen_rtx_LSHIFTRT (mode, source,
1875 GEN_INT (i)),
1876 source)));
1877 return insns + 1;
1881 break;
1883 case IOR:
1884 case XOR:
1885 /* If we have IOR or XOR, and the constant can be loaded in a
1886 single instruction, and we can find a temporary to put it in,
1887 then this can be done in two instructions instead of 3-4. */
1888 if (subtargets
1889 /* TARGET can't be NULL if SUBTARGETS is 0 */
1890 || (reload_completed && !reg_mentioned_p (target, source)))
1892 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1894 if (generate)
1896 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1898 emit_constant_insn (cond,
1899 gen_rtx_SET (VOIDmode, sub,
1900 GEN_INT (val)));
1901 emit_constant_insn (cond,
1902 gen_rtx_SET (VOIDmode, target,
1903 gen_rtx_fmt_ee (code, mode,
1904 source, sub)));
1906 return 2;
1910 if (code == XOR)
1911 break;
1913 if (set_sign_bit_copies > 8
1914 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1916 if (generate)
1918 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1919 rtx shift = GEN_INT (set_sign_bit_copies);
1921 emit_constant_insn
1922 (cond,
1923 gen_rtx_SET (VOIDmode, sub,
1924 gen_rtx_NOT (mode,
1925 gen_rtx_ASHIFT (mode,
1926 source,
1927 shift))));
1928 emit_constant_insn
1929 (cond,
1930 gen_rtx_SET (VOIDmode, target,
1931 gen_rtx_NOT (mode,
1932 gen_rtx_LSHIFTRT (mode, sub,
1933 shift))));
1935 return 2;
1938 if (set_zero_bit_copies > 8
1939 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1941 if (generate)
1943 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1944 rtx shift = GEN_INT (set_zero_bit_copies);
1946 emit_constant_insn
1947 (cond,
1948 gen_rtx_SET (VOIDmode, sub,
1949 gen_rtx_NOT (mode,
1950 gen_rtx_LSHIFTRT (mode,
1951 source,
1952 shift))));
1953 emit_constant_insn
1954 (cond,
1955 gen_rtx_SET (VOIDmode, target,
1956 gen_rtx_NOT (mode,
1957 gen_rtx_ASHIFT (mode, sub,
1958 shift))));
1960 return 2;
1963 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1965 if (generate)
1967 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1968 emit_constant_insn (cond,
1969 gen_rtx_SET (VOIDmode, sub,
1970 gen_rtx_NOT (mode, source)));
1971 source = sub;
1972 if (subtargets)
1973 sub = gen_reg_rtx (mode);
1974 emit_constant_insn (cond,
1975 gen_rtx_SET (VOIDmode, sub,
1976 gen_rtx_AND (mode, source,
1977 GEN_INT (temp1))));
1978 emit_constant_insn (cond,
1979 gen_rtx_SET (VOIDmode, target,
1980 gen_rtx_NOT (mode, sub)));
1982 return 3;
1984 break;
1986 case AND:
1987 /* See if two shifts will do 2 or more insn's worth of work. */
1988 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1990 HOST_WIDE_INT shift_mask = ((0xffffffff
1991 << (32 - clear_sign_bit_copies))
1992 & 0xffffffff);
1994 if ((remainder | shift_mask) != 0xffffffff)
1996 if (generate)
1998 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1999 insns = arm_gen_constant (AND, mode, cond,
2000 remainder | shift_mask,
2001 new_src, source, subtargets, 1);
2002 source = new_src;
2004 else
2006 rtx targ = subtargets ? NULL_RTX : target;
2007 insns = arm_gen_constant (AND, mode, cond,
2008 remainder | shift_mask,
2009 targ, source, subtargets, 0);
2013 if (generate)
2015 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2016 rtx shift = GEN_INT (clear_sign_bit_copies);
2018 emit_insn (gen_ashlsi3 (new_src, source, shift));
2019 emit_insn (gen_lshrsi3 (target, new_src, shift));
2022 return insns + 2;
2025 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2027 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2029 if ((remainder | shift_mask) != 0xffffffff)
2031 if (generate)
2033 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2035 insns = arm_gen_constant (AND, mode, cond,
2036 remainder | shift_mask,
2037 new_src, source, subtargets, 1);
2038 source = new_src;
2040 else
2042 rtx targ = subtargets ? NULL_RTX : target;
2044 insns = arm_gen_constant (AND, mode, cond,
2045 remainder | shift_mask,
2046 targ, source, subtargets, 0);
2050 if (generate)
2052 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2053 rtx shift = GEN_INT (clear_zero_bit_copies);
2055 emit_insn (gen_lshrsi3 (new_src, source, shift));
2056 emit_insn (gen_ashlsi3 (target, new_src, shift));
2059 return insns + 2;
2062 break;
2064 default:
2065 break;
2068 for (i = 0; i < 32; i++)
2069 if (remainder & (1 << i))
2070 num_bits_set++;
2072 if (code == AND || (can_invert && num_bits_set > 16))
2073 remainder = (~remainder) & 0xffffffff;
2074 else if (code == PLUS && num_bits_set > 16)
2075 remainder = (-remainder) & 0xffffffff;
2076 else
2078 can_invert = 0;
2079 can_negate = 0;
2082 /* Now try and find a way of doing the job in either two or three
2083 instructions.
2084 We start by looking for the largest block of zeros that are aligned on
2085 a 2-bit boundary, we then fill up the temps, wrapping around to the
2086 top of the word when we drop off the bottom.
2087 In the worst case this code should produce no more than four insns. */
2089 int best_start = 0;
2090 int best_consecutive_zeros = 0;
2092 for (i = 0; i < 32; i += 2)
2094 int consecutive_zeros = 0;
2096 if (!(remainder & (3 << i)))
2098 while ((i < 32) && !(remainder & (3 << i)))
2100 consecutive_zeros += 2;
2101 i += 2;
2103 if (consecutive_zeros > best_consecutive_zeros)
2105 best_consecutive_zeros = consecutive_zeros;
2106 best_start = i - consecutive_zeros;
2108 i -= 2;
2112 /* So long as it won't require any more insns to do so, it's
2113 desirable to emit a small constant (in bits 0...9) in the last
2114 insn. This way there is more chance that it can be combined with
2115 a later addressing insn to form a pre-indexed load or store
2116 operation. Consider:
2118 *((volatile int *)0xe0000100) = 1;
2119 *((volatile int *)0xe0000110) = 2;
2121 We want this to wind up as:
2123 mov rA, #0xe0000000
2124 mov rB, #1
2125 str rB, [rA, #0x100]
2126 mov rB, #2
2127 str rB, [rA, #0x110]
2129 rather than having to synthesize both large constants from scratch.
2131 Therefore, we calculate how many insns would be required to emit
2132 the constant starting from `best_start', and also starting from
2133 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2134 yield a shorter sequence, we may as well use zero. */
2135 if (best_start != 0
2136 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2137 && (count_insns_for_constant (remainder, 0) <=
2138 count_insns_for_constant (remainder, best_start)))
2139 best_start = 0;
2141 /* Now start emitting the insns. */
2142 i = best_start;
2145 int end;
2147 if (i <= 0)
2148 i += 32;
2149 if (remainder & (3 << (i - 2)))
2151 end = i - 8;
2152 if (end < 0)
2153 end += 32;
2154 temp1 = remainder & ((0x0ff << end)
2155 | ((i < end) ? (0xff >> (32 - end)) : 0));
2156 remainder &= ~temp1;
2158 if (generate)
2160 rtx new_src, temp1_rtx;
2162 if (code == SET || code == MINUS)
2164 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2165 if (can_invert && code != MINUS)
2166 temp1 = ~temp1;
2168 else
2170 if (remainder && subtargets)
2171 new_src = gen_reg_rtx (mode);
2172 else
2173 new_src = target;
2174 if (can_invert)
2175 temp1 = ~temp1;
2176 else if (can_negate)
2177 temp1 = -temp1;
2180 temp1 = trunc_int_for_mode (temp1, mode);
2181 temp1_rtx = GEN_INT (temp1);
2183 if (code == SET)
2185 else if (code == MINUS)
2186 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2187 else
2188 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2190 emit_constant_insn (cond,
2191 gen_rtx_SET (VOIDmode, new_src,
2192 temp1_rtx));
2193 source = new_src;
2196 if (code == SET)
2198 can_invert = 0;
2199 code = PLUS;
2201 else if (code == MINUS)
2202 code = PLUS;
2204 insns++;
2205 i -= 6;
2207 i -= 2;
2209 while (remainder);
2212 return insns;
2215 /* Canonicalize a comparison so that we are more likely to recognize it.
2216 This can be done for a few constant compares, where we can make the
2217 immediate value easier to load. */
2219 enum rtx_code
2220 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2222 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2224 switch (code)
2226 case EQ:
2227 case NE:
2228 return code;
2230 case GT:
2231 case LE:
2232 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2233 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2235 *op1 = GEN_INT (i + 1);
2236 return code == GT ? GE : LT;
2238 break;
2240 case GE:
2241 case LT:
2242 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2243 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2245 *op1 = GEN_INT (i - 1);
2246 return code == GE ? GT : LE;
2248 break;
2250 case GTU:
2251 case LEU:
2252 if (i != ~((unsigned HOST_WIDE_INT) 0)
2253 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2255 *op1 = GEN_INT (i + 1);
2256 return code == GTU ? GEU : LTU;
2258 break;
2260 case GEU:
2261 case LTU:
2262 if (i != 0
2263 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2265 *op1 = GEN_INT (i - 1);
2266 return code == GEU ? GTU : LEU;
2268 break;
2270 default:
2271 abort ();
2274 return code;
2278 /* Define how to find the value returned by a function. */
2281 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2283 enum machine_mode mode;
2284 int unsignedp ATTRIBUTE_UNUSED;
2285 rtx r ATTRIBUTE_UNUSED;
2288 mode = TYPE_MODE (type);
2289 /* Promote integer types. */
2290 if (INTEGRAL_TYPE_P (type))
2291 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2292 return LIBCALL_VALUE(mode);
2295 /* Determine the amount of memory needed to store the possible return
2296 registers of an untyped call. */
2298 arm_apply_result_size (void)
2300 int size = 16;
2302 if (TARGET_ARM)
2304 if (TARGET_HARD_FLOAT_ABI)
2306 if (TARGET_FPA)
2307 size += 12;
2308 if (TARGET_MAVERICK)
2309 size += 8;
2311 if (TARGET_IWMMXT_ABI)
2312 size += 8;
2315 return size;
2318 /* Decide whether a type should be returned in memory (true)
2319 or in a register (false). This is called by the macro
2320 RETURN_IN_MEMORY. */
2322 arm_return_in_memory (tree type)
2324 HOST_WIDE_INT size;
2326 if (!AGGREGATE_TYPE_P (type) &&
2327 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2328 /* All simple types are returned in registers.
2329 For AAPCS, complex types are treated the same as aggregates. */
2330 return 0;
2332 size = int_size_in_bytes (type);
2334 if (arm_abi != ARM_ABI_APCS)
2336 /* ATPCS and later return aggregate types in memory only if they are
2337 larger than a word (or are variable size). */
2338 return (size < 0 || size > UNITS_PER_WORD);
2341 /* For the arm-wince targets we choose to be compatible with Microsoft's
2342 ARM and Thumb compilers, which always return aggregates in memory. */
2343 #ifndef ARM_WINCE
2344 /* All structures/unions bigger than one word are returned in memory.
2345 Also catch the case where int_size_in_bytes returns -1. In this case
2346 the aggregate is either huge or of variable size, and in either case
2347 we will want to return it via memory and not in a register. */
2348 if (size < 0 || size > UNITS_PER_WORD)
2349 return 1;
2351 if (TREE_CODE (type) == RECORD_TYPE)
2353 tree field;
2355 /* For a struct the APCS says that we only return in a register
2356 if the type is 'integer like' and every addressable element
2357 has an offset of zero. For practical purposes this means
2358 that the structure can have at most one non bit-field element
2359 and that this element must be the first one in the structure. */
2361 /* Find the first field, ignoring non FIELD_DECL things which will
2362 have been created by C++. */
2363 for (field = TYPE_FIELDS (type);
2364 field && TREE_CODE (field) != FIELD_DECL;
2365 field = TREE_CHAIN (field))
2366 continue;
2368 if (field == NULL)
2369 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2371 /* Check that the first field is valid for returning in a register. */
2373 /* ... Floats are not allowed */
2374 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2375 return 1;
2377 /* ... Aggregates that are not themselves valid for returning in
2378 a register are not allowed. */
2379 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2380 return 1;
2382 /* Now check the remaining fields, if any. Only bitfields are allowed,
2383 since they are not addressable. */
2384 for (field = TREE_CHAIN (field);
2385 field;
2386 field = TREE_CHAIN (field))
2388 if (TREE_CODE (field) != FIELD_DECL)
2389 continue;
2391 if (!DECL_BIT_FIELD_TYPE (field))
2392 return 1;
2395 return 0;
2398 if (TREE_CODE (type) == UNION_TYPE)
2400 tree field;
2402 /* Unions can be returned in registers if every element is
2403 integral, or can be returned in an integer register. */
2404 for (field = TYPE_FIELDS (type);
2405 field;
2406 field = TREE_CHAIN (field))
2408 if (TREE_CODE (field) != FIELD_DECL)
2409 continue;
2411 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2412 return 1;
2414 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2415 return 1;
2418 return 0;
2420 #endif /* not ARM_WINCE */
2422 /* Return all other types in memory. */
2423 return 1;
2426 /* Indicate whether or not words of a double are in big-endian order. */
2429 arm_float_words_big_endian (void)
2431 if (TARGET_MAVERICK)
2432 return 0;
2434 /* For FPA, float words are always big-endian. For VFP, floats words
2435 follow the memory system mode. */
2437 if (TARGET_FPA)
2439 return 1;
2442 if (TARGET_VFP)
2443 return (TARGET_BIG_END ? 1 : 0);
2445 return 1;
2448 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2449 for a call to a function whose data type is FNTYPE.
2450 For a library call, FNTYPE is NULL. */
2451 void
2452 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2453 rtx libname ATTRIBUTE_UNUSED,
2454 tree fndecl ATTRIBUTE_UNUSED)
2456 /* On the ARM, the offset starts at 0. */
2457 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2458 pcum->iwmmxt_nregs = 0;
2459 pcum->can_split = true;
2461 pcum->call_cookie = CALL_NORMAL;
2463 if (TARGET_LONG_CALLS)
2464 pcum->call_cookie = CALL_LONG;
2466 /* Check for long call/short call attributes. The attributes
2467 override any command line option. */
2468 if (fntype)
2470 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2471 pcum->call_cookie = CALL_SHORT;
2472 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2473 pcum->call_cookie = CALL_LONG;
2476 /* Varargs vectors are treated the same as long long.
2477 named_count avoids having to change the way arm handles 'named' */
2478 pcum->named_count = 0;
2479 pcum->nargs = 0;
2481 if (TARGET_REALLY_IWMMXT && fntype)
2483 tree fn_arg;
2485 for (fn_arg = TYPE_ARG_TYPES (fntype);
2486 fn_arg;
2487 fn_arg = TREE_CHAIN (fn_arg))
2488 pcum->named_count += 1;
2490 if (! pcum->named_count)
2491 pcum->named_count = INT_MAX;
2496 /* Return true if mode/type need doubleword alignment. */
2497 bool
2498 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2500 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2501 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2505 /* Determine where to put an argument to a function.
2506 Value is zero to push the argument on the stack,
2507 or a hard register in which to store the argument.
2509 MODE is the argument's machine mode.
2510 TYPE is the data type of the argument (as a tree).
2511 This is null for libcalls where that information may
2512 not be available.
2513 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2514 the preceding args and about the function being called.
2515 NAMED is nonzero if this argument is a named parameter
2516 (otherwise it is an extra parameter matching an ellipsis). */
2519 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2520 tree type, int named)
2522 int nregs;
2524 /* Varargs vectors are treated the same as long long.
2525 named_count avoids having to change the way arm handles 'named' */
2526 if (TARGET_IWMMXT_ABI
2527 && arm_vector_mode_supported_p (mode)
2528 && pcum->named_count > pcum->nargs + 1)
2530 if (pcum->iwmmxt_nregs <= 9)
2531 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2532 else
2534 pcum->can_split = false;
2535 return NULL_RTX;
2539 /* Put doubleword aligned quantities in even register pairs. */
2540 if (pcum->nregs & 1
2541 && ARM_DOUBLEWORD_ALIGN
2542 && arm_needs_doubleword_align (mode, type))
2543 pcum->nregs++;
2545 if (mode == VOIDmode)
2546 /* Compute operand 2 of the call insn. */
2547 return GEN_INT (pcum->call_cookie);
2549 /* Only allow splitting an arg between regs and memory if all preceding
2550 args were allocated to regs. For args passed by reference we only count
2551 the reference pointer. */
2552 if (pcum->can_split)
2553 nregs = 1;
2554 else
2555 nregs = ARM_NUM_REGS2 (mode, type);
2557 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2558 return NULL_RTX;
2560 return gen_rtx_REG (mode, pcum->nregs);
2563 static int
2564 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2565 tree type, bool named ATTRIBUTE_UNUSED)
2567 int nregs = pcum->nregs;
2569 if (arm_vector_mode_supported_p (mode))
2570 return 0;
2572 if (NUM_ARG_REGS > nregs
2573 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2574 && pcum->can_split)
2575 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2577 return 0;
2580 /* Variable sized types are passed by reference. This is a GCC
2581 extension to the ARM ABI. */
2583 static bool
2584 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2585 enum machine_mode mode ATTRIBUTE_UNUSED,
2586 tree type, bool named ATTRIBUTE_UNUSED)
2588 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2591 /* Encode the current state of the #pragma [no_]long_calls. */
2592 typedef enum
2594 OFF, /* No #pramgma [no_]long_calls is in effect. */
2595 LONG, /* #pragma long_calls is in effect. */
2596 SHORT /* #pragma no_long_calls is in effect. */
2597 } arm_pragma_enum;
2599 static arm_pragma_enum arm_pragma_long_calls = OFF;
2601 void
2602 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2604 arm_pragma_long_calls = LONG;
2607 void
2608 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2610 arm_pragma_long_calls = SHORT;
2613 void
2614 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2616 arm_pragma_long_calls = OFF;
2619 /* Table of machine attributes. */
2620 const struct attribute_spec arm_attribute_table[] =
2622 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2623 /* Function calls made to this symbol must be done indirectly, because
2624 it may lie outside of the 26 bit addressing range of a normal function
2625 call. */
2626 { "long_call", 0, 0, false, true, true, NULL },
2627 /* Whereas these functions are always known to reside within the 26 bit
2628 addressing range. */
2629 { "short_call", 0, 0, false, true, true, NULL },
2630 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2631 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2632 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2633 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2634 #ifdef ARM_PE
2635 /* ARM/PE has three new attributes:
2636 interfacearm - ?
2637 dllexport - for exporting a function/variable that will live in a dll
2638 dllimport - for importing a function/variable from a dll
2640 Microsoft allows multiple declspecs in one __declspec, separating
2641 them with spaces. We do NOT support this. Instead, use __declspec
2642 multiple times.
2644 { "dllimport", 0, 0, true, false, false, NULL },
2645 { "dllexport", 0, 0, true, false, false, NULL },
2646 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2647 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2648 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2649 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2650 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2651 #endif
2652 { NULL, 0, 0, false, false, false, NULL }
2655 /* Handle an attribute requiring a FUNCTION_DECL;
2656 arguments as in struct attribute_spec.handler. */
2657 static tree
2658 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2659 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2661 if (TREE_CODE (*node) != FUNCTION_DECL)
2663 warning ("%qs attribute only applies to functions",
2664 IDENTIFIER_POINTER (name));
2665 *no_add_attrs = true;
2668 return NULL_TREE;
2671 /* Handle an "interrupt" or "isr" attribute;
2672 arguments as in struct attribute_spec.handler. */
2673 static tree
2674 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2675 bool *no_add_attrs)
2677 if (DECL_P (*node))
2679 if (TREE_CODE (*node) != FUNCTION_DECL)
2681 warning ("%qs attribute only applies to functions",
2682 IDENTIFIER_POINTER (name));
2683 *no_add_attrs = true;
2685 /* FIXME: the argument if any is checked for type attributes;
2686 should it be checked for decl ones? */
2688 else
2690 if (TREE_CODE (*node) == FUNCTION_TYPE
2691 || TREE_CODE (*node) == METHOD_TYPE)
2693 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2695 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2696 *no_add_attrs = true;
2699 else if (TREE_CODE (*node) == POINTER_TYPE
2700 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2701 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2702 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2704 *node = build_variant_type_copy (*node);
2705 TREE_TYPE (*node) = build_type_attribute_variant
2706 (TREE_TYPE (*node),
2707 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2708 *no_add_attrs = true;
2710 else
2712 /* Possibly pass this attribute on from the type to a decl. */
2713 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2714 | (int) ATTR_FLAG_FUNCTION_NEXT
2715 | (int) ATTR_FLAG_ARRAY_NEXT))
2717 *no_add_attrs = true;
2718 return tree_cons (name, args, NULL_TREE);
2720 else
2722 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2727 return NULL_TREE;
2730 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2731 /* Handle the "notshared" attribute. This attribute is another way of
2732 requesting hidden visibility. ARM's compiler supports
2733 "__declspec(notshared)"; we support the same thing via an
2734 attribute. */
2736 static tree
2737 arm_handle_notshared_attribute (tree *node,
2738 tree name ATTRIBUTE_UNUSED,
2739 tree args ATTRIBUTE_UNUSED,
2740 int flags ATTRIBUTE_UNUSED,
2741 bool *no_add_attrs)
2743 tree decl = TYPE_NAME (*node);
2745 if (decl)
2747 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2748 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2749 *no_add_attrs = false;
2751 return NULL_TREE;
2753 #endif
2755 /* Return 0 if the attributes for two types are incompatible, 1 if they
2756 are compatible, and 2 if they are nearly compatible (which causes a
2757 warning to be generated). */
2758 static int
2759 arm_comp_type_attributes (tree type1, tree type2)
2761 int l1, l2, s1, s2;
2763 /* Check for mismatch of non-default calling convention. */
2764 if (TREE_CODE (type1) != FUNCTION_TYPE)
2765 return 1;
2767 /* Check for mismatched call attributes. */
2768 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2769 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2770 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2771 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2773 /* Only bother to check if an attribute is defined. */
2774 if (l1 | l2 | s1 | s2)
2776 /* If one type has an attribute, the other must have the same attribute. */
2777 if ((l1 != l2) || (s1 != s2))
2778 return 0;
2780 /* Disallow mixed attributes. */
2781 if ((l1 & s2) || (l2 & s1))
2782 return 0;
2785 /* Check for mismatched ISR attribute. */
2786 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2787 if (! l1)
2788 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2789 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2790 if (! l2)
2791 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2792 if (l1 != l2)
2793 return 0;
2795 return 1;
2798 /* Encode long_call or short_call attribute by prefixing
2799 symbol name in DECL with a special character FLAG. */
2800 void
2801 arm_encode_call_attribute (tree decl, int flag)
2803 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2804 int len = strlen (str);
2805 char * newstr;
2807 /* Do not allow weak functions to be treated as short call. */
2808 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2809 return;
2811 newstr = alloca (len + 2);
2812 newstr[0] = flag;
2813 strcpy (newstr + 1, str);
2815 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2816 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2819 /* Assigns default attributes to newly defined type. This is used to
2820 set short_call/long_call attributes for function types of
2821 functions defined inside corresponding #pragma scopes. */
2822 static void
2823 arm_set_default_type_attributes (tree type)
2825 /* Add __attribute__ ((long_call)) to all functions, when
2826 inside #pragma long_calls or __attribute__ ((short_call)),
2827 when inside #pragma no_long_calls. */
2828 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2830 tree type_attr_list, attr_name;
2831 type_attr_list = TYPE_ATTRIBUTES (type);
2833 if (arm_pragma_long_calls == LONG)
2834 attr_name = get_identifier ("long_call");
2835 else if (arm_pragma_long_calls == SHORT)
2836 attr_name = get_identifier ("short_call");
2837 else
2838 return;
2840 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2841 TYPE_ATTRIBUTES (type) = type_attr_list;
2845 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2846 defined within the current compilation unit. If this cannot be
2847 determined, then 0 is returned. */
2848 static int
2849 current_file_function_operand (rtx sym_ref)
2851 /* This is a bit of a fib. A function will have a short call flag
2852 applied to its name if it has the short call attribute, or it has
2853 already been defined within the current compilation unit. */
2854 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2855 return 1;
2857 /* The current function is always defined within the current compilation
2858 unit. If it s a weak definition however, then this may not be the real
2859 definition of the function, and so we have to say no. */
2860 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2861 && !DECL_WEAK (current_function_decl))
2862 return 1;
2864 /* We cannot make the determination - default to returning 0. */
2865 return 0;
2868 /* Return nonzero if a 32 bit "long_call" should be generated for
2869 this call. We generate a long_call if the function:
2871 a. has an __attribute__((long call))
2872 or b. is within the scope of a #pragma long_calls
2873 or c. the -mlong-calls command line switch has been specified
2874 . and either:
2875 1. -ffunction-sections is in effect
2876 or 2. the current function has __attribute__ ((section))
2877 or 3. the target function has __attribute__ ((section))
2879 However we do not generate a long call if the function:
2881 d. has an __attribute__ ((short_call))
2882 or e. is inside the scope of a #pragma no_long_calls
2883 or f. is defined within the current compilation unit.
2885 This function will be called by C fragments contained in the machine
2886 description file. SYM_REF and CALL_COOKIE correspond to the matched
2887 rtl operands. CALL_SYMBOL is used to distinguish between
2888 two different callers of the function. It is set to 1 in the
2889 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2890 and "call_value" patterns. This is because of the difference in the
2891 SYM_REFs passed by these patterns. */
2893 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2895 if (!call_symbol)
2897 if (GET_CODE (sym_ref) != MEM)
2898 return 0;
2900 sym_ref = XEXP (sym_ref, 0);
2903 if (GET_CODE (sym_ref) != SYMBOL_REF)
2904 return 0;
2906 if (call_cookie & CALL_SHORT)
2907 return 0;
2909 if (TARGET_LONG_CALLS)
2911 if (flag_function_sections
2912 || DECL_SECTION_NAME (current_function_decl))
2913 /* c.3 is handled by the definition of the
2914 ARM_DECLARE_FUNCTION_SIZE macro. */
2915 return 1;
2918 if (current_file_function_operand (sym_ref))
2919 return 0;
2921 return (call_cookie & CALL_LONG)
2922 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2923 || TARGET_LONG_CALLS;
2926 /* Return nonzero if it is ok to make a tail-call to DECL. */
2927 static bool
2928 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2930 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2932 if (cfun->machine->sibcall_blocked)
2933 return false;
2935 /* Never tailcall something for which we have no decl, or if we
2936 are in Thumb mode. */
2937 if (decl == NULL || TARGET_THUMB)
2938 return false;
2940 /* Get the calling method. */
2941 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2942 call_type = CALL_SHORT;
2943 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2944 call_type = CALL_LONG;
2946 /* Cannot tail-call to long calls, since these are out of range of
2947 a branch instruction. However, if not compiling PIC, we know
2948 we can reach the symbol if it is in this compilation unit. */
2949 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2950 return false;
2952 /* If we are interworking and the function is not declared static
2953 then we can't tail-call it unless we know that it exists in this
2954 compilation unit (since it might be a Thumb routine). */
2955 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2956 return false;
2958 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2959 if (IS_INTERRUPT (arm_current_func_type ()))
2960 return false;
2962 /* Everything else is ok. */
2963 return true;
2967 /* Addressing mode support functions. */
2969 /* Return nonzero if X is a legitimate immediate operand when compiling
2970 for PIC. */
2972 legitimate_pic_operand_p (rtx x)
2974 if (CONSTANT_P (x)
2975 && flag_pic
2976 && (GET_CODE (x) == SYMBOL_REF
2977 || (GET_CODE (x) == CONST
2978 && GET_CODE (XEXP (x, 0)) == PLUS
2979 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2980 return 0;
2982 return 1;
2986 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2988 if (GET_CODE (orig) == SYMBOL_REF
2989 || GET_CODE (orig) == LABEL_REF)
2991 #ifndef AOF_ASSEMBLER
2992 rtx pic_ref, address;
2993 #endif
2994 rtx insn;
2995 int subregs = 0;
2997 if (reg == 0)
2999 if (no_new_pseudos)
3000 abort ();
3001 else
3002 reg = gen_reg_rtx (Pmode);
3004 subregs = 1;
3007 #ifdef AOF_ASSEMBLER
3008 /* The AOF assembler can generate relocations for these directly, and
3009 understands that the PIC register has to be added into the offset. */
3010 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3011 #else
3012 if (subregs)
3013 address = gen_reg_rtx (Pmode);
3014 else
3015 address = reg;
3017 if (TARGET_ARM)
3018 emit_insn (gen_pic_load_addr_arm (address, orig));
3019 else
3020 emit_insn (gen_pic_load_addr_thumb (address, orig));
3022 if ((GET_CODE (orig) == LABEL_REF
3023 || (GET_CODE (orig) == SYMBOL_REF &&
3024 SYMBOL_REF_LOCAL_P (orig)))
3025 && NEED_GOT_RELOC)
3026 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3027 else
3029 pic_ref = gen_const_mem (Pmode,
3030 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3031 address));
3034 insn = emit_move_insn (reg, pic_ref);
3035 #endif
3036 current_function_uses_pic_offset_table = 1;
3037 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3038 by loop. */
3039 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3040 REG_NOTES (insn));
3041 return reg;
3043 else if (GET_CODE (orig) == CONST)
3045 rtx base, offset;
3047 if (GET_CODE (XEXP (orig, 0)) == PLUS
3048 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3049 return orig;
3051 if (reg == 0)
3053 if (no_new_pseudos)
3054 abort ();
3055 else
3056 reg = gen_reg_rtx (Pmode);
3059 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3061 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3062 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3063 base == reg ? 0 : reg);
3065 else
3066 abort ();
3068 if (GET_CODE (offset) == CONST_INT)
3070 /* The base register doesn't really matter, we only want to
3071 test the index for the appropriate mode. */
3072 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3074 if (!no_new_pseudos)
3075 offset = force_reg (Pmode, offset);
3076 else
3077 abort ();
3080 if (GET_CODE (offset) == CONST_INT)
3081 return plus_constant (base, INTVAL (offset));
3084 if (GET_MODE_SIZE (mode) > 4
3085 && (GET_MODE_CLASS (mode) == MODE_INT
3086 || TARGET_SOFT_FLOAT))
3088 emit_insn (gen_addsi3 (reg, base, offset));
3089 return reg;
3092 return gen_rtx_PLUS (Pmode, base, offset);
3095 return orig;
3099 /* Find a spare low register to use during the prolog of a function. */
3101 static int
3102 thumb_find_work_register (unsigned long pushed_regs_mask)
3104 int reg;
3106 /* Check the argument registers first as these are call-used. The
3107 register allocation order means that sometimes r3 might be used
3108 but earlier argument registers might not, so check them all. */
3109 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3110 if (!regs_ever_live[reg])
3111 return reg;
3113 /* Before going on to check the call-saved registers we can try a couple
3114 more ways of deducing that r3 is available. The first is when we are
3115 pushing anonymous arguments onto the stack and we have less than 4
3116 registers worth of fixed arguments(*). In this case r3 will be part of
3117 the variable argument list and so we can be sure that it will be
3118 pushed right at the start of the function. Hence it will be available
3119 for the rest of the prologue.
3120 (*): ie current_function_pretend_args_size is greater than 0. */
3121 if (cfun->machine->uses_anonymous_args
3122 && current_function_pretend_args_size > 0)
3123 return LAST_ARG_REGNUM;
3125 /* The other case is when we have fixed arguments but less than 4 registers
3126 worth. In this case r3 might be used in the body of the function, but
3127 it is not being used to convey an argument into the function. In theory
3128 we could just check current_function_args_size to see how many bytes are
3129 being passed in argument registers, but it seems that it is unreliable.
3130 Sometimes it will have the value 0 when in fact arguments are being
3131 passed. (See testcase execute/20021111-1.c for an example). So we also
3132 check the args_info.nregs field as well. The problem with this field is
3133 that it makes no allowances for arguments that are passed to the
3134 function but which are not used. Hence we could miss an opportunity
3135 when a function has an unused argument in r3. But it is better to be
3136 safe than to be sorry. */
3137 if (! cfun->machine->uses_anonymous_args
3138 && current_function_args_size >= 0
3139 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3140 && cfun->args_info.nregs < 4)
3141 return LAST_ARG_REGNUM;
3143 /* Otherwise look for a call-saved register that is going to be pushed. */
3144 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3145 if (pushed_regs_mask & (1 << reg))
3146 return reg;
3148 /* Something went wrong - thumb_compute_save_reg_mask()
3149 should have arranged for a suitable register to be pushed. */
3150 abort ();
3154 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3155 low register. */
3157 void
3158 arm_load_pic_register (unsigned int scratch)
3160 #ifndef AOF_ASSEMBLER
3161 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3162 rtx global_offset_table;
3164 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3165 return;
3167 if (!flag_pic)
3168 abort ();
3170 l1 = gen_label_rtx ();
3172 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3173 /* On the ARM the PC register contains 'dot + 8' at the time of the
3174 addition, on the Thumb it is 'dot + 4'. */
3175 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3176 if (GOT_PCREL)
3177 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3178 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3179 else
3180 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3182 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3184 if (TARGET_ARM)
3186 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3187 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3189 else
3191 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3193 /* We will have pushed the pic register, so should always be
3194 able to find a work register. */
3195 pic_tmp = gen_rtx_REG (SImode, scratch);
3196 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3197 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3199 else
3200 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3201 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3204 /* Need to emit this whether or not we obey regdecls,
3205 since setjmp/longjmp can cause life info to screw up. */
3206 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3207 #endif /* AOF_ASSEMBLER */
3211 /* Return nonzero if X is valid as an ARM state addressing register. */
3212 static int
3213 arm_address_register_rtx_p (rtx x, int strict_p)
3215 int regno;
3217 if (GET_CODE (x) != REG)
3218 return 0;
3220 regno = REGNO (x);
3222 if (strict_p)
3223 return ARM_REGNO_OK_FOR_BASE_P (regno);
3225 return (regno <= LAST_ARM_REGNUM
3226 || regno >= FIRST_PSEUDO_REGISTER
3227 || regno == FRAME_POINTER_REGNUM
3228 || regno == ARG_POINTER_REGNUM);
3231 /* Return nonzero if X is a valid ARM state address operand. */
3233 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3234 int strict_p)
3236 bool use_ldrd;
3237 enum rtx_code code = GET_CODE (x);
3239 if (arm_address_register_rtx_p (x, strict_p))
3240 return 1;
3242 use_ldrd = (TARGET_LDRD
3243 && (mode == DImode
3244 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3246 if (code == POST_INC || code == PRE_DEC
3247 || ((code == PRE_INC || code == POST_DEC)
3248 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3249 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3251 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3252 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3253 && GET_CODE (XEXP (x, 1)) == PLUS
3254 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3256 rtx addend = XEXP (XEXP (x, 1), 1);
3258 /* Don't allow ldrd post increment by register because it's hard
3259 to fixup invalid register choices. */
3260 if (use_ldrd
3261 && GET_CODE (x) == POST_MODIFY
3262 && GET_CODE (addend) == REG)
3263 return 0;
3265 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3266 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3269 /* After reload constants split into minipools will have addresses
3270 from a LABEL_REF. */
3271 else if (reload_completed
3272 && (code == LABEL_REF
3273 || (code == CONST
3274 && GET_CODE (XEXP (x, 0)) == PLUS
3275 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3276 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3277 return 1;
3279 else if (mode == TImode)
3280 return 0;
3282 else if (code == PLUS)
3284 rtx xop0 = XEXP (x, 0);
3285 rtx xop1 = XEXP (x, 1);
3287 return ((arm_address_register_rtx_p (xop0, strict_p)
3288 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3289 || (arm_address_register_rtx_p (xop1, strict_p)
3290 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3293 #if 0
3294 /* Reload currently can't handle MINUS, so disable this for now */
3295 else if (GET_CODE (x) == MINUS)
3297 rtx xop0 = XEXP (x, 0);
3298 rtx xop1 = XEXP (x, 1);
3300 return (arm_address_register_rtx_p (xop0, strict_p)
3301 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3303 #endif
3305 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3306 && code == SYMBOL_REF
3307 && CONSTANT_POOL_ADDRESS_P (x)
3308 && ! (flag_pic
3309 && symbol_mentioned_p (get_pool_constant (x))))
3310 return 1;
3312 return 0;
3315 /* Return nonzero if INDEX is valid for an address index operand in
3316 ARM state. */
3317 static int
3318 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3319 int strict_p)
3321 HOST_WIDE_INT range;
3322 enum rtx_code code = GET_CODE (index);
3324 /* Standard coprocessor addressing modes. */
3325 if (TARGET_HARD_FLOAT
3326 && (TARGET_FPA || TARGET_MAVERICK)
3327 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3328 || (TARGET_MAVERICK && mode == DImode)))
3329 return (code == CONST_INT && INTVAL (index) < 1024
3330 && INTVAL (index) > -1024
3331 && (INTVAL (index) & 3) == 0);
3333 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3334 return (code == CONST_INT
3335 && INTVAL (index) < 1024
3336 && INTVAL (index) > -1024
3337 && (INTVAL (index) & 3) == 0);
3339 if (arm_address_register_rtx_p (index, strict_p)
3340 && (GET_MODE_SIZE (mode) <= 4))
3341 return 1;
3343 if (mode == DImode || mode == DFmode)
3345 if (code == CONST_INT)
3347 HOST_WIDE_INT val = INTVAL (index);
3349 if (TARGET_LDRD)
3350 return val > -256 && val < 256;
3351 else
3352 return val > -4096 && val < 4092;
3355 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3358 if (GET_MODE_SIZE (mode) <= 4
3359 && ! (arm_arch4
3360 && (mode == HImode
3361 || (mode == QImode && outer == SIGN_EXTEND))))
3363 if (code == MULT)
3365 rtx xiop0 = XEXP (index, 0);
3366 rtx xiop1 = XEXP (index, 1);
3368 return ((arm_address_register_rtx_p (xiop0, strict_p)
3369 && power_of_two_operand (xiop1, SImode))
3370 || (arm_address_register_rtx_p (xiop1, strict_p)
3371 && power_of_two_operand (xiop0, SImode)));
3373 else if (code == LSHIFTRT || code == ASHIFTRT
3374 || code == ASHIFT || code == ROTATERT)
3376 rtx op = XEXP (index, 1);
3378 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3379 && GET_CODE (op) == CONST_INT
3380 && INTVAL (op) > 0
3381 && INTVAL (op) <= 31);
3385 /* For ARM v4 we may be doing a sign-extend operation during the
3386 load. */
3387 if (arm_arch4)
3389 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3390 range = 256;
3391 else
3392 range = 4096;
3394 else
3395 range = (mode == HImode) ? 4095 : 4096;
3397 return (code == CONST_INT
3398 && INTVAL (index) < range
3399 && INTVAL (index) > -range);
3402 /* Return nonzero if X is valid as a Thumb state base register. */
3403 static int
3404 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3406 int regno;
3408 if (GET_CODE (x) != REG)
3409 return 0;
3411 regno = REGNO (x);
3413 if (strict_p)
3414 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3416 return (regno <= LAST_LO_REGNUM
3417 || regno > LAST_VIRTUAL_REGISTER
3418 || regno == FRAME_POINTER_REGNUM
3419 || (GET_MODE_SIZE (mode) >= 4
3420 && (regno == STACK_POINTER_REGNUM
3421 || regno >= FIRST_PSEUDO_REGISTER
3422 || x == hard_frame_pointer_rtx
3423 || x == arg_pointer_rtx)));
3426 /* Return nonzero if x is a legitimate index register. This is the case
3427 for any base register that can access a QImode object. */
3428 inline static int
3429 thumb_index_register_rtx_p (rtx x, int strict_p)
3431 return thumb_base_register_rtx_p (x, QImode, strict_p);
3434 /* Return nonzero if x is a legitimate Thumb-state address.
3436 The AP may be eliminated to either the SP or the FP, so we use the
3437 least common denominator, e.g. SImode, and offsets from 0 to 64.
3439 ??? Verify whether the above is the right approach.
3441 ??? Also, the FP may be eliminated to the SP, so perhaps that
3442 needs special handling also.
3444 ??? Look at how the mips16 port solves this problem. It probably uses
3445 better ways to solve some of these problems.
3447 Although it is not incorrect, we don't accept QImode and HImode
3448 addresses based on the frame pointer or arg pointer until the
3449 reload pass starts. This is so that eliminating such addresses
3450 into stack based ones won't produce impossible code. */
3452 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3454 /* ??? Not clear if this is right. Experiment. */
3455 if (GET_MODE_SIZE (mode) < 4
3456 && !(reload_in_progress || reload_completed)
3457 && (reg_mentioned_p (frame_pointer_rtx, x)
3458 || reg_mentioned_p (arg_pointer_rtx, x)
3459 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3460 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3461 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3462 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3463 return 0;
3465 /* Accept any base register. SP only in SImode or larger. */
3466 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3467 return 1;
3469 /* This is PC relative data before arm_reorg runs. */
3470 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3471 && GET_CODE (x) == SYMBOL_REF
3472 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3473 return 1;
3475 /* This is PC relative data after arm_reorg runs. */
3476 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3477 && (GET_CODE (x) == LABEL_REF
3478 || (GET_CODE (x) == CONST
3479 && GET_CODE (XEXP (x, 0)) == PLUS
3480 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3481 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3482 return 1;
3484 /* Post-inc indexing only supported for SImode and larger. */
3485 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3486 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3487 return 1;
3489 else if (GET_CODE (x) == PLUS)
3491 /* REG+REG address can be any two index registers. */
3492 /* We disallow FRAME+REG addressing since we know that FRAME
3493 will be replaced with STACK, and SP relative addressing only
3494 permits SP+OFFSET. */
3495 if (GET_MODE_SIZE (mode) <= 4
3496 && XEXP (x, 0) != frame_pointer_rtx
3497 && XEXP (x, 1) != frame_pointer_rtx
3498 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3499 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3500 return 1;
3502 /* REG+const has 5-7 bit offset for non-SP registers. */
3503 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3504 || XEXP (x, 0) == arg_pointer_rtx)
3505 && GET_CODE (XEXP (x, 1)) == CONST_INT
3506 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3507 return 1;
3509 /* REG+const has 10 bit offset for SP, but only SImode and
3510 larger is supported. */
3511 /* ??? Should probably check for DI/DFmode overflow here
3512 just like GO_IF_LEGITIMATE_OFFSET does. */
3513 else if (GET_CODE (XEXP (x, 0)) == REG
3514 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3515 && GET_MODE_SIZE (mode) >= 4
3516 && GET_CODE (XEXP (x, 1)) == CONST_INT
3517 && INTVAL (XEXP (x, 1)) >= 0
3518 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3519 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3520 return 1;
3522 else if (GET_CODE (XEXP (x, 0)) == REG
3523 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3524 && GET_MODE_SIZE (mode) >= 4
3525 && GET_CODE (XEXP (x, 1)) == CONST_INT
3526 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3527 return 1;
3530 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3531 && GET_MODE_SIZE (mode) == 4
3532 && GET_CODE (x) == SYMBOL_REF
3533 && CONSTANT_POOL_ADDRESS_P (x)
3534 && !(flag_pic
3535 && symbol_mentioned_p (get_pool_constant (x))))
3536 return 1;
3538 return 0;
3541 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3542 instruction of mode MODE. */
3544 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3546 switch (GET_MODE_SIZE (mode))
3548 case 1:
3549 return val >= 0 && val < 32;
3551 case 2:
3552 return val >= 0 && val < 64 && (val & 1) == 0;
3554 default:
3555 return (val >= 0
3556 && (val + GET_MODE_SIZE (mode)) <= 128
3557 && (val & 3) == 0);
3561 /* Try machine-dependent ways of modifying an illegitimate address
3562 to be legitimate. If we find one, return the new, valid address. */
3564 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3566 if (GET_CODE (x) == PLUS)
3568 rtx xop0 = XEXP (x, 0);
3569 rtx xop1 = XEXP (x, 1);
3571 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3572 xop0 = force_reg (SImode, xop0);
3574 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3575 xop1 = force_reg (SImode, xop1);
3577 if (ARM_BASE_REGISTER_RTX_P (xop0)
3578 && GET_CODE (xop1) == CONST_INT)
3580 HOST_WIDE_INT n, low_n;
3581 rtx base_reg, val;
3582 n = INTVAL (xop1);
3584 /* VFP addressing modes actually allow greater offsets, but for
3585 now we just stick with the lowest common denominator. */
3586 if (mode == DImode
3587 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3589 low_n = n & 0x0f;
3590 n &= ~0x0f;
3591 if (low_n > 4)
3593 n += 16;
3594 low_n -= 16;
3597 else
3599 low_n = ((mode) == TImode ? 0
3600 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3601 n -= low_n;
3604 base_reg = gen_reg_rtx (SImode);
3605 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3606 GEN_INT (n)), NULL_RTX);
3607 emit_move_insn (base_reg, val);
3608 x = (low_n == 0 ? base_reg
3609 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3611 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3612 x = gen_rtx_PLUS (SImode, xop0, xop1);
3615 /* XXX We don't allow MINUS any more -- see comment in
3616 arm_legitimate_address_p (). */
3617 else if (GET_CODE (x) == MINUS)
3619 rtx xop0 = XEXP (x, 0);
3620 rtx xop1 = XEXP (x, 1);
3622 if (CONSTANT_P (xop0))
3623 xop0 = force_reg (SImode, xop0);
3625 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3626 xop1 = force_reg (SImode, xop1);
3628 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3629 x = gen_rtx_MINUS (SImode, xop0, xop1);
3632 if (flag_pic)
3634 /* We need to find and carefully transform any SYMBOL and LABEL
3635 references; so go back to the original address expression. */
3636 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3638 if (new_x != orig_x)
3639 x = new_x;
3642 return x;
3646 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3647 to be legitimate. If we find one, return the new, valid address. */
3649 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3651 if (GET_CODE (x) == PLUS
3652 && GET_CODE (XEXP (x, 1)) == CONST_INT
3653 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3654 || INTVAL (XEXP (x, 1)) < 0))
3656 rtx xop0 = XEXP (x, 0);
3657 rtx xop1 = XEXP (x, 1);
3658 HOST_WIDE_INT offset = INTVAL (xop1);
3660 /* Try and fold the offset into a biasing of the base register and
3661 then offsetting that. Don't do this when optimizing for space
3662 since it can cause too many CSEs. */
3663 if (optimize_size && offset >= 0
3664 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3666 HOST_WIDE_INT delta;
3668 if (offset >= 256)
3669 delta = offset - (256 - GET_MODE_SIZE (mode));
3670 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3671 delta = 31 * GET_MODE_SIZE (mode);
3672 else
3673 delta = offset & (~31 * GET_MODE_SIZE (mode));
3675 xop0 = force_operand (plus_constant (xop0, offset - delta),
3676 NULL_RTX);
3677 x = plus_constant (xop0, delta);
3679 else if (offset < 0 && offset > -256)
3680 /* Small negative offsets are best done with a subtract before the
3681 dereference, forcing these into a register normally takes two
3682 instructions. */
3683 x = force_operand (x, NULL_RTX);
3684 else
3686 /* For the remaining cases, force the constant into a register. */
3687 xop1 = force_reg (SImode, xop1);
3688 x = gen_rtx_PLUS (SImode, xop0, xop1);
3691 else if (GET_CODE (x) == PLUS
3692 && s_register_operand (XEXP (x, 1), SImode)
3693 && !s_register_operand (XEXP (x, 0), SImode))
3695 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3697 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3700 if (flag_pic)
3702 /* We need to find and carefully transform any SYMBOL and LABEL
3703 references; so go back to the original address expression. */
3704 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3706 if (new_x != orig_x)
3707 x = new_x;
3710 return x;
3715 #define REG_OR_SUBREG_REG(X) \
3716 (GET_CODE (X) == REG \
3717 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3719 #define REG_OR_SUBREG_RTX(X) \
3720 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3722 #ifndef COSTS_N_INSNS
3723 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3724 #endif
3725 static inline int
3726 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3728 enum machine_mode mode = GET_MODE (x);
3730 switch (code)
3732 case ASHIFT:
3733 case ASHIFTRT:
3734 case LSHIFTRT:
3735 case ROTATERT:
3736 case PLUS:
3737 case MINUS:
3738 case COMPARE:
3739 case NEG:
3740 case NOT:
3741 return COSTS_N_INSNS (1);
3743 case MULT:
3744 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3746 int cycles = 0;
3747 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3749 while (i)
3751 i >>= 2;
3752 cycles++;
3754 return COSTS_N_INSNS (2) + cycles;
3756 return COSTS_N_INSNS (1) + 16;
3758 case SET:
3759 return (COSTS_N_INSNS (1)
3760 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3761 + GET_CODE (SET_DEST (x)) == MEM));
3763 case CONST_INT:
3764 if (outer == SET)
3766 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3767 return 0;
3768 if (thumb_shiftable_const (INTVAL (x)))
3769 return COSTS_N_INSNS (2);
3770 return COSTS_N_INSNS (3);
3772 else if ((outer == PLUS || outer == COMPARE)
3773 && INTVAL (x) < 256 && INTVAL (x) > -256)
3774 return 0;
3775 else if (outer == AND
3776 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3777 return COSTS_N_INSNS (1);
3778 else if (outer == ASHIFT || outer == ASHIFTRT
3779 || outer == LSHIFTRT)
3780 return 0;
3781 return COSTS_N_INSNS (2);
3783 case CONST:
3784 case CONST_DOUBLE:
3785 case LABEL_REF:
3786 case SYMBOL_REF:
3787 return COSTS_N_INSNS (3);
3789 case UDIV:
3790 case UMOD:
3791 case DIV:
3792 case MOD:
3793 return 100;
3795 case TRUNCATE:
3796 return 99;
3798 case AND:
3799 case XOR:
3800 case IOR:
3801 /* XXX guess. */
3802 return 8;
3804 case MEM:
3805 /* XXX another guess. */
3806 /* Memory costs quite a lot for the first word, but subsequent words
3807 load at the equivalent of a single insn each. */
3808 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3809 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3810 ? 4 : 0));
3812 case IF_THEN_ELSE:
3813 /* XXX a guess. */
3814 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3815 return 14;
3816 return 2;
3818 case ZERO_EXTEND:
3819 /* XXX still guessing. */
3820 switch (GET_MODE (XEXP (x, 0)))
3822 case QImode:
3823 return (1 + (mode == DImode ? 4 : 0)
3824 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3826 case HImode:
3827 return (4 + (mode == DImode ? 4 : 0)
3828 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3830 case SImode:
3831 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3833 default:
3834 return 99;
3837 default:
3838 return 99;
3843 /* Worker routine for arm_rtx_costs. */
3844 static inline int
3845 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3847 enum machine_mode mode = GET_MODE (x);
3848 enum rtx_code subcode;
3849 int extra_cost;
3851 switch (code)
3853 case MEM:
3854 /* Memory costs quite a lot for the first word, but subsequent words
3855 load at the equivalent of a single insn each. */
3856 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3857 + (GET_CODE (x) == SYMBOL_REF
3858 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3860 case DIV:
3861 case MOD:
3862 case UDIV:
3863 case UMOD:
3864 return optimize_size ? COSTS_N_INSNS (2) : 100;
3866 case ROTATE:
3867 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3868 return 4;
3869 /* Fall through */
3870 case ROTATERT:
3871 if (mode != SImode)
3872 return 8;
3873 /* Fall through */
3874 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3875 if (mode == DImode)
3876 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3877 + ((GET_CODE (XEXP (x, 0)) == REG
3878 || (GET_CODE (XEXP (x, 0)) == SUBREG
3879 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3880 ? 0 : 8));
3881 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3882 || (GET_CODE (XEXP (x, 0)) == SUBREG
3883 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3884 ? 0 : 4)
3885 + ((GET_CODE (XEXP (x, 1)) == REG
3886 || (GET_CODE (XEXP (x, 1)) == SUBREG
3887 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3888 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3889 ? 0 : 4));
3891 case MINUS:
3892 if (mode == DImode)
3893 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3894 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3895 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3896 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3897 ? 0 : 8));
3899 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3900 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3901 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3902 && arm_const_double_rtx (XEXP (x, 1))))
3903 ? 0 : 8)
3904 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3905 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3906 && arm_const_double_rtx (XEXP (x, 0))))
3907 ? 0 : 8));
3909 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3910 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3911 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3912 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3913 || subcode == ASHIFTRT || subcode == LSHIFTRT
3914 || subcode == ROTATE || subcode == ROTATERT
3915 || (subcode == MULT
3916 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3917 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3918 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3919 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3920 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3921 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3922 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3923 return 1;
3924 /* Fall through */
3926 case PLUS:
3927 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3928 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3929 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3930 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3931 && arm_const_double_rtx (XEXP (x, 1))))
3932 ? 0 : 8));
3934 /* Fall through */
3935 case AND: case XOR: case IOR:
3936 extra_cost = 0;
3938 /* Normally the frame registers will be spilt into reg+const during
3939 reload, so it is a bad idea to combine them with other instructions,
3940 since then they might not be moved outside of loops. As a compromise
3941 we allow integration with ops that have a constant as their second
3942 operand. */
3943 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3944 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3945 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3946 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3947 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3948 extra_cost = 4;
3950 if (mode == DImode)
3951 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3952 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3953 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3954 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3955 ? 0 : 8));
3957 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3958 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3959 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3960 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3961 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3962 ? 0 : 4));
3964 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3965 return (1 + extra_cost
3966 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3967 || subcode == LSHIFTRT || subcode == ASHIFTRT
3968 || subcode == ROTATE || subcode == ROTATERT
3969 || (subcode == MULT
3970 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3971 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3972 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3973 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3974 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3975 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3976 ? 0 : 4));
3978 return 8;
3980 case MULT:
3981 /* This should have been handled by the CPU specific routines. */
3982 abort ();
3984 case TRUNCATE:
3985 if (arm_arch3m && mode == SImode
3986 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3987 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3988 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3989 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3990 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3991 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3992 return 8;
3993 return 99;
3995 case NEG:
3996 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3997 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3998 /* Fall through */
3999 case NOT:
4000 if (mode == DImode)
4001 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4003 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4005 case IF_THEN_ELSE:
4006 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4007 return 14;
4008 return 2;
4010 case COMPARE:
4011 return 1;
4013 case ABS:
4014 return 4 + (mode == DImode ? 4 : 0);
4016 case SIGN_EXTEND:
4017 if (GET_MODE (XEXP (x, 0)) == QImode)
4018 return (4 + (mode == DImode ? 4 : 0)
4019 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4020 /* Fall through */
4021 case ZERO_EXTEND:
4022 switch (GET_MODE (XEXP (x, 0)))
4024 case QImode:
4025 return (1 + (mode == DImode ? 4 : 0)
4026 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4028 case HImode:
4029 return (4 + (mode == DImode ? 4 : 0)
4030 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4032 case SImode:
4033 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4035 case V8QImode:
4036 case V4HImode:
4037 case V2SImode:
4038 case V4QImode:
4039 case V2HImode:
4040 return 1;
4042 default:
4043 break;
4045 abort ();
4047 case CONST_INT:
4048 if (const_ok_for_arm (INTVAL (x)))
4049 return outer == SET ? 2 : -1;
4050 else if (outer == AND
4051 && const_ok_for_arm (~INTVAL (x)))
4052 return -1;
4053 else if ((outer == COMPARE
4054 || outer == PLUS || outer == MINUS)
4055 && const_ok_for_arm (-INTVAL (x)))
4056 return -1;
4057 else
4058 return 5;
4060 case CONST:
4061 case LABEL_REF:
4062 case SYMBOL_REF:
4063 return 6;
4065 case CONST_DOUBLE:
4066 if (arm_const_double_rtx (x))
4067 return outer == SET ? 2 : -1;
4068 else if ((outer == COMPARE || outer == PLUS)
4069 && neg_const_double_rtx_ok_for_fpa (x))
4070 return -1;
4071 return 7;
4073 default:
4074 return 99;
4078 /* RTX costs when optimizing for size. */
4079 static bool
4080 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4082 enum machine_mode mode = GET_MODE (x);
4084 if (TARGET_THUMB)
4086 /* XXX TBD. For now, use the standard costs. */
4087 *total = thumb_rtx_costs (x, code, outer_code);
4088 return true;
4091 switch (code)
4093 case MEM:
4094 /* A memory access costs 1 insn if the mode is small, or the address is
4095 a single register, otherwise it costs one insn per word. */
4096 if (REG_P (XEXP (x, 0)))
4097 *total = COSTS_N_INSNS (1);
4098 else
4099 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4100 return true;
4102 case DIV:
4103 case MOD:
4104 case UDIV:
4105 case UMOD:
4106 /* Needs a libcall, so it costs about this. */
4107 *total = COSTS_N_INSNS (2);
4108 return false;
4110 case ROTATE:
4111 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4113 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4114 return true;
4116 /* Fall through */
4117 case ROTATERT:
4118 case ASHIFT:
4119 case LSHIFTRT:
4120 case ASHIFTRT:
4121 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4123 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4124 return true;
4126 else if (mode == SImode)
4128 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4129 /* Slightly disparage register shifts, but not by much. */
4130 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4131 *total += 1 + rtx_cost (XEXP (x, 1), code);
4132 return true;
4135 /* Needs a libcall. */
4136 *total = COSTS_N_INSNS (2);
4137 return false;
4139 case MINUS:
4140 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4142 *total = COSTS_N_INSNS (1);
4143 return false;
4146 if (mode == SImode)
4148 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4149 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4151 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4152 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4153 || subcode1 == ROTATE || subcode1 == ROTATERT
4154 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4155 || subcode1 == ASHIFTRT)
4157 /* It's just the cost of the two operands. */
4158 *total = 0;
4159 return false;
4162 *total = COSTS_N_INSNS (1);
4163 return false;
4166 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4167 return false;
4169 case PLUS:
4170 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4172 *total = COSTS_N_INSNS (1);
4173 return false;
4176 /* Fall through */
4177 case AND: case XOR: case IOR:
4178 if (mode == SImode)
4180 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4182 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4183 || subcode == LSHIFTRT || subcode == ASHIFTRT
4184 || (code == AND && subcode == NOT))
4186 /* It's just the cost of the two operands. */
4187 *total = 0;
4188 return false;
4192 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4193 return false;
4195 case MULT:
4196 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4197 return false;
4199 case NEG:
4200 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4201 *total = COSTS_N_INSNS (1);
4202 /* Fall through */
4203 case NOT:
4204 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4206 return false;
4208 case IF_THEN_ELSE:
4209 *total = 0;
4210 return false;
4212 case COMPARE:
4213 if (cc_register (XEXP (x, 0), VOIDmode))
4214 * total = 0;
4215 else
4216 *total = COSTS_N_INSNS (1);
4217 return false;
4219 case ABS:
4220 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4221 *total = COSTS_N_INSNS (1);
4222 else
4223 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4224 return false;
4226 case SIGN_EXTEND:
4227 *total = 0;
4228 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4230 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4231 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4233 if (mode == DImode)
4234 *total += COSTS_N_INSNS (1);
4235 return false;
4237 case ZERO_EXTEND:
4238 *total = 0;
4239 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4241 switch (GET_MODE (XEXP (x, 0)))
4243 case QImode:
4244 *total += COSTS_N_INSNS (1);
4245 break;
4247 case HImode:
4248 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4250 case SImode:
4251 break;
4253 default:
4254 *total += COSTS_N_INSNS (2);
4258 if (mode == DImode)
4259 *total += COSTS_N_INSNS (1);
4261 return false;
4263 case CONST_INT:
4264 if (const_ok_for_arm (INTVAL (x)))
4265 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4266 else if (const_ok_for_arm (~INTVAL (x)))
4267 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4268 else if (const_ok_for_arm (-INTVAL (x)))
4270 if (outer_code == COMPARE || outer_code == PLUS
4271 || outer_code == MINUS)
4272 *total = 0;
4273 else
4274 *total = COSTS_N_INSNS (1);
4276 else
4277 *total = COSTS_N_INSNS (2);
4278 return true;
4280 case CONST:
4281 case LABEL_REF:
4282 case SYMBOL_REF:
4283 *total = COSTS_N_INSNS (2);
4284 return true;
4286 case CONST_DOUBLE:
4287 *total = COSTS_N_INSNS (4);
4288 return true;
4290 default:
4291 if (mode != VOIDmode)
4292 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4293 else
4294 *total = COSTS_N_INSNS (4); /* How knows? */
4295 return false;
4299 /* RTX costs for cores with a slow MUL implementation. */
4301 static bool
4302 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4304 enum machine_mode mode = GET_MODE (x);
4306 if (TARGET_THUMB)
4308 *total = thumb_rtx_costs (x, code, outer_code);
4309 return true;
4312 switch (code)
4314 case MULT:
4315 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4316 || mode == DImode)
4318 *total = 30;
4319 return true;
4322 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4324 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4325 & (unsigned HOST_WIDE_INT) 0xffffffff);
4326 int cost, const_ok = const_ok_for_arm (i);
4327 int j, booth_unit_size;
4329 /* Tune as appropriate. */
4330 cost = const_ok ? 4 : 8;
4331 booth_unit_size = 2;
4332 for (j = 0; i && j < 32; j += booth_unit_size)
4334 i >>= booth_unit_size;
4335 cost += 2;
4338 *total = cost;
4339 return true;
4342 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4343 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4344 return true;
4346 default:
4347 *total = arm_rtx_costs_1 (x, code, outer_code);
4348 return true;
4353 /* RTX cost for cores with a fast multiply unit (M variants). */
4355 static bool
4356 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4358 enum machine_mode mode = GET_MODE (x);
4360 if (TARGET_THUMB)
4362 *total = thumb_rtx_costs (x, code, outer_code);
4363 return true;
4366 switch (code)
4368 case MULT:
4369 /* There is no point basing this on the tuning, since it is always the
4370 fast variant if it exists at all. */
4371 if (mode == DImode
4372 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4373 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4374 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4376 *total = 8;
4377 return true;
4381 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4382 || mode == DImode)
4384 *total = 30;
4385 return true;
4388 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4390 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4391 & (unsigned HOST_WIDE_INT) 0xffffffff);
4392 int cost, const_ok = const_ok_for_arm (i);
4393 int j, booth_unit_size;
4395 /* Tune as appropriate. */
4396 cost = const_ok ? 4 : 8;
4397 booth_unit_size = 8;
4398 for (j = 0; i && j < 32; j += booth_unit_size)
4400 i >>= booth_unit_size;
4401 cost += 2;
4404 *total = cost;
4405 return true;
4408 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4409 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4410 return true;
4412 default:
4413 *total = arm_rtx_costs_1 (x, code, outer_code);
4414 return true;
4419 /* RTX cost for XScale CPUs. */
4421 static bool
4422 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4424 enum machine_mode mode = GET_MODE (x);
4426 if (TARGET_THUMB)
4428 *total = thumb_rtx_costs (x, code, outer_code);
4429 return true;
4432 switch (code)
4434 case MULT:
4435 /* There is no point basing this on the tuning, since it is always the
4436 fast variant if it exists at all. */
4437 if (mode == DImode
4438 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4439 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4440 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4442 *total = 8;
4443 return true;
4447 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4448 || mode == DImode)
4450 *total = 30;
4451 return true;
4454 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4456 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4457 & (unsigned HOST_WIDE_INT) 0xffffffff);
4458 int cost, const_ok = const_ok_for_arm (i);
4459 unsigned HOST_WIDE_INT masked_const;
4461 /* The cost will be related to two insns.
4462 First a load of the constant (MOV or LDR), then a multiply. */
4463 cost = 2;
4464 if (! const_ok)
4465 cost += 1; /* LDR is probably more expensive because
4466 of longer result latency. */
4467 masked_const = i & 0xffff8000;
4468 if (masked_const != 0 && masked_const != 0xffff8000)
4470 masked_const = i & 0xf8000000;
4471 if (masked_const == 0 || masked_const == 0xf8000000)
4472 cost += 1;
4473 else
4474 cost += 2;
4476 *total = cost;
4477 return true;
4480 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4481 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4482 return true;
4484 case COMPARE:
4485 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4486 will stall until the multiplication is complete. */
4487 if (GET_CODE (XEXP (x, 0)) == MULT)
4488 *total = 4 + rtx_cost (XEXP (x, 0), code);
4489 else
4490 *total = arm_rtx_costs_1 (x, code, outer_code);
4491 return true;
4493 default:
4494 *total = arm_rtx_costs_1 (x, code, outer_code);
4495 return true;
4500 /* RTX costs for 9e (and later) cores. */
4502 static bool
4503 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4505 enum machine_mode mode = GET_MODE (x);
4506 int nonreg_cost;
4507 int cost;
4509 if (TARGET_THUMB)
4511 switch (code)
4513 case MULT:
4514 *total = COSTS_N_INSNS (3);
4515 return true;
4517 default:
4518 *total = thumb_rtx_costs (x, code, outer_code);
4519 return true;
4523 switch (code)
4525 case MULT:
4526 /* There is no point basing this on the tuning, since it is always the
4527 fast variant if it exists at all. */
4528 if (mode == DImode
4529 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4530 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4531 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4533 *total = 3;
4534 return true;
4538 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4540 *total = 30;
4541 return true;
4543 if (mode == DImode)
4545 cost = 7;
4546 nonreg_cost = 8;
4548 else
4550 cost = 2;
4551 nonreg_cost = 4;
4555 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4556 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4557 return true;
4559 default:
4560 *total = arm_rtx_costs_1 (x, code, outer_code);
4561 return true;
4564 /* All address computations that can be done are free, but rtx cost returns
4565 the same for practically all of them. So we weight the different types
4566 of address here in the order (most pref first):
4567 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4568 static inline int
4569 arm_arm_address_cost (rtx x)
4571 enum rtx_code c = GET_CODE (x);
4573 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4574 return 0;
4575 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4576 return 10;
4578 if (c == PLUS || c == MINUS)
4580 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4581 return 2;
4583 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4584 return 3;
4586 return 4;
4589 return 6;
4592 static inline int
4593 arm_thumb_address_cost (rtx x)
4595 enum rtx_code c = GET_CODE (x);
4597 if (c == REG)
4598 return 1;
4599 if (c == PLUS
4600 && GET_CODE (XEXP (x, 0)) == REG
4601 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4602 return 1;
4604 return 2;
4607 static int
4608 arm_address_cost (rtx x)
4610 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4613 static int
4614 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4616 rtx i_pat, d_pat;
4618 /* Some true dependencies can have a higher cost depending
4619 on precisely how certain input operands are used. */
4620 if (arm_tune_xscale
4621 && REG_NOTE_KIND (link) == 0
4622 && recog_memoized (insn) >= 0
4623 && recog_memoized (dep) >= 0)
4625 int shift_opnum = get_attr_shift (insn);
4626 enum attr_type attr_type = get_attr_type (dep);
4628 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4629 operand for INSN. If we have a shifted input operand and the
4630 instruction we depend on is another ALU instruction, then we may
4631 have to account for an additional stall. */
4632 if (shift_opnum != 0
4633 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4635 rtx shifted_operand;
4636 int opno;
4638 /* Get the shifted operand. */
4639 extract_insn (insn);
4640 shifted_operand = recog_data.operand[shift_opnum];
4642 /* Iterate over all the operands in DEP. If we write an operand
4643 that overlaps with SHIFTED_OPERAND, then we have increase the
4644 cost of this dependency. */
4645 extract_insn (dep);
4646 preprocess_constraints ();
4647 for (opno = 0; opno < recog_data.n_operands; opno++)
4649 /* We can ignore strict inputs. */
4650 if (recog_data.operand_type[opno] == OP_IN)
4651 continue;
4653 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4654 shifted_operand))
4655 return 2;
4660 /* XXX This is not strictly true for the FPA. */
4661 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4662 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4663 return 0;
4665 /* Call insns don't incur a stall, even if they follow a load. */
4666 if (REG_NOTE_KIND (link) == 0
4667 && GET_CODE (insn) == CALL_INSN)
4668 return 1;
4670 if ((i_pat = single_set (insn)) != NULL
4671 && GET_CODE (SET_SRC (i_pat)) == MEM
4672 && (d_pat = single_set (dep)) != NULL
4673 && GET_CODE (SET_DEST (d_pat)) == MEM)
4675 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4676 /* This is a load after a store, there is no conflict if the load reads
4677 from a cached area. Assume that loads from the stack, and from the
4678 constant pool are cached, and that others will miss. This is a
4679 hack. */
4681 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4682 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4683 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4684 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4685 return 1;
4688 return cost;
4691 static int fp_consts_inited = 0;
4693 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4694 static const char * const strings_fp[8] =
4696 "0", "1", "2", "3",
4697 "4", "5", "0.5", "10"
4700 static REAL_VALUE_TYPE values_fp[8];
4702 static void
4703 init_fp_table (void)
4705 int i;
4706 REAL_VALUE_TYPE r;
4708 if (TARGET_VFP)
4709 fp_consts_inited = 1;
4710 else
4711 fp_consts_inited = 8;
4713 for (i = 0; i < fp_consts_inited; i++)
4715 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4716 values_fp[i] = r;
4720 /* Return TRUE if rtx X is a valid immediate FP constant. */
4722 arm_const_double_rtx (rtx x)
4724 REAL_VALUE_TYPE r;
4725 int i;
4727 if (!fp_consts_inited)
4728 init_fp_table ();
4730 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4731 if (REAL_VALUE_MINUS_ZERO (r))
4732 return 0;
4734 for (i = 0; i < fp_consts_inited; i++)
4735 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4736 return 1;
4738 return 0;
4741 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4743 neg_const_double_rtx_ok_for_fpa (rtx x)
4745 REAL_VALUE_TYPE r;
4746 int i;
4748 if (!fp_consts_inited)
4749 init_fp_table ();
4751 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4752 r = REAL_VALUE_NEGATE (r);
4753 if (REAL_VALUE_MINUS_ZERO (r))
4754 return 0;
4756 for (i = 0; i < 8; i++)
4757 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4758 return 1;
4760 return 0;
4763 /* Predicates for `match_operand' and `match_operator'. */
4765 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4767 cirrus_memory_offset (rtx op)
4769 /* Reject eliminable registers. */
4770 if (! (reload_in_progress || reload_completed)
4771 && ( reg_mentioned_p (frame_pointer_rtx, op)
4772 || reg_mentioned_p (arg_pointer_rtx, op)
4773 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4774 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4775 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4776 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4777 return 0;
4779 if (GET_CODE (op) == MEM)
4781 rtx ind;
4783 ind = XEXP (op, 0);
4785 /* Match: (mem (reg)). */
4786 if (GET_CODE (ind) == REG)
4787 return 1;
4789 /* Match:
4790 (mem (plus (reg)
4791 (const))). */
4792 if (GET_CODE (ind) == PLUS
4793 && GET_CODE (XEXP (ind, 0)) == REG
4794 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4795 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4796 return 1;
4799 return 0;
4802 /* Return TRUE if OP is a valid VFP memory address pattern.
4803 WB if true if writeback address modes are allowed. */
4806 arm_coproc_mem_operand (rtx op, bool wb)
4808 rtx ind;
4810 /* Reject eliminable registers. */
4811 if (! (reload_in_progress || reload_completed)
4812 && ( reg_mentioned_p (frame_pointer_rtx, op)
4813 || reg_mentioned_p (arg_pointer_rtx, op)
4814 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4815 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4816 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4817 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4818 return FALSE;
4820 /* Constants are converted into offsets from labels. */
4821 if (GET_CODE (op) != MEM)
4822 return FALSE;
4824 ind = XEXP (op, 0);
4826 if (reload_completed
4827 && (GET_CODE (ind) == LABEL_REF
4828 || (GET_CODE (ind) == CONST
4829 && GET_CODE (XEXP (ind, 0)) == PLUS
4830 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4831 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4832 return TRUE;
4834 /* Match: (mem (reg)). */
4835 if (GET_CODE (ind) == REG)
4836 return arm_address_register_rtx_p (ind, 0);
4838 /* Autoincremment addressing modes. */
4839 if (wb
4840 && (GET_CODE (ind) == PRE_INC
4841 || GET_CODE (ind) == POST_INC
4842 || GET_CODE (ind) == PRE_DEC
4843 || GET_CODE (ind) == POST_DEC))
4844 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4846 if (wb
4847 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4848 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4849 && GET_CODE (XEXP (ind, 1)) == PLUS
4850 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4851 ind = XEXP (ind, 1);
4853 /* Match:
4854 (plus (reg)
4855 (const)). */
4856 if (GET_CODE (ind) == PLUS
4857 && GET_CODE (XEXP (ind, 0)) == REG
4858 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4859 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4860 && INTVAL (XEXP (ind, 1)) > -1024
4861 && INTVAL (XEXP (ind, 1)) < 1024
4862 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4863 return TRUE;
4865 return FALSE;
4868 /* Return true if X is a register that will be eliminated later on. */
4870 arm_eliminable_register (rtx x)
4872 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4873 || REGNO (x) == ARG_POINTER_REGNUM
4874 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4875 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4878 /* Return GENERAL_REGS if a scratch register required to reload x to/from
4879 VFP registers. Otherwise return NO_REGS. */
4881 enum reg_class
4882 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4884 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4885 return NO_REGS;
4887 return GENERAL_REGS;
4891 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4892 Use by the Cirrus Maverick code which has to workaround
4893 a hardware bug triggered by such instructions. */
4894 static bool
4895 arm_memory_load_p (rtx insn)
4897 rtx body, lhs, rhs;;
4899 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4900 return false;
4902 body = PATTERN (insn);
4904 if (GET_CODE (body) != SET)
4905 return false;
4907 lhs = XEXP (body, 0);
4908 rhs = XEXP (body, 1);
4910 lhs = REG_OR_SUBREG_RTX (lhs);
4912 /* If the destination is not a general purpose
4913 register we do not have to worry. */
4914 if (GET_CODE (lhs) != REG
4915 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4916 return false;
4918 /* As well as loads from memory we also have to react
4919 to loads of invalid constants which will be turned
4920 into loads from the minipool. */
4921 return (GET_CODE (rhs) == MEM
4922 || GET_CODE (rhs) == SYMBOL_REF
4923 || note_invalid_constants (insn, -1, false));
4926 /* Return TRUE if INSN is a Cirrus instruction. */
4927 static bool
4928 arm_cirrus_insn_p (rtx insn)
4930 enum attr_cirrus attr;
4932 /* get_attr aborts on USE and CLOBBER. */
4933 if (!insn
4934 || GET_CODE (insn) != INSN
4935 || GET_CODE (PATTERN (insn)) == USE
4936 || GET_CODE (PATTERN (insn)) == CLOBBER)
4937 return 0;
4939 attr = get_attr_cirrus (insn);
4941 return attr != CIRRUS_NOT;
4944 /* Cirrus reorg for invalid instruction combinations. */
4945 static void
4946 cirrus_reorg (rtx first)
4948 enum attr_cirrus attr;
4949 rtx body = PATTERN (first);
4950 rtx t;
4951 int nops;
4953 /* Any branch must be followed by 2 non Cirrus instructions. */
4954 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4956 nops = 0;
4957 t = next_nonnote_insn (first);
4959 if (arm_cirrus_insn_p (t))
4960 ++ nops;
4962 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4963 ++ nops;
4965 while (nops --)
4966 emit_insn_after (gen_nop (), first);
4968 return;
4971 /* (float (blah)) is in parallel with a clobber. */
4972 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4973 body = XVECEXP (body, 0, 0);
4975 if (GET_CODE (body) == SET)
4977 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4979 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4980 be followed by a non Cirrus insn. */
4981 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4983 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4984 emit_insn_after (gen_nop (), first);
4986 return;
4988 else if (arm_memory_load_p (first))
4990 unsigned int arm_regno;
4992 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4993 ldr/cfmv64hr combination where the Rd field is the same
4994 in both instructions must be split with a non Cirrus
4995 insn. Example:
4997 ldr r0, blah
4999 cfmvsr mvf0, r0. */
5001 /* Get Arm register number for ldr insn. */
5002 if (GET_CODE (lhs) == REG)
5003 arm_regno = REGNO (lhs);
5004 else if (GET_CODE (rhs) == REG)
5005 arm_regno = REGNO (rhs);
5006 else
5007 abort ();
5009 /* Next insn. */
5010 first = next_nonnote_insn (first);
5012 if (! arm_cirrus_insn_p (first))
5013 return;
5015 body = PATTERN (first);
5017 /* (float (blah)) is in parallel with a clobber. */
5018 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5019 body = XVECEXP (body, 0, 0);
5021 if (GET_CODE (body) == FLOAT)
5022 body = XEXP (body, 0);
5024 if (get_attr_cirrus (first) == CIRRUS_MOVE
5025 && GET_CODE (XEXP (body, 1)) == REG
5026 && arm_regno == REGNO (XEXP (body, 1)))
5027 emit_insn_after (gen_nop (), first);
5029 return;
5033 /* get_attr aborts on USE and CLOBBER. */
5034 if (!first
5035 || GET_CODE (first) != INSN
5036 || GET_CODE (PATTERN (first)) == USE
5037 || GET_CODE (PATTERN (first)) == CLOBBER)
5038 return;
5040 attr = get_attr_cirrus (first);
5042 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5043 must be followed by a non-coprocessor instruction. */
5044 if (attr == CIRRUS_COMPARE)
5046 nops = 0;
5048 t = next_nonnote_insn (first);
5050 if (arm_cirrus_insn_p (t))
5051 ++ nops;
5053 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5054 ++ nops;
5056 while (nops --)
5057 emit_insn_after (gen_nop (), first);
5059 return;
5063 /* Return TRUE if X references a SYMBOL_REF. */
5065 symbol_mentioned_p (rtx x)
5067 const char * fmt;
5068 int i;
5070 if (GET_CODE (x) == SYMBOL_REF)
5071 return 1;
5073 fmt = GET_RTX_FORMAT (GET_CODE (x));
5075 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5077 if (fmt[i] == 'E')
5079 int j;
5081 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5082 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5083 return 1;
5085 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5086 return 1;
5089 return 0;
5092 /* Return TRUE if X references a LABEL_REF. */
5094 label_mentioned_p (rtx x)
5096 const char * fmt;
5097 int i;
5099 if (GET_CODE (x) == LABEL_REF)
5100 return 1;
5102 fmt = GET_RTX_FORMAT (GET_CODE (x));
5103 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5105 if (fmt[i] == 'E')
5107 int j;
5109 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5110 if (label_mentioned_p (XVECEXP (x, i, j)))
5111 return 1;
5113 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5114 return 1;
5117 return 0;
5120 enum rtx_code
5121 minmax_code (rtx x)
5123 enum rtx_code code = GET_CODE (x);
5125 if (code == SMAX)
5126 return GE;
5127 else if (code == SMIN)
5128 return LE;
5129 else if (code == UMIN)
5130 return LEU;
5131 else if (code == UMAX)
5132 return GEU;
5134 abort ();
5137 /* Return 1 if memory locations are adjacent. */
5139 adjacent_mem_locations (rtx a, rtx b)
5141 /* We don't guarantee to preserve the order of these memory refs. */
5142 if (volatile_refs_p (a) || volatile_refs_p (b))
5143 return 0;
5145 if ((GET_CODE (XEXP (a, 0)) == REG
5146 || (GET_CODE (XEXP (a, 0)) == PLUS
5147 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5148 && (GET_CODE (XEXP (b, 0)) == REG
5149 || (GET_CODE (XEXP (b, 0)) == PLUS
5150 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5152 HOST_WIDE_INT val0 = 0, val1 = 0;
5153 rtx reg0, reg1;
5154 int val_diff;
5156 if (GET_CODE (XEXP (a, 0)) == PLUS)
5158 reg0 = XEXP (XEXP (a, 0), 0);
5159 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5161 else
5162 reg0 = XEXP (a, 0);
5164 if (GET_CODE (XEXP (b, 0)) == PLUS)
5166 reg1 = XEXP (XEXP (b, 0), 0);
5167 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5169 else
5170 reg1 = XEXP (b, 0);
5172 /* Don't accept any offset that will require multiple
5173 instructions to handle, since this would cause the
5174 arith_adjacentmem pattern to output an overlong sequence. */
5175 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
5176 return 0;
5178 /* Don't allow an eliminable register: register elimination can make
5179 the offset too large. */
5180 if (arm_eliminable_register (reg0))
5181 return 0;
5183 val_diff = val1 - val0;
5185 if (arm_ld_sched)
5187 /* If the target has load delay slots, then there's no benefit
5188 to using an ldm instruction unless the offset is zero and
5189 we are optimizing for size. */
5190 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5191 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5192 && (val_diff == 4 || val_diff == -4));
5195 return ((REGNO (reg0) == REGNO (reg1))
5196 && (val_diff == 4 || val_diff == -4));
5199 return 0;
5203 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5204 HOST_WIDE_INT *load_offset)
5206 int unsorted_regs[4];
5207 HOST_WIDE_INT unsorted_offsets[4];
5208 int order[4];
5209 int base_reg = -1;
5210 int i;
5212 /* Can only handle 2, 3, or 4 insns at present,
5213 though could be easily extended if required. */
5214 if (nops < 2 || nops > 4)
5215 abort ();
5217 /* Loop over the operands and check that the memory references are
5218 suitable (i.e. immediate offsets from the same base register). At
5219 the same time, extract the target register, and the memory
5220 offsets. */
5221 for (i = 0; i < nops; i++)
5223 rtx reg;
5224 rtx offset;
5226 /* Convert a subreg of a mem into the mem itself. */
5227 if (GET_CODE (operands[nops + i]) == SUBREG)
5228 operands[nops + i] = alter_subreg (operands + (nops + i));
5230 if (GET_CODE (operands[nops + i]) != MEM)
5231 abort ();
5233 /* Don't reorder volatile memory references; it doesn't seem worth
5234 looking for the case where the order is ok anyway. */
5235 if (MEM_VOLATILE_P (operands[nops + i]))
5236 return 0;
5238 offset = const0_rtx;
5240 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5241 || (GET_CODE (reg) == SUBREG
5242 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5243 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5244 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5245 == REG)
5246 || (GET_CODE (reg) == SUBREG
5247 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5248 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5249 == CONST_INT)))
5251 if (i == 0)
5253 base_reg = REGNO (reg);
5254 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5255 ? REGNO (operands[i])
5256 : REGNO (SUBREG_REG (operands[i])));
5257 order[0] = 0;
5259 else
5261 if (base_reg != (int) REGNO (reg))
5262 /* Not addressed from the same base register. */
5263 return 0;
5265 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5266 ? REGNO (operands[i])
5267 : REGNO (SUBREG_REG (operands[i])));
5268 if (unsorted_regs[i] < unsorted_regs[order[0]])
5269 order[0] = i;
5272 /* If it isn't an integer register, or if it overwrites the
5273 base register but isn't the last insn in the list, then
5274 we can't do this. */
5275 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5276 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5277 return 0;
5279 unsorted_offsets[i] = INTVAL (offset);
5281 else
5282 /* Not a suitable memory address. */
5283 return 0;
5286 /* All the useful information has now been extracted from the
5287 operands into unsorted_regs and unsorted_offsets; additionally,
5288 order[0] has been set to the lowest numbered register in the
5289 list. Sort the registers into order, and check that the memory
5290 offsets are ascending and adjacent. */
5292 for (i = 1; i < nops; i++)
5294 int j;
5296 order[i] = order[i - 1];
5297 for (j = 0; j < nops; j++)
5298 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5299 && (order[i] == order[i - 1]
5300 || unsorted_regs[j] < unsorted_regs[order[i]]))
5301 order[i] = j;
5303 /* Have we found a suitable register? if not, one must be used more
5304 than once. */
5305 if (order[i] == order[i - 1])
5306 return 0;
5308 /* Is the memory address adjacent and ascending? */
5309 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5310 return 0;
5313 if (base)
5315 *base = base_reg;
5317 for (i = 0; i < nops; i++)
5318 regs[i] = unsorted_regs[order[i]];
5320 *load_offset = unsorted_offsets[order[0]];
5323 if (unsorted_offsets[order[0]] == 0)
5324 return 1; /* ldmia */
5326 if (unsorted_offsets[order[0]] == 4)
5327 return 2; /* ldmib */
5329 if (unsorted_offsets[order[nops - 1]] == 0)
5330 return 3; /* ldmda */
5332 if (unsorted_offsets[order[nops - 1]] == -4)
5333 return 4; /* ldmdb */
5335 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5336 if the offset isn't small enough. The reason 2 ldrs are faster
5337 is because these ARMs are able to do more than one cache access
5338 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5339 whilst the ARM8 has a double bandwidth cache. This means that
5340 these cores can do both an instruction fetch and a data fetch in
5341 a single cycle, so the trick of calculating the address into a
5342 scratch register (one of the result regs) and then doing a load
5343 multiple actually becomes slower (and no smaller in code size).
5344 That is the transformation
5346 ldr rd1, [rbase + offset]
5347 ldr rd2, [rbase + offset + 4]
5351 add rd1, rbase, offset
5352 ldmia rd1, {rd1, rd2}
5354 produces worse code -- '3 cycles + any stalls on rd2' instead of
5355 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5356 access per cycle, the first sequence could never complete in less
5357 than 6 cycles, whereas the ldm sequence would only take 5 and
5358 would make better use of sequential accesses if not hitting the
5359 cache.
5361 We cheat here and test 'arm_ld_sched' which we currently know to
5362 only be true for the ARM8, ARM9 and StrongARM. If this ever
5363 changes, then the test below needs to be reworked. */
5364 if (nops == 2 && arm_ld_sched)
5365 return 0;
5367 /* Can't do it without setting up the offset, only do this if it takes
5368 no more than one insn. */
5369 return (const_ok_for_arm (unsorted_offsets[order[0]])
5370 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5373 const char *
5374 emit_ldm_seq (rtx *operands, int nops)
5376 int regs[4];
5377 int base_reg;
5378 HOST_WIDE_INT offset;
5379 char buf[100];
5380 int i;
5382 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5384 case 1:
5385 strcpy (buf, "ldm%?ia\t");
5386 break;
5388 case 2:
5389 strcpy (buf, "ldm%?ib\t");
5390 break;
5392 case 3:
5393 strcpy (buf, "ldm%?da\t");
5394 break;
5396 case 4:
5397 strcpy (buf, "ldm%?db\t");
5398 break;
5400 case 5:
5401 if (offset >= 0)
5402 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5403 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5404 (long) offset);
5405 else
5406 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5407 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5408 (long) -offset);
5409 output_asm_insn (buf, operands);
5410 base_reg = regs[0];
5411 strcpy (buf, "ldm%?ia\t");
5412 break;
5414 default:
5415 abort ();
5418 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5419 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5421 for (i = 1; i < nops; i++)
5422 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5423 reg_names[regs[i]]);
5425 strcat (buf, "}\t%@ phole ldm");
5427 output_asm_insn (buf, operands);
5428 return "";
5432 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5433 HOST_WIDE_INT * load_offset)
5435 int unsorted_regs[4];
5436 HOST_WIDE_INT unsorted_offsets[4];
5437 int order[4];
5438 int base_reg = -1;
5439 int i;
5441 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5442 extended if required. */
5443 if (nops < 2 || nops > 4)
5444 abort ();
5446 /* Loop over the operands and check that the memory references are
5447 suitable (i.e. immediate offsets from the same base register). At
5448 the same time, extract the target register, and the memory
5449 offsets. */
5450 for (i = 0; i < nops; i++)
5452 rtx reg;
5453 rtx offset;
5455 /* Convert a subreg of a mem into the mem itself. */
5456 if (GET_CODE (operands[nops + i]) == SUBREG)
5457 operands[nops + i] = alter_subreg (operands + (nops + i));
5459 if (GET_CODE (operands[nops + i]) != MEM)
5460 abort ();
5462 /* Don't reorder volatile memory references; it doesn't seem worth
5463 looking for the case where the order is ok anyway. */
5464 if (MEM_VOLATILE_P (operands[nops + i]))
5465 return 0;
5467 offset = const0_rtx;
5469 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5470 || (GET_CODE (reg) == SUBREG
5471 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5472 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5473 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5474 == REG)
5475 || (GET_CODE (reg) == SUBREG
5476 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5477 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5478 == CONST_INT)))
5480 if (i == 0)
5482 base_reg = REGNO (reg);
5483 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5484 ? REGNO (operands[i])
5485 : REGNO (SUBREG_REG (operands[i])));
5486 order[0] = 0;
5488 else
5490 if (base_reg != (int) REGNO (reg))
5491 /* Not addressed from the same base register. */
5492 return 0;
5494 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5495 ? REGNO (operands[i])
5496 : REGNO (SUBREG_REG (operands[i])));
5497 if (unsorted_regs[i] < unsorted_regs[order[0]])
5498 order[0] = i;
5501 /* If it isn't an integer register, then we can't do this. */
5502 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5503 return 0;
5505 unsorted_offsets[i] = INTVAL (offset);
5507 else
5508 /* Not a suitable memory address. */
5509 return 0;
5512 /* All the useful information has now been extracted from the
5513 operands into unsorted_regs and unsorted_offsets; additionally,
5514 order[0] has been set to the lowest numbered register in the
5515 list. Sort the registers into order, and check that the memory
5516 offsets are ascending and adjacent. */
5518 for (i = 1; i < nops; i++)
5520 int j;
5522 order[i] = order[i - 1];
5523 for (j = 0; j < nops; j++)
5524 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5525 && (order[i] == order[i - 1]
5526 || unsorted_regs[j] < unsorted_regs[order[i]]))
5527 order[i] = j;
5529 /* Have we found a suitable register? if not, one must be used more
5530 than once. */
5531 if (order[i] == order[i - 1])
5532 return 0;
5534 /* Is the memory address adjacent and ascending? */
5535 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5536 return 0;
5539 if (base)
5541 *base = base_reg;
5543 for (i = 0; i < nops; i++)
5544 regs[i] = unsorted_regs[order[i]];
5546 *load_offset = unsorted_offsets[order[0]];
5549 if (unsorted_offsets[order[0]] == 0)
5550 return 1; /* stmia */
5552 if (unsorted_offsets[order[0]] == 4)
5553 return 2; /* stmib */
5555 if (unsorted_offsets[order[nops - 1]] == 0)
5556 return 3; /* stmda */
5558 if (unsorted_offsets[order[nops - 1]] == -4)
5559 return 4; /* stmdb */
5561 return 0;
5564 const char *
5565 emit_stm_seq (rtx *operands, int nops)
5567 int regs[4];
5568 int base_reg;
5569 HOST_WIDE_INT offset;
5570 char buf[100];
5571 int i;
5573 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5575 case 1:
5576 strcpy (buf, "stm%?ia\t");
5577 break;
5579 case 2:
5580 strcpy (buf, "stm%?ib\t");
5581 break;
5583 case 3:
5584 strcpy (buf, "stm%?da\t");
5585 break;
5587 case 4:
5588 strcpy (buf, "stm%?db\t");
5589 break;
5591 default:
5592 abort ();
5595 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5596 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5598 for (i = 1; i < nops; i++)
5599 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5600 reg_names[regs[i]]);
5602 strcat (buf, "}\t%@ phole stm");
5604 output_asm_insn (buf, operands);
5605 return "";
5609 /* Routines for use in generating RTL. */
5612 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5613 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5615 HOST_WIDE_INT offset = *offsetp;
5616 int i = 0, j;
5617 rtx result;
5618 int sign = up ? 1 : -1;
5619 rtx mem, addr;
5621 /* XScale has load-store double instructions, but they have stricter
5622 alignment requirements than load-store multiple, so we cannot
5623 use them.
5625 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5626 the pipeline until completion.
5628 NREGS CYCLES
5634 An ldr instruction takes 1-3 cycles, but does not block the
5635 pipeline.
5637 NREGS CYCLES
5638 1 1-3
5639 2 2-6
5640 3 3-9
5641 4 4-12
5643 Best case ldr will always win. However, the more ldr instructions
5644 we issue, the less likely we are to be able to schedule them well.
5645 Using ldr instructions also increases code size.
5647 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5648 for counts of 3 or 4 regs. */
5649 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5651 rtx seq;
5653 start_sequence ();
5655 for (i = 0; i < count; i++)
5657 addr = plus_constant (from, i * 4 * sign);
5658 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5659 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5660 offset += 4 * sign;
5663 if (write_back)
5665 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5666 *offsetp = offset;
5669 seq = get_insns ();
5670 end_sequence ();
5672 return seq;
5675 result = gen_rtx_PARALLEL (VOIDmode,
5676 rtvec_alloc (count + (write_back ? 1 : 0)));
5677 if (write_back)
5679 XVECEXP (result, 0, 0)
5680 = gen_rtx_SET (GET_MODE (from), from,
5681 plus_constant (from, count * 4 * sign));
5682 i = 1;
5683 count++;
5686 for (j = 0; i < count; i++, j++)
5688 addr = plus_constant (from, j * 4 * sign);
5689 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5690 XVECEXP (result, 0, i)
5691 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5692 offset += 4 * sign;
5695 if (write_back)
5696 *offsetp = offset;
5698 return result;
5702 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5703 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5705 HOST_WIDE_INT offset = *offsetp;
5706 int i = 0, j;
5707 rtx result;
5708 int sign = up ? 1 : -1;
5709 rtx mem, addr;
5711 /* See arm_gen_load_multiple for discussion of
5712 the pros/cons of ldm/stm usage for XScale. */
5713 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5715 rtx seq;
5717 start_sequence ();
5719 for (i = 0; i < count; i++)
5721 addr = plus_constant (to, i * 4 * sign);
5722 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5723 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5724 offset += 4 * sign;
5727 if (write_back)
5729 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5730 *offsetp = offset;
5733 seq = get_insns ();
5734 end_sequence ();
5736 return seq;
5739 result = gen_rtx_PARALLEL (VOIDmode,
5740 rtvec_alloc (count + (write_back ? 1 : 0)));
5741 if (write_back)
5743 XVECEXP (result, 0, 0)
5744 = gen_rtx_SET (GET_MODE (to), to,
5745 plus_constant (to, count * 4 * sign));
5746 i = 1;
5747 count++;
5750 for (j = 0; i < count; i++, j++)
5752 addr = plus_constant (to, j * 4 * sign);
5753 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5754 XVECEXP (result, 0, i)
5755 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5756 offset += 4 * sign;
5759 if (write_back)
5760 *offsetp = offset;
5762 return result;
5766 arm_gen_movmemqi (rtx *operands)
5768 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5769 HOST_WIDE_INT srcoffset, dstoffset;
5770 int i;
5771 rtx src, dst, srcbase, dstbase;
5772 rtx part_bytes_reg = NULL;
5773 rtx mem;
5775 if (GET_CODE (operands[2]) != CONST_INT
5776 || GET_CODE (operands[3]) != CONST_INT
5777 || INTVAL (operands[2]) > 64
5778 || INTVAL (operands[3]) & 3)
5779 return 0;
5781 dstbase = operands[0];
5782 srcbase = operands[1];
5784 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5785 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5787 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5788 out_words_to_go = INTVAL (operands[2]) / 4;
5789 last_bytes = INTVAL (operands[2]) & 3;
5790 dstoffset = srcoffset = 0;
5792 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5793 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5795 for (i = 0; in_words_to_go >= 2; i+=4)
5797 if (in_words_to_go > 4)
5798 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5799 srcbase, &srcoffset));
5800 else
5801 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5802 FALSE, srcbase, &srcoffset));
5804 if (out_words_to_go)
5806 if (out_words_to_go > 4)
5807 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5808 dstbase, &dstoffset));
5809 else if (out_words_to_go != 1)
5810 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5811 dst, TRUE,
5812 (last_bytes == 0
5813 ? FALSE : TRUE),
5814 dstbase, &dstoffset));
5815 else
5817 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5818 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5819 if (last_bytes != 0)
5821 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5822 dstoffset += 4;
5827 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5828 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5831 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5832 if (out_words_to_go)
5834 rtx sreg;
5836 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5837 sreg = copy_to_reg (mem);
5839 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5840 emit_move_insn (mem, sreg);
5841 in_words_to_go--;
5843 if (in_words_to_go) /* Sanity check */
5844 abort ();
5847 if (in_words_to_go)
5849 if (in_words_to_go < 0)
5850 abort ();
5852 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5853 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5856 if (last_bytes && part_bytes_reg == NULL)
5857 abort ();
5859 if (BYTES_BIG_ENDIAN && last_bytes)
5861 rtx tmp = gen_reg_rtx (SImode);
5863 /* The bytes we want are in the top end of the word. */
5864 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5865 GEN_INT (8 * (4 - last_bytes))));
5866 part_bytes_reg = tmp;
5868 while (last_bytes)
5870 mem = adjust_automodify_address (dstbase, QImode,
5871 plus_constant (dst, last_bytes - 1),
5872 dstoffset + last_bytes - 1);
5873 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5875 if (--last_bytes)
5877 tmp = gen_reg_rtx (SImode);
5878 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5879 part_bytes_reg = tmp;
5884 else
5886 if (last_bytes > 1)
5888 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5889 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5890 last_bytes -= 2;
5891 if (last_bytes)
5893 rtx tmp = gen_reg_rtx (SImode);
5894 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5895 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5896 part_bytes_reg = tmp;
5897 dstoffset += 2;
5901 if (last_bytes)
5903 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5904 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5908 return 1;
5911 /* Generate a memory reference for a half word, such that it will be loaded
5912 into the top 16 bits of the word. We can assume that the address is
5913 known to be alignable and of the form reg, or plus (reg, const). */
5916 arm_gen_rotated_half_load (rtx memref)
5918 HOST_WIDE_INT offset = 0;
5919 rtx base = XEXP (memref, 0);
5921 if (GET_CODE (base) == PLUS)
5923 offset = INTVAL (XEXP (base, 1));
5924 base = XEXP (base, 0);
5927 /* If we aren't allowed to generate unaligned addresses, then fail. */
5928 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5929 return NULL;
5931 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5933 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5934 return base;
5936 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5939 /* Select a dominance comparison mode if possible for a test of the general
5940 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5941 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5942 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5943 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5944 In all cases OP will be either EQ or NE, but we don't need to know which
5945 here. If we are unable to support a dominance comparison we return
5946 CC mode. This will then fail to match for the RTL expressions that
5947 generate this call. */
5948 enum machine_mode
5949 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5951 enum rtx_code cond1, cond2;
5952 int swapped = 0;
5954 /* Currently we will probably get the wrong result if the individual
5955 comparisons are not simple. This also ensures that it is safe to
5956 reverse a comparison if necessary. */
5957 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5958 != CCmode)
5959 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5960 != CCmode))
5961 return CCmode;
5963 /* The if_then_else variant of this tests the second condition if the
5964 first passes, but is true if the first fails. Reverse the first
5965 condition to get a true "inclusive-or" expression. */
5966 if (cond_or == DOM_CC_NX_OR_Y)
5967 cond1 = reverse_condition (cond1);
5969 /* If the comparisons are not equal, and one doesn't dominate the other,
5970 then we can't do this. */
5971 if (cond1 != cond2
5972 && !comparison_dominates_p (cond1, cond2)
5973 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5974 return CCmode;
5976 if (swapped)
5978 enum rtx_code temp = cond1;
5979 cond1 = cond2;
5980 cond2 = temp;
5983 switch (cond1)
5985 case EQ:
5986 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5987 return CC_DEQmode;
5989 switch (cond2)
5991 case LE: return CC_DLEmode;
5992 case LEU: return CC_DLEUmode;
5993 case GE: return CC_DGEmode;
5994 case GEU: return CC_DGEUmode;
5995 default: break;
5998 break;
6000 case LT:
6001 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6002 return CC_DLTmode;
6003 if (cond2 == LE)
6004 return CC_DLEmode;
6005 if (cond2 == NE)
6006 return CC_DNEmode;
6007 break;
6009 case GT:
6010 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6011 return CC_DGTmode;
6012 if (cond2 == GE)
6013 return CC_DGEmode;
6014 if (cond2 == NE)
6015 return CC_DNEmode;
6016 break;
6018 case LTU:
6019 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6020 return CC_DLTUmode;
6021 if (cond2 == LEU)
6022 return CC_DLEUmode;
6023 if (cond2 == NE)
6024 return CC_DNEmode;
6025 break;
6027 case GTU:
6028 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6029 return CC_DGTUmode;
6030 if (cond2 == GEU)
6031 return CC_DGEUmode;
6032 if (cond2 == NE)
6033 return CC_DNEmode;
6034 break;
6036 /* The remaining cases only occur when both comparisons are the
6037 same. */
6038 case NE:
6039 return CC_DNEmode;
6041 case LE:
6042 return CC_DLEmode;
6044 case GE:
6045 return CC_DGEmode;
6047 case LEU:
6048 return CC_DLEUmode;
6050 case GEU:
6051 return CC_DGEUmode;
6053 default:
6054 break;
6057 abort ();
6060 enum machine_mode
6061 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6063 /* All floating point compares return CCFP if it is an equality
6064 comparison, and CCFPE otherwise. */
6065 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6067 switch (op)
6069 case EQ:
6070 case NE:
6071 case UNORDERED:
6072 case ORDERED:
6073 case UNLT:
6074 case UNLE:
6075 case UNGT:
6076 case UNGE:
6077 case UNEQ:
6078 case LTGT:
6079 return CCFPmode;
6081 case LT:
6082 case LE:
6083 case GT:
6084 case GE:
6085 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6086 return CCFPmode;
6087 return CCFPEmode;
6089 default:
6090 abort ();
6094 /* A compare with a shifted operand. Because of canonicalization, the
6095 comparison will have to be swapped when we emit the assembler. */
6096 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6097 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6098 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6099 || GET_CODE (x) == ROTATERT))
6100 return CC_SWPmode;
6102 /* This operation is performed swapped, but since we only rely on the Z
6103 flag we don't need an additional mode. */
6104 if (GET_MODE (y) == SImode && REG_P (y)
6105 && GET_CODE (x) == NEG
6106 && (op == EQ || op == NE))
6107 return CC_Zmode;
6109 /* This is a special case that is used by combine to allow a
6110 comparison of a shifted byte load to be split into a zero-extend
6111 followed by a comparison of the shifted integer (only valid for
6112 equalities and unsigned inequalities). */
6113 if (GET_MODE (x) == SImode
6114 && GET_CODE (x) == ASHIFT
6115 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6116 && GET_CODE (XEXP (x, 0)) == SUBREG
6117 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6118 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6119 && (op == EQ || op == NE
6120 || op == GEU || op == GTU || op == LTU || op == LEU)
6121 && GET_CODE (y) == CONST_INT)
6122 return CC_Zmode;
6124 /* A construct for a conditional compare, if the false arm contains
6125 0, then both conditions must be true, otherwise either condition
6126 must be true. Not all conditions are possible, so CCmode is
6127 returned if it can't be done. */
6128 if (GET_CODE (x) == IF_THEN_ELSE
6129 && (XEXP (x, 2) == const0_rtx
6130 || XEXP (x, 2) == const1_rtx)
6131 && COMPARISON_P (XEXP (x, 0))
6132 && COMPARISON_P (XEXP (x, 1)))
6133 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6134 INTVAL (XEXP (x, 2)));
6136 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6137 if (GET_CODE (x) == AND
6138 && COMPARISON_P (XEXP (x, 0))
6139 && COMPARISON_P (XEXP (x, 1)))
6140 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6141 DOM_CC_X_AND_Y);
6143 if (GET_CODE (x) == IOR
6144 && COMPARISON_P (XEXP (x, 0))
6145 && COMPARISON_P (XEXP (x, 1)))
6146 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6147 DOM_CC_X_OR_Y);
6149 /* An operation (on Thumb) where we want to test for a single bit.
6150 This is done by shifting that bit up into the top bit of a
6151 scratch register; we can then branch on the sign bit. */
6152 if (TARGET_THUMB
6153 && GET_MODE (x) == SImode
6154 && (op == EQ || op == NE)
6155 && (GET_CODE (x) == ZERO_EXTRACT))
6156 return CC_Nmode;
6158 /* An operation that sets the condition codes as a side-effect, the
6159 V flag is not set correctly, so we can only use comparisons where
6160 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6161 instead.) */
6162 if (GET_MODE (x) == SImode
6163 && y == const0_rtx
6164 && (op == EQ || op == NE || op == LT || op == GE)
6165 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6166 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6167 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6168 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6169 || GET_CODE (x) == LSHIFTRT
6170 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6171 || GET_CODE (x) == ROTATERT
6172 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6173 return CC_NOOVmode;
6175 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6176 return CC_Zmode;
6178 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6179 && GET_CODE (x) == PLUS
6180 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6181 return CC_Cmode;
6183 return CCmode;
6186 /* X and Y are two things to compare using CODE. Emit the compare insn and
6187 return the rtx for register 0 in the proper mode. FP means this is a
6188 floating point compare: I don't think that it is needed on the arm. */
6190 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6192 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6193 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6195 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6196 gen_rtx_COMPARE (mode, x, y)));
6198 return cc_reg;
6201 /* Generate a sequence of insns that will generate the correct return
6202 address mask depending on the physical architecture that the program
6203 is running on. */
6205 arm_gen_return_addr_mask (void)
6207 rtx reg = gen_reg_rtx (Pmode);
6209 emit_insn (gen_return_addr_mask (reg));
6210 return reg;
6213 void
6214 arm_reload_in_hi (rtx *operands)
6216 rtx ref = operands[1];
6217 rtx base, scratch;
6218 HOST_WIDE_INT offset = 0;
6220 if (GET_CODE (ref) == SUBREG)
6222 offset = SUBREG_BYTE (ref);
6223 ref = SUBREG_REG (ref);
6226 if (GET_CODE (ref) == REG)
6228 /* We have a pseudo which has been spilt onto the stack; there
6229 are two cases here: the first where there is a simple
6230 stack-slot replacement and a second where the stack-slot is
6231 out of range, or is used as a subreg. */
6232 if (reg_equiv_mem[REGNO (ref)])
6234 ref = reg_equiv_mem[REGNO (ref)];
6235 base = find_replacement (&XEXP (ref, 0));
6237 else
6238 /* The slot is out of range, or was dressed up in a SUBREG. */
6239 base = reg_equiv_address[REGNO (ref)];
6241 else
6242 base = find_replacement (&XEXP (ref, 0));
6244 /* Handle the case where the address is too complex to be offset by 1. */
6245 if (GET_CODE (base) == MINUS
6246 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6248 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6250 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6251 base = base_plus;
6253 else if (GET_CODE (base) == PLUS)
6255 /* The addend must be CONST_INT, or we would have dealt with it above. */
6256 HOST_WIDE_INT hi, lo;
6258 offset += INTVAL (XEXP (base, 1));
6259 base = XEXP (base, 0);
6261 /* Rework the address into a legal sequence of insns. */
6262 /* Valid range for lo is -4095 -> 4095 */
6263 lo = (offset >= 0
6264 ? (offset & 0xfff)
6265 : -((-offset) & 0xfff));
6267 /* Corner case, if lo is the max offset then we would be out of range
6268 once we have added the additional 1 below, so bump the msb into the
6269 pre-loading insn(s). */
6270 if (lo == 4095)
6271 lo &= 0x7ff;
6273 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6274 ^ (HOST_WIDE_INT) 0x80000000)
6275 - (HOST_WIDE_INT) 0x80000000);
6277 if (hi + lo != offset)
6278 abort ();
6280 if (hi != 0)
6282 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6284 /* Get the base address; addsi3 knows how to handle constants
6285 that require more than one insn. */
6286 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6287 base = base_plus;
6288 offset = lo;
6292 /* Operands[2] may overlap operands[0] (though it won't overlap
6293 operands[1]), that's why we asked for a DImode reg -- so we can
6294 use the bit that does not overlap. */
6295 if (REGNO (operands[2]) == REGNO (operands[0]))
6296 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6297 else
6298 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6300 emit_insn (gen_zero_extendqisi2 (scratch,
6301 gen_rtx_MEM (QImode,
6302 plus_constant (base,
6303 offset))));
6304 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6305 gen_rtx_MEM (QImode,
6306 plus_constant (base,
6307 offset + 1))));
6308 if (!BYTES_BIG_ENDIAN)
6309 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6310 gen_rtx_IOR (SImode,
6311 gen_rtx_ASHIFT
6312 (SImode,
6313 gen_rtx_SUBREG (SImode, operands[0], 0),
6314 GEN_INT (8)),
6315 scratch)));
6316 else
6317 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6318 gen_rtx_IOR (SImode,
6319 gen_rtx_ASHIFT (SImode, scratch,
6320 GEN_INT (8)),
6321 gen_rtx_SUBREG (SImode, operands[0],
6322 0))));
6325 /* Handle storing a half-word to memory during reload by synthesizing as two
6326 byte stores. Take care not to clobber the input values until after we
6327 have moved them somewhere safe. This code assumes that if the DImode
6328 scratch in operands[2] overlaps either the input value or output address
6329 in some way, then that value must die in this insn (we absolutely need
6330 two scratch registers for some corner cases). */
6331 void
6332 arm_reload_out_hi (rtx *operands)
6334 rtx ref = operands[0];
6335 rtx outval = operands[1];
6336 rtx base, scratch;
6337 HOST_WIDE_INT offset = 0;
6339 if (GET_CODE (ref) == SUBREG)
6341 offset = SUBREG_BYTE (ref);
6342 ref = SUBREG_REG (ref);
6345 if (GET_CODE (ref) == REG)
6347 /* We have a pseudo which has been spilt onto the stack; there
6348 are two cases here: the first where there is a simple
6349 stack-slot replacement and a second where the stack-slot is
6350 out of range, or is used as a subreg. */
6351 if (reg_equiv_mem[REGNO (ref)])
6353 ref = reg_equiv_mem[REGNO (ref)];
6354 base = find_replacement (&XEXP (ref, 0));
6356 else
6357 /* The slot is out of range, or was dressed up in a SUBREG. */
6358 base = reg_equiv_address[REGNO (ref)];
6360 else
6361 base = find_replacement (&XEXP (ref, 0));
6363 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6365 /* Handle the case where the address is too complex to be offset by 1. */
6366 if (GET_CODE (base) == MINUS
6367 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6369 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6371 /* Be careful not to destroy OUTVAL. */
6372 if (reg_overlap_mentioned_p (base_plus, outval))
6374 /* Updating base_plus might destroy outval, see if we can
6375 swap the scratch and base_plus. */
6376 if (!reg_overlap_mentioned_p (scratch, outval))
6378 rtx tmp = scratch;
6379 scratch = base_plus;
6380 base_plus = tmp;
6382 else
6384 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6386 /* Be conservative and copy OUTVAL into the scratch now,
6387 this should only be necessary if outval is a subreg
6388 of something larger than a word. */
6389 /* XXX Might this clobber base? I can't see how it can,
6390 since scratch is known to overlap with OUTVAL, and
6391 must be wider than a word. */
6392 emit_insn (gen_movhi (scratch_hi, outval));
6393 outval = scratch_hi;
6397 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6398 base = base_plus;
6400 else if (GET_CODE (base) == PLUS)
6402 /* The addend must be CONST_INT, or we would have dealt with it above. */
6403 HOST_WIDE_INT hi, lo;
6405 offset += INTVAL (XEXP (base, 1));
6406 base = XEXP (base, 0);
6408 /* Rework the address into a legal sequence of insns. */
6409 /* Valid range for lo is -4095 -> 4095 */
6410 lo = (offset >= 0
6411 ? (offset & 0xfff)
6412 : -((-offset) & 0xfff));
6414 /* Corner case, if lo is the max offset then we would be out of range
6415 once we have added the additional 1 below, so bump the msb into the
6416 pre-loading insn(s). */
6417 if (lo == 4095)
6418 lo &= 0x7ff;
6420 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6421 ^ (HOST_WIDE_INT) 0x80000000)
6422 - (HOST_WIDE_INT) 0x80000000);
6424 if (hi + lo != offset)
6425 abort ();
6427 if (hi != 0)
6429 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6431 /* Be careful not to destroy OUTVAL. */
6432 if (reg_overlap_mentioned_p (base_plus, outval))
6434 /* Updating base_plus might destroy outval, see if we
6435 can swap the scratch and base_plus. */
6436 if (!reg_overlap_mentioned_p (scratch, outval))
6438 rtx tmp = scratch;
6439 scratch = base_plus;
6440 base_plus = tmp;
6442 else
6444 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6446 /* Be conservative and copy outval into scratch now,
6447 this should only be necessary if outval is a
6448 subreg of something larger than a word. */
6449 /* XXX Might this clobber base? I can't see how it
6450 can, since scratch is known to overlap with
6451 outval. */
6452 emit_insn (gen_movhi (scratch_hi, outval));
6453 outval = scratch_hi;
6457 /* Get the base address; addsi3 knows how to handle constants
6458 that require more than one insn. */
6459 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6460 base = base_plus;
6461 offset = lo;
6465 if (BYTES_BIG_ENDIAN)
6467 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6468 plus_constant (base, offset + 1)),
6469 gen_lowpart (QImode, outval)));
6470 emit_insn (gen_lshrsi3 (scratch,
6471 gen_rtx_SUBREG (SImode, outval, 0),
6472 GEN_INT (8)));
6473 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6474 gen_lowpart (QImode, scratch)));
6476 else
6478 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6479 gen_lowpart (QImode, outval)));
6480 emit_insn (gen_lshrsi3 (scratch,
6481 gen_rtx_SUBREG (SImode, outval, 0),
6482 GEN_INT (8)));
6483 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6484 plus_constant (base, offset + 1)),
6485 gen_lowpart (QImode, scratch)));
6489 /* Print a symbolic form of X to the debug file, F. */
6490 static void
6491 arm_print_value (FILE *f, rtx x)
6493 switch (GET_CODE (x))
6495 case CONST_INT:
6496 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6497 return;
6499 case CONST_DOUBLE:
6500 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6501 return;
6503 case CONST_VECTOR:
6505 int i;
6507 fprintf (f, "<");
6508 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6510 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6511 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6512 fputc (',', f);
6514 fprintf (f, ">");
6516 return;
6518 case CONST_STRING:
6519 fprintf (f, "\"%s\"", XSTR (x, 0));
6520 return;
6522 case SYMBOL_REF:
6523 fprintf (f, "`%s'", XSTR (x, 0));
6524 return;
6526 case LABEL_REF:
6527 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6528 return;
6530 case CONST:
6531 arm_print_value (f, XEXP (x, 0));
6532 return;
6534 case PLUS:
6535 arm_print_value (f, XEXP (x, 0));
6536 fprintf (f, "+");
6537 arm_print_value (f, XEXP (x, 1));
6538 return;
6540 case PC:
6541 fprintf (f, "pc");
6542 return;
6544 default:
6545 fprintf (f, "????");
6546 return;
6550 /* Routines for manipulation of the constant pool. */
6552 /* Arm instructions cannot load a large constant directly into a
6553 register; they have to come from a pc relative load. The constant
6554 must therefore be placed in the addressable range of the pc
6555 relative load. Depending on the precise pc relative load
6556 instruction the range is somewhere between 256 bytes and 4k. This
6557 means that we often have to dump a constant inside a function, and
6558 generate code to branch around it.
6560 It is important to minimize this, since the branches will slow
6561 things down and make the code larger.
6563 Normally we can hide the table after an existing unconditional
6564 branch so that there is no interruption of the flow, but in the
6565 worst case the code looks like this:
6567 ldr rn, L1
6569 b L2
6570 align
6571 L1: .long value
6575 ldr rn, L3
6577 b L4
6578 align
6579 L3: .long value
6583 We fix this by performing a scan after scheduling, which notices
6584 which instructions need to have their operands fetched from the
6585 constant table and builds the table.
6587 The algorithm starts by building a table of all the constants that
6588 need fixing up and all the natural barriers in the function (places
6589 where a constant table can be dropped without breaking the flow).
6590 For each fixup we note how far the pc-relative replacement will be
6591 able to reach and the offset of the instruction into the function.
6593 Having built the table we then group the fixes together to form
6594 tables that are as large as possible (subject to addressing
6595 constraints) and emit each table of constants after the last
6596 barrier that is within range of all the instructions in the group.
6597 If a group does not contain a barrier, then we forcibly create one
6598 by inserting a jump instruction into the flow. Once the table has
6599 been inserted, the insns are then modified to reference the
6600 relevant entry in the pool.
6602 Possible enhancements to the algorithm (not implemented) are:
6604 1) For some processors and object formats, there may be benefit in
6605 aligning the pools to the start of cache lines; this alignment
6606 would need to be taken into account when calculating addressability
6607 of a pool. */
6609 /* These typedefs are located at the start of this file, so that
6610 they can be used in the prototypes there. This comment is to
6611 remind readers of that fact so that the following structures
6612 can be understood more easily.
6614 typedef struct minipool_node Mnode;
6615 typedef struct minipool_fixup Mfix; */
6617 struct minipool_node
6619 /* Doubly linked chain of entries. */
6620 Mnode * next;
6621 Mnode * prev;
6622 /* The maximum offset into the code that this entry can be placed. While
6623 pushing fixes for forward references, all entries are sorted in order
6624 of increasing max_address. */
6625 HOST_WIDE_INT max_address;
6626 /* Similarly for an entry inserted for a backwards ref. */
6627 HOST_WIDE_INT min_address;
6628 /* The number of fixes referencing this entry. This can become zero
6629 if we "unpush" an entry. In this case we ignore the entry when we
6630 come to emit the code. */
6631 int refcount;
6632 /* The offset from the start of the minipool. */
6633 HOST_WIDE_INT offset;
6634 /* The value in table. */
6635 rtx value;
6636 /* The mode of value. */
6637 enum machine_mode mode;
6638 /* The size of the value. With iWMMXt enabled
6639 sizes > 4 also imply an alignment of 8-bytes. */
6640 int fix_size;
6643 struct minipool_fixup
6645 Mfix * next;
6646 rtx insn;
6647 HOST_WIDE_INT address;
6648 rtx * loc;
6649 enum machine_mode mode;
6650 int fix_size;
6651 rtx value;
6652 Mnode * minipool;
6653 HOST_WIDE_INT forwards;
6654 HOST_WIDE_INT backwards;
6657 /* Fixes less than a word need padding out to a word boundary. */
6658 #define MINIPOOL_FIX_SIZE(mode) \
6659 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
6661 static Mnode * minipool_vector_head;
6662 static Mnode * minipool_vector_tail;
6663 static rtx minipool_vector_label;
6665 /* The linked list of all minipool fixes required for this function. */
6666 Mfix * minipool_fix_head;
6667 Mfix * minipool_fix_tail;
6668 /* The fix entry for the current minipool, once it has been placed. */
6669 Mfix * minipool_barrier;
6671 /* Determines if INSN is the start of a jump table. Returns the end
6672 of the TABLE or NULL_RTX. */
6673 static rtx
6674 is_jump_table (rtx insn)
6676 rtx table;
6678 if (GET_CODE (insn) == JUMP_INSN
6679 && JUMP_LABEL (insn) != NULL
6680 && ((table = next_real_insn (JUMP_LABEL (insn)))
6681 == next_real_insn (insn))
6682 && table != NULL
6683 && GET_CODE (table) == JUMP_INSN
6684 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6685 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6686 return table;
6688 return NULL_RTX;
6691 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6692 #define JUMP_TABLES_IN_TEXT_SECTION 0
6693 #endif
6695 static HOST_WIDE_INT
6696 get_jump_table_size (rtx insn)
6698 /* ADDR_VECs only take room if read-only data does into the text
6699 section. */
6700 if (JUMP_TABLES_IN_TEXT_SECTION
6701 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6702 || 1
6703 #endif
6706 rtx body = PATTERN (insn);
6707 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6709 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6712 return 0;
6715 /* Move a minipool fix MP from its current location to before MAX_MP.
6716 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6717 constraints may need updating. */
6718 static Mnode *
6719 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6720 HOST_WIDE_INT max_address)
6722 /* This should never be true and the code below assumes these are
6723 different. */
6724 if (mp == max_mp)
6725 abort ();
6727 if (max_mp == NULL)
6729 if (max_address < mp->max_address)
6730 mp->max_address = max_address;
6732 else
6734 if (max_address > max_mp->max_address - mp->fix_size)
6735 mp->max_address = max_mp->max_address - mp->fix_size;
6736 else
6737 mp->max_address = max_address;
6739 /* Unlink MP from its current position. Since max_mp is non-null,
6740 mp->prev must be non-null. */
6741 mp->prev->next = mp->next;
6742 if (mp->next != NULL)
6743 mp->next->prev = mp->prev;
6744 else
6745 minipool_vector_tail = mp->prev;
6747 /* Re-insert it before MAX_MP. */
6748 mp->next = max_mp;
6749 mp->prev = max_mp->prev;
6750 max_mp->prev = mp;
6752 if (mp->prev != NULL)
6753 mp->prev->next = mp;
6754 else
6755 minipool_vector_head = mp;
6758 /* Save the new entry. */
6759 max_mp = mp;
6761 /* Scan over the preceding entries and adjust their addresses as
6762 required. */
6763 while (mp->prev != NULL
6764 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6766 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6767 mp = mp->prev;
6770 return max_mp;
6773 /* Add a constant to the minipool for a forward reference. Returns the
6774 node added or NULL if the constant will not fit in this pool. */
6775 static Mnode *
6776 add_minipool_forward_ref (Mfix *fix)
6778 /* If set, max_mp is the first pool_entry that has a lower
6779 constraint than the one we are trying to add. */
6780 Mnode * max_mp = NULL;
6781 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6782 Mnode * mp;
6784 /* If this fix's address is greater than the address of the first
6785 entry, then we can't put the fix in this pool. We subtract the
6786 size of the current fix to ensure that if the table is fully
6787 packed we still have enough room to insert this value by suffling
6788 the other fixes forwards. */
6789 if (minipool_vector_head &&
6790 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6791 return NULL;
6793 /* Scan the pool to see if a constant with the same value has
6794 already been added. While we are doing this, also note the
6795 location where we must insert the constant if it doesn't already
6796 exist. */
6797 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6799 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6800 && fix->mode == mp->mode
6801 && (GET_CODE (fix->value) != CODE_LABEL
6802 || (CODE_LABEL_NUMBER (fix->value)
6803 == CODE_LABEL_NUMBER (mp->value)))
6804 && rtx_equal_p (fix->value, mp->value))
6806 /* More than one fix references this entry. */
6807 mp->refcount++;
6808 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6811 /* Note the insertion point if necessary. */
6812 if (max_mp == NULL
6813 && mp->max_address > max_address)
6814 max_mp = mp;
6816 /* If we are inserting an 8-bytes aligned quantity and
6817 we have not already found an insertion point, then
6818 make sure that all such 8-byte aligned quantities are
6819 placed at the start of the pool. */
6820 if (ARM_DOUBLEWORD_ALIGN
6821 && max_mp == NULL
6822 && fix->fix_size == 8
6823 && mp->fix_size != 8)
6825 max_mp = mp;
6826 max_address = mp->max_address;
6830 /* The value is not currently in the minipool, so we need to create
6831 a new entry for it. If MAX_MP is NULL, the entry will be put on
6832 the end of the list since the placement is less constrained than
6833 any existing entry. Otherwise, we insert the new fix before
6834 MAX_MP and, if necessary, adjust the constraints on the other
6835 entries. */
6836 mp = xmalloc (sizeof (* mp));
6837 mp->fix_size = fix->fix_size;
6838 mp->mode = fix->mode;
6839 mp->value = fix->value;
6840 mp->refcount = 1;
6841 /* Not yet required for a backwards ref. */
6842 mp->min_address = -65536;
6844 if (max_mp == NULL)
6846 mp->max_address = max_address;
6847 mp->next = NULL;
6848 mp->prev = minipool_vector_tail;
6850 if (mp->prev == NULL)
6852 minipool_vector_head = mp;
6853 minipool_vector_label = gen_label_rtx ();
6855 else
6856 mp->prev->next = mp;
6858 minipool_vector_tail = mp;
6860 else
6862 if (max_address > max_mp->max_address - mp->fix_size)
6863 mp->max_address = max_mp->max_address - mp->fix_size;
6864 else
6865 mp->max_address = max_address;
6867 mp->next = max_mp;
6868 mp->prev = max_mp->prev;
6869 max_mp->prev = mp;
6870 if (mp->prev != NULL)
6871 mp->prev->next = mp;
6872 else
6873 minipool_vector_head = mp;
6876 /* Save the new entry. */
6877 max_mp = mp;
6879 /* Scan over the preceding entries and adjust their addresses as
6880 required. */
6881 while (mp->prev != NULL
6882 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6884 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6885 mp = mp->prev;
6888 return max_mp;
6891 static Mnode *
6892 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6893 HOST_WIDE_INT min_address)
6895 HOST_WIDE_INT offset;
6897 /* This should never be true, and the code below assumes these are
6898 different. */
6899 if (mp == min_mp)
6900 abort ();
6902 if (min_mp == NULL)
6904 if (min_address > mp->min_address)
6905 mp->min_address = min_address;
6907 else
6909 /* We will adjust this below if it is too loose. */
6910 mp->min_address = min_address;
6912 /* Unlink MP from its current position. Since min_mp is non-null,
6913 mp->next must be non-null. */
6914 mp->next->prev = mp->prev;
6915 if (mp->prev != NULL)
6916 mp->prev->next = mp->next;
6917 else
6918 minipool_vector_head = mp->next;
6920 /* Reinsert it after MIN_MP. */
6921 mp->prev = min_mp;
6922 mp->next = min_mp->next;
6923 min_mp->next = mp;
6924 if (mp->next != NULL)
6925 mp->next->prev = mp;
6926 else
6927 minipool_vector_tail = mp;
6930 min_mp = mp;
6932 offset = 0;
6933 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6935 mp->offset = offset;
6936 if (mp->refcount > 0)
6937 offset += mp->fix_size;
6939 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6940 mp->next->min_address = mp->min_address + mp->fix_size;
6943 return min_mp;
6946 /* Add a constant to the minipool for a backward reference. Returns the
6947 node added or NULL if the constant will not fit in this pool.
6949 Note that the code for insertion for a backwards reference can be
6950 somewhat confusing because the calculated offsets for each fix do
6951 not take into account the size of the pool (which is still under
6952 construction. */
6953 static Mnode *
6954 add_minipool_backward_ref (Mfix *fix)
6956 /* If set, min_mp is the last pool_entry that has a lower constraint
6957 than the one we are trying to add. */
6958 Mnode *min_mp = NULL;
6959 /* This can be negative, since it is only a constraint. */
6960 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6961 Mnode *mp;
6963 /* If we can't reach the current pool from this insn, or if we can't
6964 insert this entry at the end of the pool without pushing other
6965 fixes out of range, then we don't try. This ensures that we
6966 can't fail later on. */
6967 if (min_address >= minipool_barrier->address
6968 || (minipool_vector_tail->min_address + fix->fix_size
6969 >= minipool_barrier->address))
6970 return NULL;
6972 /* Scan the pool to see if a constant with the same value has
6973 already been added. While we are doing this, also note the
6974 location where we must insert the constant if it doesn't already
6975 exist. */
6976 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6978 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6979 && fix->mode == mp->mode
6980 && (GET_CODE (fix->value) != CODE_LABEL
6981 || (CODE_LABEL_NUMBER (fix->value)
6982 == CODE_LABEL_NUMBER (mp->value)))
6983 && rtx_equal_p (fix->value, mp->value)
6984 /* Check that there is enough slack to move this entry to the
6985 end of the table (this is conservative). */
6986 && (mp->max_address
6987 > (minipool_barrier->address
6988 + minipool_vector_tail->offset
6989 + minipool_vector_tail->fix_size)))
6991 mp->refcount++;
6992 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6995 if (min_mp != NULL)
6996 mp->min_address += fix->fix_size;
6997 else
6999 /* Note the insertion point if necessary. */
7000 if (mp->min_address < min_address)
7002 /* For now, we do not allow the insertion of 8-byte alignment
7003 requiring nodes anywhere but at the start of the pool. */
7004 if (ARM_DOUBLEWORD_ALIGN
7005 && fix->fix_size == 8 && mp->fix_size != 8)
7006 return NULL;
7007 else
7008 min_mp = mp;
7010 else if (mp->max_address
7011 < minipool_barrier->address + mp->offset + fix->fix_size)
7013 /* Inserting before this entry would push the fix beyond
7014 its maximum address (which can happen if we have
7015 re-located a forwards fix); force the new fix to come
7016 after it. */
7017 min_mp = mp;
7018 min_address = mp->min_address + fix->fix_size;
7020 /* If we are inserting an 8-bytes aligned quantity and
7021 we have not already found an insertion point, then
7022 make sure that all such 8-byte aligned quantities are
7023 placed at the start of the pool. */
7024 else if (ARM_DOUBLEWORD_ALIGN
7025 && min_mp == NULL
7026 && fix->fix_size == 8
7027 && mp->fix_size < 8)
7029 min_mp = mp;
7030 min_address = mp->min_address + fix->fix_size;
7035 /* We need to create a new entry. */
7036 mp = xmalloc (sizeof (* mp));
7037 mp->fix_size = fix->fix_size;
7038 mp->mode = fix->mode;
7039 mp->value = fix->value;
7040 mp->refcount = 1;
7041 mp->max_address = minipool_barrier->address + 65536;
7043 mp->min_address = min_address;
7045 if (min_mp == NULL)
7047 mp->prev = NULL;
7048 mp->next = minipool_vector_head;
7050 if (mp->next == NULL)
7052 minipool_vector_tail = mp;
7053 minipool_vector_label = gen_label_rtx ();
7055 else
7056 mp->next->prev = mp;
7058 minipool_vector_head = mp;
7060 else
7062 mp->next = min_mp->next;
7063 mp->prev = min_mp;
7064 min_mp->next = mp;
7066 if (mp->next != NULL)
7067 mp->next->prev = mp;
7068 else
7069 minipool_vector_tail = mp;
7072 /* Save the new entry. */
7073 min_mp = mp;
7075 if (mp->prev)
7076 mp = mp->prev;
7077 else
7078 mp->offset = 0;
7080 /* Scan over the following entries and adjust their offsets. */
7081 while (mp->next != NULL)
7083 if (mp->next->min_address < mp->min_address + mp->fix_size)
7084 mp->next->min_address = mp->min_address + mp->fix_size;
7086 if (mp->refcount)
7087 mp->next->offset = mp->offset + mp->fix_size;
7088 else
7089 mp->next->offset = mp->offset;
7091 mp = mp->next;
7094 return min_mp;
7097 static void
7098 assign_minipool_offsets (Mfix *barrier)
7100 HOST_WIDE_INT offset = 0;
7101 Mnode *mp;
7103 minipool_barrier = barrier;
7105 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7107 mp->offset = offset;
7109 if (mp->refcount > 0)
7110 offset += mp->fix_size;
7114 /* Output the literal table */
7115 static void
7116 dump_minipool (rtx scan)
7118 Mnode * mp;
7119 Mnode * nmp;
7120 int align64 = 0;
7122 if (ARM_DOUBLEWORD_ALIGN)
7123 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7124 if (mp->refcount > 0 && mp->fix_size == 8)
7126 align64 = 1;
7127 break;
7130 if (dump_file)
7131 fprintf (dump_file,
7132 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7133 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7135 scan = emit_label_after (gen_label_rtx (), scan);
7136 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7137 scan = emit_label_after (minipool_vector_label, scan);
7139 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7141 if (mp->refcount > 0)
7143 if (dump_file)
7145 fprintf (dump_file,
7146 ";; Offset %u, min %ld, max %ld ",
7147 (unsigned) mp->offset, (unsigned long) mp->min_address,
7148 (unsigned long) mp->max_address);
7149 arm_print_value (dump_file, mp->value);
7150 fputc ('\n', dump_file);
7153 switch (mp->fix_size)
7155 #ifdef HAVE_consttable_1
7156 case 1:
7157 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7158 break;
7160 #endif
7161 #ifdef HAVE_consttable_2
7162 case 2:
7163 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7164 break;
7166 #endif
7167 #ifdef HAVE_consttable_4
7168 case 4:
7169 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7170 break;
7172 #endif
7173 #ifdef HAVE_consttable_8
7174 case 8:
7175 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7176 break;
7178 #endif
7179 default:
7180 abort ();
7181 break;
7185 nmp = mp->next;
7186 free (mp);
7189 minipool_vector_head = minipool_vector_tail = NULL;
7190 scan = emit_insn_after (gen_consttable_end (), scan);
7191 scan = emit_barrier_after (scan);
7194 /* Return the cost of forcibly inserting a barrier after INSN. */
7195 static int
7196 arm_barrier_cost (rtx insn)
7198 /* Basing the location of the pool on the loop depth is preferable,
7199 but at the moment, the basic block information seems to be
7200 corrupt by this stage of the compilation. */
7201 int base_cost = 50;
7202 rtx next = next_nonnote_insn (insn);
7204 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7205 base_cost -= 20;
7207 switch (GET_CODE (insn))
7209 case CODE_LABEL:
7210 /* It will always be better to place the table before the label, rather
7211 than after it. */
7212 return 50;
7214 case INSN:
7215 case CALL_INSN:
7216 return base_cost;
7218 case JUMP_INSN:
7219 return base_cost - 10;
7221 default:
7222 return base_cost + 10;
7226 /* Find the best place in the insn stream in the range
7227 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7228 Create the barrier by inserting a jump and add a new fix entry for
7229 it. */
7230 static Mfix *
7231 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7233 HOST_WIDE_INT count = 0;
7234 rtx barrier;
7235 rtx from = fix->insn;
7236 rtx selected = from;
7237 int selected_cost;
7238 HOST_WIDE_INT selected_address;
7239 Mfix * new_fix;
7240 HOST_WIDE_INT max_count = max_address - fix->address;
7241 rtx label = gen_label_rtx ();
7243 selected_cost = arm_barrier_cost (from);
7244 selected_address = fix->address;
7246 while (from && count < max_count)
7248 rtx tmp;
7249 int new_cost;
7251 /* This code shouldn't have been called if there was a natural barrier
7252 within range. */
7253 if (GET_CODE (from) == BARRIER)
7254 abort ();
7256 /* Count the length of this insn. */
7257 count += get_attr_length (from);
7259 /* If there is a jump table, add its length. */
7260 tmp = is_jump_table (from);
7261 if (tmp != NULL)
7263 count += get_jump_table_size (tmp);
7265 /* Jump tables aren't in a basic block, so base the cost on
7266 the dispatch insn. If we select this location, we will
7267 still put the pool after the table. */
7268 new_cost = arm_barrier_cost (from);
7270 if (count < max_count && new_cost <= selected_cost)
7272 selected = tmp;
7273 selected_cost = new_cost;
7274 selected_address = fix->address + count;
7277 /* Continue after the dispatch table. */
7278 from = NEXT_INSN (tmp);
7279 continue;
7282 new_cost = arm_barrier_cost (from);
7284 if (count < max_count && new_cost <= selected_cost)
7286 selected = from;
7287 selected_cost = new_cost;
7288 selected_address = fix->address + count;
7291 from = NEXT_INSN (from);
7294 /* Create a new JUMP_INSN that branches around a barrier. */
7295 from = emit_jump_insn_after (gen_jump (label), selected);
7296 JUMP_LABEL (from) = label;
7297 barrier = emit_barrier_after (from);
7298 emit_label_after (label, barrier);
7300 /* Create a minipool barrier entry for the new barrier. */
7301 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7302 new_fix->insn = barrier;
7303 new_fix->address = selected_address;
7304 new_fix->next = fix->next;
7305 fix->next = new_fix;
7307 return new_fix;
7310 /* Record that there is a natural barrier in the insn stream at
7311 ADDRESS. */
7312 static void
7313 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7315 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7317 fix->insn = insn;
7318 fix->address = address;
7320 fix->next = NULL;
7321 if (minipool_fix_head != NULL)
7322 minipool_fix_tail->next = fix;
7323 else
7324 minipool_fix_head = fix;
7326 minipool_fix_tail = fix;
7329 /* Record INSN, which will need fixing up to load a value from the
7330 minipool. ADDRESS is the offset of the insn since the start of the
7331 function; LOC is a pointer to the part of the insn which requires
7332 fixing; VALUE is the constant that must be loaded, which is of type
7333 MODE. */
7334 static void
7335 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7336 enum machine_mode mode, rtx value)
7338 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7340 #ifdef AOF_ASSEMBLER
7341 /* PIC symbol references need to be converted into offsets into the
7342 based area. */
7343 /* XXX This shouldn't be done here. */
7344 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7345 value = aof_pic_entry (value);
7346 #endif /* AOF_ASSEMBLER */
7348 fix->insn = insn;
7349 fix->address = address;
7350 fix->loc = loc;
7351 fix->mode = mode;
7352 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7353 fix->value = value;
7354 fix->forwards = get_attr_pool_range (insn);
7355 fix->backwards = get_attr_neg_pool_range (insn);
7356 fix->minipool = NULL;
7358 /* If an insn doesn't have a range defined for it, then it isn't
7359 expecting to be reworked by this code. Better to abort now than
7360 to generate duff assembly code. */
7361 if (fix->forwards == 0 && fix->backwards == 0)
7362 abort ();
7364 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7365 So there might be an empty word before the start of the pool.
7366 Hence we reduce the forward range by 4 to allow for this
7367 possibility. */
7368 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7369 fix->forwards -= 4;
7371 if (dump_file)
7373 fprintf (dump_file,
7374 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7375 GET_MODE_NAME (mode),
7376 INSN_UID (insn), (unsigned long) address,
7377 -1 * (long)fix->backwards, (long)fix->forwards);
7378 arm_print_value (dump_file, fix->value);
7379 fprintf (dump_file, "\n");
7382 /* Add it to the chain of fixes. */
7383 fix->next = NULL;
7385 if (minipool_fix_head != NULL)
7386 minipool_fix_tail->next = fix;
7387 else
7388 minipool_fix_head = fix;
7390 minipool_fix_tail = fix;
7393 /* Return the cost of synthesizing the const_double VAL inline.
7394 Returns the number of insns needed, or 99 if we don't know how to
7395 do it. */
7397 arm_const_double_inline_cost (rtx val)
7399 long parts[2];
7401 if (GET_MODE (val) == DFmode)
7403 REAL_VALUE_TYPE r;
7404 if (!TARGET_SOFT_FLOAT)
7405 return 99;
7406 REAL_VALUE_FROM_CONST_DOUBLE (r, val);
7407 REAL_VALUE_TO_TARGET_DOUBLE (r, parts);
7409 else if (GET_MODE (val) != VOIDmode)
7410 return 99;
7411 else
7413 parts[0] = CONST_DOUBLE_LOW (val);
7414 parts[1] = CONST_DOUBLE_HIGH (val);
7417 return (arm_gen_constant (SET, SImode, NULL_RTX, parts[0],
7418 NULL_RTX, NULL_RTX, 0, 0)
7419 + arm_gen_constant (SET, SImode, NULL_RTX, parts[1],
7420 NULL_RTX, NULL_RTX, 0, 0));
7423 /* Determine if a CONST_DOUBLE should be pushed to the minipool */
7424 static bool
7425 const_double_needs_minipool (rtx val)
7427 /* thumb only knows to load a CONST_DOUBLE from memory at the moment */
7428 if (TARGET_THUMB)
7429 return true;
7431 /* Don't push anything to the minipool if a CONST_DOUBLE can be built with
7432 a few ALU insns directly. On balance, the optimum is likely to be around
7433 3 insns, except when there are no load delay slots where it should be 4.
7434 When optimizing for size, a limit of 3 allows saving at least one word
7435 except for cases where a single minipool entry could be shared more than
7436 2 times which is rather unlikely to outweight the overall savings. */
7437 return (arm_const_double_inline_cost (val)
7438 > ((optimize_size || arm_ld_sched) ? 3 : 4));
7441 /* Scan INSN and note any of its operands that need fixing.
7442 If DO_PUSHES is false we do not actually push any of the fixups
7443 needed. The function returns TRUE is any fixups were needed/pushed.
7444 This is used by arm_memory_load_p() which needs to know about loads
7445 of constants that will be converted into minipool loads. */
7446 static bool
7447 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7449 bool result = false;
7450 int opno;
7452 extract_insn (insn);
7454 if (!constrain_operands (1))
7455 fatal_insn_not_found (insn);
7457 if (recog_data.n_alternatives == 0)
7458 return false;
7460 /* Fill in recog_op_alt with information about the constraints of this insn. */
7461 preprocess_constraints ();
7463 for (opno = 0; opno < recog_data.n_operands; opno++)
7465 /* Things we need to fix can only occur in inputs. */
7466 if (recog_data.operand_type[opno] != OP_IN)
7467 continue;
7469 /* If this alternative is a memory reference, then any mention
7470 of constants in this alternative is really to fool reload
7471 into allowing us to accept one there. We need to fix them up
7472 now so that we output the right code. */
7473 if (recog_op_alt[opno][which_alternative].memory_ok)
7475 rtx op = recog_data.operand[opno];
7477 if (CONSTANT_P (op)
7478 && (GET_CODE (op) != CONST_DOUBLE
7479 || const_double_needs_minipool (op)))
7481 if (do_pushes)
7482 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7483 recog_data.operand_mode[opno], op);
7484 result = true;
7486 else if (GET_CODE (op) == MEM
7487 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7488 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7490 if (do_pushes)
7492 rtx cop = avoid_constant_pool_reference (op);
7494 /* Casting the address of something to a mode narrower
7495 than a word can cause avoid_constant_pool_reference()
7496 to return the pool reference itself. That's no good to
7497 us here. Lets just hope that we can use the
7498 constant pool value directly. */
7499 if (op == cop)
7500 cop = get_pool_constant (XEXP (op, 0));
7502 push_minipool_fix (insn, address,
7503 recog_data.operand_loc[opno],
7504 recog_data.operand_mode[opno], cop);
7507 result = true;
7512 return result;
7515 /* Gcc puts the pool in the wrong place for ARM, since we can only
7516 load addresses a limited distance around the pc. We do some
7517 special munging to move the constant pool values to the correct
7518 point in the code. */
7519 static void
7520 arm_reorg (void)
7522 rtx insn;
7523 HOST_WIDE_INT address = 0;
7524 Mfix * fix;
7526 minipool_fix_head = minipool_fix_tail = NULL;
7528 /* The first insn must always be a note, or the code below won't
7529 scan it properly. */
7530 insn = get_insns ();
7531 if (GET_CODE (insn) != NOTE)
7532 abort ();
7534 /* Scan all the insns and record the operands that will need fixing. */
7535 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7537 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7538 && (arm_cirrus_insn_p (insn)
7539 || GET_CODE (insn) == JUMP_INSN
7540 || arm_memory_load_p (insn)))
7541 cirrus_reorg (insn);
7543 if (GET_CODE (insn) == BARRIER)
7544 push_minipool_barrier (insn, address);
7545 else if (INSN_P (insn))
7547 rtx table;
7549 note_invalid_constants (insn, address, true);
7550 address += get_attr_length (insn);
7552 /* If the insn is a vector jump, add the size of the table
7553 and skip the table. */
7554 if ((table = is_jump_table (insn)) != NULL)
7556 address += get_jump_table_size (table);
7557 insn = table;
7562 fix = minipool_fix_head;
7564 /* Now scan the fixups and perform the required changes. */
7565 while (fix)
7567 Mfix * ftmp;
7568 Mfix * fdel;
7569 Mfix * last_added_fix;
7570 Mfix * last_barrier = NULL;
7571 Mfix * this_fix;
7573 /* Skip any further barriers before the next fix. */
7574 while (fix && GET_CODE (fix->insn) == BARRIER)
7575 fix = fix->next;
7577 /* No more fixes. */
7578 if (fix == NULL)
7579 break;
7581 last_added_fix = NULL;
7583 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7585 if (GET_CODE (ftmp->insn) == BARRIER)
7587 if (ftmp->address >= minipool_vector_head->max_address)
7588 break;
7590 last_barrier = ftmp;
7592 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7593 break;
7595 last_added_fix = ftmp; /* Keep track of the last fix added. */
7598 /* If we found a barrier, drop back to that; any fixes that we
7599 could have reached but come after the barrier will now go in
7600 the next mini-pool. */
7601 if (last_barrier != NULL)
7603 /* Reduce the refcount for those fixes that won't go into this
7604 pool after all. */
7605 for (fdel = last_barrier->next;
7606 fdel && fdel != ftmp;
7607 fdel = fdel->next)
7609 fdel->minipool->refcount--;
7610 fdel->minipool = NULL;
7613 ftmp = last_barrier;
7615 else
7617 /* ftmp is first fix that we can't fit into this pool and
7618 there no natural barriers that we could use. Insert a
7619 new barrier in the code somewhere between the previous
7620 fix and this one, and arrange to jump around it. */
7621 HOST_WIDE_INT max_address;
7623 /* The last item on the list of fixes must be a barrier, so
7624 we can never run off the end of the list of fixes without
7625 last_barrier being set. */
7626 if (ftmp == NULL)
7627 abort ();
7629 max_address = minipool_vector_head->max_address;
7630 /* Check that there isn't another fix that is in range that
7631 we couldn't fit into this pool because the pool was
7632 already too large: we need to put the pool before such an
7633 instruction. */
7634 if (ftmp->address < max_address)
7635 max_address = ftmp->address;
7637 last_barrier = create_fix_barrier (last_added_fix, max_address);
7640 assign_minipool_offsets (last_barrier);
7642 while (ftmp)
7644 if (GET_CODE (ftmp->insn) != BARRIER
7645 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7646 == NULL))
7647 break;
7649 ftmp = ftmp->next;
7652 /* Scan over the fixes we have identified for this pool, fixing them
7653 up and adding the constants to the pool itself. */
7654 for (this_fix = fix; this_fix && ftmp != this_fix;
7655 this_fix = this_fix->next)
7656 if (GET_CODE (this_fix->insn) != BARRIER)
7658 rtx addr
7659 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7660 minipool_vector_label),
7661 this_fix->minipool->offset);
7662 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7665 dump_minipool (last_barrier->insn);
7666 fix = ftmp;
7669 /* From now on we must synthesize any constants that we can't handle
7670 directly. This can happen if the RTL gets split during final
7671 instruction generation. */
7672 after_arm_reorg = 1;
7674 /* Free the minipool memory. */
7675 obstack_free (&minipool_obstack, minipool_startobj);
7678 /* Routines to output assembly language. */
7680 /* If the rtx is the correct value then return the string of the number.
7681 In this way we can ensure that valid double constants are generated even
7682 when cross compiling. */
7683 const char *
7684 fp_immediate_constant (rtx x)
7686 REAL_VALUE_TYPE r;
7687 int i;
7689 if (!fp_consts_inited)
7690 init_fp_table ();
7692 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7693 for (i = 0; i < 8; i++)
7694 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7695 return strings_fp[i];
7697 abort ();
7700 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7701 static const char *
7702 fp_const_from_val (REAL_VALUE_TYPE *r)
7704 int i;
7706 if (!fp_consts_inited)
7707 init_fp_table ();
7709 for (i = 0; i < 8; i++)
7710 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7711 return strings_fp[i];
7713 abort ();
7716 /* Output the operands of a LDM/STM instruction to STREAM.
7717 MASK is the ARM register set mask of which only bits 0-15 are important.
7718 REG is the base register, either the frame pointer or the stack pointer,
7719 INSTR is the possibly suffixed load or store instruction. */
7721 static void
7722 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7723 unsigned long mask)
7725 unsigned i;
7726 bool not_first = FALSE;
7728 fputc ('\t', stream);
7729 asm_fprintf (stream, instr, reg);
7730 fputs (", {", stream);
7732 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7733 if (mask & (1 << i))
7735 if (not_first)
7736 fprintf (stream, ", ");
7738 asm_fprintf (stream, "%r", i);
7739 not_first = TRUE;
7742 fprintf (stream, "}\n");
7746 /* Output a FLDMX instruction to STREAM.
7747 BASE if the register containing the address.
7748 REG and COUNT specify the register range.
7749 Extra registers may be added to avoid hardware bugs. */
7751 static void
7752 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7754 int i;
7756 /* Workaround ARM10 VFPr1 bug. */
7757 if (count == 2 && !arm_arch6)
7759 if (reg == 15)
7760 reg--;
7761 count++;
7764 fputc ('\t', stream);
7765 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7767 for (i = reg; i < reg + count; i++)
7769 if (i > reg)
7770 fputs (", ", stream);
7771 asm_fprintf (stream, "d%d", i);
7773 fputs ("}\n", stream);
7778 /* Output the assembly for a store multiple. */
7780 const char *
7781 vfp_output_fstmx (rtx * operands)
7783 char pattern[100];
7784 int p;
7785 int base;
7786 int i;
7788 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7789 p = strlen (pattern);
7791 if (GET_CODE (operands[1]) != REG)
7792 abort ();
7794 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7795 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7797 p += sprintf (&pattern[p], ", d%d", base + i);
7799 strcpy (&pattern[p], "}");
7801 output_asm_insn (pattern, operands);
7802 return "";
7806 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
7807 number of bytes pushed. */
7809 static int
7810 vfp_emit_fstmx (int base_reg, int count)
7812 rtx par;
7813 rtx dwarf;
7814 rtx tmp, reg;
7815 int i;
7817 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
7818 register pairs are stored by a store multiple insn. We avoid this
7819 by pushing an extra pair. */
7820 if (count == 2 && !arm_arch6)
7822 if (base_reg == LAST_VFP_REGNUM - 3)
7823 base_reg -= 2;
7824 count++;
7827 /* ??? The frame layout is implementation defined. We describe
7828 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7829 We really need some way of representing the whole block so that the
7830 unwinder can figure it out at runtime. */
7831 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7832 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7834 reg = gen_rtx_REG (DFmode, base_reg);
7835 base_reg += 2;
7837 XVECEXP (par, 0, 0)
7838 = gen_rtx_SET (VOIDmode,
7839 gen_rtx_MEM (BLKmode,
7840 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7841 gen_rtx_UNSPEC (BLKmode,
7842 gen_rtvec (1, reg),
7843 UNSPEC_PUSH_MULT));
7845 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7846 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7847 GEN_INT (-(count * 8 + 4))));
7848 RTX_FRAME_RELATED_P (tmp) = 1;
7849 XVECEXP (dwarf, 0, 0) = tmp;
7851 tmp = gen_rtx_SET (VOIDmode,
7852 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7853 reg);
7854 RTX_FRAME_RELATED_P (tmp) = 1;
7855 XVECEXP (dwarf, 0, 1) = tmp;
7857 for (i = 1; i < count; i++)
7859 reg = gen_rtx_REG (DFmode, base_reg);
7860 base_reg += 2;
7861 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7863 tmp = gen_rtx_SET (VOIDmode,
7864 gen_rtx_MEM (DFmode,
7865 gen_rtx_PLUS (SImode,
7866 stack_pointer_rtx,
7867 GEN_INT (i * 8))),
7868 reg);
7869 RTX_FRAME_RELATED_P (tmp) = 1;
7870 XVECEXP (dwarf, 0, i + 1) = tmp;
7873 par = emit_insn (par);
7874 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7875 REG_NOTES (par));
7876 RTX_FRAME_RELATED_P (par) = 1;
7878 return count * 8 + 4;
7882 /* Output a 'call' insn. */
7883 const char *
7884 output_call (rtx *operands)
7886 if (arm_arch5)
7887 abort (); /* Patterns should call blx <reg> directly. */
7889 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7890 if (REGNO (operands[0]) == LR_REGNUM)
7892 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7893 output_asm_insn ("mov%?\t%0, %|lr", operands);
7896 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7898 if (TARGET_INTERWORK || arm_arch4t)
7899 output_asm_insn ("bx%?\t%0", operands);
7900 else
7901 output_asm_insn ("mov%?\t%|pc, %0", operands);
7903 return "";
7906 /* Output a 'call' insn that is a reference in memory. */
7907 const char *
7908 output_call_mem (rtx *operands)
7910 if (TARGET_INTERWORK && !arm_arch5)
7912 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7913 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7914 output_asm_insn ("bx%?\t%|ip", operands);
7916 else if (regno_use_in (LR_REGNUM, operands[0]))
7918 /* LR is used in the memory address. We load the address in the
7919 first instruction. It's safe to use IP as the target of the
7920 load since the call will kill it anyway. */
7921 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7922 if (arm_arch5)
7923 output_asm_insn ("blx%?\t%|ip", operands);
7924 else
7926 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7927 if (arm_arch4t)
7928 output_asm_insn ("bx%?\t%|ip", operands);
7929 else
7930 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7933 else
7935 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7936 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7939 return "";
7943 /* Output a move from arm registers to an fpa registers.
7944 OPERANDS[0] is an fpa register.
7945 OPERANDS[1] is the first registers of an arm register pair. */
7946 const char *
7947 output_mov_long_double_fpa_from_arm (rtx *operands)
7949 int arm_reg0 = REGNO (operands[1]);
7950 rtx ops[3];
7952 if (arm_reg0 == IP_REGNUM)
7953 abort ();
7955 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7956 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7957 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7959 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7960 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7962 return "";
7965 /* Output a move from an fpa register to arm registers.
7966 OPERANDS[0] is the first registers of an arm register pair.
7967 OPERANDS[1] is an fpa register. */
7968 const char *
7969 output_mov_long_double_arm_from_fpa (rtx *operands)
7971 int arm_reg0 = REGNO (operands[0]);
7972 rtx ops[3];
7974 if (arm_reg0 == IP_REGNUM)
7975 abort ();
7977 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7978 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7979 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7981 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7982 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7983 return "";
7986 /* Output a move from arm registers to arm registers of a long double
7987 OPERANDS[0] is the destination.
7988 OPERANDS[1] is the source. */
7989 const char *
7990 output_mov_long_double_arm_from_arm (rtx *operands)
7992 /* We have to be careful here because the two might overlap. */
7993 int dest_start = REGNO (operands[0]);
7994 int src_start = REGNO (operands[1]);
7995 rtx ops[2];
7996 int i;
7998 if (dest_start < src_start)
8000 for (i = 0; i < 3; i++)
8002 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8003 ops[1] = gen_rtx_REG (SImode, src_start + i);
8004 output_asm_insn ("mov%?\t%0, %1", ops);
8007 else
8009 for (i = 2; i >= 0; i--)
8011 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8012 ops[1] = gen_rtx_REG (SImode, src_start + i);
8013 output_asm_insn ("mov%?\t%0, %1", ops);
8017 return "";
8021 /* Output a move from arm registers to an fpa registers.
8022 OPERANDS[0] is an fpa register.
8023 OPERANDS[1] is the first registers of an arm register pair. */
8024 const char *
8025 output_mov_double_fpa_from_arm (rtx *operands)
8027 int arm_reg0 = REGNO (operands[1]);
8028 rtx ops[2];
8030 if (arm_reg0 == IP_REGNUM)
8031 abort ();
8033 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8034 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8035 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8036 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8037 return "";
8040 /* Output a move from an fpa register to arm registers.
8041 OPERANDS[0] is the first registers of an arm register pair.
8042 OPERANDS[1] is an fpa register. */
8043 const char *
8044 output_mov_double_arm_from_fpa (rtx *operands)
8046 int arm_reg0 = REGNO (operands[0]);
8047 rtx ops[2];
8049 if (arm_reg0 == IP_REGNUM)
8050 abort ();
8052 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8053 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8054 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8055 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8056 return "";
8059 /* Output a move between double words.
8060 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8061 or MEM<-REG and all MEMs must be offsettable addresses. */
8062 const char *
8063 output_move_double (rtx *operands)
8065 enum rtx_code code0 = GET_CODE (operands[0]);
8066 enum rtx_code code1 = GET_CODE (operands[1]);
8067 rtx otherops[3];
8069 if (code0 == REG)
8071 int reg0 = REGNO (operands[0]);
8073 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8075 if (code1 == REG)
8077 int reg1 = REGNO (operands[1]);
8078 if (reg1 == IP_REGNUM)
8079 abort ();
8081 /* Ensure the second source is not overwritten. */
8082 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8083 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8084 else
8085 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8087 else if (code1 == CONST_VECTOR)
8089 HOST_WIDE_INT hint = 0;
8091 switch (GET_MODE (operands[1]))
8093 case V2SImode:
8094 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8095 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8096 break;
8098 case V4HImode:
8099 if (BYTES_BIG_ENDIAN)
8101 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8102 hint <<= 16;
8103 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8105 else
8107 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8108 hint <<= 16;
8109 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8112 otherops[1] = GEN_INT (hint);
8113 hint = 0;
8115 if (BYTES_BIG_ENDIAN)
8117 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8118 hint <<= 16;
8119 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8121 else
8123 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8124 hint <<= 16;
8125 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8128 operands[1] = GEN_INT (hint);
8129 break;
8131 case V8QImode:
8132 if (BYTES_BIG_ENDIAN)
8134 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8135 hint <<= 8;
8136 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8137 hint <<= 8;
8138 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8139 hint <<= 8;
8140 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8142 else
8144 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8145 hint <<= 8;
8146 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8147 hint <<= 8;
8148 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8149 hint <<= 8;
8150 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8153 otherops[1] = GEN_INT (hint);
8154 hint = 0;
8156 if (BYTES_BIG_ENDIAN)
8158 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8159 hint <<= 8;
8160 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8161 hint <<= 8;
8162 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8163 hint <<= 8;
8164 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8166 else
8168 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8169 hint <<= 8;
8170 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8171 hint <<= 8;
8172 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8173 hint <<= 8;
8174 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8177 operands[1] = GEN_INT (hint);
8178 break;
8180 default:
8181 abort ();
8183 output_mov_immediate (operands);
8184 output_mov_immediate (otherops);
8186 else if (code1 == CONST_DOUBLE)
8188 if (GET_MODE (operands[1]) == DFmode)
8190 REAL_VALUE_TYPE r;
8191 long l[2];
8193 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8194 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8195 otherops[1] = GEN_INT (l[1]);
8196 operands[1] = GEN_INT (l[0]);
8198 else if (GET_MODE (operands[1]) != VOIDmode)
8199 abort ();
8200 else if (WORDS_BIG_ENDIAN)
8202 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8203 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8205 else
8207 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8208 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8211 output_mov_immediate (operands);
8212 output_mov_immediate (otherops);
8214 else if (code1 == CONST_INT)
8216 #if HOST_BITS_PER_WIDE_INT > 32
8217 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8218 what the upper word is. */
8219 if (WORDS_BIG_ENDIAN)
8221 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8222 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8224 else
8226 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8227 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8229 #else
8230 /* Sign extend the intval into the high-order word. */
8231 if (WORDS_BIG_ENDIAN)
8233 otherops[1] = operands[1];
8234 operands[1] = (INTVAL (operands[1]) < 0
8235 ? constm1_rtx : const0_rtx);
8237 else
8238 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8239 #endif
8240 output_mov_immediate (otherops);
8241 output_mov_immediate (operands);
8243 else if (code1 == MEM)
8245 switch (GET_CODE (XEXP (operands[1], 0)))
8247 case REG:
8248 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8249 break;
8251 case PRE_INC:
8252 if (!TARGET_LDRD)
8253 abort (); /* Should never happen now. */
8254 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8255 break;
8257 case PRE_DEC:
8258 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8259 break;
8261 case POST_INC:
8262 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8263 break;
8265 case POST_DEC:
8266 if (!TARGET_LDRD)
8267 abort (); /* Should never happen now. */
8268 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8269 break;
8271 case PRE_MODIFY:
8272 case POST_MODIFY:
8273 otherops[0] = operands[0];
8274 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8275 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8277 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8279 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8281 /* Registers overlap so split out the increment. */
8282 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8283 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8285 else
8286 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8288 else
8290 /* We only allow constant increments, so this is safe. */
8291 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8293 break;
8295 case LABEL_REF:
8296 case CONST:
8297 output_asm_insn ("adr%?\t%0, %1", operands);
8298 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8299 break;
8301 default:
8302 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8303 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8305 otherops[0] = operands[0];
8306 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8307 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8309 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8311 if (GET_CODE (otherops[2]) == CONST_INT)
8313 switch ((int) INTVAL (otherops[2]))
8315 case -8:
8316 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8317 return "";
8318 case -4:
8319 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8320 return "";
8321 case 4:
8322 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8323 return "";
8326 if (TARGET_LDRD
8327 && (GET_CODE (otherops[2]) == REG
8328 || (GET_CODE (otherops[2]) == CONST_INT
8329 && INTVAL (otherops[2]) > -256
8330 && INTVAL (otherops[2]) < 256)))
8332 if (reg_overlap_mentioned_p (otherops[0],
8333 otherops[2]))
8335 /* Swap base and index registers over to
8336 avoid a conflict. */
8337 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8338 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8341 /* If both registers conflict, it will usually
8342 have been fixed by a splitter. */
8343 if (reg_overlap_mentioned_p (otherops[0],
8344 otherops[2]))
8346 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8347 output_asm_insn ("ldr%?d\t%0, [%1]",
8348 otherops);
8349 return "";
8351 else
8353 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8354 otherops);
8355 return "";
8358 if (GET_CODE (otherops[2]) == CONST_INT)
8360 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8361 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8362 else
8363 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8365 else
8366 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8368 else
8369 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8371 return "ldm%?ia\t%0, %M0";
8373 else
8375 otherops[1] = adjust_address (operands[1], SImode, 4);
8376 /* Take care of overlapping base/data reg. */
8377 if (reg_mentioned_p (operands[0], operands[1]))
8379 output_asm_insn ("ldr%?\t%0, %1", otherops);
8380 output_asm_insn ("ldr%?\t%0, %1", operands);
8382 else
8384 output_asm_insn ("ldr%?\t%0, %1", operands);
8385 output_asm_insn ("ldr%?\t%0, %1", otherops);
8390 else
8391 abort (); /* Constraints should prevent this. */
8393 else if (code0 == MEM && code1 == REG)
8395 if (REGNO (operands[1]) == IP_REGNUM)
8396 abort ();
8398 switch (GET_CODE (XEXP (operands[0], 0)))
8400 case REG:
8401 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8402 break;
8404 case PRE_INC:
8405 if (!TARGET_LDRD)
8406 abort (); /* Should never happen now. */
8407 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8408 break;
8410 case PRE_DEC:
8411 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8412 break;
8414 case POST_INC:
8415 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8416 break;
8418 case POST_DEC:
8419 if (!TARGET_LDRD)
8420 abort (); /* Should never happen now. */
8421 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8422 break;
8424 case PRE_MODIFY:
8425 case POST_MODIFY:
8426 otherops[0] = operands[1];
8427 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8428 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8430 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8431 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8432 else
8433 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8434 break;
8436 case PLUS:
8437 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8438 if (GET_CODE (otherops[2]) == CONST_INT)
8440 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8442 case -8:
8443 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8444 return "";
8446 case -4:
8447 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8448 return "";
8450 case 4:
8451 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8452 return "";
8455 if (TARGET_LDRD
8456 && (GET_CODE (otherops[2]) == REG
8457 || (GET_CODE (otherops[2]) == CONST_INT
8458 && INTVAL (otherops[2]) > -256
8459 && INTVAL (otherops[2]) < 256)))
8461 otherops[0] = operands[1];
8462 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8463 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8464 return "";
8466 /* Fall through */
8468 default:
8469 otherops[0] = adjust_address (operands[0], SImode, 4);
8470 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8471 output_asm_insn ("str%?\t%1, %0", operands);
8472 output_asm_insn ("str%?\t%1, %0", otherops);
8475 else
8476 /* Constraints should prevent this. */
8477 abort ();
8479 return "";
8483 /* Output an arbitrary MOV reg, #n.
8484 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8485 const char *
8486 output_mov_immediate (rtx *operands)
8488 HOST_WIDE_INT n = INTVAL (operands[1]);
8490 /* Try to use one MOV. */
8491 if (const_ok_for_arm (n))
8492 output_asm_insn ("mov%?\t%0, %1", operands);
8494 /* Try to use one MVN. */
8495 else if (const_ok_for_arm (~n))
8497 operands[1] = GEN_INT (~n);
8498 output_asm_insn ("mvn%?\t%0, %1", operands);
8500 else
8502 int n_ones = 0;
8503 int i;
8505 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8506 for (i = 0; i < 32; i++)
8507 if (n & 1 << i)
8508 n_ones++;
8510 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8511 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8512 else
8513 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8516 return "";
8519 /* Output an ADD r, s, #n where n may be too big for one instruction.
8520 If adding zero to one register, output nothing. */
8521 const char *
8522 output_add_immediate (rtx *operands)
8524 HOST_WIDE_INT n = INTVAL (operands[2]);
8526 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8528 if (n < 0)
8529 output_multi_immediate (operands,
8530 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8531 -n);
8532 else
8533 output_multi_immediate (operands,
8534 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8538 return "";
8541 /* Output a multiple immediate operation.
8542 OPERANDS is the vector of operands referred to in the output patterns.
8543 INSTR1 is the output pattern to use for the first constant.
8544 INSTR2 is the output pattern to use for subsequent constants.
8545 IMMED_OP is the index of the constant slot in OPERANDS.
8546 N is the constant value. */
8547 static const char *
8548 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8549 int immed_op, HOST_WIDE_INT n)
8551 #if HOST_BITS_PER_WIDE_INT > 32
8552 n &= 0xffffffff;
8553 #endif
8555 if (n == 0)
8557 /* Quick and easy output. */
8558 operands[immed_op] = const0_rtx;
8559 output_asm_insn (instr1, operands);
8561 else
8563 int i;
8564 const char * instr = instr1;
8566 /* Note that n is never zero here (which would give no output). */
8567 for (i = 0; i < 32; i += 2)
8569 if (n & (3 << i))
8571 operands[immed_op] = GEN_INT (n & (255 << i));
8572 output_asm_insn (instr, operands);
8573 instr = instr2;
8574 i += 6;
8579 return "";
8582 /* Return the appropriate ARM instruction for the operation code.
8583 The returned result should not be overwritten. OP is the rtx of the
8584 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8585 was shifted. */
8586 const char *
8587 arithmetic_instr (rtx op, int shift_first_arg)
8589 switch (GET_CODE (op))
8591 case PLUS:
8592 return "add";
8594 case MINUS:
8595 return shift_first_arg ? "rsb" : "sub";
8597 case IOR:
8598 return "orr";
8600 case XOR:
8601 return "eor";
8603 case AND:
8604 return "and";
8606 default:
8607 abort ();
8611 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8612 for the operation code. The returned result should not be overwritten.
8613 OP is the rtx code of the shift.
8614 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
8615 shift. */
8616 static const char *
8617 shift_op (rtx op, HOST_WIDE_INT *amountp)
8619 const char * mnem;
8620 enum rtx_code code = GET_CODE (op);
8622 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8623 *amountp = -1;
8624 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8625 *amountp = INTVAL (XEXP (op, 1));
8626 else
8627 abort ();
8629 switch (code)
8631 case ASHIFT:
8632 mnem = "asl";
8633 break;
8635 case ASHIFTRT:
8636 mnem = "asr";
8637 break;
8639 case LSHIFTRT:
8640 mnem = "lsr";
8641 break;
8643 case ROTATE:
8644 if (*amountp == -1)
8645 abort ();
8646 *amountp = 32 - *amountp;
8648 /* Fall through. */
8650 case ROTATERT:
8651 mnem = "ror";
8652 break;
8654 case MULT:
8655 /* We never have to worry about the amount being other than a
8656 power of 2, since this case can never be reloaded from a reg. */
8657 if (*amountp != -1)
8658 *amountp = int_log2 (*amountp);
8659 else
8660 abort ();
8661 return "asl";
8663 default:
8664 abort ();
8667 if (*amountp != -1)
8669 /* This is not 100% correct, but follows from the desire to merge
8670 multiplication by a power of 2 with the recognizer for a
8671 shift. >=32 is not a valid shift for "asl", so we must try and
8672 output a shift that produces the correct arithmetical result.
8673 Using lsr #32 is identical except for the fact that the carry bit
8674 is not set correctly if we set the flags; but we never use the
8675 carry bit from such an operation, so we can ignore that. */
8676 if (code == ROTATERT)
8677 /* Rotate is just modulo 32. */
8678 *amountp &= 31;
8679 else if (*amountp != (*amountp & 31))
8681 if (code == ASHIFT)
8682 mnem = "lsr";
8683 *amountp = 32;
8686 /* Shifts of 0 are no-ops. */
8687 if (*amountp == 0)
8688 return NULL;
8691 return mnem;
8694 /* Obtain the shift from the POWER of two. */
8696 static HOST_WIDE_INT
8697 int_log2 (HOST_WIDE_INT power)
8699 HOST_WIDE_INT shift = 0;
8701 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8703 if (shift > 31)
8704 abort ();
8705 shift++;
8708 return shift;
8711 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8712 because /bin/as is horribly restrictive. The judgement about
8713 whether or not each character is 'printable' (and can be output as
8714 is) or not (and must be printed with an octal escape) must be made
8715 with reference to the *host* character set -- the situation is
8716 similar to that discussed in the comments above pp_c_char in
8717 c-pretty-print.c. */
8719 #define MAX_ASCII_LEN 51
8721 void
8722 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8724 int i;
8725 int len_so_far = 0;
8727 fputs ("\t.ascii\t\"", stream);
8729 for (i = 0; i < len; i++)
8731 int c = p[i];
8733 if (len_so_far >= MAX_ASCII_LEN)
8735 fputs ("\"\n\t.ascii\t\"", stream);
8736 len_so_far = 0;
8739 if (ISPRINT (c))
8741 if (c == '\\' || c == '\"')
8743 putc ('\\', stream);
8744 len_so_far++;
8746 putc (c, stream);
8747 len_so_far++;
8749 else
8751 fprintf (stream, "\\%03o", c);
8752 len_so_far += 4;
8756 fputs ("\"\n", stream);
8759 /* Compute the register save mask for registers 0 through 12
8760 inclusive. This code is used by arm_compute_save_reg_mask. */
8762 static unsigned long
8763 arm_compute_save_reg0_reg12_mask (void)
8765 unsigned long func_type = arm_current_func_type ();
8766 unsigned long save_reg_mask = 0;
8767 unsigned int reg;
8769 if (IS_INTERRUPT (func_type))
8771 unsigned int max_reg;
8772 /* Interrupt functions must not corrupt any registers,
8773 even call clobbered ones. If this is a leaf function
8774 we can just examine the registers used by the RTL, but
8775 otherwise we have to assume that whatever function is
8776 called might clobber anything, and so we have to save
8777 all the call-clobbered registers as well. */
8778 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8779 /* FIQ handlers have registers r8 - r12 banked, so
8780 we only need to check r0 - r7, Normal ISRs only
8781 bank r14 and r15, so we must check up to r12.
8782 r13 is the stack pointer which is always preserved,
8783 so we do not need to consider it here. */
8784 max_reg = 7;
8785 else
8786 max_reg = 12;
8788 for (reg = 0; reg <= max_reg; reg++)
8789 if (regs_ever_live[reg]
8790 || (! current_function_is_leaf && call_used_regs [reg]))
8791 save_reg_mask |= (1 << reg);
8793 /* Also save the pic base register if necessary. */
8794 if (flag_pic
8795 && !TARGET_SINGLE_PIC_BASE
8796 && current_function_uses_pic_offset_table)
8797 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8799 else
8801 /* In the normal case we only need to save those registers
8802 which are call saved and which are used by this function. */
8803 for (reg = 0; reg <= 10; reg++)
8804 if (regs_ever_live[reg] && ! call_used_regs [reg])
8805 save_reg_mask |= (1 << reg);
8807 /* Handle the frame pointer as a special case. */
8808 if (! TARGET_APCS_FRAME
8809 && ! frame_pointer_needed
8810 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8811 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8812 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8814 /* If we aren't loading the PIC register,
8815 don't stack it even though it may be live. */
8816 if (flag_pic
8817 && !TARGET_SINGLE_PIC_BASE
8818 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8819 || current_function_uses_pic_offset_table))
8820 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8823 /* Save registers so the exception handler can modify them. */
8824 if (current_function_calls_eh_return)
8826 unsigned int i;
8828 for (i = 0; ; i++)
8830 reg = EH_RETURN_DATA_REGNO (i);
8831 if (reg == INVALID_REGNUM)
8832 break;
8833 save_reg_mask |= 1 << reg;
8837 return save_reg_mask;
8840 /* Compute a bit mask of which registers need to be
8841 saved on the stack for the current function. */
8843 static unsigned long
8844 arm_compute_save_reg_mask (void)
8846 unsigned int save_reg_mask = 0;
8847 unsigned long func_type = arm_current_func_type ();
8849 if (IS_NAKED (func_type))
8850 /* This should never really happen. */
8851 return 0;
8853 /* If we are creating a stack frame, then we must save the frame pointer,
8854 IP (which will hold the old stack pointer), LR and the PC. */
8855 if (frame_pointer_needed)
8856 save_reg_mask |=
8857 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8858 | (1 << IP_REGNUM)
8859 | (1 << LR_REGNUM)
8860 | (1 << PC_REGNUM);
8862 /* Volatile functions do not return, so there
8863 is no need to save any other registers. */
8864 if (IS_VOLATILE (func_type))
8865 return save_reg_mask;
8867 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8869 /* Decide if we need to save the link register.
8870 Interrupt routines have their own banked link register,
8871 so they never need to save it.
8872 Otherwise if we do not use the link register we do not need to save
8873 it. If we are pushing other registers onto the stack however, we
8874 can save an instruction in the epilogue by pushing the link register
8875 now and then popping it back into the PC. This incurs extra memory
8876 accesses though, so we only do it when optimizing for size, and only
8877 if we know that we will not need a fancy return sequence. */
8878 if (regs_ever_live [LR_REGNUM]
8879 || (save_reg_mask
8880 && optimize_size
8881 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8882 && !current_function_calls_eh_return))
8883 save_reg_mask |= 1 << LR_REGNUM;
8885 if (cfun->machine->lr_save_eliminated)
8886 save_reg_mask &= ~ (1 << LR_REGNUM);
8888 if (TARGET_REALLY_IWMMXT
8889 && ((bit_count (save_reg_mask)
8890 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8892 unsigned int reg;
8894 /* The total number of registers that are going to be pushed
8895 onto the stack is odd. We need to ensure that the stack
8896 is 64-bit aligned before we start to save iWMMXt registers,
8897 and also before we start to create locals. (A local variable
8898 might be a double or long long which we will load/store using
8899 an iWMMXt instruction). Therefore we need to push another
8900 ARM register, so that the stack will be 64-bit aligned. We
8901 try to avoid using the arg registers (r0 -r3) as they might be
8902 used to pass values in a tail call. */
8903 for (reg = 4; reg <= 12; reg++)
8904 if ((save_reg_mask & (1 << reg)) == 0)
8905 break;
8907 if (reg <= 12)
8908 save_reg_mask |= (1 << reg);
8909 else
8911 cfun->machine->sibcall_blocked = 1;
8912 save_reg_mask |= (1 << 3);
8916 return save_reg_mask;
8920 /* Compute a bit mask of which registers need to be
8921 saved on the stack for the current function. */
8922 static unsigned long
8923 thumb_compute_save_reg_mask (void)
8925 unsigned long mask;
8926 unsigned reg;
8928 mask = 0;
8929 for (reg = 0; reg < 12; reg ++)
8930 if (regs_ever_live[reg] && !call_used_regs[reg])
8931 mask |= 1 << reg;
8933 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8934 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8936 if (TARGET_SINGLE_PIC_BASE)
8937 mask &= ~(1 << arm_pic_register);
8939 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8940 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8941 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8943 /* LR will also be pushed if any lo regs are pushed. */
8944 if (mask & 0xff || thumb_force_lr_save ())
8945 mask |= (1 << LR_REGNUM);
8947 /* Make sure we have a low work register if we need one.
8948 We will need one if we are going to push a high register,
8949 but we are not currently intending to push a low register. */
8950 if ((mask & 0xff) == 0
8951 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8953 /* Use thumb_find_work_register to choose which register
8954 we will use. If the register is live then we will
8955 have to push it. Use LAST_LO_REGNUM as our fallback
8956 choice for the register to select. */
8957 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8959 if (! call_used_regs[reg])
8960 mask |= 1 << reg;
8963 return mask;
8967 /* Return the number of bytes required to save VFP registers. */
8968 static int
8969 arm_get_vfp_saved_size (void)
8971 unsigned int regno;
8972 int count;
8973 int saved;
8975 saved = 0;
8976 /* Space for saved VFP registers. */
8977 if (TARGET_HARD_FLOAT && TARGET_VFP)
8979 count = 0;
8980 for (regno = FIRST_VFP_REGNUM;
8981 regno < LAST_VFP_REGNUM;
8982 regno += 2)
8984 if ((!regs_ever_live[regno] || call_used_regs[regno])
8985 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8987 if (count > 0)
8989 /* Workaround ARM10 VFPr1 bug. */
8990 if (count == 2 && !arm_arch6)
8991 count++;
8992 saved += count * 8 + 4;
8994 count = 0;
8996 else
8997 count++;
8999 if (count > 0)
9001 if (count == 2 && !arm_arch6)
9002 count++;
9003 saved += count * 8 + 4;
9006 return saved;
9010 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9011 everything bar the final return instruction. */
9012 const char *
9013 output_return_instruction (rtx operand, int really_return, int reverse)
9015 char conditional[10];
9016 char instr[100];
9017 unsigned reg;
9018 unsigned long live_regs_mask;
9019 unsigned long func_type;
9020 arm_stack_offsets *offsets;
9022 func_type = arm_current_func_type ();
9024 if (IS_NAKED (func_type))
9025 return "";
9027 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9029 /* If this function was declared non-returning, and we have
9030 found a tail call, then we have to trust that the called
9031 function won't return. */
9032 if (really_return)
9034 rtx ops[2];
9036 /* Otherwise, trap an attempted return by aborting. */
9037 ops[0] = operand;
9038 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9039 : "abort");
9040 assemble_external_libcall (ops[1]);
9041 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9044 return "";
9047 if (current_function_calls_alloca && !really_return)
9048 abort ();
9050 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9052 return_used_this_function = 1;
9054 live_regs_mask = arm_compute_save_reg_mask ();
9056 if (live_regs_mask)
9058 const char * return_reg;
9060 /* If we do not have any special requirements for function exit
9061 (e.g. interworking, or ISR) then we can load the return address
9062 directly into the PC. Otherwise we must load it into LR. */
9063 if (really_return
9064 && ! TARGET_INTERWORK)
9065 return_reg = reg_names[PC_REGNUM];
9066 else
9067 return_reg = reg_names[LR_REGNUM];
9069 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9071 /* There are three possible reasons for the IP register
9072 being saved. 1) a stack frame was created, in which case
9073 IP contains the old stack pointer, or 2) an ISR routine
9074 corrupted it, or 3) it was saved to align the stack on
9075 iWMMXt. In case 1, restore IP into SP, otherwise just
9076 restore IP. */
9077 if (frame_pointer_needed)
9079 live_regs_mask &= ~ (1 << IP_REGNUM);
9080 live_regs_mask |= (1 << SP_REGNUM);
9082 else
9084 if (! IS_INTERRUPT (func_type)
9085 && ! TARGET_REALLY_IWMMXT)
9086 abort ();
9090 /* On some ARM architectures it is faster to use LDR rather than
9091 LDM to load a single register. On other architectures, the
9092 cost is the same. In 26 bit mode, or for exception handlers,
9093 we have to use LDM to load the PC so that the CPSR is also
9094 restored. */
9095 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9096 if (live_regs_mask == (1U << reg))
9097 break;
9099 if (reg <= LAST_ARM_REGNUM
9100 && (reg != LR_REGNUM
9101 || ! really_return
9102 || ! IS_INTERRUPT (func_type)))
9104 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9105 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9107 else
9109 char *p;
9110 int first = 1;
9112 /* Generate the load multiple instruction to restore the
9113 registers. Note we can get here, even if
9114 frame_pointer_needed is true, but only if sp already
9115 points to the base of the saved core registers. */
9116 if (live_regs_mask & (1 << SP_REGNUM))
9118 unsigned HOST_WIDE_INT stack_adjust;
9120 offsets = arm_get_frame_offsets ();
9121 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9122 if (stack_adjust != 0 && stack_adjust != 4)
9123 abort ();
9125 if (stack_adjust && arm_arch5)
9126 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9127 else
9129 /* If we can't use ldmib (SA110 bug),
9130 then try to pop r3 instead. */
9131 if (stack_adjust)
9132 live_regs_mask |= 1 << 3;
9133 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9136 else
9137 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9139 p = instr + strlen (instr);
9141 for (reg = 0; reg <= SP_REGNUM; reg++)
9142 if (live_regs_mask & (1 << reg))
9144 int l = strlen (reg_names[reg]);
9146 if (first)
9147 first = 0;
9148 else
9150 memcpy (p, ", ", 2);
9151 p += 2;
9154 memcpy (p, "%|", 2);
9155 memcpy (p + 2, reg_names[reg], l);
9156 p += l + 2;
9159 if (live_regs_mask & (1 << LR_REGNUM))
9161 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9162 /* If returning from an interrupt, restore the CPSR. */
9163 if (IS_INTERRUPT (func_type))
9164 strcat (p, "^");
9166 else
9167 strcpy (p, "}");
9170 output_asm_insn (instr, & operand);
9172 /* See if we need to generate an extra instruction to
9173 perform the actual function return. */
9174 if (really_return
9175 && func_type != ARM_FT_INTERWORKED
9176 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9178 /* The return has already been handled
9179 by loading the LR into the PC. */
9180 really_return = 0;
9184 if (really_return)
9186 switch ((int) ARM_FUNC_TYPE (func_type))
9188 case ARM_FT_ISR:
9189 case ARM_FT_FIQ:
9190 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9191 break;
9193 case ARM_FT_INTERWORKED:
9194 sprintf (instr, "bx%s\t%%|lr", conditional);
9195 break;
9197 case ARM_FT_EXCEPTION:
9198 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9199 break;
9201 default:
9202 /* Use bx if it's available. */
9203 if (arm_arch5 || arm_arch4t)
9204 sprintf (instr, "bx%s\t%%|lr", conditional);
9205 else
9206 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9207 break;
9210 output_asm_insn (instr, & operand);
9213 return "";
9216 /* Write the function name into the code section, directly preceding
9217 the function prologue.
9219 Code will be output similar to this:
9221 .ascii "arm_poke_function_name", 0
9222 .align
9224 .word 0xff000000 + (t1 - t0)
9225 arm_poke_function_name
9226 mov ip, sp
9227 stmfd sp!, {fp, ip, lr, pc}
9228 sub fp, ip, #4
9230 When performing a stack backtrace, code can inspect the value
9231 of 'pc' stored at 'fp' + 0. If the trace function then looks
9232 at location pc - 12 and the top 8 bits are set, then we know
9233 that there is a function name embedded immediately preceding this
9234 location and has length ((pc[-3]) & 0xff000000).
9236 We assume that pc is declared as a pointer to an unsigned long.
9238 It is of no benefit to output the function name if we are assembling
9239 a leaf function. These function types will not contain a stack
9240 backtrace structure, therefore it is not possible to determine the
9241 function name. */
9242 void
9243 arm_poke_function_name (FILE *stream, const char *name)
9245 unsigned long alignlength;
9246 unsigned long length;
9247 rtx x;
9249 length = strlen (name) + 1;
9250 alignlength = ROUND_UP_WORD (length);
9252 ASM_OUTPUT_ASCII (stream, name, length);
9253 ASM_OUTPUT_ALIGN (stream, 2);
9254 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9255 assemble_aligned_integer (UNITS_PER_WORD, x);
9258 /* Place some comments into the assembler stream
9259 describing the current function. */
9260 static void
9261 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9263 unsigned long func_type;
9265 if (!TARGET_ARM)
9267 thumb_output_function_prologue (f, frame_size);
9268 return;
9271 /* Sanity check. */
9272 if (arm_ccfsm_state || arm_target_insn)
9273 abort ();
9275 func_type = arm_current_func_type ();
9277 switch ((int) ARM_FUNC_TYPE (func_type))
9279 default:
9280 case ARM_FT_NORMAL:
9281 break;
9282 case ARM_FT_INTERWORKED:
9283 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9284 break;
9285 case ARM_FT_ISR:
9286 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9287 break;
9288 case ARM_FT_FIQ:
9289 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9290 break;
9291 case ARM_FT_EXCEPTION:
9292 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9293 break;
9296 if (IS_NAKED (func_type))
9297 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9299 if (IS_VOLATILE (func_type))
9300 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9302 if (IS_NESTED (func_type))
9303 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9305 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9306 current_function_args_size,
9307 current_function_pretend_args_size, frame_size);
9309 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9310 frame_pointer_needed,
9311 cfun->machine->uses_anonymous_args);
9313 if (cfun->machine->lr_save_eliminated)
9314 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9316 if (current_function_calls_eh_return)
9317 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9319 #ifdef AOF_ASSEMBLER
9320 if (flag_pic)
9321 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9322 #endif
9324 return_used_this_function = 0;
9327 const char *
9328 arm_output_epilogue (rtx sibling)
9330 int reg;
9331 unsigned long saved_regs_mask;
9332 unsigned long func_type;
9333 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9334 frame that is $fp + 4 for a non-variadic function. */
9335 int floats_offset = 0;
9336 rtx operands[3];
9337 FILE * f = asm_out_file;
9338 unsigned int lrm_count = 0;
9339 int really_return = (sibling == NULL);
9340 int start_reg;
9341 arm_stack_offsets *offsets;
9343 /* If we have already generated the return instruction
9344 then it is futile to generate anything else. */
9345 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9346 return "";
9348 func_type = arm_current_func_type ();
9350 if (IS_NAKED (func_type))
9351 /* Naked functions don't have epilogues. */
9352 return "";
9354 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9356 rtx op;
9358 /* A volatile function should never return. Call abort. */
9359 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9360 assemble_external_libcall (op);
9361 output_asm_insn ("bl\t%a0", &op);
9363 return "";
9366 if (current_function_calls_eh_return
9367 && ! really_return)
9368 /* If we are throwing an exception, then we really must
9369 be doing a return, so we can't tail-call. */
9370 abort ();
9372 offsets = arm_get_frame_offsets ();
9373 saved_regs_mask = arm_compute_save_reg_mask ();
9375 if (TARGET_IWMMXT)
9376 lrm_count = bit_count (saved_regs_mask);
9378 floats_offset = offsets->saved_args;
9379 /* Compute how far away the floats will be. */
9380 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9381 if (saved_regs_mask & (1 << reg))
9382 floats_offset += 4;
9384 if (frame_pointer_needed)
9386 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9387 int vfp_offset = offsets->frame;
9389 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9391 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9392 if (regs_ever_live[reg] && !call_used_regs[reg])
9394 floats_offset += 12;
9395 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9396 reg, FP_REGNUM, floats_offset - vfp_offset);
9399 else
9401 start_reg = LAST_FPA_REGNUM;
9403 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9405 if (regs_ever_live[reg] && !call_used_regs[reg])
9407 floats_offset += 12;
9409 /* We can't unstack more than four registers at once. */
9410 if (start_reg - reg == 3)
9412 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9413 reg, FP_REGNUM, floats_offset - vfp_offset);
9414 start_reg = reg - 1;
9417 else
9419 if (reg != start_reg)
9420 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9421 reg + 1, start_reg - reg,
9422 FP_REGNUM, floats_offset - vfp_offset);
9423 start_reg = reg - 1;
9427 /* Just in case the last register checked also needs unstacking. */
9428 if (reg != start_reg)
9429 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9430 reg + 1, start_reg - reg,
9431 FP_REGNUM, floats_offset - vfp_offset);
9434 if (TARGET_HARD_FLOAT && TARGET_VFP)
9436 int saved_size;
9438 /* The fldmx insn does not have base+offset addressing modes,
9439 so we use IP to hold the address. */
9440 saved_size = arm_get_vfp_saved_size ();
9442 if (saved_size > 0)
9444 floats_offset += saved_size;
9445 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9446 FP_REGNUM, floats_offset - vfp_offset);
9448 start_reg = FIRST_VFP_REGNUM;
9449 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9451 if ((!regs_ever_live[reg] || call_used_regs[reg])
9452 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9454 if (start_reg != reg)
9455 arm_output_fldmx (f, IP_REGNUM,
9456 (start_reg - FIRST_VFP_REGNUM) / 2,
9457 (reg - start_reg) / 2);
9458 start_reg = reg + 2;
9461 if (start_reg != reg)
9462 arm_output_fldmx (f, IP_REGNUM,
9463 (start_reg - FIRST_VFP_REGNUM) / 2,
9464 (reg - start_reg) / 2);
9467 if (TARGET_IWMMXT)
9469 /* The frame pointer is guaranteed to be non-double-word aligned.
9470 This is because it is set to (old_stack_pointer - 4) and the
9471 old_stack_pointer was double word aligned. Thus the offset to
9472 the iWMMXt registers to be loaded must also be non-double-word
9473 sized, so that the resultant address *is* double-word aligned.
9474 We can ignore floats_offset since that was already included in
9475 the live_regs_mask. */
9476 lrm_count += (lrm_count % 2 ? 2 : 1);
9478 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9479 if (regs_ever_live[reg] && !call_used_regs[reg])
9481 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9482 reg, FP_REGNUM, lrm_count * 4);
9483 lrm_count += 2;
9487 /* saved_regs_mask should contain the IP, which at the time of stack
9488 frame generation actually contains the old stack pointer. So a
9489 quick way to unwind the stack is just pop the IP register directly
9490 into the stack pointer. */
9491 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9492 abort ();
9493 saved_regs_mask &= ~ (1 << IP_REGNUM);
9494 saved_regs_mask |= (1 << SP_REGNUM);
9496 /* There are two registers left in saved_regs_mask - LR and PC. We
9497 only need to restore the LR register (the return address), but to
9498 save time we can load it directly into the PC, unless we need a
9499 special function exit sequence, or we are not really returning. */
9500 if (really_return
9501 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9502 && !current_function_calls_eh_return)
9503 /* Delete the LR from the register mask, so that the LR on
9504 the stack is loaded into the PC in the register mask. */
9505 saved_regs_mask &= ~ (1 << LR_REGNUM);
9506 else
9507 saved_regs_mask &= ~ (1 << PC_REGNUM);
9509 /* We must use SP as the base register, because SP is one of the
9510 registers being restored. If an interrupt or page fault
9511 happens in the ldm instruction, the SP might or might not
9512 have been restored. That would be bad, as then SP will no
9513 longer indicate the safe area of stack, and we can get stack
9514 corruption. Using SP as the base register means that it will
9515 be reset correctly to the original value, should an interrupt
9516 occur. If the stack pointer already points at the right
9517 place, then omit the subtraction. */
9518 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9519 || current_function_calls_alloca)
9520 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9521 4 * bit_count (saved_regs_mask));
9522 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9524 if (IS_INTERRUPT (func_type))
9525 /* Interrupt handlers will have pushed the
9526 IP onto the stack, so restore it now. */
9527 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9529 else
9531 /* Restore stack pointer if necessary. */
9532 if (offsets->outgoing_args != offsets->saved_regs)
9534 operands[0] = operands[1] = stack_pointer_rtx;
9535 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9536 output_add_immediate (operands);
9539 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9541 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9542 if (regs_ever_live[reg] && !call_used_regs[reg])
9543 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9544 reg, SP_REGNUM);
9546 else
9548 start_reg = FIRST_FPA_REGNUM;
9550 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9552 if (regs_ever_live[reg] && !call_used_regs[reg])
9554 if (reg - start_reg == 3)
9556 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9557 start_reg, SP_REGNUM);
9558 start_reg = reg + 1;
9561 else
9563 if (reg != start_reg)
9564 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9565 start_reg, reg - start_reg,
9566 SP_REGNUM);
9568 start_reg = reg + 1;
9572 /* Just in case the last register checked also needs unstacking. */
9573 if (reg != start_reg)
9574 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9575 start_reg, reg - start_reg, SP_REGNUM);
9578 if (TARGET_HARD_FLOAT && TARGET_VFP)
9580 start_reg = FIRST_VFP_REGNUM;
9581 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9583 if ((!regs_ever_live[reg] || call_used_regs[reg])
9584 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9586 if (start_reg != reg)
9587 arm_output_fldmx (f, SP_REGNUM,
9588 (start_reg - FIRST_VFP_REGNUM) / 2,
9589 (reg - start_reg) / 2);
9590 start_reg = reg + 2;
9593 if (start_reg != reg)
9594 arm_output_fldmx (f, SP_REGNUM,
9595 (start_reg - FIRST_VFP_REGNUM) / 2,
9596 (reg - start_reg) / 2);
9598 if (TARGET_IWMMXT)
9599 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9600 if (regs_ever_live[reg] && !call_used_regs[reg])
9601 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9603 /* If we can, restore the LR into the PC. */
9604 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9605 && really_return
9606 && current_function_pretend_args_size == 0
9607 && saved_regs_mask & (1 << LR_REGNUM)
9608 && !current_function_calls_eh_return)
9610 saved_regs_mask &= ~ (1 << LR_REGNUM);
9611 saved_regs_mask |= (1 << PC_REGNUM);
9614 /* Load the registers off the stack. If we only have one register
9615 to load use the LDR instruction - it is faster. */
9616 if (saved_regs_mask == (1 << LR_REGNUM))
9618 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9620 else if (saved_regs_mask)
9622 if (saved_regs_mask & (1 << SP_REGNUM))
9623 /* Note - write back to the stack register is not enabled
9624 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9625 in the list of registers and if we add writeback the
9626 instruction becomes UNPREDICTABLE. */
9627 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9628 else
9629 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9632 if (current_function_pretend_args_size)
9634 /* Unwind the pre-pushed regs. */
9635 operands[0] = operands[1] = stack_pointer_rtx;
9636 operands[2] = GEN_INT (current_function_pretend_args_size);
9637 output_add_immediate (operands);
9641 /* We may have already restored PC directly from the stack. */
9642 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9643 return "";
9645 /* Stack adjustment for exception handler. */
9646 if (current_function_calls_eh_return)
9647 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9648 ARM_EH_STACKADJ_REGNUM);
9650 /* Generate the return instruction. */
9651 switch ((int) ARM_FUNC_TYPE (func_type))
9653 case ARM_FT_ISR:
9654 case ARM_FT_FIQ:
9655 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9656 break;
9658 case ARM_FT_EXCEPTION:
9659 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9660 break;
9662 case ARM_FT_INTERWORKED:
9663 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9664 break;
9666 default:
9667 if (arm_arch5 || arm_arch4t)
9668 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9669 else
9670 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9671 break;
9674 return "";
9677 static void
9678 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9679 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9681 arm_stack_offsets *offsets;
9683 if (TARGET_THUMB)
9685 int regno;
9687 /* Emit any call-via-reg trampolines that are needed for v4t support
9688 of call_reg and call_value_reg type insns. */
9689 for (regno = 0; regno < LR_REGNUM; regno++)
9691 rtx label = cfun->machine->call_via[regno];
9693 if (label != NULL)
9695 function_section (current_function_decl);
9696 targetm.asm_out.internal_label (asm_out_file, "L",
9697 CODE_LABEL_NUMBER (label));
9698 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9702 /* ??? Probably not safe to set this here, since it assumes that a
9703 function will be emitted as assembly immediately after we generate
9704 RTL for it. This does not happen for inline functions. */
9705 return_used_this_function = 0;
9707 else
9709 /* We need to take into account any stack-frame rounding. */
9710 offsets = arm_get_frame_offsets ();
9712 if (use_return_insn (FALSE, NULL)
9713 && return_used_this_function
9714 && offsets->saved_regs != offsets->outgoing_args
9715 && !frame_pointer_needed)
9716 abort ();
9718 /* Reset the ARM-specific per-function variables. */
9719 after_arm_reorg = 0;
9723 /* Generate and emit an insn that we will recognize as a push_multi.
9724 Unfortunately, since this insn does not reflect very well the actual
9725 semantics of the operation, we need to annotate the insn for the benefit
9726 of DWARF2 frame unwind information. */
9727 static rtx
9728 emit_multi_reg_push (unsigned long mask)
9730 int num_regs = 0;
9731 int num_dwarf_regs;
9732 int i, j;
9733 rtx par;
9734 rtx dwarf;
9735 int dwarf_par_index;
9736 rtx tmp, reg;
9738 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9739 if (mask & (1 << i))
9740 num_regs++;
9742 if (num_regs == 0 || num_regs > 16)
9743 abort ();
9745 /* We don't record the PC in the dwarf frame information. */
9746 num_dwarf_regs = num_regs;
9747 if (mask & (1 << PC_REGNUM))
9748 num_dwarf_regs--;
9750 /* For the body of the insn we are going to generate an UNSPEC in
9751 parallel with several USEs. This allows the insn to be recognized
9752 by the push_multi pattern in the arm.md file. The insn looks
9753 something like this:
9755 (parallel [
9756 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9757 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9758 (use (reg:SI 11 fp))
9759 (use (reg:SI 12 ip))
9760 (use (reg:SI 14 lr))
9761 (use (reg:SI 15 pc))
9764 For the frame note however, we try to be more explicit and actually
9765 show each register being stored into the stack frame, plus a (single)
9766 decrement of the stack pointer. We do it this way in order to be
9767 friendly to the stack unwinding code, which only wants to see a single
9768 stack decrement per instruction. The RTL we generate for the note looks
9769 something like this:
9771 (sequence [
9772 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9773 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9774 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9775 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9776 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9779 This sequence is used both by the code to support stack unwinding for
9780 exceptions handlers and the code to generate dwarf2 frame debugging. */
9782 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9783 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9784 dwarf_par_index = 1;
9786 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9788 if (mask & (1 << i))
9790 reg = gen_rtx_REG (SImode, i);
9792 XVECEXP (par, 0, 0)
9793 = gen_rtx_SET (VOIDmode,
9794 gen_rtx_MEM (BLKmode,
9795 gen_rtx_PRE_DEC (BLKmode,
9796 stack_pointer_rtx)),
9797 gen_rtx_UNSPEC (BLKmode,
9798 gen_rtvec (1, reg),
9799 UNSPEC_PUSH_MULT));
9801 if (i != PC_REGNUM)
9803 tmp = gen_rtx_SET (VOIDmode,
9804 gen_rtx_MEM (SImode, stack_pointer_rtx),
9805 reg);
9806 RTX_FRAME_RELATED_P (tmp) = 1;
9807 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9808 dwarf_par_index++;
9811 break;
9815 for (j = 1, i++; j < num_regs; i++)
9817 if (mask & (1 << i))
9819 reg = gen_rtx_REG (SImode, i);
9821 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9823 if (i != PC_REGNUM)
9825 tmp = gen_rtx_SET (VOIDmode,
9826 gen_rtx_MEM (SImode,
9827 plus_constant (stack_pointer_rtx,
9828 4 * j)),
9829 reg);
9830 RTX_FRAME_RELATED_P (tmp) = 1;
9831 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9834 j++;
9838 par = emit_insn (par);
9840 tmp = gen_rtx_SET (SImode,
9841 stack_pointer_rtx,
9842 gen_rtx_PLUS (SImode,
9843 stack_pointer_rtx,
9844 GEN_INT (-4 * num_regs)));
9845 RTX_FRAME_RELATED_P (tmp) = 1;
9846 XVECEXP (dwarf, 0, 0) = tmp;
9848 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9849 REG_NOTES (par));
9850 return par;
9853 static rtx
9854 emit_sfm (int base_reg, int count)
9856 rtx par;
9857 rtx dwarf;
9858 rtx tmp, reg;
9859 int i;
9861 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9862 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9864 reg = gen_rtx_REG (XFmode, base_reg++);
9866 XVECEXP (par, 0, 0)
9867 = gen_rtx_SET (VOIDmode,
9868 gen_rtx_MEM (BLKmode,
9869 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9870 gen_rtx_UNSPEC (BLKmode,
9871 gen_rtvec (1, reg),
9872 UNSPEC_PUSH_MULT));
9873 tmp = gen_rtx_SET (VOIDmode,
9874 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9875 RTX_FRAME_RELATED_P (tmp) = 1;
9876 XVECEXP (dwarf, 0, 1) = tmp;
9878 for (i = 1; i < count; i++)
9880 reg = gen_rtx_REG (XFmode, base_reg++);
9881 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9883 tmp = gen_rtx_SET (VOIDmode,
9884 gen_rtx_MEM (XFmode,
9885 plus_constant (stack_pointer_rtx,
9886 i * 12)),
9887 reg);
9888 RTX_FRAME_RELATED_P (tmp) = 1;
9889 XVECEXP (dwarf, 0, i + 1) = tmp;
9892 tmp = gen_rtx_SET (VOIDmode,
9893 stack_pointer_rtx,
9894 gen_rtx_PLUS (SImode,
9895 stack_pointer_rtx,
9896 GEN_INT (-12 * count)));
9897 RTX_FRAME_RELATED_P (tmp) = 1;
9898 XVECEXP (dwarf, 0, 0) = tmp;
9900 par = emit_insn (par);
9901 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9902 REG_NOTES (par));
9903 return par;
9907 /* Return true if the current function needs to save/restore LR. */
9909 static bool
9910 thumb_force_lr_save (void)
9912 return !cfun->machine->lr_save_eliminated
9913 && (!leaf_function_p ()
9914 || thumb_far_jump_used_p ()
9915 || regs_ever_live [LR_REGNUM]);
9919 /* Compute the distance from register FROM to register TO.
9920 These can be the arg pointer (26), the soft frame pointer (25),
9921 the stack pointer (13) or the hard frame pointer (11).
9922 In thumb mode r7 is used as the soft frame pointer, if needed.
9923 Typical stack layout looks like this:
9925 old stack pointer -> | |
9926 ----
9927 | | \
9928 | | saved arguments for
9929 | | vararg functions
9930 | | /
9932 hard FP & arg pointer -> | | \
9933 | | stack
9934 | | frame
9935 | | /
9937 | | \
9938 | | call saved
9939 | | registers
9940 soft frame pointer -> | | /
9942 | | \
9943 | | local
9944 | | variables
9945 | | /
9947 | | \
9948 | | outgoing
9949 | | arguments
9950 current stack pointer -> | | /
9953 For a given function some or all of these stack components
9954 may not be needed, giving rise to the possibility of
9955 eliminating some of the registers.
9957 The values returned by this function must reflect the behavior
9958 of arm_expand_prologue() and arm_compute_save_reg_mask().
9960 The sign of the number returned reflects the direction of stack
9961 growth, so the values are positive for all eliminations except
9962 from the soft frame pointer to the hard frame pointer.
9964 SFP may point just inside the local variables block to ensure correct
9965 alignment. */
9968 /* Calculate stack offsets. These are used to calculate register elimination
9969 offsets and in prologue/epilogue code. */
9971 static arm_stack_offsets *
9972 arm_get_frame_offsets (void)
9974 struct arm_stack_offsets *offsets;
9975 unsigned long func_type;
9976 int leaf;
9977 int saved;
9978 HOST_WIDE_INT frame_size;
9980 offsets = &cfun->machine->stack_offsets;
9982 /* We need to know if we are a leaf function. Unfortunately, it
9983 is possible to be called after start_sequence has been called,
9984 which causes get_insns to return the insns for the sequence,
9985 not the function, which will cause leaf_function_p to return
9986 the incorrect result.
9988 to know about leaf functions once reload has completed, and the
9989 frame size cannot be changed after that time, so we can safely
9990 use the cached value. */
9992 if (reload_completed)
9993 return offsets;
9995 /* Initially this is the size of the local variables. It will translated
9996 into an offset once we have determined the size of preceding data. */
9997 frame_size = ROUND_UP_WORD (get_frame_size ());
9999 leaf = leaf_function_p ();
10001 /* Space for variadic functions. */
10002 offsets->saved_args = current_function_pretend_args_size;
10004 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10006 if (TARGET_ARM)
10008 unsigned int regno;
10010 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10012 /* We know that SP will be doubleword aligned on entry, and we must
10013 preserve that condition at any subroutine call. We also require the
10014 soft frame pointer to be doubleword aligned. */
10016 if (TARGET_REALLY_IWMMXT)
10018 /* Check for the call-saved iWMMXt registers. */
10019 for (regno = FIRST_IWMMXT_REGNUM;
10020 regno <= LAST_IWMMXT_REGNUM;
10021 regno++)
10022 if (regs_ever_live [regno] && ! call_used_regs [regno])
10023 saved += 8;
10026 func_type = arm_current_func_type ();
10027 if (! IS_VOLATILE (func_type))
10029 /* Space for saved FPA registers. */
10030 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10031 if (regs_ever_live[regno] && ! call_used_regs[regno])
10032 saved += 12;
10034 /* Space for saved VFP registers. */
10035 if (TARGET_HARD_FLOAT && TARGET_VFP)
10036 saved += arm_get_vfp_saved_size ();
10039 else /* TARGET_THUMB */
10041 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10042 if (TARGET_BACKTRACE)
10043 saved += 16;
10046 /* Saved registers include the stack frame. */
10047 offsets->saved_regs = offsets->saved_args + saved;
10048 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10049 /* A leaf function does not need any stack alignment if it has nothing
10050 on the stack. */
10051 if (leaf && frame_size == 0)
10053 offsets->outgoing_args = offsets->soft_frame;
10054 return offsets;
10057 /* Ensure SFP has the correct alignment. */
10058 if (ARM_DOUBLEWORD_ALIGN
10059 && (offsets->soft_frame & 7))
10060 offsets->soft_frame += 4;
10062 offsets->outgoing_args = offsets->soft_frame + frame_size
10063 + current_function_outgoing_args_size;
10065 if (ARM_DOUBLEWORD_ALIGN)
10067 /* Ensure SP remains doubleword aligned. */
10068 if (offsets->outgoing_args & 7)
10069 offsets->outgoing_args += 4;
10070 if (offsets->outgoing_args & 7)
10071 abort ();
10074 return offsets;
10078 /* Calculate the relative offsets for the different stack pointers. Positive
10079 offsets are in the direction of stack growth. */
10081 HOST_WIDE_INT
10082 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10084 arm_stack_offsets *offsets;
10086 offsets = arm_get_frame_offsets ();
10088 /* OK, now we have enough information to compute the distances.
10089 There must be an entry in these switch tables for each pair
10090 of registers in ELIMINABLE_REGS, even if some of the entries
10091 seem to be redundant or useless. */
10092 switch (from)
10094 case ARG_POINTER_REGNUM:
10095 switch (to)
10097 case THUMB_HARD_FRAME_POINTER_REGNUM:
10098 return 0;
10100 case FRAME_POINTER_REGNUM:
10101 /* This is the reverse of the soft frame pointer
10102 to hard frame pointer elimination below. */
10103 return offsets->soft_frame - offsets->saved_args;
10105 case ARM_HARD_FRAME_POINTER_REGNUM:
10106 /* If there is no stack frame then the hard
10107 frame pointer and the arg pointer coincide. */
10108 if (offsets->frame == offsets->saved_regs)
10109 return 0;
10110 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10111 return (frame_pointer_needed
10112 && cfun->static_chain_decl != NULL
10113 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10115 case STACK_POINTER_REGNUM:
10116 /* If nothing has been pushed on the stack at all
10117 then this will return -4. This *is* correct! */
10118 return offsets->outgoing_args - (offsets->saved_args + 4);
10120 default:
10121 abort ();
10123 break;
10125 case FRAME_POINTER_REGNUM:
10126 switch (to)
10128 case THUMB_HARD_FRAME_POINTER_REGNUM:
10129 return 0;
10131 case ARM_HARD_FRAME_POINTER_REGNUM:
10132 /* The hard frame pointer points to the top entry in the
10133 stack frame. The soft frame pointer to the bottom entry
10134 in the stack frame. If there is no stack frame at all,
10135 then they are identical. */
10137 return offsets->frame - offsets->soft_frame;
10139 case STACK_POINTER_REGNUM:
10140 return offsets->outgoing_args - offsets->soft_frame;
10142 default:
10143 abort ();
10145 break;
10147 default:
10148 /* You cannot eliminate from the stack pointer.
10149 In theory you could eliminate from the hard frame
10150 pointer to the stack pointer, but this will never
10151 happen, since if a stack frame is not needed the
10152 hard frame pointer will never be used. */
10153 abort ();
10158 /* Generate the prologue instructions for entry into an ARM function. */
10159 void
10160 arm_expand_prologue (void)
10162 int reg;
10163 rtx amount;
10164 rtx insn;
10165 rtx ip_rtx;
10166 unsigned long live_regs_mask;
10167 unsigned long func_type;
10168 int fp_offset = 0;
10169 int saved_pretend_args = 0;
10170 int saved_regs = 0;
10171 unsigned HOST_WIDE_INT args_to_push;
10172 arm_stack_offsets *offsets;
10174 func_type = arm_current_func_type ();
10176 /* Naked functions don't have prologues. */
10177 if (IS_NAKED (func_type))
10178 return;
10180 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10181 args_to_push = current_function_pretend_args_size;
10183 /* Compute which register we will have to save onto the stack. */
10184 live_regs_mask = arm_compute_save_reg_mask ();
10186 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10188 if (frame_pointer_needed)
10190 if (IS_INTERRUPT (func_type))
10192 /* Interrupt functions must not corrupt any registers.
10193 Creating a frame pointer however, corrupts the IP
10194 register, so we must push it first. */
10195 insn = emit_multi_reg_push (1 << IP_REGNUM);
10197 /* Do not set RTX_FRAME_RELATED_P on this insn.
10198 The dwarf stack unwinding code only wants to see one
10199 stack decrement per function, and this is not it. If
10200 this instruction is labeled as being part of the frame
10201 creation sequence then dwarf2out_frame_debug_expr will
10202 abort when it encounters the assignment of IP to FP
10203 later on, since the use of SP here establishes SP as
10204 the CFA register and not IP.
10206 Anyway this instruction is not really part of the stack
10207 frame creation although it is part of the prologue. */
10209 else if (IS_NESTED (func_type))
10211 /* The Static chain register is the same as the IP register
10212 used as a scratch register during stack frame creation.
10213 To get around this need to find somewhere to store IP
10214 whilst the frame is being created. We try the following
10215 places in order:
10217 1. The last argument register.
10218 2. A slot on the stack above the frame. (This only
10219 works if the function is not a varargs function).
10220 3. Register r3, after pushing the argument registers
10221 onto the stack.
10223 Note - we only need to tell the dwarf2 backend about the SP
10224 adjustment in the second variant; the static chain register
10225 doesn't need to be unwound, as it doesn't contain a value
10226 inherited from the caller. */
10228 if (regs_ever_live[3] == 0)
10230 insn = gen_rtx_REG (SImode, 3);
10231 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10232 insn = emit_insn (insn);
10234 else if (args_to_push == 0)
10236 rtx dwarf;
10237 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10238 insn = gen_rtx_MEM (SImode, insn);
10239 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10240 insn = emit_insn (insn);
10242 fp_offset = 4;
10244 /* Just tell the dwarf backend that we adjusted SP. */
10245 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10246 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10247 GEN_INT (-fp_offset)));
10248 RTX_FRAME_RELATED_P (insn) = 1;
10249 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10250 dwarf, REG_NOTES (insn));
10252 else
10254 /* Store the args on the stack. */
10255 if (cfun->machine->uses_anonymous_args)
10256 insn = emit_multi_reg_push
10257 ((0xf0 >> (args_to_push / 4)) & 0xf);
10258 else
10259 insn = emit_insn
10260 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10261 GEN_INT (- args_to_push)));
10263 RTX_FRAME_RELATED_P (insn) = 1;
10265 saved_pretend_args = 1;
10266 fp_offset = args_to_push;
10267 args_to_push = 0;
10269 /* Now reuse r3 to preserve IP. */
10270 insn = gen_rtx_REG (SImode, 3);
10271 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10272 (void) emit_insn (insn);
10276 if (fp_offset)
10278 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10279 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10281 else
10282 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10284 insn = emit_insn (insn);
10285 RTX_FRAME_RELATED_P (insn) = 1;
10288 if (args_to_push)
10290 /* Push the argument registers, or reserve space for them. */
10291 if (cfun->machine->uses_anonymous_args)
10292 insn = emit_multi_reg_push
10293 ((0xf0 >> (args_to_push / 4)) & 0xf);
10294 else
10295 insn = emit_insn
10296 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10297 GEN_INT (- args_to_push)));
10298 RTX_FRAME_RELATED_P (insn) = 1;
10301 /* If this is an interrupt service routine, and the link register
10302 is going to be pushed, and we are not creating a stack frame,
10303 (which would involve an extra push of IP and a pop in the epilogue)
10304 subtracting four from LR now will mean that the function return
10305 can be done with a single instruction. */
10306 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10307 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10308 && ! frame_pointer_needed)
10309 emit_insn (gen_rtx_SET (SImode,
10310 gen_rtx_REG (SImode, LR_REGNUM),
10311 gen_rtx_PLUS (SImode,
10312 gen_rtx_REG (SImode, LR_REGNUM),
10313 GEN_INT (-4))));
10315 if (live_regs_mask)
10317 insn = emit_multi_reg_push (live_regs_mask);
10318 saved_regs += bit_count (live_regs_mask) * 4;
10319 RTX_FRAME_RELATED_P (insn) = 1;
10322 if (TARGET_IWMMXT)
10323 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10324 if (regs_ever_live[reg] && ! call_used_regs [reg])
10326 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10327 insn = gen_rtx_MEM (V2SImode, insn);
10328 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10329 gen_rtx_REG (V2SImode, reg)));
10330 RTX_FRAME_RELATED_P (insn) = 1;
10331 saved_regs += 8;
10334 if (! IS_VOLATILE (func_type))
10336 int start_reg;
10338 /* Save any floating point call-saved registers used by this
10339 function. */
10340 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10342 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10343 if (regs_ever_live[reg] && !call_used_regs[reg])
10345 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10346 insn = gen_rtx_MEM (XFmode, insn);
10347 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10348 gen_rtx_REG (XFmode, reg)));
10349 RTX_FRAME_RELATED_P (insn) = 1;
10350 saved_regs += 12;
10353 else
10355 start_reg = LAST_FPA_REGNUM;
10357 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10359 if (regs_ever_live[reg] && !call_used_regs[reg])
10361 if (start_reg - reg == 3)
10363 insn = emit_sfm (reg, 4);
10364 RTX_FRAME_RELATED_P (insn) = 1;
10365 saved_regs += 48;
10366 start_reg = reg - 1;
10369 else
10371 if (start_reg != reg)
10373 insn = emit_sfm (reg + 1, start_reg - reg);
10374 RTX_FRAME_RELATED_P (insn) = 1;
10375 saved_regs += (start_reg - reg) * 12;
10377 start_reg = reg - 1;
10381 if (start_reg != reg)
10383 insn = emit_sfm (reg + 1, start_reg - reg);
10384 saved_regs += (start_reg - reg) * 12;
10385 RTX_FRAME_RELATED_P (insn) = 1;
10388 if (TARGET_HARD_FLOAT && TARGET_VFP)
10390 start_reg = FIRST_VFP_REGNUM;
10392 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10394 if ((!regs_ever_live[reg] || call_used_regs[reg])
10395 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10397 if (start_reg != reg)
10398 saved_regs += vfp_emit_fstmx (start_reg,
10399 (reg - start_reg) / 2);
10400 start_reg = reg + 2;
10403 if (start_reg != reg)
10404 saved_regs += vfp_emit_fstmx (start_reg,
10405 (reg - start_reg) / 2);
10409 if (frame_pointer_needed)
10411 /* Create the new frame pointer. */
10412 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10413 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10414 RTX_FRAME_RELATED_P (insn) = 1;
10416 if (IS_NESTED (func_type))
10418 /* Recover the static chain register. */
10419 if (regs_ever_live [3] == 0
10420 || saved_pretend_args)
10421 insn = gen_rtx_REG (SImode, 3);
10422 else /* if (current_function_pretend_args_size == 0) */
10424 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10425 GEN_INT (4));
10426 insn = gen_rtx_MEM (SImode, insn);
10429 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10430 /* Add a USE to stop propagate_one_insn() from barfing. */
10431 emit_insn (gen_prologue_use (ip_rtx));
10435 offsets = arm_get_frame_offsets ();
10436 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10438 /* This add can produce multiple insns for a large constant, so we
10439 need to get tricky. */
10440 rtx last = get_last_insn ();
10442 amount = GEN_INT (offsets->saved_args + saved_regs
10443 - offsets->outgoing_args);
10445 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10446 amount));
10449 last = last ? NEXT_INSN (last) : get_insns ();
10450 RTX_FRAME_RELATED_P (last) = 1;
10452 while (last != insn);
10454 /* If the frame pointer is needed, emit a special barrier that
10455 will prevent the scheduler from moving stores to the frame
10456 before the stack adjustment. */
10457 if (frame_pointer_needed)
10458 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10459 hard_frame_pointer_rtx));
10463 if (flag_pic)
10464 arm_load_pic_register (INVALID_REGNUM);
10466 /* If we are profiling, make sure no instructions are scheduled before
10467 the call to mcount. Similarly if the user has requested no
10468 scheduling in the prolog. */
10469 if (current_function_profile || TARGET_NO_SCHED_PRO)
10470 emit_insn (gen_blockage ());
10472 /* If the link register is being kept alive, with the return address in it,
10473 then make sure that it does not get reused by the ce2 pass. */
10474 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10476 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10477 cfun->machine->lr_save_eliminated = 1;
10481 /* If CODE is 'd', then the X is a condition operand and the instruction
10482 should only be executed if the condition is true.
10483 if CODE is 'D', then the X is a condition operand and the instruction
10484 should only be executed if the condition is false: however, if the mode
10485 of the comparison is CCFPEmode, then always execute the instruction -- we
10486 do this because in these circumstances !GE does not necessarily imply LT;
10487 in these cases the instruction pattern will take care to make sure that
10488 an instruction containing %d will follow, thereby undoing the effects of
10489 doing this instruction unconditionally.
10490 If CODE is 'N' then X is a floating point operand that must be negated
10491 before output.
10492 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10493 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10494 void
10495 arm_print_operand (FILE *stream, rtx x, int code)
10497 switch (code)
10499 case '@':
10500 fputs (ASM_COMMENT_START, stream);
10501 return;
10503 case '_':
10504 fputs (user_label_prefix, stream);
10505 return;
10507 case '|':
10508 fputs (REGISTER_PREFIX, stream);
10509 return;
10511 case '?':
10512 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10514 if (TARGET_THUMB)
10516 output_operand_lossage ("predicated Thumb instruction");
10517 break;
10519 if (current_insn_predicate != NULL)
10521 output_operand_lossage
10522 ("predicated instruction in conditional sequence");
10523 break;
10526 fputs (arm_condition_codes[arm_current_cc], stream);
10528 else if (current_insn_predicate)
10530 enum arm_cond_code code;
10532 if (TARGET_THUMB)
10534 output_operand_lossage ("predicated Thumb instruction");
10535 break;
10538 code = get_arm_condition_code (current_insn_predicate);
10539 fputs (arm_condition_codes[code], stream);
10541 return;
10543 case 'N':
10545 REAL_VALUE_TYPE r;
10546 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10547 r = REAL_VALUE_NEGATE (r);
10548 fprintf (stream, "%s", fp_const_from_val (&r));
10550 return;
10552 case 'B':
10553 if (GET_CODE (x) == CONST_INT)
10555 HOST_WIDE_INT val;
10556 val = ARM_SIGN_EXTEND (~INTVAL (x));
10557 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10559 else
10561 putc ('~', stream);
10562 output_addr_const (stream, x);
10564 return;
10566 case 'i':
10567 fprintf (stream, "%s", arithmetic_instr (x, 1));
10568 return;
10570 /* Truncate Cirrus shift counts. */
10571 case 's':
10572 if (GET_CODE (x) == CONST_INT)
10574 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10575 return;
10577 arm_print_operand (stream, x, 0);
10578 return;
10580 case 'I':
10581 fprintf (stream, "%s", arithmetic_instr (x, 0));
10582 return;
10584 case 'S':
10586 HOST_WIDE_INT val;
10587 const char * shift = shift_op (x, &val);
10589 if (shift)
10591 fprintf (stream, ", %s ", shift_op (x, &val));
10592 if (val == -1)
10593 arm_print_operand (stream, XEXP (x, 1), 0);
10594 else
10595 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10598 return;
10600 /* An explanation of the 'Q', 'R' and 'H' register operands:
10602 In a pair of registers containing a DI or DF value the 'Q'
10603 operand returns the register number of the register containing
10604 the least significant part of the value. The 'R' operand returns
10605 the register number of the register containing the most
10606 significant part of the value.
10608 The 'H' operand returns the higher of the two register numbers.
10609 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10610 same as the 'Q' operand, since the most significant part of the
10611 value is held in the lower number register. The reverse is true
10612 on systems where WORDS_BIG_ENDIAN is false.
10614 The purpose of these operands is to distinguish between cases
10615 where the endian-ness of the values is important (for example
10616 when they are added together), and cases where the endian-ness
10617 is irrelevant, but the order of register operations is important.
10618 For example when loading a value from memory into a register
10619 pair, the endian-ness does not matter. Provided that the value
10620 from the lower memory address is put into the lower numbered
10621 register, and the value from the higher address is put into the
10622 higher numbered register, the load will work regardless of whether
10623 the value being loaded is big-wordian or little-wordian. The
10624 order of the two register loads can matter however, if the address
10625 of the memory location is actually held in one of the registers
10626 being overwritten by the load. */
10627 case 'Q':
10628 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10630 output_operand_lossage ("invalid operand for code '%c'", code);
10631 return;
10634 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10635 return;
10637 case 'R':
10638 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10640 output_operand_lossage ("invalid operand for code '%c'", code);
10641 return;
10644 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10645 return;
10647 case 'H':
10648 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10650 output_operand_lossage ("invalid operand for code '%c'", code);
10651 return;
10654 asm_fprintf (stream, "%r", REGNO (x) + 1);
10655 return;
10657 case 'm':
10658 asm_fprintf (stream, "%r",
10659 GET_CODE (XEXP (x, 0)) == REG
10660 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10661 return;
10663 case 'M':
10664 asm_fprintf (stream, "{%r-%r}",
10665 REGNO (x),
10666 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10667 return;
10669 case 'd':
10670 /* CONST_TRUE_RTX means always -- that's the default. */
10671 if (x == const_true_rtx)
10672 return;
10674 if (!COMPARISON_P (x))
10676 output_operand_lossage ("invalid operand for code '%c'", code);
10677 return;
10680 fputs (arm_condition_codes[get_arm_condition_code (x)],
10681 stream);
10682 return;
10684 case 'D':
10685 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10686 want to do that. */
10687 if (x == const_true_rtx)
10689 output_operand_lossage ("instruction never exectued");
10690 return;
10692 if (!COMPARISON_P (x))
10694 output_operand_lossage ("invalid operand for code '%c'", code);
10695 return;
10698 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10699 (get_arm_condition_code (x))],
10700 stream);
10701 return;
10703 /* Cirrus registers can be accessed in a variety of ways:
10704 single floating point (f)
10705 double floating point (d)
10706 32bit integer (fx)
10707 64bit integer (dx). */
10708 case 'W': /* Cirrus register in F mode. */
10709 case 'X': /* Cirrus register in D mode. */
10710 case 'Y': /* Cirrus register in FX mode. */
10711 case 'Z': /* Cirrus register in DX mode. */
10712 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10713 abort ();
10715 fprintf (stream, "mv%s%s",
10716 code == 'W' ? "f"
10717 : code == 'X' ? "d"
10718 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10720 return;
10722 /* Print cirrus register in the mode specified by the register's mode. */
10723 case 'V':
10725 int mode = GET_MODE (x);
10727 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10729 output_operand_lossage ("invalid operand for code '%c'", code);
10730 return;
10733 fprintf (stream, "mv%s%s",
10734 mode == DFmode ? "d"
10735 : mode == SImode ? "fx"
10736 : mode == DImode ? "dx"
10737 : "f", reg_names[REGNO (x)] + 2);
10739 return;
10742 case 'U':
10743 if (GET_CODE (x) != REG
10744 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10745 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10746 /* Bad value for wCG register number. */
10748 output_operand_lossage ("invalid operand for code '%c'", code);
10749 return;
10752 else
10753 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10754 return;
10756 /* Print an iWMMXt control register name. */
10757 case 'w':
10758 if (GET_CODE (x) != CONST_INT
10759 || INTVAL (x) < 0
10760 || INTVAL (x) >= 16)
10761 /* Bad value for wC register number. */
10763 output_operand_lossage ("invalid operand for code '%c'", code);
10764 return;
10767 else
10769 static const char * wc_reg_names [16] =
10771 "wCID", "wCon", "wCSSF", "wCASF",
10772 "wC4", "wC5", "wC6", "wC7",
10773 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10774 "wC12", "wC13", "wC14", "wC15"
10777 fprintf (stream, wc_reg_names [INTVAL (x)]);
10779 return;
10781 /* Print a VFP double precision register name. */
10782 case 'P':
10784 int mode = GET_MODE (x);
10785 int num;
10787 if (mode != DImode && mode != DFmode)
10789 output_operand_lossage ("invalid operand for code '%c'", code);
10790 return;
10793 if (GET_CODE (x) != REG
10794 || !IS_VFP_REGNUM (REGNO (x)))
10796 output_operand_lossage ("invalid operand for code '%c'", code);
10797 return;
10800 num = REGNO(x) - FIRST_VFP_REGNUM;
10801 if (num & 1)
10803 output_operand_lossage ("invalid operand for code '%c'", code);
10804 return;
10807 fprintf (stream, "d%d", num >> 1);
10809 return;
10811 default:
10812 if (x == 0)
10814 output_operand_lossage ("missing operand");
10815 return;
10818 if (GET_CODE (x) == REG)
10819 asm_fprintf (stream, "%r", REGNO (x));
10820 else if (GET_CODE (x) == MEM)
10822 output_memory_reference_mode = GET_MODE (x);
10823 output_address (XEXP (x, 0));
10825 else if (GET_CODE (x) == CONST_DOUBLE)
10826 fprintf (stream, "#%s", fp_immediate_constant (x));
10827 else if (GET_CODE (x) == NEG)
10828 abort (); /* This should never happen now. */
10829 else
10831 fputc ('#', stream);
10832 output_addr_const (stream, x);
10837 #ifndef AOF_ASSEMBLER
10838 /* Target hook for assembling integer objects. The ARM version needs to
10839 handle word-sized values specially. */
10840 static bool
10841 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10843 if (size == UNITS_PER_WORD && aligned_p)
10845 fputs ("\t.word\t", asm_out_file);
10846 output_addr_const (asm_out_file, x);
10848 /* Mark symbols as position independent. We only do this in the
10849 .text segment, not in the .data segment. */
10850 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10851 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10853 if (GET_CODE (x) == SYMBOL_REF
10854 && (CONSTANT_POOL_ADDRESS_P (x)
10855 || SYMBOL_REF_LOCAL_P (x)))
10856 fputs ("(GOTOFF)", asm_out_file);
10857 else if (GET_CODE (x) == LABEL_REF)
10858 fputs ("(GOTOFF)", asm_out_file);
10859 else
10860 fputs ("(GOT)", asm_out_file);
10862 fputc ('\n', asm_out_file);
10863 return true;
10866 if (arm_vector_mode_supported_p (GET_MODE (x)))
10868 int i, units;
10870 if (GET_CODE (x) != CONST_VECTOR)
10871 abort ();
10873 units = CONST_VECTOR_NUNITS (x);
10875 switch (GET_MODE (x))
10877 case V2SImode: size = 4; break;
10878 case V4HImode: size = 2; break;
10879 case V8QImode: size = 1; break;
10880 default:
10881 abort ();
10884 for (i = 0; i < units; i++)
10886 rtx elt;
10888 elt = CONST_VECTOR_ELT (x, i);
10889 assemble_integer
10890 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10893 return true;
10896 return default_assemble_integer (x, size, aligned_p);
10898 #endif
10900 /* A finite state machine takes care of noticing whether or not instructions
10901 can be conditionally executed, and thus decrease execution time and code
10902 size by deleting branch instructions. The fsm is controlled by
10903 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10905 /* The state of the fsm controlling condition codes are:
10906 0: normal, do nothing special
10907 1: make ASM_OUTPUT_OPCODE not output this instruction
10908 2: make ASM_OUTPUT_OPCODE not output this instruction
10909 3: make instructions conditional
10910 4: make instructions conditional
10912 State transitions (state->state by whom under condition):
10913 0 -> 1 final_prescan_insn if the `target' is a label
10914 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10915 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10916 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10917 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10918 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10919 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10920 (the target insn is arm_target_insn).
10922 If the jump clobbers the conditions then we use states 2 and 4.
10924 A similar thing can be done with conditional return insns.
10926 XXX In case the `target' is an unconditional branch, this conditionalising
10927 of the instructions always reduces code size, but not always execution
10928 time. But then, I want to reduce the code size to somewhere near what
10929 /bin/cc produces. */
10931 /* Returns the index of the ARM condition code string in
10932 `arm_condition_codes'. COMPARISON should be an rtx like
10933 `(eq (...) (...))'. */
10934 static enum arm_cond_code
10935 get_arm_condition_code (rtx comparison)
10937 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10938 int code;
10939 enum rtx_code comp_code = GET_CODE (comparison);
10941 if (GET_MODE_CLASS (mode) != MODE_CC)
10942 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10943 XEXP (comparison, 1));
10945 switch (mode)
10947 case CC_DNEmode: code = ARM_NE; goto dominance;
10948 case CC_DEQmode: code = ARM_EQ; goto dominance;
10949 case CC_DGEmode: code = ARM_GE; goto dominance;
10950 case CC_DGTmode: code = ARM_GT; goto dominance;
10951 case CC_DLEmode: code = ARM_LE; goto dominance;
10952 case CC_DLTmode: code = ARM_LT; goto dominance;
10953 case CC_DGEUmode: code = ARM_CS; goto dominance;
10954 case CC_DGTUmode: code = ARM_HI; goto dominance;
10955 case CC_DLEUmode: code = ARM_LS; goto dominance;
10956 case CC_DLTUmode: code = ARM_CC;
10958 dominance:
10959 if (comp_code != EQ && comp_code != NE)
10960 abort ();
10962 if (comp_code == EQ)
10963 return ARM_INVERSE_CONDITION_CODE (code);
10964 return code;
10966 case CC_NOOVmode:
10967 switch (comp_code)
10969 case NE: return ARM_NE;
10970 case EQ: return ARM_EQ;
10971 case GE: return ARM_PL;
10972 case LT: return ARM_MI;
10973 default: abort ();
10976 case CC_Zmode:
10977 switch (comp_code)
10979 case NE: return ARM_NE;
10980 case EQ: return ARM_EQ;
10981 default: abort ();
10984 case CC_Nmode:
10985 switch (comp_code)
10987 case NE: return ARM_MI;
10988 case EQ: return ARM_PL;
10989 default: abort ();
10992 case CCFPEmode:
10993 case CCFPmode:
10994 /* These encodings assume that AC=1 in the FPA system control
10995 byte. This allows us to handle all cases except UNEQ and
10996 LTGT. */
10997 switch (comp_code)
10999 case GE: return ARM_GE;
11000 case GT: return ARM_GT;
11001 case LE: return ARM_LS;
11002 case LT: return ARM_MI;
11003 case NE: return ARM_NE;
11004 case EQ: return ARM_EQ;
11005 case ORDERED: return ARM_VC;
11006 case UNORDERED: return ARM_VS;
11007 case UNLT: return ARM_LT;
11008 case UNLE: return ARM_LE;
11009 case UNGT: return ARM_HI;
11010 case UNGE: return ARM_PL;
11011 /* UNEQ and LTGT do not have a representation. */
11012 case UNEQ: /* Fall through. */
11013 case LTGT: /* Fall through. */
11014 default: abort ();
11017 case CC_SWPmode:
11018 switch (comp_code)
11020 case NE: return ARM_NE;
11021 case EQ: return ARM_EQ;
11022 case GE: return ARM_LE;
11023 case GT: return ARM_LT;
11024 case LE: return ARM_GE;
11025 case LT: return ARM_GT;
11026 case GEU: return ARM_LS;
11027 case GTU: return ARM_CC;
11028 case LEU: return ARM_CS;
11029 case LTU: return ARM_HI;
11030 default: abort ();
11033 case CC_Cmode:
11034 switch (comp_code)
11036 case LTU: return ARM_CS;
11037 case GEU: return ARM_CC;
11038 default: abort ();
11041 case CCmode:
11042 switch (comp_code)
11044 case NE: return ARM_NE;
11045 case EQ: return ARM_EQ;
11046 case GE: return ARM_GE;
11047 case GT: return ARM_GT;
11048 case LE: return ARM_LE;
11049 case LT: return ARM_LT;
11050 case GEU: return ARM_CS;
11051 case GTU: return ARM_HI;
11052 case LEU: return ARM_LS;
11053 case LTU: return ARM_CC;
11054 default: abort ();
11057 default: abort ();
11060 abort ();
11063 void
11064 arm_final_prescan_insn (rtx insn)
11066 /* BODY will hold the body of INSN. */
11067 rtx body = PATTERN (insn);
11069 /* This will be 1 if trying to repeat the trick, and things need to be
11070 reversed if it appears to fail. */
11071 int reverse = 0;
11073 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
11074 taken are clobbered, even if the rtl suggests otherwise. It also
11075 means that we have to grub around within the jump expression to find
11076 out what the conditions are when the jump isn't taken. */
11077 int jump_clobbers = 0;
11079 /* If we start with a return insn, we only succeed if we find another one. */
11080 int seeking_return = 0;
11082 /* START_INSN will hold the insn from where we start looking. This is the
11083 first insn after the following code_label if REVERSE is true. */
11084 rtx start_insn = insn;
11086 /* If in state 4, check if the target branch is reached, in order to
11087 change back to state 0. */
11088 if (arm_ccfsm_state == 4)
11090 if (insn == arm_target_insn)
11092 arm_target_insn = NULL;
11093 arm_ccfsm_state = 0;
11095 return;
11098 /* If in state 3, it is possible to repeat the trick, if this insn is an
11099 unconditional branch to a label, and immediately following this branch
11100 is the previous target label which is only used once, and the label this
11101 branch jumps to is not too far off. */
11102 if (arm_ccfsm_state == 3)
11104 if (simplejump_p (insn))
11106 start_insn = next_nonnote_insn (start_insn);
11107 if (GET_CODE (start_insn) == BARRIER)
11109 /* XXX Isn't this always a barrier? */
11110 start_insn = next_nonnote_insn (start_insn);
11112 if (GET_CODE (start_insn) == CODE_LABEL
11113 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11114 && LABEL_NUSES (start_insn) == 1)
11115 reverse = TRUE;
11116 else
11117 return;
11119 else if (GET_CODE (body) == RETURN)
11121 start_insn = next_nonnote_insn (start_insn);
11122 if (GET_CODE (start_insn) == BARRIER)
11123 start_insn = next_nonnote_insn (start_insn);
11124 if (GET_CODE (start_insn) == CODE_LABEL
11125 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11126 && LABEL_NUSES (start_insn) == 1)
11128 reverse = TRUE;
11129 seeking_return = 1;
11131 else
11132 return;
11134 else
11135 return;
11138 if (arm_ccfsm_state != 0 && !reverse)
11139 abort ();
11140 if (GET_CODE (insn) != JUMP_INSN)
11141 return;
11143 /* This jump might be paralleled with a clobber of the condition codes
11144 the jump should always come first */
11145 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11146 body = XVECEXP (body, 0, 0);
11148 if (reverse
11149 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11150 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11152 int insns_skipped;
11153 int fail = FALSE, succeed = FALSE;
11154 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11155 int then_not_else = TRUE;
11156 rtx this_insn = start_insn, label = 0;
11158 /* If the jump cannot be done with one instruction, we cannot
11159 conditionally execute the instruction in the inverse case. */
11160 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11162 jump_clobbers = 1;
11163 return;
11166 /* Register the insn jumped to. */
11167 if (reverse)
11169 if (!seeking_return)
11170 label = XEXP (SET_SRC (body), 0);
11172 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11173 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11174 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11176 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11177 then_not_else = FALSE;
11179 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11180 seeking_return = 1;
11181 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11183 seeking_return = 1;
11184 then_not_else = FALSE;
11186 else
11187 abort ();
11189 /* See how many insns this branch skips, and what kind of insns. If all
11190 insns are okay, and the label or unconditional branch to the same
11191 label is not too far away, succeed. */
11192 for (insns_skipped = 0;
11193 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11195 rtx scanbody;
11197 this_insn = next_nonnote_insn (this_insn);
11198 if (!this_insn)
11199 break;
11201 switch (GET_CODE (this_insn))
11203 case CODE_LABEL:
11204 /* Succeed if it is the target label, otherwise fail since
11205 control falls in from somewhere else. */
11206 if (this_insn == label)
11208 if (jump_clobbers)
11210 arm_ccfsm_state = 2;
11211 this_insn = next_nonnote_insn (this_insn);
11213 else
11214 arm_ccfsm_state = 1;
11215 succeed = TRUE;
11217 else
11218 fail = TRUE;
11219 break;
11221 case BARRIER:
11222 /* Succeed if the following insn is the target label.
11223 Otherwise fail.
11224 If return insns are used then the last insn in a function
11225 will be a barrier. */
11226 this_insn = next_nonnote_insn (this_insn);
11227 if (this_insn && this_insn == label)
11229 if (jump_clobbers)
11231 arm_ccfsm_state = 2;
11232 this_insn = next_nonnote_insn (this_insn);
11234 else
11235 arm_ccfsm_state = 1;
11236 succeed = TRUE;
11238 else
11239 fail = TRUE;
11240 break;
11242 case CALL_INSN:
11243 /* The AAPCS says that conditional calls should not be
11244 used since they make interworking inefficient (the
11245 linker can't transform BL<cond> into BLX). That's
11246 only a problem if the machine has BLX. */
11247 if (arm_arch5)
11249 fail = TRUE;
11250 break;
11253 /* Succeed if the following insn is the target label, or
11254 if the following two insns are a barrier and the
11255 target label. */
11256 this_insn = next_nonnote_insn (this_insn);
11257 if (this_insn && GET_CODE (this_insn) == BARRIER)
11258 this_insn = next_nonnote_insn (this_insn);
11260 if (this_insn && this_insn == label
11261 && insns_skipped < max_insns_skipped)
11263 if (jump_clobbers)
11265 arm_ccfsm_state = 2;
11266 this_insn = next_nonnote_insn (this_insn);
11268 else
11269 arm_ccfsm_state = 1;
11270 succeed = TRUE;
11272 else
11273 fail = TRUE;
11274 break;
11276 case JUMP_INSN:
11277 /* If this is an unconditional branch to the same label, succeed.
11278 If it is to another label, do nothing. If it is conditional,
11279 fail. */
11280 /* XXX Probably, the tests for SET and the PC are
11281 unnecessary. */
11283 scanbody = PATTERN (this_insn);
11284 if (GET_CODE (scanbody) == SET
11285 && GET_CODE (SET_DEST (scanbody)) == PC)
11287 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11288 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11290 arm_ccfsm_state = 2;
11291 succeed = TRUE;
11293 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11294 fail = TRUE;
11296 /* Fail if a conditional return is undesirable (e.g. on a
11297 StrongARM), but still allow this if optimizing for size. */
11298 else if (GET_CODE (scanbody) == RETURN
11299 && !use_return_insn (TRUE, NULL)
11300 && !optimize_size)
11301 fail = TRUE;
11302 else if (GET_CODE (scanbody) == RETURN
11303 && seeking_return)
11305 arm_ccfsm_state = 2;
11306 succeed = TRUE;
11308 else if (GET_CODE (scanbody) == PARALLEL)
11310 switch (get_attr_conds (this_insn))
11312 case CONDS_NOCOND:
11313 break;
11314 default:
11315 fail = TRUE;
11316 break;
11319 else
11320 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11322 break;
11324 case INSN:
11325 /* Instructions using or affecting the condition codes make it
11326 fail. */
11327 scanbody = PATTERN (this_insn);
11328 if (!(GET_CODE (scanbody) == SET
11329 || GET_CODE (scanbody) == PARALLEL)
11330 || get_attr_conds (this_insn) != CONDS_NOCOND)
11331 fail = TRUE;
11333 /* A conditional cirrus instruction must be followed by
11334 a non Cirrus instruction. However, since we
11335 conditionalize instructions in this function and by
11336 the time we get here we can't add instructions
11337 (nops), because shorten_branches() has already been
11338 called, we will disable conditionalizing Cirrus
11339 instructions to be safe. */
11340 if (GET_CODE (scanbody) != USE
11341 && GET_CODE (scanbody) != CLOBBER
11342 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11343 fail = TRUE;
11344 break;
11346 default:
11347 break;
11350 if (succeed)
11352 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11353 arm_target_label = CODE_LABEL_NUMBER (label);
11354 else if (seeking_return || arm_ccfsm_state == 2)
11356 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11358 this_insn = next_nonnote_insn (this_insn);
11359 if (this_insn && (GET_CODE (this_insn) == BARRIER
11360 || GET_CODE (this_insn) == CODE_LABEL))
11361 abort ();
11363 if (!this_insn)
11365 /* Oh, dear! we ran off the end.. give up. */
11366 recog (PATTERN (insn), insn, NULL);
11367 arm_ccfsm_state = 0;
11368 arm_target_insn = NULL;
11369 return;
11371 arm_target_insn = this_insn;
11373 else
11374 abort ();
11375 if (jump_clobbers)
11377 if (reverse)
11378 abort ();
11379 arm_current_cc =
11380 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11381 0), 0), 1));
11382 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11383 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11384 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11385 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11387 else
11389 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11390 what it was. */
11391 if (!reverse)
11392 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11393 0));
11396 if (reverse || then_not_else)
11397 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11400 /* Restore recog_data (getting the attributes of other insns can
11401 destroy this array, but final.c assumes that it remains intact
11402 across this call; since the insn has been recognized already we
11403 call recog direct). */
11404 recog (PATTERN (insn), insn, NULL);
11408 /* Returns true if REGNO is a valid register
11409 for holding a quantity of type MODE. */
11411 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11413 if (GET_MODE_CLASS (mode) == MODE_CC)
11414 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11416 if (TARGET_THUMB)
11417 /* For the Thumb we only allow values bigger than SImode in
11418 registers 0 - 6, so that there is always a second low
11419 register available to hold the upper part of the value.
11420 We probably we ought to ensure that the register is the
11421 start of an even numbered register pair. */
11422 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11424 if (IS_CIRRUS_REGNUM (regno))
11425 /* We have outlawed SI values in Cirrus registers because they
11426 reside in the lower 32 bits, but SF values reside in the
11427 upper 32 bits. This causes gcc all sorts of grief. We can't
11428 even split the registers into pairs because Cirrus SI values
11429 get sign extended to 64bits-- aldyh. */
11430 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11432 if (IS_VFP_REGNUM (regno))
11434 if (mode == SFmode || mode == SImode)
11435 return TRUE;
11437 /* DFmode values are only valid in even register pairs. */
11438 if (mode == DFmode)
11439 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11440 return FALSE;
11443 if (IS_IWMMXT_GR_REGNUM (regno))
11444 return mode == SImode;
11446 if (IS_IWMMXT_REGNUM (regno))
11447 return VALID_IWMMXT_REG_MODE (mode);
11449 /* We allow any value to be stored in the general registers.
11450 Restrict doubleword quantities to even register pairs so that we can
11451 use ldrd. */
11452 if (regno <= LAST_ARM_REGNUM)
11453 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11455 if ( regno == FRAME_POINTER_REGNUM
11456 || regno == ARG_POINTER_REGNUM)
11457 /* We only allow integers in the fake hard registers. */
11458 return GET_MODE_CLASS (mode) == MODE_INT;
11460 /* The only registers left are the FPA registers
11461 which we only allow to hold FP values. */
11462 return GET_MODE_CLASS (mode) == MODE_FLOAT
11463 && regno >= FIRST_FPA_REGNUM
11464 && regno <= LAST_FPA_REGNUM;
11468 arm_regno_class (int regno)
11470 if (TARGET_THUMB)
11472 if (regno == STACK_POINTER_REGNUM)
11473 return STACK_REG;
11474 if (regno == CC_REGNUM)
11475 return CC_REG;
11476 if (regno < 8)
11477 return LO_REGS;
11478 return HI_REGS;
11481 if ( regno <= LAST_ARM_REGNUM
11482 || regno == FRAME_POINTER_REGNUM
11483 || regno == ARG_POINTER_REGNUM)
11484 return GENERAL_REGS;
11486 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11487 return NO_REGS;
11489 if (IS_CIRRUS_REGNUM (regno))
11490 return CIRRUS_REGS;
11492 if (IS_VFP_REGNUM (regno))
11493 return VFP_REGS;
11495 if (IS_IWMMXT_REGNUM (regno))
11496 return IWMMXT_REGS;
11498 if (IS_IWMMXT_GR_REGNUM (regno))
11499 return IWMMXT_GR_REGS;
11501 return FPA_REGS;
11504 /* Handle a special case when computing the offset
11505 of an argument from the frame pointer. */
11507 arm_debugger_arg_offset (int value, rtx addr)
11509 rtx insn;
11511 /* We are only interested if dbxout_parms() failed to compute the offset. */
11512 if (value != 0)
11513 return 0;
11515 /* We can only cope with the case where the address is held in a register. */
11516 if (GET_CODE (addr) != REG)
11517 return 0;
11519 /* If we are using the frame pointer to point at the argument, then
11520 an offset of 0 is correct. */
11521 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11522 return 0;
11524 /* If we are using the stack pointer to point at the
11525 argument, then an offset of 0 is correct. */
11526 if ((TARGET_THUMB || !frame_pointer_needed)
11527 && REGNO (addr) == SP_REGNUM)
11528 return 0;
11530 /* Oh dear. The argument is pointed to by a register rather
11531 than being held in a register, or being stored at a known
11532 offset from the frame pointer. Since GDB only understands
11533 those two kinds of argument we must translate the address
11534 held in the register into an offset from the frame pointer.
11535 We do this by searching through the insns for the function
11536 looking to see where this register gets its value. If the
11537 register is initialized from the frame pointer plus an offset
11538 then we are in luck and we can continue, otherwise we give up.
11540 This code is exercised by producing debugging information
11541 for a function with arguments like this:
11543 double func (double a, double b, int c, double d) {return d;}
11545 Without this code the stab for parameter 'd' will be set to
11546 an offset of 0 from the frame pointer, rather than 8. */
11548 /* The if() statement says:
11550 If the insn is a normal instruction
11551 and if the insn is setting the value in a register
11552 and if the register being set is the register holding the address of the argument
11553 and if the address is computing by an addition
11554 that involves adding to a register
11555 which is the frame pointer
11556 a constant integer
11558 then... */
11560 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11562 if ( GET_CODE (insn) == INSN
11563 && GET_CODE (PATTERN (insn)) == SET
11564 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11565 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11566 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11567 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11568 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11571 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11573 break;
11577 if (value == 0)
11579 debug_rtx (addr);
11580 warning ("unable to compute real location of stacked parameter");
11581 value = 8; /* XXX magic hack */
11584 return value;
11587 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11588 do \
11590 if ((MASK) & insn_flags) \
11591 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11592 BUILT_IN_MD, NULL, NULL_TREE); \
11594 while (0)
11596 struct builtin_description
11598 const unsigned int mask;
11599 const enum insn_code icode;
11600 const char * const name;
11601 const enum arm_builtins code;
11602 const enum rtx_code comparison;
11603 const unsigned int flag;
11606 static const struct builtin_description bdesc_2arg[] =
11608 #define IWMMXT_BUILTIN(code, string, builtin) \
11609 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11610 ARM_BUILTIN_##builtin, 0, 0 },
11612 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11613 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11614 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11615 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11616 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11617 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11618 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11619 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11620 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11621 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11622 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11623 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11624 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11625 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11626 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11627 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11628 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11629 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11630 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11631 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11632 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11633 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11634 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11635 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11636 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11637 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11638 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11639 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11640 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11641 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11642 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11643 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11644 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11645 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11646 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11647 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11648 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11649 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11650 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11651 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11652 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11653 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11654 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11655 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11656 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11657 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11658 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11659 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11660 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11661 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11662 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11663 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11664 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11665 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11666 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11667 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11668 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11669 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11671 #define IWMMXT_BUILTIN2(code, builtin) \
11672 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11674 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11675 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11676 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11677 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11678 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11679 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11680 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11681 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11682 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11683 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11684 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11685 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11686 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11687 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11688 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11689 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11690 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11691 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11692 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11693 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11694 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11695 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11696 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11697 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11698 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11699 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11700 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11701 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11702 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11703 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11704 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11705 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11708 static const struct builtin_description bdesc_1arg[] =
11710 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11711 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11712 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11713 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11714 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11715 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11716 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11717 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11718 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11719 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11720 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11721 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11722 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11723 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11724 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11725 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11726 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11727 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11730 /* Set up all the iWMMXt builtins. This is
11731 not called if TARGET_IWMMXT is zero. */
11733 static void
11734 arm_init_iwmmxt_builtins (void)
11736 const struct builtin_description * d;
11737 size_t i;
11738 tree endlink = void_list_node;
11740 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11741 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11742 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11744 tree int_ftype_int
11745 = build_function_type (integer_type_node,
11746 tree_cons (NULL_TREE, integer_type_node, endlink));
11747 tree v8qi_ftype_v8qi_v8qi_int
11748 = build_function_type (V8QI_type_node,
11749 tree_cons (NULL_TREE, V8QI_type_node,
11750 tree_cons (NULL_TREE, V8QI_type_node,
11751 tree_cons (NULL_TREE,
11752 integer_type_node,
11753 endlink))));
11754 tree v4hi_ftype_v4hi_int
11755 = build_function_type (V4HI_type_node,
11756 tree_cons (NULL_TREE, V4HI_type_node,
11757 tree_cons (NULL_TREE, integer_type_node,
11758 endlink)));
11759 tree v2si_ftype_v2si_int
11760 = build_function_type (V2SI_type_node,
11761 tree_cons (NULL_TREE, V2SI_type_node,
11762 tree_cons (NULL_TREE, integer_type_node,
11763 endlink)));
11764 tree v2si_ftype_di_di
11765 = build_function_type (V2SI_type_node,
11766 tree_cons (NULL_TREE, long_long_integer_type_node,
11767 tree_cons (NULL_TREE, long_long_integer_type_node,
11768 endlink)));
11769 tree di_ftype_di_int
11770 = build_function_type (long_long_integer_type_node,
11771 tree_cons (NULL_TREE, long_long_integer_type_node,
11772 tree_cons (NULL_TREE, integer_type_node,
11773 endlink)));
11774 tree di_ftype_di_int_int
11775 = build_function_type (long_long_integer_type_node,
11776 tree_cons (NULL_TREE, long_long_integer_type_node,
11777 tree_cons (NULL_TREE, integer_type_node,
11778 tree_cons (NULL_TREE,
11779 integer_type_node,
11780 endlink))));
11781 tree int_ftype_v8qi
11782 = build_function_type (integer_type_node,
11783 tree_cons (NULL_TREE, V8QI_type_node,
11784 endlink));
11785 tree int_ftype_v4hi
11786 = build_function_type (integer_type_node,
11787 tree_cons (NULL_TREE, V4HI_type_node,
11788 endlink));
11789 tree int_ftype_v2si
11790 = build_function_type (integer_type_node,
11791 tree_cons (NULL_TREE, V2SI_type_node,
11792 endlink));
11793 tree int_ftype_v8qi_int
11794 = build_function_type (integer_type_node,
11795 tree_cons (NULL_TREE, V8QI_type_node,
11796 tree_cons (NULL_TREE, integer_type_node,
11797 endlink)));
11798 tree int_ftype_v4hi_int
11799 = build_function_type (integer_type_node,
11800 tree_cons (NULL_TREE, V4HI_type_node,
11801 tree_cons (NULL_TREE, integer_type_node,
11802 endlink)));
11803 tree int_ftype_v2si_int
11804 = build_function_type (integer_type_node,
11805 tree_cons (NULL_TREE, V2SI_type_node,
11806 tree_cons (NULL_TREE, integer_type_node,
11807 endlink)));
11808 tree v8qi_ftype_v8qi_int_int
11809 = build_function_type (V8QI_type_node,
11810 tree_cons (NULL_TREE, V8QI_type_node,
11811 tree_cons (NULL_TREE, integer_type_node,
11812 tree_cons (NULL_TREE,
11813 integer_type_node,
11814 endlink))));
11815 tree v4hi_ftype_v4hi_int_int
11816 = build_function_type (V4HI_type_node,
11817 tree_cons (NULL_TREE, V4HI_type_node,
11818 tree_cons (NULL_TREE, integer_type_node,
11819 tree_cons (NULL_TREE,
11820 integer_type_node,
11821 endlink))));
11822 tree v2si_ftype_v2si_int_int
11823 = build_function_type (V2SI_type_node,
11824 tree_cons (NULL_TREE, V2SI_type_node,
11825 tree_cons (NULL_TREE, integer_type_node,
11826 tree_cons (NULL_TREE,
11827 integer_type_node,
11828 endlink))));
11829 /* Miscellaneous. */
11830 tree v8qi_ftype_v4hi_v4hi
11831 = build_function_type (V8QI_type_node,
11832 tree_cons (NULL_TREE, V4HI_type_node,
11833 tree_cons (NULL_TREE, V4HI_type_node,
11834 endlink)));
11835 tree v4hi_ftype_v2si_v2si
11836 = build_function_type (V4HI_type_node,
11837 tree_cons (NULL_TREE, V2SI_type_node,
11838 tree_cons (NULL_TREE, V2SI_type_node,
11839 endlink)));
11840 tree v2si_ftype_v4hi_v4hi
11841 = build_function_type (V2SI_type_node,
11842 tree_cons (NULL_TREE, V4HI_type_node,
11843 tree_cons (NULL_TREE, V4HI_type_node,
11844 endlink)));
11845 tree v2si_ftype_v8qi_v8qi
11846 = build_function_type (V2SI_type_node,
11847 tree_cons (NULL_TREE, V8QI_type_node,
11848 tree_cons (NULL_TREE, V8QI_type_node,
11849 endlink)));
11850 tree v4hi_ftype_v4hi_di
11851 = build_function_type (V4HI_type_node,
11852 tree_cons (NULL_TREE, V4HI_type_node,
11853 tree_cons (NULL_TREE,
11854 long_long_integer_type_node,
11855 endlink)));
11856 tree v2si_ftype_v2si_di
11857 = build_function_type (V2SI_type_node,
11858 tree_cons (NULL_TREE, V2SI_type_node,
11859 tree_cons (NULL_TREE,
11860 long_long_integer_type_node,
11861 endlink)));
11862 tree void_ftype_int_int
11863 = build_function_type (void_type_node,
11864 tree_cons (NULL_TREE, integer_type_node,
11865 tree_cons (NULL_TREE, integer_type_node,
11866 endlink)));
11867 tree di_ftype_void
11868 = build_function_type (long_long_unsigned_type_node, endlink);
11869 tree di_ftype_v8qi
11870 = build_function_type (long_long_integer_type_node,
11871 tree_cons (NULL_TREE, V8QI_type_node,
11872 endlink));
11873 tree di_ftype_v4hi
11874 = build_function_type (long_long_integer_type_node,
11875 tree_cons (NULL_TREE, V4HI_type_node,
11876 endlink));
11877 tree di_ftype_v2si
11878 = build_function_type (long_long_integer_type_node,
11879 tree_cons (NULL_TREE, V2SI_type_node,
11880 endlink));
11881 tree v2si_ftype_v4hi
11882 = build_function_type (V2SI_type_node,
11883 tree_cons (NULL_TREE, V4HI_type_node,
11884 endlink));
11885 tree v4hi_ftype_v8qi
11886 = build_function_type (V4HI_type_node,
11887 tree_cons (NULL_TREE, V8QI_type_node,
11888 endlink));
11890 tree di_ftype_di_v4hi_v4hi
11891 = build_function_type (long_long_unsigned_type_node,
11892 tree_cons (NULL_TREE,
11893 long_long_unsigned_type_node,
11894 tree_cons (NULL_TREE, V4HI_type_node,
11895 tree_cons (NULL_TREE,
11896 V4HI_type_node,
11897 endlink))));
11899 tree di_ftype_v4hi_v4hi
11900 = build_function_type (long_long_unsigned_type_node,
11901 tree_cons (NULL_TREE, V4HI_type_node,
11902 tree_cons (NULL_TREE, V4HI_type_node,
11903 endlink)));
11905 /* Normal vector binops. */
11906 tree v8qi_ftype_v8qi_v8qi
11907 = build_function_type (V8QI_type_node,
11908 tree_cons (NULL_TREE, V8QI_type_node,
11909 tree_cons (NULL_TREE, V8QI_type_node,
11910 endlink)));
11911 tree v4hi_ftype_v4hi_v4hi
11912 = build_function_type (V4HI_type_node,
11913 tree_cons (NULL_TREE, V4HI_type_node,
11914 tree_cons (NULL_TREE, V4HI_type_node,
11915 endlink)));
11916 tree v2si_ftype_v2si_v2si
11917 = build_function_type (V2SI_type_node,
11918 tree_cons (NULL_TREE, V2SI_type_node,
11919 tree_cons (NULL_TREE, V2SI_type_node,
11920 endlink)));
11921 tree di_ftype_di_di
11922 = build_function_type (long_long_unsigned_type_node,
11923 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11924 tree_cons (NULL_TREE,
11925 long_long_unsigned_type_node,
11926 endlink)));
11928 /* Add all builtins that are more or less simple operations on two
11929 operands. */
11930 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11932 /* Use one of the operands; the target can have a different mode for
11933 mask-generating compares. */
11934 enum machine_mode mode;
11935 tree type;
11937 if (d->name == 0)
11938 continue;
11940 mode = insn_data[d->icode].operand[1].mode;
11942 switch (mode)
11944 case V8QImode:
11945 type = v8qi_ftype_v8qi_v8qi;
11946 break;
11947 case V4HImode:
11948 type = v4hi_ftype_v4hi_v4hi;
11949 break;
11950 case V2SImode:
11951 type = v2si_ftype_v2si_v2si;
11952 break;
11953 case DImode:
11954 type = di_ftype_di_di;
11955 break;
11957 default:
11958 abort ();
11961 def_mbuiltin (d->mask, d->name, type, d->code);
11964 /* Add the remaining MMX insns with somewhat more complicated types. */
11965 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11966 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11967 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11974 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11976 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11981 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11990 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11991 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11995 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11997 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11999 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12002 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12005 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12006 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12009 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12010 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12011 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12015 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12016 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12019 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12020 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12022 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12023 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12024 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12025 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12026 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12027 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12029 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12030 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12031 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12032 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12033 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12034 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12035 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12036 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12037 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12038 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12039 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12040 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12042 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12043 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12044 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12045 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12047 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12048 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12049 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12050 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12051 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12052 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12053 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
12056 static void
12057 arm_init_builtins (void)
12059 if (TARGET_REALLY_IWMMXT)
12060 arm_init_iwmmxt_builtins ();
12063 /* Errors in the source file can cause expand_expr to return const0_rtx
12064 where we expect a vector. To avoid crashing, use one of the vector
12065 clear instructions. */
12067 static rtx
12068 safe_vector_operand (rtx x, enum machine_mode mode)
12070 if (x != const0_rtx)
12071 return x;
12072 x = gen_reg_rtx (mode);
12074 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12075 : gen_rtx_SUBREG (DImode, x, 0)));
12076 return x;
12079 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12081 static rtx
12082 arm_expand_binop_builtin (enum insn_code icode,
12083 tree arglist, rtx target)
12085 rtx pat;
12086 tree arg0 = TREE_VALUE (arglist);
12087 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12088 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12089 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12090 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12091 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12092 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12094 if (VECTOR_MODE_P (mode0))
12095 op0 = safe_vector_operand (op0, mode0);
12096 if (VECTOR_MODE_P (mode1))
12097 op1 = safe_vector_operand (op1, mode1);
12099 if (! target
12100 || GET_MODE (target) != tmode
12101 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12102 target = gen_reg_rtx (tmode);
12104 /* In case the insn wants input operands in modes different from
12105 the result, abort. */
12106 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12107 abort ();
12109 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12110 op0 = copy_to_mode_reg (mode0, op0);
12111 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12112 op1 = copy_to_mode_reg (mode1, op1);
12114 pat = GEN_FCN (icode) (target, op0, op1);
12115 if (! pat)
12116 return 0;
12117 emit_insn (pat);
12118 return target;
12121 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12123 static rtx
12124 arm_expand_unop_builtin (enum insn_code icode,
12125 tree arglist, rtx target, int do_load)
12127 rtx pat;
12128 tree arg0 = TREE_VALUE (arglist);
12129 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12130 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12131 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12133 if (! target
12134 || GET_MODE (target) != tmode
12135 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12136 target = gen_reg_rtx (tmode);
12137 if (do_load)
12138 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12139 else
12141 if (VECTOR_MODE_P (mode0))
12142 op0 = safe_vector_operand (op0, mode0);
12144 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12145 op0 = copy_to_mode_reg (mode0, op0);
12148 pat = GEN_FCN (icode) (target, op0);
12149 if (! pat)
12150 return 0;
12151 emit_insn (pat);
12152 return target;
12155 /* Expand an expression EXP that calls a built-in function,
12156 with result going to TARGET if that's convenient
12157 (and in mode MODE if that's convenient).
12158 SUBTARGET may be used as the target for computing one of EXP's operands.
12159 IGNORE is nonzero if the value is to be ignored. */
12161 static rtx
12162 arm_expand_builtin (tree exp,
12163 rtx target,
12164 rtx subtarget ATTRIBUTE_UNUSED,
12165 enum machine_mode mode ATTRIBUTE_UNUSED,
12166 int ignore ATTRIBUTE_UNUSED)
12168 const struct builtin_description * d;
12169 enum insn_code icode;
12170 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12171 tree arglist = TREE_OPERAND (exp, 1);
12172 tree arg0;
12173 tree arg1;
12174 tree arg2;
12175 rtx op0;
12176 rtx op1;
12177 rtx op2;
12178 rtx pat;
12179 int fcode = DECL_FUNCTION_CODE (fndecl);
12180 size_t i;
12181 enum machine_mode tmode;
12182 enum machine_mode mode0;
12183 enum machine_mode mode1;
12184 enum machine_mode mode2;
12186 switch (fcode)
12188 case ARM_BUILTIN_TEXTRMSB:
12189 case ARM_BUILTIN_TEXTRMUB:
12190 case ARM_BUILTIN_TEXTRMSH:
12191 case ARM_BUILTIN_TEXTRMUH:
12192 case ARM_BUILTIN_TEXTRMSW:
12193 case ARM_BUILTIN_TEXTRMUW:
12194 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12195 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12196 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12197 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12198 : CODE_FOR_iwmmxt_textrmw);
12200 arg0 = TREE_VALUE (arglist);
12201 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12202 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12203 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12204 tmode = insn_data[icode].operand[0].mode;
12205 mode0 = insn_data[icode].operand[1].mode;
12206 mode1 = insn_data[icode].operand[2].mode;
12208 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12209 op0 = copy_to_mode_reg (mode0, op0);
12210 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12212 /* @@@ better error message */
12213 error ("selector must be an immediate");
12214 return gen_reg_rtx (tmode);
12216 if (target == 0
12217 || GET_MODE (target) != tmode
12218 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12219 target = gen_reg_rtx (tmode);
12220 pat = GEN_FCN (icode) (target, op0, op1);
12221 if (! pat)
12222 return 0;
12223 emit_insn (pat);
12224 return target;
12226 case ARM_BUILTIN_TINSRB:
12227 case ARM_BUILTIN_TINSRH:
12228 case ARM_BUILTIN_TINSRW:
12229 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12230 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12231 : CODE_FOR_iwmmxt_tinsrw);
12232 arg0 = TREE_VALUE (arglist);
12233 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12234 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12235 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12236 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12237 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12238 tmode = insn_data[icode].operand[0].mode;
12239 mode0 = insn_data[icode].operand[1].mode;
12240 mode1 = insn_data[icode].operand[2].mode;
12241 mode2 = insn_data[icode].operand[3].mode;
12243 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12244 op0 = copy_to_mode_reg (mode0, op0);
12245 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12246 op1 = copy_to_mode_reg (mode1, op1);
12247 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12249 /* @@@ better error message */
12250 error ("selector must be an immediate");
12251 return const0_rtx;
12253 if (target == 0
12254 || GET_MODE (target) != tmode
12255 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12256 target = gen_reg_rtx (tmode);
12257 pat = GEN_FCN (icode) (target, op0, op1, op2);
12258 if (! pat)
12259 return 0;
12260 emit_insn (pat);
12261 return target;
12263 case ARM_BUILTIN_SETWCX:
12264 arg0 = TREE_VALUE (arglist);
12265 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12266 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12267 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12268 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12269 return 0;
12271 case ARM_BUILTIN_GETWCX:
12272 arg0 = TREE_VALUE (arglist);
12273 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12274 target = gen_reg_rtx (SImode);
12275 emit_insn (gen_iwmmxt_tmrc (target, op0));
12276 return target;
12278 case ARM_BUILTIN_WSHUFH:
12279 icode = CODE_FOR_iwmmxt_wshufh;
12280 arg0 = TREE_VALUE (arglist);
12281 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12282 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12283 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12284 tmode = insn_data[icode].operand[0].mode;
12285 mode1 = insn_data[icode].operand[1].mode;
12286 mode2 = insn_data[icode].operand[2].mode;
12288 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12289 op0 = copy_to_mode_reg (mode1, op0);
12290 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12292 /* @@@ better error message */
12293 error ("mask must be an immediate");
12294 return const0_rtx;
12296 if (target == 0
12297 || GET_MODE (target) != tmode
12298 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12299 target = gen_reg_rtx (tmode);
12300 pat = GEN_FCN (icode) (target, op0, op1);
12301 if (! pat)
12302 return 0;
12303 emit_insn (pat);
12304 return target;
12306 case ARM_BUILTIN_WSADB:
12307 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12308 case ARM_BUILTIN_WSADH:
12309 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12310 case ARM_BUILTIN_WSADBZ:
12311 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12312 case ARM_BUILTIN_WSADHZ:
12313 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12315 /* Several three-argument builtins. */
12316 case ARM_BUILTIN_WMACS:
12317 case ARM_BUILTIN_WMACU:
12318 case ARM_BUILTIN_WALIGN:
12319 case ARM_BUILTIN_TMIA:
12320 case ARM_BUILTIN_TMIAPH:
12321 case ARM_BUILTIN_TMIATT:
12322 case ARM_BUILTIN_TMIATB:
12323 case ARM_BUILTIN_TMIABT:
12324 case ARM_BUILTIN_TMIABB:
12325 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12326 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12327 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12328 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12329 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12330 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12331 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12332 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12333 : CODE_FOR_iwmmxt_walign);
12334 arg0 = TREE_VALUE (arglist);
12335 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12336 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12337 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12338 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12339 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12340 tmode = insn_data[icode].operand[0].mode;
12341 mode0 = insn_data[icode].operand[1].mode;
12342 mode1 = insn_data[icode].operand[2].mode;
12343 mode2 = insn_data[icode].operand[3].mode;
12345 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12346 op0 = copy_to_mode_reg (mode0, op0);
12347 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12348 op1 = copy_to_mode_reg (mode1, op1);
12349 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12350 op2 = copy_to_mode_reg (mode2, op2);
12351 if (target == 0
12352 || GET_MODE (target) != tmode
12353 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12354 target = gen_reg_rtx (tmode);
12355 pat = GEN_FCN (icode) (target, op0, op1, op2);
12356 if (! pat)
12357 return 0;
12358 emit_insn (pat);
12359 return target;
12361 case ARM_BUILTIN_WZERO:
12362 target = gen_reg_rtx (DImode);
12363 emit_insn (gen_iwmmxt_clrdi (target));
12364 return target;
12366 default:
12367 break;
12370 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12371 if (d->code == (const enum arm_builtins) fcode)
12372 return arm_expand_binop_builtin (d->icode, arglist, target);
12374 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12375 if (d->code == (const enum arm_builtins) fcode)
12376 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12378 /* @@@ Should really do something sensible here. */
12379 return NULL_RTX;
12382 /* Return the number (counting from 0) of
12383 the least significant set bit in MASK. */
12385 inline static int
12386 number_of_first_bit_set (unsigned mask)
12388 int bit;
12390 for (bit = 0;
12391 (mask & (1 << bit)) == 0;
12392 ++bit)
12393 continue;
12395 return bit;
12398 /* Emit code to push or pop registers to or from the stack. F is the
12399 assembly file. MASK is the registers to push or pop. PUSH is
12400 nonzero if we should push, and zero if we should pop. For debugging
12401 output, if pushing, adjust CFA_OFFSET by the amount of space added
12402 to the stack. REAL_REGS should have the same number of bits set as
12403 MASK, and will be used instead (in the same order) to describe which
12404 registers were saved - this is used to mark the save slots when we
12405 push high registers after moving them to low registers. */
12406 static void
12407 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12408 unsigned long real_regs)
12410 int regno;
12411 int lo_mask = mask & 0xFF;
12412 int pushed_words = 0;
12414 if (mask == 0)
12415 abort ();
12417 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12419 /* Special case. Do not generate a POP PC statement here, do it in
12420 thumb_exit() */
12421 thumb_exit (f, -1);
12422 return;
12425 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12427 /* Look at the low registers first. */
12428 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12430 if (lo_mask & 1)
12432 asm_fprintf (f, "%r", regno);
12434 if ((lo_mask & ~1) != 0)
12435 fprintf (f, ", ");
12437 pushed_words++;
12441 if (push && (mask & (1 << LR_REGNUM)))
12443 /* Catch pushing the LR. */
12444 if (mask & 0xFF)
12445 fprintf (f, ", ");
12447 asm_fprintf (f, "%r", LR_REGNUM);
12449 pushed_words++;
12451 else if (!push && (mask & (1 << PC_REGNUM)))
12453 /* Catch popping the PC. */
12454 if (TARGET_INTERWORK || TARGET_BACKTRACE
12455 || current_function_calls_eh_return)
12457 /* The PC is never poped directly, instead
12458 it is popped into r3 and then BX is used. */
12459 fprintf (f, "}\n");
12461 thumb_exit (f, -1);
12463 return;
12465 else
12467 if (mask & 0xFF)
12468 fprintf (f, ", ");
12470 asm_fprintf (f, "%r", PC_REGNUM);
12474 fprintf (f, "}\n");
12476 if (push && pushed_words && dwarf2out_do_frame ())
12478 char *l = dwarf2out_cfi_label ();
12479 int pushed_mask = real_regs;
12481 *cfa_offset += pushed_words * 4;
12482 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12484 pushed_words = 0;
12485 pushed_mask = real_regs;
12486 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12488 if (pushed_mask & 1)
12489 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
12494 /* Generate code to return from a thumb function.
12495 If 'reg_containing_return_addr' is -1, then the return address is
12496 actually on the stack, at the stack pointer. */
12497 static void
12498 thumb_exit (FILE *f, int reg_containing_return_addr)
12500 unsigned regs_available_for_popping;
12501 unsigned regs_to_pop;
12502 int pops_needed;
12503 unsigned available;
12504 unsigned required;
12505 int mode;
12506 int size;
12507 int restore_a4 = FALSE;
12509 /* Compute the registers we need to pop. */
12510 regs_to_pop = 0;
12511 pops_needed = 0;
12513 if (reg_containing_return_addr == -1)
12515 regs_to_pop |= 1 << LR_REGNUM;
12516 ++pops_needed;
12519 if (TARGET_BACKTRACE)
12521 /* Restore the (ARM) frame pointer and stack pointer. */
12522 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12523 pops_needed += 2;
12526 /* If there is nothing to pop then just emit the BX instruction and
12527 return. */
12528 if (pops_needed == 0)
12530 if (current_function_calls_eh_return)
12531 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12533 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12534 return;
12536 /* Otherwise if we are not supporting interworking and we have not created
12537 a backtrace structure and the function was not entered in ARM mode then
12538 just pop the return address straight into the PC. */
12539 else if (!TARGET_INTERWORK
12540 && !TARGET_BACKTRACE
12541 && !is_called_in_ARM_mode (current_function_decl)
12542 && !current_function_calls_eh_return)
12544 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12545 return;
12548 /* Find out how many of the (return) argument registers we can corrupt. */
12549 regs_available_for_popping = 0;
12551 /* If returning via __builtin_eh_return, the bottom three registers
12552 all contain information needed for the return. */
12553 if (current_function_calls_eh_return)
12554 size = 12;
12555 else
12557 /* If we can deduce the registers used from the function's
12558 return value. This is more reliable that examining
12559 regs_ever_live[] because that will be set if the register is
12560 ever used in the function, not just if the register is used
12561 to hold a return value. */
12563 if (current_function_return_rtx != 0)
12564 mode = GET_MODE (current_function_return_rtx);
12565 else
12566 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12568 size = GET_MODE_SIZE (mode);
12570 if (size == 0)
12572 /* In a void function we can use any argument register.
12573 In a function that returns a structure on the stack
12574 we can use the second and third argument registers. */
12575 if (mode == VOIDmode)
12576 regs_available_for_popping =
12577 (1 << ARG_REGISTER (1))
12578 | (1 << ARG_REGISTER (2))
12579 | (1 << ARG_REGISTER (3));
12580 else
12581 regs_available_for_popping =
12582 (1 << ARG_REGISTER (2))
12583 | (1 << ARG_REGISTER (3));
12585 else if (size <= 4)
12586 regs_available_for_popping =
12587 (1 << ARG_REGISTER (2))
12588 | (1 << ARG_REGISTER (3));
12589 else if (size <= 8)
12590 regs_available_for_popping =
12591 (1 << ARG_REGISTER (3));
12594 /* Match registers to be popped with registers into which we pop them. */
12595 for (available = regs_available_for_popping,
12596 required = regs_to_pop;
12597 required != 0 && available != 0;
12598 available &= ~(available & - available),
12599 required &= ~(required & - required))
12600 -- pops_needed;
12602 /* If we have any popping registers left over, remove them. */
12603 if (available > 0)
12604 regs_available_for_popping &= ~available;
12606 /* Otherwise if we need another popping register we can use
12607 the fourth argument register. */
12608 else if (pops_needed)
12610 /* If we have not found any free argument registers and
12611 reg a4 contains the return address, we must move it. */
12612 if (regs_available_for_popping == 0
12613 && reg_containing_return_addr == LAST_ARG_REGNUM)
12615 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12616 reg_containing_return_addr = LR_REGNUM;
12618 else if (size > 12)
12620 /* Register a4 is being used to hold part of the return value,
12621 but we have dire need of a free, low register. */
12622 restore_a4 = TRUE;
12624 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12627 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12629 /* The fourth argument register is available. */
12630 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12632 --pops_needed;
12636 /* Pop as many registers as we can. */
12637 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12638 regs_available_for_popping);
12640 /* Process the registers we popped. */
12641 if (reg_containing_return_addr == -1)
12643 /* The return address was popped into the lowest numbered register. */
12644 regs_to_pop &= ~(1 << LR_REGNUM);
12646 reg_containing_return_addr =
12647 number_of_first_bit_set (regs_available_for_popping);
12649 /* Remove this register for the mask of available registers, so that
12650 the return address will not be corrupted by further pops. */
12651 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12654 /* If we popped other registers then handle them here. */
12655 if (regs_available_for_popping)
12657 int frame_pointer;
12659 /* Work out which register currently contains the frame pointer. */
12660 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12662 /* Move it into the correct place. */
12663 asm_fprintf (f, "\tmov\t%r, %r\n",
12664 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12666 /* (Temporarily) remove it from the mask of popped registers. */
12667 regs_available_for_popping &= ~(1 << frame_pointer);
12668 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12670 if (regs_available_for_popping)
12672 int stack_pointer;
12674 /* We popped the stack pointer as well,
12675 find the register that contains it. */
12676 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12678 /* Move it into the stack register. */
12679 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12681 /* At this point we have popped all necessary registers, so
12682 do not worry about restoring regs_available_for_popping
12683 to its correct value:
12685 assert (pops_needed == 0)
12686 assert (regs_available_for_popping == (1 << frame_pointer))
12687 assert (regs_to_pop == (1 << STACK_POINTER)) */
12689 else
12691 /* Since we have just move the popped value into the frame
12692 pointer, the popping register is available for reuse, and
12693 we know that we still have the stack pointer left to pop. */
12694 regs_available_for_popping |= (1 << frame_pointer);
12698 /* If we still have registers left on the stack, but we no longer have
12699 any registers into which we can pop them, then we must move the return
12700 address into the link register and make available the register that
12701 contained it. */
12702 if (regs_available_for_popping == 0 && pops_needed > 0)
12704 regs_available_for_popping |= 1 << reg_containing_return_addr;
12706 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12707 reg_containing_return_addr);
12709 reg_containing_return_addr = LR_REGNUM;
12712 /* If we have registers left on the stack then pop some more.
12713 We know that at most we will want to pop FP and SP. */
12714 if (pops_needed > 0)
12716 int popped_into;
12717 int move_to;
12719 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12720 regs_available_for_popping);
12722 /* We have popped either FP or SP.
12723 Move whichever one it is into the correct register. */
12724 popped_into = number_of_first_bit_set (regs_available_for_popping);
12725 move_to = number_of_first_bit_set (regs_to_pop);
12727 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12729 regs_to_pop &= ~(1 << move_to);
12731 --pops_needed;
12734 /* If we still have not popped everything then we must have only
12735 had one register available to us and we are now popping the SP. */
12736 if (pops_needed > 0)
12738 int popped_into;
12740 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12741 regs_available_for_popping);
12743 popped_into = number_of_first_bit_set (regs_available_for_popping);
12745 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12747 assert (regs_to_pop == (1 << STACK_POINTER))
12748 assert (pops_needed == 1)
12752 /* If necessary restore the a4 register. */
12753 if (restore_a4)
12755 if (reg_containing_return_addr != LR_REGNUM)
12757 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12758 reg_containing_return_addr = LR_REGNUM;
12761 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12764 if (current_function_calls_eh_return)
12765 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12767 /* Return to caller. */
12768 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12772 void
12773 thumb_final_prescan_insn (rtx insn)
12775 if (flag_print_asm_name)
12776 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12777 INSN_ADDRESSES (INSN_UID (insn)));
12781 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12783 unsigned HOST_WIDE_INT mask = 0xff;
12784 int i;
12786 if (val == 0) /* XXX */
12787 return 0;
12789 for (i = 0; i < 25; i++)
12790 if ((val & (mask << i)) == val)
12791 return 1;
12793 return 0;
12796 /* Returns nonzero if the current function contains,
12797 or might contain a far jump. */
12798 static int
12799 thumb_far_jump_used_p (void)
12801 rtx insn;
12803 /* This test is only important for leaf functions. */
12804 /* assert (!leaf_function_p ()); */
12806 /* If we have already decided that far jumps may be used,
12807 do not bother checking again, and always return true even if
12808 it turns out that they are not being used. Once we have made
12809 the decision that far jumps are present (and that hence the link
12810 register will be pushed onto the stack) we cannot go back on it. */
12811 if (cfun->machine->far_jump_used)
12812 return 1;
12814 /* If this function is not being called from the prologue/epilogue
12815 generation code then it must be being called from the
12816 INITIAL_ELIMINATION_OFFSET macro. */
12817 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12819 /* In this case we know that we are being asked about the elimination
12820 of the arg pointer register. If that register is not being used,
12821 then there are no arguments on the stack, and we do not have to
12822 worry that a far jump might force the prologue to push the link
12823 register, changing the stack offsets. In this case we can just
12824 return false, since the presence of far jumps in the function will
12825 not affect stack offsets.
12827 If the arg pointer is live (or if it was live, but has now been
12828 eliminated and so set to dead) then we do have to test to see if
12829 the function might contain a far jump. This test can lead to some
12830 false negatives, since before reload is completed, then length of
12831 branch instructions is not known, so gcc defaults to returning their
12832 longest length, which in turn sets the far jump attribute to true.
12834 A false negative will not result in bad code being generated, but it
12835 will result in a needless push and pop of the link register. We
12836 hope that this does not occur too often.
12838 If we need doubleword stack alignment this could affect the other
12839 elimination offsets so we can't risk getting it wrong. */
12840 if (regs_ever_live [ARG_POINTER_REGNUM])
12841 cfun->machine->arg_pointer_live = 1;
12842 else if (!cfun->machine->arg_pointer_live)
12843 return 0;
12846 /* Check to see if the function contains a branch
12847 insn with the far jump attribute set. */
12848 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12850 if (GET_CODE (insn) == JUMP_INSN
12851 /* Ignore tablejump patterns. */
12852 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12853 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12854 && get_attr_far_jump (insn) == FAR_JUMP_YES
12857 /* Record the fact that we have decided that
12858 the function does use far jumps. */
12859 cfun->machine->far_jump_used = 1;
12860 return 1;
12864 return 0;
12867 /* Return nonzero if FUNC must be entered in ARM mode. */
12869 is_called_in_ARM_mode (tree func)
12871 if (TREE_CODE (func) != FUNCTION_DECL)
12872 abort ();
12874 /* Ignore the problem about functions whoes address is taken. */
12875 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12876 return TRUE;
12878 #ifdef ARM_PE
12879 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12880 #else
12881 return FALSE;
12882 #endif
12885 /* The bits which aren't usefully expanded as rtl. */
12886 const char *
12887 thumb_unexpanded_epilogue (void)
12889 int regno;
12890 unsigned long live_regs_mask = 0;
12891 int high_regs_pushed = 0;
12892 int had_to_push_lr;
12893 int size;
12894 int mode;
12896 if (return_used_this_function)
12897 return "";
12899 if (IS_NAKED (arm_current_func_type ()))
12900 return "";
12902 live_regs_mask = thumb_compute_save_reg_mask ();
12903 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12905 /* If we can deduce the registers used from the function's return value.
12906 This is more reliable that examining regs_ever_live[] because that
12907 will be set if the register is ever used in the function, not just if
12908 the register is used to hold a return value. */
12910 if (current_function_return_rtx != 0)
12911 mode = GET_MODE (current_function_return_rtx);
12912 else
12913 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12915 size = GET_MODE_SIZE (mode);
12917 /* The prolog may have pushed some high registers to use as
12918 work registers. e.g. the testsuite file:
12919 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12920 compiles to produce:
12921 push {r4, r5, r6, r7, lr}
12922 mov r7, r9
12923 mov r6, r8
12924 push {r6, r7}
12925 as part of the prolog. We have to undo that pushing here. */
12927 if (high_regs_pushed)
12929 unsigned long mask = live_regs_mask & 0xff;
12930 int next_hi_reg;
12932 /* The available low registers depend on the size of the value we are
12933 returning. */
12934 if (size <= 12)
12935 mask |= 1 << 3;
12936 if (size <= 8)
12937 mask |= 1 << 2;
12939 if (mask == 0)
12940 /* Oh dear! We have no low registers into which we can pop
12941 high registers! */
12942 internal_error
12943 ("no low registers available for popping high registers");
12945 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12946 if (live_regs_mask & (1 << next_hi_reg))
12947 break;
12949 while (high_regs_pushed)
12951 /* Find lo register(s) into which the high register(s) can
12952 be popped. */
12953 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12955 if (mask & (1 << regno))
12956 high_regs_pushed--;
12957 if (high_regs_pushed == 0)
12958 break;
12961 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12963 /* Pop the values into the low register(s). */
12964 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12966 /* Move the value(s) into the high registers. */
12967 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12969 if (mask & (1 << regno))
12971 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12972 regno);
12974 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12975 if (live_regs_mask & (1 << next_hi_reg))
12976 break;
12980 live_regs_mask &= ~0x0f00;
12983 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12984 live_regs_mask &= 0xff;
12986 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12988 /* Pop the return address into the PC. */
12989 if (had_to_push_lr)
12990 live_regs_mask |= 1 << PC_REGNUM;
12992 /* Either no argument registers were pushed or a backtrace
12993 structure was created which includes an adjusted stack
12994 pointer, so just pop everything. */
12995 if (live_regs_mask)
12996 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12997 live_regs_mask);
12999 /* We have either just popped the return address into the
13000 PC or it is was kept in LR for the entire function. */
13001 if (!had_to_push_lr)
13002 thumb_exit (asm_out_file, LR_REGNUM);
13004 else
13006 /* Pop everything but the return address. */
13007 if (live_regs_mask)
13008 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13009 live_regs_mask);
13011 if (had_to_push_lr)
13013 if (size > 12)
13015 /* We have no free low regs, so save one. */
13016 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13017 LAST_ARG_REGNUM);
13020 /* Get the return address into a temporary register. */
13021 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13022 1 << LAST_ARG_REGNUM);
13024 if (size > 12)
13026 /* Move the return address to lr. */
13027 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13028 LAST_ARG_REGNUM);
13029 /* Restore the low register. */
13030 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13031 IP_REGNUM);
13032 regno = LR_REGNUM;
13034 else
13035 regno = LAST_ARG_REGNUM;
13037 else
13038 regno = LR_REGNUM;
13040 /* Remove the argument registers that were pushed onto the stack. */
13041 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13042 SP_REGNUM, SP_REGNUM,
13043 current_function_pretend_args_size);
13045 thumb_exit (asm_out_file, regno);
13048 return "";
13051 /* Functions to save and restore machine-specific function data. */
13052 static struct machine_function *
13053 arm_init_machine_status (void)
13055 struct machine_function *machine;
13056 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13058 #if ARM_FT_UNKNOWN != 0
13059 machine->func_type = ARM_FT_UNKNOWN;
13060 #endif
13061 return machine;
13064 /* Return an RTX indicating where the return address to the
13065 calling function can be found. */
13067 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13069 if (count != 0)
13070 return NULL_RTX;
13072 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13075 /* Do anything needed before RTL is emitted for each function. */
13076 void
13077 arm_init_expanders (void)
13079 /* Arrange to initialize and mark the machine per-function status. */
13080 init_machine_status = arm_init_machine_status;
13082 /* This is to stop the combine pass optimizing away the alignment
13083 adjustment of va_arg. */
13084 /* ??? It is claimed that this should not be necessary. */
13085 if (cfun)
13086 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13090 /* Like arm_compute_initial_elimination offset. Simpler because
13091 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13093 HOST_WIDE_INT
13094 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13096 arm_stack_offsets *offsets;
13098 offsets = arm_get_frame_offsets ();
13100 switch (from)
13102 case ARG_POINTER_REGNUM:
13103 switch (to)
13105 case STACK_POINTER_REGNUM:
13106 return offsets->outgoing_args - offsets->saved_args;
13108 case FRAME_POINTER_REGNUM:
13109 return offsets->soft_frame - offsets->saved_args;
13111 case THUMB_HARD_FRAME_POINTER_REGNUM:
13112 case ARM_HARD_FRAME_POINTER_REGNUM:
13113 return offsets->saved_regs - offsets->saved_args;
13115 default:
13116 abort();
13118 break;
13120 case FRAME_POINTER_REGNUM:
13121 switch (to)
13123 case STACK_POINTER_REGNUM:
13124 return offsets->outgoing_args - offsets->soft_frame;
13126 case THUMB_HARD_FRAME_POINTER_REGNUM:
13127 case ARM_HARD_FRAME_POINTER_REGNUM:
13128 return offsets->saved_regs - offsets->soft_frame;
13130 default:
13131 abort();
13133 break;
13135 default:
13136 abort ();
13141 /* Generate the rest of a function's prologue. */
13142 void
13143 thumb_expand_prologue (void)
13145 rtx insn, dwarf;
13147 HOST_WIDE_INT amount;
13148 arm_stack_offsets *offsets;
13149 unsigned long func_type;
13150 int regno;
13151 unsigned long live_regs_mask;
13153 func_type = arm_current_func_type ();
13155 /* Naked functions don't have prologues. */
13156 if (IS_NAKED (func_type))
13157 return;
13159 if (IS_INTERRUPT (func_type))
13161 error ("interrupt Service Routines cannot be coded in Thumb mode");
13162 return;
13165 live_regs_mask = thumb_compute_save_reg_mask ();
13166 /* Load the pic register before setting the frame pointer,
13167 so we can use r7 as a temporary work register. */
13168 if (flag_pic)
13169 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13171 offsets = arm_get_frame_offsets ();
13173 if (frame_pointer_needed)
13175 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13176 stack_pointer_rtx));
13177 RTX_FRAME_RELATED_P (insn) = 1;
13179 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13180 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13181 stack_pointer_rtx);
13183 amount = offsets->outgoing_args - offsets->saved_regs;
13184 if (amount)
13186 if (amount < 512)
13188 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13189 GEN_INT (- amount)));
13190 RTX_FRAME_RELATED_P (insn) = 1;
13192 else
13194 rtx reg;
13196 /* The stack decrement is too big for an immediate value in a single
13197 insn. In theory we could issue multiple subtracts, but after
13198 three of them it becomes more space efficient to place the full
13199 value in the constant pool and load into a register. (Also the
13200 ARM debugger really likes to see only one stack decrement per
13201 function). So instead we look for a scratch register into which
13202 we can load the decrement, and then we subtract this from the
13203 stack pointer. Unfortunately on the thumb the only available
13204 scratch registers are the argument registers, and we cannot use
13205 these as they may hold arguments to the function. Instead we
13206 attempt to locate a call preserved register which is used by this
13207 function. If we can find one, then we know that it will have
13208 been pushed at the start of the prologue and so we can corrupt
13209 it now. */
13210 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13211 if (live_regs_mask & (1 << regno)
13212 && !(frame_pointer_needed
13213 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13214 break;
13216 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13218 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13220 /* Choose an arbitrary, non-argument low register. */
13221 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13223 /* Save it by copying it into a high, scratch register. */
13224 emit_insn (gen_movsi (spare, reg));
13225 /* Add a USE to stop propagate_one_insn() from barfing. */
13226 emit_insn (gen_prologue_use (spare));
13228 /* Decrement the stack. */
13229 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13230 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13231 stack_pointer_rtx, reg));
13232 RTX_FRAME_RELATED_P (insn) = 1;
13233 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13234 plus_constant (stack_pointer_rtx,
13235 -amount));
13236 RTX_FRAME_RELATED_P (dwarf) = 1;
13237 REG_NOTES (insn)
13238 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13239 REG_NOTES (insn));
13241 /* Restore the low register's original value. */
13242 emit_insn (gen_movsi (reg, spare));
13244 /* Emit a USE of the restored scratch register, so that flow
13245 analysis will not consider the restore redundant. The
13246 register won't be used again in this function and isn't
13247 restored by the epilogue. */
13248 emit_insn (gen_prologue_use (reg));
13250 else
13252 reg = gen_rtx_REG (SImode, regno);
13254 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13256 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13257 stack_pointer_rtx, reg));
13258 RTX_FRAME_RELATED_P (insn) = 1;
13259 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13260 plus_constant (stack_pointer_rtx,
13261 -amount));
13262 RTX_FRAME_RELATED_P (dwarf) = 1;
13263 REG_NOTES (insn)
13264 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13265 REG_NOTES (insn));
13268 /* If the frame pointer is needed, emit a special barrier that
13269 will prevent the scheduler from moving stores to the frame
13270 before the stack adjustment. */
13271 if (frame_pointer_needed)
13272 emit_insn (gen_stack_tie (stack_pointer_rtx,
13273 hard_frame_pointer_rtx));
13276 if (current_function_profile || TARGET_NO_SCHED_PRO)
13277 emit_insn (gen_blockage ());
13279 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13280 if (live_regs_mask & 0xff)
13281 cfun->machine->lr_save_eliminated = 0;
13283 /* If the link register is being kept alive, with the return address in it,
13284 then make sure that it does not get reused by the ce2 pass. */
13285 if (cfun->machine->lr_save_eliminated)
13286 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13290 void
13291 thumb_expand_epilogue (void)
13293 HOST_WIDE_INT amount;
13294 arm_stack_offsets *offsets;
13295 int regno;
13297 /* Naked functions don't have prologues. */
13298 if (IS_NAKED (arm_current_func_type ()))
13299 return;
13301 offsets = arm_get_frame_offsets ();
13302 amount = offsets->outgoing_args - offsets->saved_regs;
13304 if (frame_pointer_needed)
13305 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13306 else if (amount)
13308 if (amount < 512)
13309 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13310 GEN_INT (amount)));
13311 else
13313 /* r3 is always free in the epilogue. */
13314 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13316 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13317 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13321 /* Emit a USE (stack_pointer_rtx), so that
13322 the stack adjustment will not be deleted. */
13323 emit_insn (gen_prologue_use (stack_pointer_rtx));
13325 if (current_function_profile || TARGET_NO_SCHED_PRO)
13326 emit_insn (gen_blockage ());
13328 /* Emit a clobber for each insn that will be restored in the epilogue,
13329 so that flow2 will get register lifetimes correct. */
13330 for (regno = 0; regno < 13; regno++)
13331 if (regs_ever_live[regno] && !call_used_regs[regno])
13332 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13334 if (! regs_ever_live[LR_REGNUM])
13335 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13338 static void
13339 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13341 unsigned long live_regs_mask = 0;
13342 unsigned long l_mask;
13343 unsigned high_regs_pushed = 0;
13344 int cfa_offset = 0;
13345 int regno;
13347 if (IS_NAKED (arm_current_func_type ()))
13348 return;
13350 if (is_called_in_ARM_mode (current_function_decl))
13352 const char * name;
13354 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13355 abort ();
13356 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13357 abort ();
13358 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13360 /* Generate code sequence to switch us into Thumb mode. */
13361 /* The .code 32 directive has already been emitted by
13362 ASM_DECLARE_FUNCTION_NAME. */
13363 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13364 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13366 /* Generate a label, so that the debugger will notice the
13367 change in instruction sets. This label is also used by
13368 the assembler to bypass the ARM code when this function
13369 is called from a Thumb encoded function elsewhere in the
13370 same file. Hence the definition of STUB_NAME here must
13371 agree with the definition in gas/config/tc-arm.c. */
13373 #define STUB_NAME ".real_start_of"
13375 fprintf (f, "\t.code\t16\n");
13376 #ifdef ARM_PE
13377 if (arm_dllexport_name_p (name))
13378 name = arm_strip_name_encoding (name);
13379 #endif
13380 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13381 fprintf (f, "\t.thumb_func\n");
13382 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13385 if (current_function_pretend_args_size)
13387 if (cfun->machine->uses_anonymous_args)
13389 int num_pushes;
13391 fprintf (f, "\tpush\t{");
13393 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13395 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13396 regno <= LAST_ARG_REGNUM;
13397 regno++)
13398 asm_fprintf (f, "%r%s", regno,
13399 regno == LAST_ARG_REGNUM ? "" : ", ");
13401 fprintf (f, "}\n");
13403 else
13404 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13405 SP_REGNUM, SP_REGNUM,
13406 current_function_pretend_args_size);
13408 /* We don't need to record the stores for unwinding (would it
13409 help the debugger any if we did?), but record the change in
13410 the stack pointer. */
13411 if (dwarf2out_do_frame ())
13413 char *l = dwarf2out_cfi_label ();
13415 cfa_offset = cfa_offset + current_function_pretend_args_size;
13416 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13420 /* Get the registers we are going to push. */
13421 live_regs_mask = thumb_compute_save_reg_mask ();
13422 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13423 l_mask = live_regs_mask & 0x40ff;
13424 /* Then count how many other high registers will need to be pushed. */
13425 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13427 if (TARGET_BACKTRACE)
13429 unsigned offset;
13430 unsigned work_register;
13432 /* We have been asked to create a stack backtrace structure.
13433 The code looks like this:
13435 0 .align 2
13436 0 func:
13437 0 sub SP, #16 Reserve space for 4 registers.
13438 2 push {R7} Push low registers.
13439 4 add R7, SP, #20 Get the stack pointer before the push.
13440 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13441 8 mov R7, PC Get hold of the start of this code plus 12.
13442 10 str R7, [SP, #16] Store it.
13443 12 mov R7, FP Get hold of the current frame pointer.
13444 14 str R7, [SP, #4] Store it.
13445 16 mov R7, LR Get hold of the current return address.
13446 18 str R7, [SP, #12] Store it.
13447 20 add R7, SP, #16 Point at the start of the backtrace structure.
13448 22 mov FP, R7 Put this value into the frame pointer. */
13450 work_register = thumb_find_work_register (live_regs_mask);
13452 asm_fprintf
13453 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13454 SP_REGNUM, SP_REGNUM);
13456 if (dwarf2out_do_frame ())
13458 char *l = dwarf2out_cfi_label ();
13460 cfa_offset = cfa_offset + 16;
13461 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13464 if (l_mask)
13466 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13467 offset = bit_count (l_mask);
13469 else
13470 offset = 0;
13472 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13473 offset + 16 + current_function_pretend_args_size);
13475 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13476 offset + 4);
13478 /* Make sure that the instruction fetching the PC is in the right place
13479 to calculate "start of backtrace creation code + 12". */
13480 if (l_mask)
13482 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13483 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13484 offset + 12);
13485 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13486 ARM_HARD_FRAME_POINTER_REGNUM);
13487 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13488 offset);
13490 else
13492 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13493 ARM_HARD_FRAME_POINTER_REGNUM);
13494 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13495 offset);
13496 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13497 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13498 offset + 12);
13501 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13502 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13503 offset + 8);
13504 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13505 offset + 12);
13506 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13507 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13509 /* Optimisation: If we are not pushing any low registers but we are going
13510 to push some high registers then delay our first push. This will just
13511 be a push of LR and we can combine it with the push of the first high
13512 register. */
13513 else if ((l_mask & 0xff) != 0
13514 || (high_regs_pushed == 0 && l_mask))
13515 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13517 if (high_regs_pushed)
13519 unsigned pushable_regs;
13520 unsigned next_hi_reg;
13522 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13523 if (live_regs_mask & (1 << next_hi_reg))
13524 break;
13526 pushable_regs = l_mask & 0xff;
13528 if (pushable_regs == 0)
13529 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13531 while (high_regs_pushed > 0)
13533 unsigned long real_regs_mask = 0;
13535 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13537 if (pushable_regs & (1 << regno))
13539 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13541 high_regs_pushed --;
13542 real_regs_mask |= (1 << next_hi_reg);
13544 if (high_regs_pushed)
13546 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13547 next_hi_reg --)
13548 if (live_regs_mask & (1 << next_hi_reg))
13549 break;
13551 else
13553 pushable_regs &= ~((1 << regno) - 1);
13554 break;
13559 /* If we had to find a work register and we have not yet
13560 saved the LR then add it to the list of regs to push. */
13561 if (l_mask == (1 << LR_REGNUM))
13563 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13564 1, &cfa_offset,
13565 real_regs_mask | (1 << LR_REGNUM));
13566 l_mask = 0;
13568 else
13569 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13574 /* Handle the case of a double word load into a low register from
13575 a computed memory address. The computed address may involve a
13576 register which is overwritten by the load. */
13577 const char *
13578 thumb_load_double_from_address (rtx *operands)
13580 rtx addr;
13581 rtx base;
13582 rtx offset;
13583 rtx arg1;
13584 rtx arg2;
13586 if (GET_CODE (operands[0]) != REG)
13587 abort ();
13589 if (GET_CODE (operands[1]) != MEM)
13590 abort ();
13592 /* Get the memory address. */
13593 addr = XEXP (operands[1], 0);
13595 /* Work out how the memory address is computed. */
13596 switch (GET_CODE (addr))
13598 case REG:
13599 operands[2] = gen_rtx_MEM (SImode,
13600 plus_constant (XEXP (operands[1], 0), 4));
13602 if (REGNO (operands[0]) == REGNO (addr))
13604 output_asm_insn ("ldr\t%H0, %2", operands);
13605 output_asm_insn ("ldr\t%0, %1", operands);
13607 else
13609 output_asm_insn ("ldr\t%0, %1", operands);
13610 output_asm_insn ("ldr\t%H0, %2", operands);
13612 break;
13614 case CONST:
13615 /* Compute <address> + 4 for the high order load. */
13616 operands[2] = gen_rtx_MEM (SImode,
13617 plus_constant (XEXP (operands[1], 0), 4));
13619 output_asm_insn ("ldr\t%0, %1", operands);
13620 output_asm_insn ("ldr\t%H0, %2", operands);
13621 break;
13623 case PLUS:
13624 arg1 = XEXP (addr, 0);
13625 arg2 = XEXP (addr, 1);
13627 if (CONSTANT_P (arg1))
13628 base = arg2, offset = arg1;
13629 else
13630 base = arg1, offset = arg2;
13632 if (GET_CODE (base) != REG)
13633 abort ();
13635 /* Catch the case of <address> = <reg> + <reg> */
13636 if (GET_CODE (offset) == REG)
13638 int reg_offset = REGNO (offset);
13639 int reg_base = REGNO (base);
13640 int reg_dest = REGNO (operands[0]);
13642 /* Add the base and offset registers together into the
13643 higher destination register. */
13644 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13645 reg_dest + 1, reg_base, reg_offset);
13647 /* Load the lower destination register from the address in
13648 the higher destination register. */
13649 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13650 reg_dest, reg_dest + 1);
13652 /* Load the higher destination register from its own address
13653 plus 4. */
13654 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13655 reg_dest + 1, reg_dest + 1);
13657 else
13659 /* Compute <address> + 4 for the high order load. */
13660 operands[2] = gen_rtx_MEM (SImode,
13661 plus_constant (XEXP (operands[1], 0), 4));
13663 /* If the computed address is held in the low order register
13664 then load the high order register first, otherwise always
13665 load the low order register first. */
13666 if (REGNO (operands[0]) == REGNO (base))
13668 output_asm_insn ("ldr\t%H0, %2", operands);
13669 output_asm_insn ("ldr\t%0, %1", operands);
13671 else
13673 output_asm_insn ("ldr\t%0, %1", operands);
13674 output_asm_insn ("ldr\t%H0, %2", operands);
13677 break;
13679 case LABEL_REF:
13680 /* With no registers to worry about we can just load the value
13681 directly. */
13682 operands[2] = gen_rtx_MEM (SImode,
13683 plus_constant (XEXP (operands[1], 0), 4));
13685 output_asm_insn ("ldr\t%H0, %2", operands);
13686 output_asm_insn ("ldr\t%0, %1", operands);
13687 break;
13689 default:
13690 abort ();
13691 break;
13694 return "";
13697 const char *
13698 thumb_output_move_mem_multiple (int n, rtx *operands)
13700 rtx tmp;
13702 switch (n)
13704 case 2:
13705 if (REGNO (operands[4]) > REGNO (operands[5]))
13707 tmp = operands[4];
13708 operands[4] = operands[5];
13709 operands[5] = tmp;
13711 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13712 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13713 break;
13715 case 3:
13716 if (REGNO (operands[4]) > REGNO (operands[5]))
13718 tmp = operands[4];
13719 operands[4] = operands[5];
13720 operands[5] = tmp;
13722 if (REGNO (operands[5]) > REGNO (operands[6]))
13724 tmp = operands[5];
13725 operands[5] = operands[6];
13726 operands[6] = tmp;
13728 if (REGNO (operands[4]) > REGNO (operands[5]))
13730 tmp = operands[4];
13731 operands[4] = operands[5];
13732 operands[5] = tmp;
13735 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13736 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13737 break;
13739 default:
13740 abort ();
13743 return "";
13746 /* Output a call-via instruction for thumb state. */
13747 const char *
13748 thumb_call_via_reg (rtx reg)
13750 int regno = REGNO (reg);
13751 rtx *labelp;
13753 gcc_assert (regno < LR_REGNUM);
13755 /* If we are in the normal text section we can use a single instance
13756 per compilation unit. If we are doing function sections, then we need
13757 an entry per section, since we can't rely on reachability. */
13758 if (in_text_section ())
13760 thumb_call_reg_needed = 1;
13762 if (thumb_call_via_label[regno] == NULL)
13763 thumb_call_via_label[regno] = gen_label_rtx ();
13764 labelp = thumb_call_via_label + regno;
13766 else
13768 if (cfun->machine->call_via[regno] == NULL)
13769 cfun->machine->call_via[regno] = gen_label_rtx ();
13770 labelp = cfun->machine->call_via + regno;
13773 output_asm_insn ("bl\t%a0", labelp);
13774 return "";
13777 /* Routines for generating rtl. */
13778 void
13779 thumb_expand_movmemqi (rtx *operands)
13781 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13782 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13783 HOST_WIDE_INT len = INTVAL (operands[2]);
13784 HOST_WIDE_INT offset = 0;
13786 while (len >= 12)
13788 emit_insn (gen_movmem12b (out, in, out, in));
13789 len -= 12;
13792 if (len >= 8)
13794 emit_insn (gen_movmem8b (out, in, out, in));
13795 len -= 8;
13798 if (len >= 4)
13800 rtx reg = gen_reg_rtx (SImode);
13801 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13802 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13803 len -= 4;
13804 offset += 4;
13807 if (len >= 2)
13809 rtx reg = gen_reg_rtx (HImode);
13810 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13811 plus_constant (in, offset))));
13812 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13813 reg));
13814 len -= 2;
13815 offset += 2;
13818 if (len)
13820 rtx reg = gen_reg_rtx (QImode);
13821 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13822 plus_constant (in, offset))));
13823 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13824 reg));
13828 void
13829 thumb_reload_out_hi (rtx *operands)
13831 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13834 /* Handle reading a half-word from memory during reload. */
13835 void
13836 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13838 abort ();
13841 /* Return the length of a function name prefix
13842 that starts with the character 'c'. */
13843 static int
13844 arm_get_strip_length (int c)
13846 switch (c)
13848 ARM_NAME_ENCODING_LENGTHS
13849 default: return 0;
13853 /* Return a pointer to a function's name with any
13854 and all prefix encodings stripped from it. */
13855 const char *
13856 arm_strip_name_encoding (const char *name)
13858 int skip;
13860 while ((skip = arm_get_strip_length (* name)))
13861 name += skip;
13863 return name;
13866 /* If there is a '*' anywhere in the name's prefix, then
13867 emit the stripped name verbatim, otherwise prepend an
13868 underscore if leading underscores are being used. */
13869 void
13870 arm_asm_output_labelref (FILE *stream, const char *name)
13872 int skip;
13873 int verbatim = 0;
13875 while ((skip = arm_get_strip_length (* name)))
13877 verbatim |= (*name == '*');
13878 name += skip;
13881 if (verbatim)
13882 fputs (name, stream);
13883 else
13884 asm_fprintf (stream, "%U%s", name);
13887 static void
13888 arm_file_end (void)
13890 int regno;
13892 if (! thumb_call_reg_needed)
13893 return;
13895 text_section ();
13896 asm_fprintf (asm_out_file, "\t.code 16\n");
13897 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13899 for (regno = 0; regno < LR_REGNUM; regno++)
13901 rtx label = thumb_call_via_label[regno];
13903 if (label != 0)
13905 targetm.asm_out.internal_label (asm_out_file, "L",
13906 CODE_LABEL_NUMBER (label));
13907 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
13912 rtx aof_pic_label;
13914 #ifdef AOF_ASSEMBLER
13915 /* Special functions only needed when producing AOF syntax assembler. */
13917 struct pic_chain
13919 struct pic_chain * next;
13920 const char * symname;
13923 static struct pic_chain * aof_pic_chain = NULL;
13926 aof_pic_entry (rtx x)
13928 struct pic_chain ** chainp;
13929 int offset;
13931 if (aof_pic_label == NULL_RTX)
13933 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13936 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13937 offset += 4, chainp = &(*chainp)->next)
13938 if ((*chainp)->symname == XSTR (x, 0))
13939 return plus_constant (aof_pic_label, offset);
13941 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13942 (*chainp)->next = NULL;
13943 (*chainp)->symname = XSTR (x, 0);
13944 return plus_constant (aof_pic_label, offset);
13947 void
13948 aof_dump_pic_table (FILE *f)
13950 struct pic_chain * chain;
13952 if (aof_pic_chain == NULL)
13953 return;
13955 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13956 PIC_OFFSET_TABLE_REGNUM,
13957 PIC_OFFSET_TABLE_REGNUM);
13958 fputs ("|x$adcons|\n", f);
13960 for (chain = aof_pic_chain; chain; chain = chain->next)
13962 fputs ("\tDCD\t", f);
13963 assemble_name (f, chain->symname);
13964 fputs ("\n", f);
13968 int arm_text_section_count = 1;
13970 char *
13971 aof_text_section (void )
13973 static char buf[100];
13974 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13975 arm_text_section_count++);
13976 if (flag_pic)
13977 strcat (buf, ", PIC, REENTRANT");
13978 return buf;
13981 static int arm_data_section_count = 1;
13983 char *
13984 aof_data_section (void)
13986 static char buf[100];
13987 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13988 return buf;
13991 /* The AOF assembler is religiously strict about declarations of
13992 imported and exported symbols, so that it is impossible to declare
13993 a function as imported near the beginning of the file, and then to
13994 export it later on. It is, however, possible to delay the decision
13995 until all the functions in the file have been compiled. To get
13996 around this, we maintain a list of the imports and exports, and
13997 delete from it any that are subsequently defined. At the end of
13998 compilation we spit the remainder of the list out before the END
13999 directive. */
14001 struct import
14003 struct import * next;
14004 const char * name;
14007 static struct import * imports_list = NULL;
14009 void
14010 aof_add_import (const char *name)
14012 struct import * new;
14014 for (new = imports_list; new; new = new->next)
14015 if (new->name == name)
14016 return;
14018 new = (struct import *) xmalloc (sizeof (struct import));
14019 new->next = imports_list;
14020 imports_list = new;
14021 new->name = name;
14024 void
14025 aof_delete_import (const char *name)
14027 struct import ** old;
14029 for (old = &imports_list; *old; old = & (*old)->next)
14031 if ((*old)->name == name)
14033 *old = (*old)->next;
14034 return;
14039 int arm_main_function = 0;
14041 static void
14042 aof_dump_imports (FILE *f)
14044 /* The AOF assembler needs this to cause the startup code to be extracted
14045 from the library. Brining in __main causes the whole thing to work
14046 automagically. */
14047 if (arm_main_function)
14049 text_section ();
14050 fputs ("\tIMPORT __main\n", f);
14051 fputs ("\tDCD __main\n", f);
14054 /* Now dump the remaining imports. */
14055 while (imports_list)
14057 fprintf (f, "\tIMPORT\t");
14058 assemble_name (f, imports_list->name);
14059 fputc ('\n', f);
14060 imports_list = imports_list->next;
14064 static void
14065 aof_globalize_label (FILE *stream, const char *name)
14067 default_globalize_label (stream, name);
14068 if (! strcmp (name, "main"))
14069 arm_main_function = 1;
14072 static void
14073 aof_file_start (void)
14075 fputs ("__r0\tRN\t0\n", asm_out_file);
14076 fputs ("__a1\tRN\t0\n", asm_out_file);
14077 fputs ("__a2\tRN\t1\n", asm_out_file);
14078 fputs ("__a3\tRN\t2\n", asm_out_file);
14079 fputs ("__a4\tRN\t3\n", asm_out_file);
14080 fputs ("__v1\tRN\t4\n", asm_out_file);
14081 fputs ("__v2\tRN\t5\n", asm_out_file);
14082 fputs ("__v3\tRN\t6\n", asm_out_file);
14083 fputs ("__v4\tRN\t7\n", asm_out_file);
14084 fputs ("__v5\tRN\t8\n", asm_out_file);
14085 fputs ("__v6\tRN\t9\n", asm_out_file);
14086 fputs ("__sl\tRN\t10\n", asm_out_file);
14087 fputs ("__fp\tRN\t11\n", asm_out_file);
14088 fputs ("__ip\tRN\t12\n", asm_out_file);
14089 fputs ("__sp\tRN\t13\n", asm_out_file);
14090 fputs ("__lr\tRN\t14\n", asm_out_file);
14091 fputs ("__pc\tRN\t15\n", asm_out_file);
14092 fputs ("__f0\tFN\t0\n", asm_out_file);
14093 fputs ("__f1\tFN\t1\n", asm_out_file);
14094 fputs ("__f2\tFN\t2\n", asm_out_file);
14095 fputs ("__f3\tFN\t3\n", asm_out_file);
14096 fputs ("__f4\tFN\t4\n", asm_out_file);
14097 fputs ("__f5\tFN\t5\n", asm_out_file);
14098 fputs ("__f6\tFN\t6\n", asm_out_file);
14099 fputs ("__f7\tFN\t7\n", asm_out_file);
14100 text_section ();
14103 static void
14104 aof_file_end (void)
14106 if (flag_pic)
14107 aof_dump_pic_table (asm_out_file);
14108 arm_file_end ();
14109 aof_dump_imports (asm_out_file);
14110 fputs ("\tEND\n", asm_out_file);
14112 #endif /* AOF_ASSEMBLER */
14114 #ifndef ARM_PE
14115 /* Symbols in the text segment can be accessed without indirecting via the
14116 constant pool; it may take an extra binary operation, but this is still
14117 faster than indirecting via memory. Don't do this when not optimizing,
14118 since we won't be calculating al of the offsets necessary to do this
14119 simplification. */
14121 static void
14122 arm_encode_section_info (tree decl, rtx rtl, int first)
14124 /* This doesn't work with AOF syntax, since the string table may be in
14125 a different AREA. */
14126 #ifndef AOF_ASSEMBLER
14127 if (optimize > 0 && TREE_CONSTANT (decl))
14128 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14129 #endif
14131 /* If we are referencing a function that is weak then encode a long call
14132 flag in the function name, otherwise if the function is static or
14133 or known to be defined in this file then encode a short call flag. */
14134 if (first && DECL_P (decl))
14136 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14137 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14138 else if (! TREE_PUBLIC (decl))
14139 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14142 #endif /* !ARM_PE */
14144 static void
14145 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14147 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14148 && !strcmp (prefix, "L"))
14150 arm_ccfsm_state = 0;
14151 arm_target_insn = NULL;
14153 default_internal_label (stream, prefix, labelno);
14156 /* Output code to add DELTA to the first argument, and then jump
14157 to FUNCTION. Used for C++ multiple inheritance. */
14158 static void
14159 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14160 HOST_WIDE_INT delta,
14161 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14162 tree function)
14164 static int thunk_label = 0;
14165 char label[256];
14166 int mi_delta = delta;
14167 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14168 int shift = 0;
14169 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14170 ? 1 : 0);
14171 if (mi_delta < 0)
14172 mi_delta = - mi_delta;
14173 if (TARGET_THUMB)
14175 int labelno = thunk_label++;
14176 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14177 fputs ("\tldr\tr12, ", file);
14178 assemble_name (file, label);
14179 fputc ('\n', file);
14181 while (mi_delta != 0)
14183 if ((mi_delta & (3 << shift)) == 0)
14184 shift += 2;
14185 else
14187 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14188 mi_op, this_regno, this_regno,
14189 mi_delta & (0xff << shift));
14190 mi_delta &= ~(0xff << shift);
14191 shift += 8;
14194 if (TARGET_THUMB)
14196 fprintf (file, "\tbx\tr12\n");
14197 ASM_OUTPUT_ALIGN (file, 2);
14198 assemble_name (file, label);
14199 fputs (":\n", file);
14200 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14202 else
14204 fputs ("\tb\t", file);
14205 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14206 if (NEED_PLT_RELOC)
14207 fputs ("(PLT)", file);
14208 fputc ('\n', file);
14213 arm_emit_vector_const (FILE *file, rtx x)
14215 int i;
14216 const char * pattern;
14218 if (GET_CODE (x) != CONST_VECTOR)
14219 abort ();
14221 switch (GET_MODE (x))
14223 case V2SImode: pattern = "%08x"; break;
14224 case V4HImode: pattern = "%04x"; break;
14225 case V8QImode: pattern = "%02x"; break;
14226 default: abort ();
14229 fprintf (file, "0x");
14230 for (i = CONST_VECTOR_NUNITS (x); i--;)
14232 rtx element;
14234 element = CONST_VECTOR_ELT (x, i);
14235 fprintf (file, pattern, INTVAL (element));
14238 return 1;
14241 const char *
14242 arm_output_load_gr (rtx *operands)
14244 rtx reg;
14245 rtx offset;
14246 rtx wcgr;
14247 rtx sum;
14249 if (GET_CODE (operands [1]) != MEM
14250 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14251 || GET_CODE (reg = XEXP (sum, 0)) != REG
14252 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14253 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14254 return "wldrw%?\t%0, %1";
14256 /* Fix up an out-of-range load of a GR register. */
14257 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14258 wcgr = operands[0];
14259 operands[0] = reg;
14260 output_asm_insn ("ldr%?\t%0, %1", operands);
14262 operands[0] = wcgr;
14263 operands[1] = reg;
14264 output_asm_insn ("tmcr%?\t%0, %1", operands);
14265 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14267 return "";
14270 static rtx
14271 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14272 int incoming ATTRIBUTE_UNUSED)
14274 #if 0
14275 /* FIXME: The ARM backend has special code to handle structure
14276 returns, and will reserve its own hidden first argument. So
14277 if this macro is enabled a *second* hidden argument will be
14278 reserved, which will break binary compatibility with old
14279 toolchains and also thunk handling. One day this should be
14280 fixed. */
14281 return 0;
14282 #else
14283 /* Register in which address to store a structure value
14284 is passed to a function. */
14285 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14286 #endif
14289 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14291 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14292 named arg and all anonymous args onto the stack.
14293 XXX I know the prologue shouldn't be pushing registers, but it is faster
14294 that way. */
14296 static void
14297 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14298 enum machine_mode mode ATTRIBUTE_UNUSED,
14299 tree type ATTRIBUTE_UNUSED,
14300 int *pretend_size,
14301 int second_time ATTRIBUTE_UNUSED)
14303 cfun->machine->uses_anonymous_args = 1;
14304 if (cum->nregs < NUM_ARG_REGS)
14305 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14308 /* Return nonzero if the CONSUMER instruction (a store) does not need
14309 PRODUCER's value to calculate the address. */
14312 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14314 rtx value = PATTERN (producer);
14315 rtx addr = PATTERN (consumer);
14317 if (GET_CODE (value) == COND_EXEC)
14318 value = COND_EXEC_CODE (value);
14319 if (GET_CODE (value) == PARALLEL)
14320 value = XVECEXP (value, 0, 0);
14321 value = XEXP (value, 0);
14322 if (GET_CODE (addr) == COND_EXEC)
14323 addr = COND_EXEC_CODE (addr);
14324 if (GET_CODE (addr) == PARALLEL)
14325 addr = XVECEXP (addr, 0, 0);
14326 addr = XEXP (addr, 0);
14328 return !reg_overlap_mentioned_p (value, addr);
14331 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14332 have an early register shift value or amount dependency on the
14333 result of PRODUCER. */
14336 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14338 rtx value = PATTERN (producer);
14339 rtx op = PATTERN (consumer);
14340 rtx early_op;
14342 if (GET_CODE (value) == COND_EXEC)
14343 value = COND_EXEC_CODE (value);
14344 if (GET_CODE (value) == PARALLEL)
14345 value = XVECEXP (value, 0, 0);
14346 value = XEXP (value, 0);
14347 if (GET_CODE (op) == COND_EXEC)
14348 op = COND_EXEC_CODE (op);
14349 if (GET_CODE (op) == PARALLEL)
14350 op = XVECEXP (op, 0, 0);
14351 op = XEXP (op, 1);
14353 early_op = XEXP (op, 0);
14354 /* This is either an actual independent shift, or a shift applied to
14355 the first operand of another operation. We want the whole shift
14356 operation. */
14357 if (GET_CODE (early_op) == REG)
14358 early_op = op;
14360 return !reg_overlap_mentioned_p (value, early_op);
14363 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14364 have an early register shift value dependency on the result of
14365 PRODUCER. */
14368 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14370 rtx value = PATTERN (producer);
14371 rtx op = PATTERN (consumer);
14372 rtx early_op;
14374 if (GET_CODE (value) == COND_EXEC)
14375 value = COND_EXEC_CODE (value);
14376 if (GET_CODE (value) == PARALLEL)
14377 value = XVECEXP (value, 0, 0);
14378 value = XEXP (value, 0);
14379 if (GET_CODE (op) == COND_EXEC)
14380 op = COND_EXEC_CODE (op);
14381 if (GET_CODE (op) == PARALLEL)
14382 op = XVECEXP (op, 0, 0);
14383 op = XEXP (op, 1);
14385 early_op = XEXP (op, 0);
14387 /* This is either an actual independent shift, or a shift applied to
14388 the first operand of another operation. We want the value being
14389 shifted, in either case. */
14390 if (GET_CODE (early_op) != REG)
14391 early_op = XEXP (early_op, 0);
14393 return !reg_overlap_mentioned_p (value, early_op);
14396 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14397 have an early register mult dependency on the result of
14398 PRODUCER. */
14401 arm_no_early_mul_dep (rtx producer, rtx consumer)
14403 rtx value = PATTERN (producer);
14404 rtx op = PATTERN (consumer);
14406 if (GET_CODE (value) == COND_EXEC)
14407 value = COND_EXEC_CODE (value);
14408 if (GET_CODE (value) == PARALLEL)
14409 value = XVECEXP (value, 0, 0);
14410 value = XEXP (value, 0);
14411 if (GET_CODE (op) == COND_EXEC)
14412 op = COND_EXEC_CODE (op);
14413 if (GET_CODE (op) == PARALLEL)
14414 op = XVECEXP (op, 0, 0);
14415 op = XEXP (op, 1);
14417 return (GET_CODE (op) == PLUS
14418 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14422 /* We can't rely on the caller doing the proper promotion when
14423 using APCS or ATPCS. */
14425 static bool
14426 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14428 return !TARGET_AAPCS_BASED;
14432 /* AAPCS based ABIs use short enums by default. */
14434 static bool
14435 arm_default_short_enums (void)
14437 return TARGET_AAPCS_BASED;
14441 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14443 static bool
14444 arm_align_anon_bitfield (void)
14446 return TARGET_AAPCS_BASED;
14450 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14452 static tree
14453 arm_cxx_guard_type (void)
14455 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14459 /* The EABI says test the least significan bit of a guard variable. */
14461 static bool
14462 arm_cxx_guard_mask_bit (void)
14464 return TARGET_AAPCS_BASED;
14468 /* The EABI specifies that all array cookies are 8 bytes long. */
14470 static tree
14471 arm_get_cookie_size (tree type)
14473 tree size;
14475 if (!TARGET_AAPCS_BASED)
14476 return default_cxx_get_cookie_size (type);
14478 size = build_int_cst (sizetype, 8);
14479 return size;
14483 /* The EABI says that array cookies should also contain the element size. */
14485 static bool
14486 arm_cookie_has_size (void)
14488 return TARGET_AAPCS_BASED;
14492 /* The EABI says constructors and destructors should return a pointer to
14493 the object constructed/destroyed. */
14495 static bool
14496 arm_cxx_cdtor_returns_this (void)
14498 return TARGET_AAPCS_BASED;
14501 /* The EABI says that an inline function may never be the key
14502 method. */
14504 static bool
14505 arm_cxx_key_method_may_be_inline (void)
14507 return !TARGET_AAPCS_BASED;
14510 /* The EABI says that the virtual table, etc., for a class must be
14511 exported if it has a key method. The EABI does not specific the
14512 behavior if there is no key method, but there is no harm in
14513 exporting the class data in that case too. */
14515 static bool
14516 arm_cxx_export_class_data (void)
14518 return TARGET_AAPCS_BASED;
14521 void
14522 arm_set_return_address (rtx source, rtx scratch)
14524 arm_stack_offsets *offsets;
14525 HOST_WIDE_INT delta;
14526 rtx addr;
14527 unsigned long saved_regs;
14529 saved_regs = arm_compute_save_reg_mask ();
14531 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14532 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14533 else
14535 if (frame_pointer_needed)
14536 addr = plus_constant(hard_frame_pointer_rtx, -4);
14537 else
14539 /* LR will be the first saved register. */
14540 offsets = arm_get_frame_offsets ();
14541 delta = offsets->outgoing_args - (offsets->frame + 4);
14544 if (delta >= 4096)
14546 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14547 GEN_INT (delta & ~4095)));
14548 addr = scratch;
14549 delta &= 4095;
14551 else
14552 addr = stack_pointer_rtx;
14554 addr = plus_constant (addr, delta);
14556 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14561 void
14562 thumb_set_return_address (rtx source, rtx scratch)
14564 arm_stack_offsets *offsets;
14565 HOST_WIDE_INT delta;
14566 int reg;
14567 rtx addr;
14568 unsigned long mask;
14570 emit_insn (gen_rtx_USE (VOIDmode, source));
14572 mask = thumb_compute_save_reg_mask ();
14573 if (mask & (1 << LR_REGNUM))
14575 offsets = arm_get_frame_offsets ();
14577 /* Find the saved regs. */
14578 if (frame_pointer_needed)
14580 delta = offsets->soft_frame - offsets->saved_args;
14581 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14583 else
14585 delta = offsets->outgoing_args - offsets->saved_args;
14586 reg = SP_REGNUM;
14588 /* Allow for the stack frame. */
14589 if (TARGET_BACKTRACE)
14590 delta -= 16;
14591 /* The link register is always the first saved register. */
14592 delta -= 4;
14594 /* Construct the address. */
14595 addr = gen_rtx_REG (SImode, reg);
14596 if ((reg != SP_REGNUM && delta >= 128)
14597 || delta >= 1024)
14599 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14600 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14601 addr = scratch;
14603 else
14604 addr = plus_constant (addr, delta);
14606 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14608 else
14609 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14612 /* Implements target hook vector_mode_supported_p. */
14613 bool
14614 arm_vector_mode_supported_p (enum machine_mode mode)
14616 if ((mode == V2SImode)
14617 || (mode == V4HImode)
14618 || (mode == V8QImode))
14619 return true;
14621 return false;
14624 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14625 ARM insns and therefore guarantee that the shift count is modulo 256.
14626 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14627 guarantee no particular behavior for out-of-range counts. */
14629 static unsigned HOST_WIDE_INT
14630 arm_shift_truncation_mask (enum machine_mode mode)
14632 return mode == SImode ? 255 : 0;
14636 /* Map internal gcc register numbers to DWARF2 register numbers. */
14638 unsigned int
14639 arm_dbx_register_number (unsigned int regno)
14641 if (regno < 16)
14642 return regno;
14644 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14645 compatibility. The EABI defines them as registers 96-103. */
14646 if (IS_FPA_REGNUM (regno))
14647 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14649 if (IS_VFP_REGNUM (regno))
14650 return 64 + regno - FIRST_VFP_REGNUM;
14652 if (IS_IWMMXT_GR_REGNUM (regno))
14653 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14655 if (IS_IWMMXT_REGNUM (regno))
14656 return 112 + regno - FIRST_IWMMXT_REGNUM;
14658 abort ();