/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (martin@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
75 static rtx emit_sfm (int, int);
76 #ifndef AOF_ASSEMBLER
77 static bool arm_assemble_integer (rtx, unsigned int, int);
78 #endif
79 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
80 static arm_cc get_arm_condition_code (rtx);
81 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
82 static rtx is_jump_table (rtx);
83 static const char *output_multi_immediate (rtx *, const char *, const char *,
84 int, HOST_WIDE_INT);
85 static const char *shift_op (rtx, HOST_WIDE_INT *);
86 static struct machine_function *arm_init_machine_status (void);
87 static void replace_symbols_in_block (tree, rtx, rtx);
88 static void thumb_exit (FILE *, int);
89 static rtx is_jump_table (rtx);
90 static HOST_WIDE_INT get_jump_table_size (rtx);
91 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
92 static Mnode *add_minipool_forward_ref (Mfix *);
93 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
94 static Mnode *add_minipool_backward_ref (Mfix *);
95 static void assign_minipool_offsets (Mfix *);
96 static void arm_print_value (FILE *, rtx);
97 static void dump_minipool (rtx);
98 static int arm_barrier_cost (rtx);
99 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
100 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
101 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
102 rtx);
103 static void arm_reorg (void);
104 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
105 static int current_file_function_operand (rtx);
106 static unsigned long arm_compute_save_reg0_reg12_mask (void);
107 static unsigned long arm_compute_save_reg_mask (void);
108 static unsigned long arm_isr_value (tree);
109 static unsigned long arm_compute_func_type (void);
110 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
111 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
112 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
113 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
114 #endif
115 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
116 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
117 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
118 static int arm_comp_type_attributes (tree, tree);
119 static void arm_set_default_type_attributes (tree);
120 static int arm_adjust_cost (rtx, rtx, rtx, int);
121 static int count_insns_for_constant (HOST_WIDE_INT, int);
122 static int arm_get_strip_length (int);
123 static bool arm_function_ok_for_sibcall (tree, tree);
124 static void arm_internal_label (FILE *, const char *, unsigned long);
125 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
126 tree);
127 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
128 static bool arm_size_rtx_costs (rtx, int, int, int *);
129 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
130 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
131 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
132 static bool arm_9e_rtx_costs (rtx, int, int, int *);
133 static int arm_address_cost (rtx);
134 static bool arm_memory_load_p (rtx);
135 static bool arm_cirrus_insn_p (rtx);
136 static void cirrus_reorg (rtx);
137 static void arm_init_builtins (void);
138 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
139 static void arm_init_iwmmxt_builtins (void);
140 static rtx safe_vector_operand (rtx, enum machine_mode);
141 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
142 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
143 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
144 static void emit_constant_insn (rtx cond, rtx pattern);
145 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
146 tree, bool);
148 #ifndef ARM_PE
149 static void arm_encode_section_info (tree, rtx, int);
150 #endif
152 static void arm_file_end (void);
154 #ifdef AOF_ASSEMBLER
155 static void aof_globalize_label (FILE *, const char *);
156 static void aof_dump_imports (FILE *);
157 static void aof_dump_pic_table (FILE *);
158 static void aof_file_start (void);
159 static void aof_file_end (void);
160 #endif
161 static rtx arm_struct_value_rtx (tree, int);
162 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
163 tree, int *, int);
164 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
165 enum machine_mode, tree, bool);
166 static bool arm_promote_prototypes (tree);
167 static bool arm_default_short_enums (void);
168 static bool arm_align_anon_bitfield (void);
170 static tree arm_cxx_guard_type (void);
171 static bool arm_cxx_guard_mask_bit (void);
172 static tree arm_get_cookie_size (tree);
173 static bool arm_cookie_has_size (void);
174 static bool arm_cxx_cdtor_returns_this (void);
175 static bool arm_cxx_key_method_may_be_inline (void);
176 static bool arm_cxx_export_class_data (void);
177 static void arm_init_libfuncs (void);
178 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_EXPORT_CLASS_DATA
#define TARGET_CXX_EXPORT_CLASS_DATA arm_cxx_export_class_data
struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Set by the legacy -mhard-float and -msoft-float options.  */
const char * target_float_switch = NULL;

/* Set by the -mabi=... option.  */
const char * target_abi_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
					 media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2	0
#define FL_FOR_ARCH3	FL_MODE32
#define FL_FOR_ARCH3M	(FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4	(FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T	(FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5	(FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T	(FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E	(FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE	(FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ	FL_FOR_ARCH5TE
#define FL_FOR_ARCH6	(FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J	FL_FOR_ARCH6
#define FL_FOR_ARCH6K	FL_FOR_ARCH6
#define FL_FOR_ARCH6Z	FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK	FL_FOR_ARCH6
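
/* Note: each FL_FOR_ARCHn macro above is the cumulative capability set
   implied by that architecture revision; the all_architectures table
   below adds any remaining per-architecture flags (such as FL_CO_PROC
   or FL_MODE26) on top of these.  */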

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,        "-mcpu=",        all_cores  },
  { NULL,        "-march=",       all_architectures },
  { NULL,        "-mtune=",       all_cores }
};

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";
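
/* Note: arm_arch_name is deliberately a writable array rather than a
   pointer; arm_override_options sprintf()s the real
   "__ARM_ARCH_<arch>__" value over it in place, and the "0UNK"
   initializer is sized so that the longest architecture suffix
   (e.g. "5TEJ") fits.  */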

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};


/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",      FPUTYPE_FPA},
  {"fpe2",     FPUTYPE_FPA_EMU2},
  {"fpe3",     FPUTYPE_FPA_EMU3},
  {"maverick", FPUTYPE_MAVERICK},
  {"vfp",      FPUTYPE_VFP}
};


/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,		/* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,	/* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP		/* FPUTYPE_VFP  */
};


struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",   ARM_FLOAT_ABI_SOFT},
  {"softfp", ARM_FLOAT_ABI_SOFTFP},
  {"hard",   ARM_FLOAT_ABI_HARD}
};


struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};


/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu", ARM_ABI_APCS},
  {"atpcs",    ARM_ABI_ATPCS},
  {"aapcs",    ARM_ABI_AAPCS},
  {"iwmmxt",   ARM_ABI_IWMMXT}
};

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
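
/* Note: bit_count is used by arm_override_options below when searching
   for a best-fit CPU: the candidate whose flags share the most set bits
   with the default CPU's insn_flags wins.  */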

/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
}

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
	{
	  const struct processors * sel;

	  for (sel = ptr->processors; sel->name != NULL; sel++)
	    if (streq (ptr->string, sel->name))
	      {
		/* Set the architecture define.  */
		if (i != 2)
		  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

		/* Determine the processor core for which we should
		   tune code-generation.  */
		if (/* -mcpu= is a sensible default.  */
		    i == 0
		    /* If -march= is used, and -mcpu= has not been used,
		       assume that we should tune for a representative
		       CPU from that architecture.  */
		    || i == 1
		    /* -mtune= overrides -mcpu= and -march=.  */
		    || i == 2)
		  arm_tune = (enum processor_type) (sel - ptr->processors);

		if (i != 2)
		  {
		    /* If we have been given an architecture and a processor
		       make sure that they are compatible.  We only generate
		       a warning though, and we prefer the CPU over the
		       architecture.  */
		    if (insn_flags != 0 && (insn_flags ^ sel->flags))
		      warning ("switch -mcpu=%s conflicts with -march= switch",
			       ptr->string);

		    insn_flags = sel->flags;
		  }

		break;
	      }

	  if (sel->name == NULL)
	    error ("bad value (%s) for %s switch", ptr->string, ptr->name);
	}
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
	{
#ifdef SUBTARGET_CPU_DEFAULT
	  /* Use the subtarget default CPU if none was specified by
	     configure.  */
	  cpu = SUBTARGET_CPU_DEFAULT;
#endif
	  /* Default to ARM6.  */
	  if (cpu == arm_none)
	    cpu = arm6;
	}
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
	 switch that requires certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
	{
	  sought |= (FL_THUMB | FL_MODE32);

	  /* There are no ARM processors that support both APCS-26 and
	     interworking.  Therefore we force FL_MODE26 to be removed
	     from insn_flags here (if it was set), so that the search
	     below will always be able to find a compatible processor.  */
	  insn_flags &= ~FL_MODE26;
	}

      if (sought != 0 && ((sought & insn_flags) != sought))
	{
	  /* Try to locate a CPU type that supports all of the abilities
	     of the default CPU, plus the extra abilities requested by
	     the user.  */
	  for (sel = all_cores; sel->name != NULL; sel++)
	    if ((sel->flags & sought) == (sought | insn_flags))
	      break;

	  if (sel->name == NULL)
	    {
	      unsigned current_bit_count = 0;
	      const struct processors * best_fit = NULL;

	      /* Ideally we would like to issue an error message here
		 saying that it was not possible to find a CPU compatible
		 with the default CPU, but which also supports the command
		 line options specified by the programmer, and so they
		 ought to use the -mcpu=<name> command line option to
		 override the default CPU type.

		 If we cannot find a cpu that has both the
		 characteristics of the default cpu and the given
		 command line options we scan the array again looking
		 for a best match.  */
	      for (sel = all_cores; sel->name != NULL; sel++)
		if ((sel->flags & sought) == sought)
		  {
		    unsigned count;

		    count = bit_count (sel->flags & insn_flags);

		    if (count >= current_bit_count)
		      {
			best_fit = sel;
			current_bit_count = count;
		      }
		  }

	      if (best_fit == NULL)
		abort ();
	      else
		sel = best_fit;
	    }

	  insn_flags = sel->flags;
	}
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
	arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
		    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~ARM_FLAG_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
	{
	  if (streq (arm_all_abis[i].name, target_abi_name))
	    {
	      arm_abi = arm_all_abis[i].abi_type;
	      break;
	    }
	}
      if (i == ARRAY_SIZE (arm_all_abis))
	error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
	target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
	target_fpu_name = "fpe3";
      else
	error ("invalid floating point emulation option: -mfpe=%s",
	       target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
	{
	  if (streq (all_fpus[i].name, target_fpu_name))
	    {
	      arm_fpu_arch = all_fpus[i].fpu;
	      arm_fpu_tune = arm_fpu_arch;
	      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
	      break;
	    }
	}
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
	error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
	arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
	arm_fpu_arch = FPUTYPE_MAVERICK;
      else
	arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
	arm_fpu_tune = FPUTYPE_FPA;
      else
	arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
	abort ();
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
	{
	  if (streq (all_float_abis[i].name, target_float_abi_name))
	    {
	      arm_float_abi = all_float_abis[i].abi_type;
	      break;
	    }
	}
      if (i == ARRAY_SIZE (all_float_abis))
	error ("invalid floating point abi: -mfloat-abi=%s",
	       target_float_abi_name);
    }
  else if (target_float_switch)
    {
      /* This is a bit of a hack to avoid needing target flags for these.  */
      if (target_float_switch[0] == 'h')
	arm_float_abi = ARM_FLOAT_ABI_HARD;
      else
	arm_float_abi = ARM_FLOAT_ABI_SOFT;
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
	  || (ARM_DOUBLEWORD_ALIGN && size == 64))
	arm_structure_size_boundary = size;
      else
	warning ("structure size boundary can only be set to %s",
		 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
	warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
	       || pic_register == HARD_FRAME_POINTER_REGNUM
	       || pic_register == STACK_POINTER_REGNUM
	       || pic_register >= PC_REGNUM)
	error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
	arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
	 experiments seem to show that in pathological cases a setting of
	 1 degrades less severely than a setting of 2.  This could change if
	 other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
	 are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
	 2 cycles to load a constant, and the load scheduler may well
	 reduce that to 1.  */
      if (arm_ld_sched)
	arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
	 to achieve a good schedule, so it's faster to synthesize
	 constants that can be done in two insns.  */
      if (arm_tune_xscale)
	arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
	 that is worth skipping is shorter.  */
      if (arm_is_strong)
	max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
	 is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
	 the default abi) ...  */
      if (!call_used_regs[3])
	return 0;

      /* ... that it isn't being used for a return value (always true
	 until we implement return-in-regs), or for a tail-call
	 argument ...  */
      if (sibling)
	{
	  if (GET_CODE (sibling) != CALL_INSN)
	    abort ();

	  if (find_regno_fusage (sibling, USE, 3))
	    return 0;
	}

      /* ... and that there are no call-saved registers in r0-r2
	 (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
	return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
	 conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
	return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
	return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
	return 0;

  return 1;
}

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
	  != ((~(unsigned HOST_WIDE_INT) 0)
	      & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2 */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
	return TRUE;
      mask =
	(mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
		       >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
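
/* For reference: an ARM data-processing immediate is an 8-bit value
   rotated right by an even amount, which is what the loop above checks
   by stepping a ~0xFF window around the word two bits at a time.  For
   example, 0x000003FC (0xFF << 2) and 0xF000000F (0xFF rotated right
   by 4) are valid immediates, while 0x000001FE (0xFF shifted by an odd
   amount) is not.  */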

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:		/* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
		    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
	  && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
	 constants by pushing them into memory so we must synthesize
	 them in-line, regardless of the cost.  This is only likely to
	 be more costly on chips that have load delay slots and we are
	 compiling without running the scheduler (so no splitting
	 occurred before the final instruction emission).

	 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
	  && !cond
	  && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
				1, 0)
	      > arm_constant_limit + (code != SET)))
	{
	  if (code == SET)
	    {
	      /* Currently SET is the only monadic value for CODE, all
		 the rest are diadic.  */
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
	      return 1;
	    }
	  else
	    {
	      rtx temp = subtargets ? gen_reg_rtx (mode) : target;

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
	      /* For MINUS, the value is subtracted from, since we never
		 have subtraction of a constant.  */
	      if (code == MINUS)
		emit_insn (gen_rtx_SET (VOIDmode, target,
					gen_rtx_MINUS (mode, temp, source)));
	      else
		emit_insn (gen_rtx_SET (VOIDmode, target,
					gen_rtx_fmt_ee (code, mode, source,
							temp)));
	      return 2;
	    }
	}
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
			   1);
}
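
/* Count how many insns the chunking strategy used when synthesizing a
   constant would need to build REMAINDER: the constant is consumed in
   8-bit chunks aligned to even bit positions (the granularity of the
   ARM immediate rotation), scanning downwards from bit I with
   wraparound.  */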
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
	i += 32;
      if (remainder & (3 << (i - 2)))
	{
	  end = i - 8;
	  if (end < 0)
	    end += 32;
	  temp1 = remainder & ((0x0ff << end)
			       | ((i < end) ? (0xff >> (32 - end)) : 0));
	  remainder &= ~temp1;
	  num_insns++;
	  i -= 6;
	}
      i -= 2;
    } while (remainder);
  return num_insns;
}

/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}

/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
		  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
		  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
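
  /* Note: HOST_WIDE_INT may be wider than 32 bits, so REMAINDER keeps
     only the low 32 bits of VAL; the constant is synthesized as a
     32-bit ARM word value.  */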

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
	{
	  if (generate)
	    emit_constant_insn (cond,
				gen_rtx_SET (VOIDmode, target,
					     GEN_INT (ARM_SIGN_EXTEND (val))));
	  return 1;
	}
      if (remainder == 0)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_constant_insn (cond,
				gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      break;

    case AND:
      if (remainder == 0)
	{
	  if (generate)
	    emit_constant_insn (cond,
				gen_rtx_SET (VOIDmode, target, const0_rtx));
	  return 1;
	}
      if (remainder == 0xffffffff)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_constant_insn (cond,
				gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_constant_insn (cond,
				gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      if (remainder == 0xffffffff)
	{
	  if (generate)
	    emit_constant_insn (cond,
				gen_rtx_SET (VOIDmode, target,
					     gen_rtx_NOT (mode, source)));
	  return 1;
	}

      /* We don't know how to handle this yet below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
	 passed as (source + (-val)).  */
      if (remainder == 0)
	{
	  if (generate)
	    emit_constant_insn (cond,
				gen_rtx_SET (VOIDmode, target,
					     gen_rtx_NEG (mode, source)));
	  return 1;
	}
      if (const_ok_for_arm (val))
	{
	  if (generate)
	    emit_constant_insn (cond,
				gen_rtx_SET (VOIDmode, target,
					     gen_rtx_MINUS (mode, GEN_INT (val),
							    source)));
	  return 1;
	}
      can_negate = 1;

      break;

    default:
      abort ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
	emit_constant_insn (cond,
			    gen_rtx_SET (VOIDmode, target,
					 (source
					  ? gen_rtx_fmt_ee (code, mode, source,
							    GEN_INT (val))
					  : GEN_INT (val))));
      return 1;
    }
1745 /* Calculate a few attributes that may be useful for specific
1746 optimizations. */
1747 for (i = 31; i >= 0; i--)
1749 if ((remainder & (1 << i)) == 0)
1750 clear_sign_bit_copies++;
1751 else
1752 break;
1755 for (i = 31; i >= 0; i--)
1757 if ((remainder & (1 << i)) != 0)
1758 set_sign_bit_copies++;
1759 else
1760 break;
1763 for (i = 0; i <= 31; i++)
1765 if ((remainder & (1 << i)) == 0)
1766 clear_zero_bit_copies++;
1767 else
1768 break;
1771 for (i = 0; i <= 31; i++)
1773 if ((remainder & (1 << i)) != 0)
1774 set_zero_bit_copies++;
1775 else
1776 break;
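/* For instance, remainder == 0x0000fff0 would give
   clear_sign_bit_copies == 16 (bits 31..16 clear),
   set_sign_bit_copies == 0, clear_zero_bit_copies == 4
   (bits 3..0 clear) and set_zero_bit_copies == 0.  */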
1779 switch (code)
1781 case SET:
1782 /* See if we can do this by sign_extending a constant that is known
1783 to be negative. This is a good way of doing it, since the shift
1784 may well merge into a subsequent insn. */
1785 if (set_sign_bit_copies > 1)
1787 if (const_ok_for_arm
1788 (temp1 = ARM_SIGN_EXTEND (remainder
1789 << (set_sign_bit_copies - 1))))
1791 if (generate)
1793 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1794 emit_constant_insn (cond,
1795 gen_rtx_SET (VOIDmode, new_src,
1796 GEN_INT (temp1)));
1797 emit_constant_insn (cond,
1798 gen_ashrsi3 (target, new_src,
1799 GEN_INT (set_sign_bit_copies - 1)));
1801 return 2;
1803 /* For an inverted constant, we will need to set the low bits;
1804 these will be shifted out of harm's way. */
1805 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1806 if (const_ok_for_arm (~temp1))
1808 if (generate)
1810 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1811 emit_constant_insn (cond,
1812 gen_rtx_SET (VOIDmode, new_src,
1813 GEN_INT (temp1)));
1814 emit_constant_insn (cond,
1815 gen_ashrsi3 (target, new_src,
1816 GEN_INT (set_sign_bit_copies - 1)));
1818 return 2;
1822 /* See if we can generate this by setting the bottom (or the top)
1823 16 bits, and then shifting these into the other half of the
1824 word. We only look for the simplest cases; to do more would cost
1825 too much. Be careful, however, not to generate this when the
1826 alternative would take fewer insns. */
1827 if (val & 0xffff0000)
1829 temp1 = remainder & 0xffff0000;
1830 temp2 = remainder & 0x0000ffff;
1832 /* Overlaps outside this range are best done using other methods. */
1833 for (i = 9; i < 24; i++)
1835 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1836 && !const_ok_for_arm (temp2))
1838 rtx new_src = (subtargets
1839 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1840 : target);
1841 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1842 source, subtargets, generate);
1843 source = new_src;
1844 if (generate)
1845 emit_constant_insn
1846 (cond,
1847 gen_rtx_SET
1848 (VOIDmode, target,
1849 gen_rtx_IOR (mode,
1850 gen_rtx_ASHIFT (mode, source,
1851 GEN_INT (i)),
1852 source)));
1853 return insns + 1;
1857 /* Don't duplicate cases already considered. */
1858 for (i = 17; i < 24; i++)
1860 if (((temp1 | (temp1 >> i)) == remainder)
1861 && !const_ok_for_arm (temp1))
1863 rtx new_src = (subtargets
1864 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1865 : target);
1866 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1867 source, subtargets, generate);
1868 source = new_src;
1869 if (generate)
1870 emit_constant_insn
1871 (cond,
1872 gen_rtx_SET (VOIDmode, target,
1873 gen_rtx_IOR
1874 (mode,
1875 gen_rtx_LSHIFTRT (mode, source,
1876 GEN_INT (i)),
1877 source)));
1878 return insns + 1;
1882 break;
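/* A sketch of the halfword trick above: for val == 0x01010101,
   temp2 == 0x0101 is not a valid ARM immediate, but the pattern
   repeats at i == 16, so we might emit (roughly):

	mov	rT, #1
	orr	rT, rT, #0x100		@ rT = 0x0101
	orr	rD, rT, rT, lsl #16	@ rD = 0x01010101

   i.e. the recursive call plus one final IOR, three insns in all.  */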
1884 case IOR:
1885 case XOR:
1886 /* If we have IOR or XOR, and the constant can be loaded in a
1887 single instruction, and we can find a temporary to put it in,
1888 then this can be done in two instructions instead of 3-4. */
1889 if (subtargets
1890 /* TARGET can't be NULL if SUBTARGETS is 0. */
1891 || (reload_completed && !reg_mentioned_p (target, source)))
1893 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1895 if (generate)
1897 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1899 emit_constant_insn (cond,
1900 gen_rtx_SET (VOIDmode, sub,
1901 GEN_INT (val)));
1902 emit_constant_insn (cond,
1903 gen_rtx_SET (VOIDmode, target,
1904 gen_rtx_fmt_ee (code, mode,
1905 source, sub)));
1907 return 2;
1911 if (code == XOR)
1912 break;
1914 if (set_sign_bit_copies > 8
1915 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1917 if (generate)
1919 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1920 rtx shift = GEN_INT (set_sign_bit_copies);
1922 emit_constant_insn
1923 (cond,
1924 gen_rtx_SET (VOIDmode, sub,
1925 gen_rtx_NOT (mode,
1926 gen_rtx_ASHIFT (mode,
1927 source,
1928 shift))));
1929 emit_constant_insn
1930 (cond,
1931 gen_rtx_SET (VOIDmode, target,
1932 gen_rtx_NOT (mode,
1933 gen_rtx_LSHIFTRT (mode, sub,
1934 shift))));
1936 return 2;
1939 if (set_zero_bit_copies > 8
1940 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1942 if (generate)
1944 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1945 rtx shift = GEN_INT (set_zero_bit_copies);
1947 emit_constant_insn
1948 (cond,
1949 gen_rtx_SET (VOIDmode, sub,
1950 gen_rtx_NOT (mode,
1951 gen_rtx_LSHIFTRT (mode,
1952 source,
1953 shift))));
1954 emit_constant_insn
1955 (cond,
1956 gen_rtx_SET (VOIDmode, target,
1957 gen_rtx_NOT (mode,
1958 gen_rtx_ASHIFT (mode, sub,
1959 shift))));
1961 return 2;
1964 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1966 if (generate)
1968 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1969 emit_constant_insn (cond,
1970 gen_rtx_SET (VOIDmode, sub,
1971 gen_rtx_NOT (mode, source)));
1972 source = sub;
1973 if (subtargets)
1974 sub = gen_reg_rtx (mode);
1975 emit_constant_insn (cond,
1976 gen_rtx_SET (VOIDmode, sub,
1977 gen_rtx_AND (mode, source,
1978 GEN_INT (temp1))));
1979 emit_constant_insn (cond,
1980 gen_rtx_SET (VOIDmode, target,
1981 gen_rtx_NOT (mode, sub)));
1983 return 3;
1985 break;
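/* The three-instruction fallback above is De Morgan's law:
   NOT (AND (NOT source) (NOT val)) == IOR source val.  So when
   ~val is a valid immediate the sequence is, roughly:

	mvn	rT, rS
	and	rT, rT, #~val
	mvn	rD, rT

   with rS standing for SOURCE and rD for TARGET.  */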
1987 case AND:
1988 /* See if two shifts will do 2 or more insns' worth of work. */
1989 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1991 HOST_WIDE_INT shift_mask = ((0xffffffff
1992 << (32 - clear_sign_bit_copies))
1993 & 0xffffffff);
1995 if ((remainder | shift_mask) != 0xffffffff)
1997 if (generate)
1999 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2000 insns = arm_gen_constant (AND, mode, cond,
2001 remainder | shift_mask,
2002 new_src, source, subtargets, 1);
2003 source = new_src;
2005 else
2007 rtx targ = subtargets ? NULL_RTX : target;
2008 insns = arm_gen_constant (AND, mode, cond,
2009 remainder | shift_mask,
2010 targ, source, subtargets, 0);
2014 if (generate)
2016 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2017 rtx shift = GEN_INT (clear_sign_bit_copies);
2019 emit_insn (gen_ashlsi3 (new_src, source, shift));
2020 emit_insn (gen_lshrsi3 (target, new_src, shift));
2023 return insns + 2;
2026 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2028 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2030 if ((remainder | shift_mask) != 0xffffffff)
2032 if (generate)
2034 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2036 insns = arm_gen_constant (AND, mode, cond,
2037 remainder | shift_mask,
2038 new_src, source, subtargets, 1);
2039 source = new_src;
2041 else
2043 rtx targ = subtargets ? NULL_RTX : target;
2045 insns = arm_gen_constant (AND, mode, cond,
2046 remainder | shift_mask,
2047 targ, source, subtargets, 0);
2051 if (generate)
2053 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2054 rtx shift = GEN_INT (clear_zero_bit_copies);
2056 emit_insn (gen_lshrsi3 (new_src, source, shift));
2057 emit_insn (gen_ashlsi3 (target, new_src, shift));
2060 return insns + 2;
2063 break;
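/* As an example of the two-shift trick: AND with 0x0000ffff
   (neither it nor its complement is a valid immediate) has
   clear_sign_bit_copies == 16, so the code above can emit just:

	mov	rT, rS, lsl #16
	mov	rD, rT, lsr #16

   clearing the top sixteen bits in two instructions.  */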
2065 default:
2066 break;
2069 for (i = 0; i < 32; i++)
2070 if (remainder & (1 << i))
2071 num_bits_set++;
2073 if (code == AND || (can_invert && num_bits_set > 16))
2074 remainder = (~remainder) & 0xffffffff;
2075 else if (code == PLUS && num_bits_set > 16)
2076 remainder = (-remainder) & 0xffffffff;
2077 else
2079 can_invert = 0;
2080 can_negate = 0;
2083 /* Now try and find a way of doing the job in either two or three
2084 instructions.
2085 We start by looking for the largest block of zeros that are aligned on
2086 a 2-bit boundary; we then fill up the temps, wrapping around to the
2087 top of the word when we drop off the bottom.
2088 In the worst case this code should produce no more than four insns. */
2090 int best_start = 0;
2091 int best_consecutive_zeros = 0;
2093 for (i = 0; i < 32; i += 2)
2095 int consecutive_zeros = 0;
2097 if (!(remainder & (3 << i)))
2099 while ((i < 32) && !(remainder & (3 << i)))
2101 consecutive_zeros += 2;
2102 i += 2;
2104 if (consecutive_zeros > best_consecutive_zeros)
2106 best_consecutive_zeros = consecutive_zeros;
2107 best_start = i - consecutive_zeros;
2109 i -= 2;
2113 /* So long as it won't require any more insns to do so, it's
2114 desirable to emit a small constant (in bits 0...9) in the last
2115 insn. This way there is more chance that it can be combined with
2116 a later addressing insn to form a pre-indexed load or store
2117 operation. Consider:
2119 *((volatile int *)0xe0000100) = 1;
2120 *((volatile int *)0xe0000110) = 2;
2122 We want this to wind up as:
2124 mov rA, #0xe0000000
2125 mov rB, #1
2126 str rB, [rA, #0x100]
2127 mov rB, #2
2128 str rB, [rA, #0x110]
2130 rather than having to synthesize both large constants from scratch.
2132 Therefore, we calculate how many insns would be required to emit
2133 the constant starting from `best_start', and also starting from
2134 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2135 yield a shorter sequence, we may as well use zero. */
2136 if (best_start != 0
2137 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2138 && (count_insns_for_constant (remainder, 0) <=
2139 count_insns_for_constant (remainder, best_start)))
2140 best_start = 0;
2142 /* Now start emitting the insns. */
2143 i = best_start;
2146 int end;
2148 if (i <= 0)
2149 i += 32;
2150 if (remainder & (3 << (i - 2)))
2152 end = i - 8;
2153 if (end < 0)
2154 end += 32;
2155 temp1 = remainder & ((0x0ff << end)
2156 | ((i < end) ? (0xff >> (32 - end)) : 0));
2157 remainder &= ~temp1;
2159 if (generate)
2161 rtx new_src, temp1_rtx;
2163 if (code == SET || code == MINUS)
2165 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2166 if (can_invert && code != MINUS)
2167 temp1 = ~temp1;
2169 else
2171 if (remainder && subtargets)
2172 new_src = gen_reg_rtx (mode);
2173 else
2174 new_src = target;
2175 if (can_invert)
2176 temp1 = ~temp1;
2177 else if (can_negate)
2178 temp1 = -temp1;
2181 temp1 = trunc_int_for_mode (temp1, mode);
2182 temp1_rtx = GEN_INT (temp1);
2184 if (code == SET)
2186 else if (code == MINUS)
2187 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2188 else
2189 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2191 emit_constant_insn (cond,
2192 gen_rtx_SET (VOIDmode, new_src,
2193 temp1_rtx));
2194 source = new_src;
2197 if (code == SET)
2199 can_invert = 0;
2200 code = PLUS;
2202 else if (code == MINUS)
2203 code = PLUS;
2205 insns++;
2206 i -= 6;
2208 i -= 2;
2210 while (remainder);
2213 return insns;
2216 /* Canonicalize a comparison so that we are more likely to recognize it.
2217 This can be done for a few constant compares, where we can make the
2218 immediate value easier to load. */
2220 enum rtx_code
2221 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2223 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2225 switch (code)
2227 case EQ:
2228 case NE:
2229 return code;
2231 case GT:
2232 case LE:
2233 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2234 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2236 *op1 = GEN_INT (i + 1);
2237 return code == GT ? GE : LT;
2239 break;
2241 case GE:
2242 case LT:
2243 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2244 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2246 *op1 = GEN_INT (i - 1);
2247 return code == GE ? GT : LE;
2249 break;
2251 case GTU:
2252 case LEU:
2253 if (i != ~((unsigned HOST_WIDE_INT) 0)
2254 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2256 *op1 = GEN_INT (i + 1);
2257 return code == GTU ? GEU : LTU;
2259 break;
2261 case GEU:
2262 case LTU:
2263 if (i != 0
2264 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2266 *op1 = GEN_INT (i - 1);
2267 return code == GEU ? GTU : LEU;
2269 break;
2271 default:
2272 abort ();
2275 return code;
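/* For example, arm_canonicalize_comparison rewrites (GT x 0x3ff)
   as (GE x 0x400): 0x3ff is not a valid ARM immediate but 0x400
   is, so the comparison becomes a single cmp instruction.  */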
2279 /* Define how to find the value returned by a function. */
2281 rtx
2282 arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2284 enum machine_mode mode;
2285 int unsignedp ATTRIBUTE_UNUSED;
2286 rtx r ATTRIBUTE_UNUSED;
2289 mode = TYPE_MODE (type);
2290 /* Promote integer types. */
2291 if (INTEGRAL_TYPE_P (type))
2292 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2293 return LIBCALL_VALUE(mode);
2296 /* Determine the amount of memory needed to store the possible return
2297 registers of an untyped call. */
2298 int
2299 arm_apply_result_size (void)
2301 int size = 16;
2303 if (TARGET_ARM)
2305 if (TARGET_HARD_FLOAT_ABI)
2307 if (TARGET_FPA)
2308 size += 12;
2309 if (TARGET_MAVERICK)
2310 size += 8;
2312 if (TARGET_IWMMXT_ABI)
2313 size += 8;
2316 return size;
2319 /* Decide whether a type should be returned in memory (true)
2320 or in a register (false). This is called by the macro
2321 RETURN_IN_MEMORY. */
2322 int
2323 arm_return_in_memory (tree type)
2325 HOST_WIDE_INT size;
2327 if (!AGGREGATE_TYPE_P (type) &&
2328 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2329 /* All simple types are returned in registers.
2330 For AAPCS, complex types are treated the same as aggregates. */
2331 return 0;
2333 size = int_size_in_bytes (type);
2335 if (arm_abi != ARM_ABI_APCS)
2337 /* ATPCS and later return aggregate types in memory only if they are
2338 larger than a word (or are variable size). */
2339 return (size < 0 || size > UNITS_PER_WORD);
2342 /* For the arm-wince targets we choose to be compatible with Microsoft's
2343 ARM and Thumb compilers, which always return aggregates in memory. */
2344 #ifndef ARM_WINCE
2345 /* All structures/unions bigger than one word are returned in memory.
2346 Also catch the case where int_size_in_bytes returns -1. In this case
2347 the aggregate is either huge or of variable size, and in either case
2348 we will want to return it via memory and not in a register. */
2349 if (size < 0 || size > UNITS_PER_WORD)
2350 return 1;
2352 if (TREE_CODE (type) == RECORD_TYPE)
2354 tree field;
2356 /* For a struct the APCS says that we only return in a register
2357 if the type is 'integer like' and every addressable element
2358 has an offset of zero. For practical purposes this means
2359 that the structure can have at most one non bit-field element
2360 and that this element must be the first one in the structure. */
2362 /* Find the first field, ignoring non FIELD_DECL things which will
2363 have been created by C++. */
2364 for (field = TYPE_FIELDS (type);
2365 field && TREE_CODE (field) != FIELD_DECL;
2366 field = TREE_CHAIN (field))
2367 continue;
2369 if (field == NULL)
2370 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2372 /* Check that the first field is valid for returning in a register. */
2374 /* ... Floats are not allowed. */
2375 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2376 return 1;
2378 /* ... Aggregates that are not themselves valid for returning in
2379 a register are not allowed. */
2380 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2381 return 1;
2383 /* Now check the remaining fields, if any. Only bitfields are allowed,
2384 since they are not addressable. */
2385 for (field = TREE_CHAIN (field);
2386 field;
2387 field = TREE_CHAIN (field))
2389 if (TREE_CODE (field) != FIELD_DECL)
2390 continue;
2392 if (!DECL_BIT_FIELD_TYPE (field))
2393 return 1;
2396 return 0;
2399 if (TREE_CODE (type) == UNION_TYPE)
2401 tree field;
2403 /* Unions can be returned in registers if every element is
2404 integral, or can be returned in an integer register. */
2405 for (field = TYPE_FIELDS (type);
2406 field;
2407 field = TREE_CHAIN (field))
2409 if (TREE_CODE (field) != FIELD_DECL)
2410 continue;
2412 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2413 return 1;
2415 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2416 return 1;
2419 return 0;
2421 #endif /* not ARM_WINCE */
2423 /* Return all other types in memory. */
2424 return 1;
2427 /* Indicate whether or not words of a double are in big-endian order. */
2430 arm_float_words_big_endian (void)
2432 if (TARGET_MAVERICK)
2433 return 0;
2435 /* For FPA, float words are always big-endian. For VFP, floats words
2436 follow the memory system mode. */
2438 if (TARGET_FPA)
2440 return 1;
2443 if (TARGET_VFP)
2444 return (TARGET_BIG_END ? 1 : 0);
2446 return 1;
2449 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2450 for a call to a function whose data type is FNTYPE.
2451 For a library call, FNTYPE is NULL. */
2452 void
2453 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2454 rtx libname ATTRIBUTE_UNUSED,
2455 tree fndecl ATTRIBUTE_UNUSED)
2457 /* On the ARM, the offset starts at 0. */
2458 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2459 pcum->iwmmxt_nregs = 0;
2460 pcum->can_split = true;
2462 pcum->call_cookie = CALL_NORMAL;
2464 if (TARGET_LONG_CALLS)
2465 pcum->call_cookie = CALL_LONG;
2467 /* Check for long call/short call attributes. The attributes
2468 override any command line option. */
2469 if (fntype)
2471 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2472 pcum->call_cookie = CALL_SHORT;
2473 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2474 pcum->call_cookie = CALL_LONG;
2477 /* Varargs vectors are treated the same as long long.
2478 named_count avoids having to change the way arm handles 'named'. */
2479 pcum->named_count = 0;
2480 pcum->nargs = 0;
2482 if (TARGET_REALLY_IWMMXT && fntype)
2484 tree fn_arg;
2486 for (fn_arg = TYPE_ARG_TYPES (fntype);
2487 fn_arg;
2488 fn_arg = TREE_CHAIN (fn_arg))
2489 pcum->named_count += 1;
2491 if (! pcum->named_count)
2492 pcum->named_count = INT_MAX;
2497 /* Return true if mode/type need doubleword alignment. */
2498 bool
2499 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2501 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2502 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2506 /* Determine where to put an argument to a function.
2507 Value is zero to push the argument on the stack,
2508 or a hard register in which to store the argument.
2510 MODE is the argument's machine mode.
2511 TYPE is the data type of the argument (as a tree).
2512 This is null for libcalls where that information may
2513 not be available.
2514 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2515 the preceding args and about the function being called.
2516 NAMED is nonzero if this argument is a named parameter
2517 (otherwise it is an extra parameter matching an ellipsis). */
2519 rtx
2520 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2521 tree type, int named)
2523 int nregs;
2525 /* Varargs vectors are treated the same as long long.
2526 named_count avoids having to change the way arm handles 'named'. */
2527 if (TARGET_IWMMXT_ABI
2528 && arm_vector_mode_supported_p (mode)
2529 && pcum->named_count > pcum->nargs + 1)
2531 if (pcum->iwmmxt_nregs <= 9)
2532 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2533 else
2535 pcum->can_split = false;
2536 return NULL_RTX;
2540 /* Put doubleword aligned quantities in even register pairs. */
2541 if (pcum->nregs & 1
2542 && ARM_DOUBLEWORD_ALIGN
2543 && arm_needs_doubleword_align (mode, type))
2544 pcum->nregs++;
2546 if (mode == VOIDmode)
2547 /* Compute operand 2 of the call insn. */
2548 return GEN_INT (pcum->call_cookie);
2550 /* Only allow splitting an arg between regs and memory if all preceding
2551 args were allocated to regs. For args passed by reference we only count
2552 the reference pointer. */
2553 if (pcum->can_split)
2554 nregs = 1;
2555 else
2556 nregs = ARM_NUM_REGS2 (mode, type);
2558 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2559 return NULL_RTX;
2561 return gen_rtx_REG (mode, pcum->nregs);
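/* For instance, for f (int a, long long b) under an ABI where
   ARM_DOUBLEWORD_ALIGN holds, arm_function_arg puts a in r0; for b
   the odd nregs is bumped from 1 to 2 so that it lands in the even
   register pair r2/r3.  */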
2564 static int
2565 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2566 tree type, bool named ATTRIBUTE_UNUSED)
2568 int nregs = pcum->nregs;
2570 if (arm_vector_mode_supported_p (mode))
2571 return 0;
2573 if (NUM_ARG_REGS > nregs
2574 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2575 && pcum->can_split)
2576 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2578 return 0;
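/* A worked example for arm_arg_partial_bytes, assuming the APCS
   (no doubleword alignment): for f (int, int, int, long long) the
   three ints take r0-r2, so the long long sees nregs == 3;
   NUM_ARG_REGS (4) is greater than 3 but less than 3 + 2, hence
   4 bytes of it travel in r3 and the remaining 4 bytes go on the
   stack.  */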
2581 /* Variable sized types are passed by reference. This is a GCC
2582 extension to the ARM ABI. */
2584 static bool
2585 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2586 enum machine_mode mode ATTRIBUTE_UNUSED,
2587 tree type, bool named ATTRIBUTE_UNUSED)
2589 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2592 /* Encode the current state of the #pragma [no_]long_calls. */
2593 typedef enum
2595 OFF, /* No #pragma [no_]long_calls is in effect. */
2596 LONG, /* #pragma long_calls is in effect. */
2597 SHORT /* #pragma no_long_calls is in effect. */
2598 } arm_pragma_enum;
2600 static arm_pragma_enum arm_pragma_long_calls = OFF;
2602 void
2603 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2605 arm_pragma_long_calls = LONG;
2608 void
2609 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2611 arm_pragma_long_calls = SHORT;
2614 void
2615 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2617 arm_pragma_long_calls = OFF;
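/* Typical usage from C source might look like:

	#pragma long_calls
	extern void far_away (void);	(becomes a long call)
	#pragma long_calls_off

   with #pragma no_long_calls forcing short calls instead.  */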
2620 /* Table of machine attributes. */
2621 const struct attribute_spec arm_attribute_table[] =
2623 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2624 /* Function calls made to this symbol must be done indirectly, because
2625 it may lie outside of the 26 bit addressing range of a normal function
2626 call. */
2627 { "long_call", 0, 0, false, true, true, NULL },
2628 /* Whereas these functions are always known to reside within the 26 bit
2629 addressing range. */
2630 { "short_call", 0, 0, false, true, true, NULL },
2631 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2632 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2633 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2634 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2635 #ifdef ARM_PE
2636 /* ARM/PE has three new attributes:
2637 interfacearm - ?
2638 dllexport - for exporting a function/variable that will live in a dll
2639 dllimport - for importing a function/variable from a dll
2641 Microsoft allows multiple declspecs in one __declspec, separating
2642 them with spaces. We do NOT support this. Instead, use __declspec
2643 multiple times.
2645 { "dllimport", 0, 0, true, false, false, NULL },
2646 { "dllexport", 0, 0, true, false, false, NULL },
2647 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2648 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2649 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2650 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2651 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2652 #endif
2653 { NULL, 0, 0, false, false, false, NULL }
2656 /* Handle an attribute requiring a FUNCTION_DECL;
2657 arguments as in struct attribute_spec.handler. */
2658 static tree
2659 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2660 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2662 if (TREE_CODE (*node) != FUNCTION_DECL)
2664 warning ("%qs attribute only applies to functions",
2665 IDENTIFIER_POINTER (name));
2666 *no_add_attrs = true;
2669 return NULL_TREE;
2672 /* Handle an "interrupt" or "isr" attribute;
2673 arguments as in struct attribute_spec.handler. */
2674 static tree
2675 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2676 bool *no_add_attrs)
2678 if (DECL_P (*node))
2680 if (TREE_CODE (*node) != FUNCTION_DECL)
2682 warning ("%qs attribute only applies to functions",
2683 IDENTIFIER_POINTER (name));
2684 *no_add_attrs = true;
2686 /* FIXME: the argument if any is checked for type attributes;
2687 should it be checked for decl ones? */
2689 else
2691 if (TREE_CODE (*node) == FUNCTION_TYPE
2692 || TREE_CODE (*node) == METHOD_TYPE)
2694 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2696 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2697 *no_add_attrs = true;
2700 else if (TREE_CODE (*node) == POINTER_TYPE
2701 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2702 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2703 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2705 *node = build_variant_type_copy (*node);
2706 TREE_TYPE (*node) = build_type_attribute_variant
2707 (TREE_TYPE (*node),
2708 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2709 *no_add_attrs = true;
2711 else
2713 /* Possibly pass this attribute on from the type to a decl. */
2714 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2715 | (int) ATTR_FLAG_FUNCTION_NEXT
2716 | (int) ATTR_FLAG_ARRAY_NEXT))
2718 *no_add_attrs = true;
2719 return tree_cons (name, args, NULL_TREE);
2721 else
2723 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2728 return NULL_TREE;
2731 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2732 /* Handle the "notshared" attribute. This attribute is another way of
2733 requesting hidden visibility. ARM's compiler supports
2734 "__declspec(notshared)"; we support the same thing via an
2735 attribute. */
2737 static tree
2738 arm_handle_notshared_attribute (tree *node,
2739 tree name ATTRIBUTE_UNUSED,
2740 tree args ATTRIBUTE_UNUSED,
2741 int flags ATTRIBUTE_UNUSED,
2742 bool *no_add_attrs)
2744 tree decl = TYPE_NAME (*node);
2746 if (decl)
2748 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2749 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2750 *no_add_attrs = false;
2752 return NULL_TREE;
2754 #endif
2756 /* Return 0 if the attributes for two types are incompatible, 1 if they
2757 are compatible, and 2 if they are nearly compatible (which causes a
2758 warning to be generated). */
2759 static int
2760 arm_comp_type_attributes (tree type1, tree type2)
2762 int l1, l2, s1, s2;
2764 /* Check for mismatch of non-default calling convention. */
2765 if (TREE_CODE (type1) != FUNCTION_TYPE)
2766 return 1;
2768 /* Check for mismatched call attributes. */
2769 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2770 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2771 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2772 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2774 /* Only bother to check if an attribute is defined. */
2775 if (l1 | l2 | s1 | s2)
2777 /* If one type has an attribute, the other must have the same attribute. */
2778 if ((l1 != l2) || (s1 != s2))
2779 return 0;
2781 /* Disallow mixed attributes. */
2782 if ((l1 & s2) || (l2 & s1))
2783 return 0;
2786 /* Check for mismatched ISR attribute. */
2787 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2788 if (! l1)
2789 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2790 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2791 if (! l2)
2792 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2793 if (l1 != l2)
2794 return 0;
2796 return 1;
2799 /* Encode long_call or short_call attribute by prefixing
2800 symbol name in DECL with a special character FLAG. */
2801 void
2802 arm_encode_call_attribute (tree decl, int flag)
2804 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2805 int len = strlen (str);
2806 char * newstr;
2808 /* Do not allow weak functions to be treated as short call. */
2809 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2810 return;
2812 newstr = alloca (len + 2);
2813 newstr[0] = flag;
2814 strcpy (newstr + 1, str);
2816 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2817 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2820 /* Assigns default attributes to newly defined type. This is used to
2821 set short_call/long_call attributes for function types of
2822 functions defined inside corresponding #pragma scopes. */
2823 static void
2824 arm_set_default_type_attributes (tree type)
2826 /* Add __attribute__ ((long_call)) to all functions when inside
2827 #pragma long_calls, or __attribute__ ((short_call)) when inside
2828 #pragma no_long_calls. */
2829 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2831 tree type_attr_list, attr_name;
2832 type_attr_list = TYPE_ATTRIBUTES (type);
2834 if (arm_pragma_long_calls == LONG)
2835 attr_name = get_identifier ("long_call");
2836 else if (arm_pragma_long_calls == SHORT)
2837 attr_name = get_identifier ("short_call");
2838 else
2839 return;
2841 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2842 TYPE_ATTRIBUTES (type) = type_attr_list;
2846 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2847 defined within the current compilation unit. If this cannot be
2848 determined, then 0 is returned. */
2849 static int
2850 current_file_function_operand (rtx sym_ref)
2852 /* This is a bit of a fib. A function will have a short call flag
2853 applied to its name if it has the short call attribute, or it has
2854 already been defined within the current compilation unit. */
2855 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2856 return 1;
2858 /* The current function is always defined within the current compilation
2859 unit. If it is a weak definition, however, then this may not be the real
2860 definition of the function, and so we have to say no. */
2861 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2862 && !DECL_WEAK (current_function_decl))
2863 return 1;
2865 /* We cannot make the determination - default to returning 0. */
2866 return 0;
2869 /* Return nonzero if a 32 bit "long_call" should be generated for
2870 this call. We generate a long_call if the function:
2872 a. has an __attribute__ ((long_call))
2873 or b. is within the scope of a #pragma long_calls
2874 or c. the -mlong-calls command line switch has been specified,
2875 and either:
2876 1. -ffunction-sections is in effect
2877 or 2. the current function has __attribute__ ((section))
2878 or 3. the target function has __attribute__ ((section))
2880 However we do not generate a long call if the function:
2882 d. has an __attribute__ ((short_call))
2883 or e. is inside the scope of a #pragma no_long_calls
2884 or f. is defined within the current compilation unit.
2886 This function will be called by C fragments contained in the machine
2887 description file. SYM_REF and CALL_COOKIE correspond to the matched
2888 rtl operands. CALL_SYMBOL is used to distinguish between
2889 two different callers of the function. It is set to 1 in the
2890 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2891 and "call_value" patterns. This is because of the difference in the
2892 SYM_REFs passed by these patterns. */
2893 int
2894 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2896 if (!call_symbol)
2898 if (GET_CODE (sym_ref) != MEM)
2899 return 0;
2901 sym_ref = XEXP (sym_ref, 0);
2904 if (GET_CODE (sym_ref) != SYMBOL_REF)
2905 return 0;
2907 if (call_cookie & CALL_SHORT)
2908 return 0;
2910 if (TARGET_LONG_CALLS)
2912 if (flag_function_sections
2913 || DECL_SECTION_NAME (current_function_decl))
2914 /* c.3 is handled by the definition of the
2915 ARM_DECLARE_FUNCTION_SIZE macro. */
2916 return 1;
2919 if (current_file_function_operand (sym_ref))
2920 return 0;
2922 return (call_cookie & CALL_LONG)
2923 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2924 || TARGET_LONG_CALLS;
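/* For example, a call to

	extern void far_func (void) __attribute__ ((long_call));

   is compiled as an indirect sequence (load the address into a
   register, then branch via that register) instead of a single BL,
   since BL only has a 26 bit range.  */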
2927 /* Return nonzero if it is ok to make a tail-call to DECL. */
2928 static bool
2929 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2931 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2933 if (cfun->machine->sibcall_blocked)
2934 return false;
2936 /* Never tailcall something for which we have no decl, or if we
2937 are in Thumb mode. */
2938 if (decl == NULL || TARGET_THUMB)
2939 return false;
2941 /* Get the calling method. */
2942 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2943 call_type = CALL_SHORT;
2944 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2945 call_type = CALL_LONG;
2947 /* Cannot tail-call to long calls, since these are out of range of
2948 a branch instruction. However, if not compiling PIC, we know
2949 we can reach the symbol if it is in this compilation unit. */
2950 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2951 return false;
2953 /* If we are interworking and the function is not declared static
2954 then we can't tail-call it unless we know that it exists in this
2955 compilation unit (since it might be a Thumb routine). */
2956 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2957 return false;
2959 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2960 if (IS_INTERRUPT (arm_current_func_type ()))
2961 return false;
2963 /* Everything else is ok. */
2964 return true;
2968 /* Addressing mode support functions. */
2970 /* Return nonzero if X is a legitimate immediate operand when compiling
2971 for PIC. */
2972 int
2973 legitimate_pic_operand_p (rtx x)
2975 if (CONSTANT_P (x)
2976 && flag_pic
2977 && (GET_CODE (x) == SYMBOL_REF
2978 || (GET_CODE (x) == CONST
2979 && GET_CODE (XEXP (x, 0)) == PLUS
2980 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2981 return 0;
2983 return 1;
2986 rtx
2987 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2989 if (GET_CODE (orig) == SYMBOL_REF
2990 || GET_CODE (orig) == LABEL_REF)
2992 #ifndef AOF_ASSEMBLER
2993 rtx pic_ref, address;
2994 #endif
2995 rtx insn;
2996 int subregs = 0;
2998 if (reg == 0)
3000 if (no_new_pseudos)
3001 abort ();
3002 else
3003 reg = gen_reg_rtx (Pmode);
3005 subregs = 1;
3008 #ifdef AOF_ASSEMBLER
3009 /* The AOF assembler can generate relocations for these directly, and
3010 understands that the PIC register has to be added into the offset. */
3011 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3012 #else
3013 if (subregs)
3014 address = gen_reg_rtx (Pmode);
3015 else
3016 address = reg;
3018 if (TARGET_ARM)
3019 emit_insn (gen_pic_load_addr_arm (address, orig));
3020 else
3021 emit_insn (gen_pic_load_addr_thumb (address, orig));
3023 if ((GET_CODE (orig) == LABEL_REF
3024 || (GET_CODE (orig) == SYMBOL_REF &&
3025 SYMBOL_REF_LOCAL_P (orig)))
3026 && NEED_GOT_RELOC)
3027 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3028 else
3030 pic_ref = gen_const_mem (Pmode,
3031 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3032 address));
3035 insn = emit_move_insn (reg, pic_ref);
3036 #endif
3037 current_function_uses_pic_offset_table = 1;
3038 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3039 by the loop pass. */
3040 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3041 REG_NOTES (insn));
3042 return reg;
3044 else if (GET_CODE (orig) == CONST)
3046 rtx base, offset;
3048 if (GET_CODE (XEXP (orig, 0)) == PLUS
3049 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3050 return orig;
3052 if (reg == 0)
3054 if (no_new_pseudos)
3055 abort ();
3056 else
3057 reg = gen_reg_rtx (Pmode);
3060 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3062 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3063 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3064 base == reg ? 0 : reg);
3066 else
3067 abort ();
3069 if (GET_CODE (offset) == CONST_INT)
3071 /* The base register doesn't really matter; we only want to
3072 test the index for the appropriate mode. */
3073 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3075 if (!no_new_pseudos)
3076 offset = force_reg (Pmode, offset);
3077 else
3078 abort ();
3081 if (GET_CODE (offset) == CONST_INT)
3082 return plus_constant (base, INTVAL (offset));
3085 if (GET_MODE_SIZE (mode) > 4
3086 && (GET_MODE_CLASS (mode) == MODE_INT
3087 || TARGET_SOFT_FLOAT))
3089 emit_insn (gen_addsi3 (reg, base, offset));
3090 return reg;
3093 return gen_rtx_PLUS (Pmode, base, offset);
3096 return orig;
3100 /* Find a spare low register to use during the prolog of a function. */
3102 static int
3103 thumb_find_work_register (unsigned long pushed_regs_mask)
3105 int reg;
3107 /* Check the argument registers first as these are call-used. The
3108 register allocation order means that sometimes r3 might be used
3109 but earlier argument registers might not, so check them all. */
3110 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3111 if (!regs_ever_live[reg])
3112 return reg;
3114 /* Before going on to check the call-saved registers we can try a couple
3115 more ways of deducing that r3 is available. The first is when we are
3116 pushing anonymous arguments onto the stack and we have fewer than 4
3117 registers' worth of fixed arguments (*). In this case r3 will be part of
3118 the variable argument list and so we can be sure that it will be
3119 pushed right at the start of the function. Hence it will be available
3120 for the rest of the prologue.
3121 (*): i.e. current_function_pretend_args_size is greater than 0. */
3122 if (cfun->machine->uses_anonymous_args
3123 && current_function_pretend_args_size > 0)
3124 return LAST_ARG_REGNUM;
3126 /* The other case is when we have fixed arguments but fewer than 4 registers'
3127 worth. In this case r3 might be used in the body of the function, but
3128 it is not being used to convey an argument into the function. In theory
3129 we could just check current_function_args_size to see how many bytes are
3130 being passed in argument registers, but it seems that it is unreliable.
3131 Sometimes it will have the value 0 when in fact arguments are being
3132 passed. (See testcase execute/20021111-1.c for an example). So we also
3133 check the args_info.nregs field as well. The problem with this field is
3134 that it makes no allowances for arguments that are passed to the
3135 function but which are not used. Hence we could miss an opportunity
3136 when a function has an unused argument in r3. But it is better to be
3137 safe than sorry. */
3138 if (! cfun->machine->uses_anonymous_args
3139 && current_function_args_size >= 0
3140 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3141 && cfun->args_info.nregs < 4)
3142 return LAST_ARG_REGNUM;
3144 /* Otherwise look for a call-saved register that is going to be pushed. */
3145 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3146 if (pushed_regs_mask & (1 << reg))
3147 return reg;
3149 /* Something went wrong - thumb_compute_save_reg_mask()
3150 should have arranged for a suitable register to be pushed. */
3151 abort ();
3155 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3156 low register. */
3158 void
3159 arm_load_pic_register (unsigned int scratch)
3161 #ifndef AOF_ASSEMBLER
3162 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3163 rtx global_offset_table;
3165 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3166 return;
3168 if (!flag_pic)
3169 abort ();
3171 l1 = gen_label_rtx ();
3173 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3174 /* On the ARM the PC register contains 'dot + 8' at the time of the
3175 addition; on the Thumb it is 'dot + 4'. */
3176 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3177 if (GOT_PCREL)
3178 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3179 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3180 else
3181 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3183 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3185 if (TARGET_ARM)
3187 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3188 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3190 else
3192 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3194 /* We will have pushed the pic register, so should always be
3195 able to find a work register. */
3196 pic_tmp = gen_rtx_REG (SImode, scratch);
3197 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3198 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3200 else
3201 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3202 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3205 /* Need to emit this whether or not we obey regdecls,
3206 since setjmp/longjmp can cause life info to screw up. */
3207 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3208 #endif /* AOF_ASSEMBLER */
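/* On ELF targets the sequence emitted above is roughly:

	ldr	rP, .LPIC0
   .LPIC1:
	add	rP, pc, rP
	...
   .LPIC0:
	.word	_GLOBAL_OFFSET_TABLE_ - (.LPIC1 + 8)

   where rP is the PIC register and the + 8 is the ARM-mode PC
   offset mentioned above (+ 4 in Thumb mode).  */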
3212 /* Return nonzero if X is valid as an ARM state addressing register. */
3213 static int
3214 arm_address_register_rtx_p (rtx x, int strict_p)
3216 int regno;
3218 if (GET_CODE (x) != REG)
3219 return 0;
3221 regno = REGNO (x);
3223 if (strict_p)
3224 return ARM_REGNO_OK_FOR_BASE_P (regno);
3226 return (regno <= LAST_ARM_REGNUM
3227 || regno >= FIRST_PSEUDO_REGISTER
3228 || regno == FRAME_POINTER_REGNUM
3229 || regno == ARG_POINTER_REGNUM);
3232 /* Return nonzero if X is a valid ARM state address operand. */
3233 int
3234 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3235 int strict_p)
3237 bool use_ldrd;
3238 enum rtx_code code = GET_CODE (x);
3240 if (arm_address_register_rtx_p (x, strict_p))
3241 return 1;
3243 use_ldrd = (TARGET_LDRD
3244 && (mode == DImode
3245 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3247 if (code == POST_INC || code == PRE_DEC
3248 || ((code == PRE_INC || code == POST_DEC)
3249 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3250 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3252 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3253 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3254 && GET_CODE (XEXP (x, 1)) == PLUS
3255 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3257 rtx addend = XEXP (XEXP (x, 1), 1);
3259 /* Don't allow ldrd post increment by register because it's hard
3260 to fixup invalid register choices. */
3261 if (use_ldrd
3262 && GET_CODE (x) == POST_MODIFY
3263 && GET_CODE (addend) == REG)
3264 return 0;
3266 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3267 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3270 /* After reload, constants split into minipools will have addresses
3271 from a LABEL_REF. */
3272 else if (reload_completed
3273 && (code == LABEL_REF
3274 || (code == CONST
3275 && GET_CODE (XEXP (x, 0)) == PLUS
3276 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3277 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3278 return 1;
3280 else if (mode == TImode)
3281 return 0;
3283 else if (code == PLUS)
3285 rtx xop0 = XEXP (x, 0);
3286 rtx xop1 = XEXP (x, 1);
3288 return ((arm_address_register_rtx_p (xop0, strict_p)
3289 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3290 || (arm_address_register_rtx_p (xop1, strict_p)
3291 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3294 #if 0
3295 /* Reload currently can't handle MINUS, so disable this for now. */
3296 else if (GET_CODE (x) == MINUS)
3298 rtx xop0 = XEXP (x, 0);
3299 rtx xop1 = XEXP (x, 1);
3301 return (arm_address_register_rtx_p (xop0, strict_p)
3302 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3304 #endif
3306 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3307 && code == SYMBOL_REF
3308 && CONSTANT_POOL_ADDRESS_P (x)
3309 && ! (flag_pic
3310 && symbol_mentioned_p (get_pool_constant (x))))
3311 return 1;
3313 return 0;
3316 /* Return nonzero if INDEX is valid for an address index operand in
3317 ARM state. */
3318 static int
3319 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3320 int strict_p)
3322 HOST_WIDE_INT range;
3323 enum rtx_code code = GET_CODE (index);
3325 /* Standard coprocessor addressing modes. */
3326 if (TARGET_HARD_FLOAT
3327 && (TARGET_FPA || TARGET_MAVERICK)
3328 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3329 || (TARGET_MAVERICK && mode == DImode)))
3330 return (code == CONST_INT && INTVAL (index) < 1024
3331 && INTVAL (index) > -1024
3332 && (INTVAL (index) & 3) == 0);
3334 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3335 return (code == CONST_INT
3336 && INTVAL (index) < 1024
3337 && INTVAL (index) > -1024
3338 && (INTVAL (index) & 3) == 0);
3340 if (arm_address_register_rtx_p (index, strict_p)
3341 && (GET_MODE_SIZE (mode) <= 4))
3342 return 1;
3344 if (mode == DImode || mode == DFmode)
3346 if (code == CONST_INT)
3348 HOST_WIDE_INT val = INTVAL (index);
3350 if (TARGET_LDRD)
3351 return val > -256 && val < 256;
3352 else
3353 return val > -4096 && val < 4092;
3356 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3359 if (GET_MODE_SIZE (mode) <= 4
3360 && ! (arm_arch4
3361 && (mode == HImode
3362 || (mode == QImode && outer == SIGN_EXTEND))))
3364 if (code == MULT)
3366 rtx xiop0 = XEXP (index, 0);
3367 rtx xiop1 = XEXP (index, 1);
3369 return ((arm_address_register_rtx_p (xiop0, strict_p)
3370 && power_of_two_operand (xiop1, SImode))
3371 || (arm_address_register_rtx_p (xiop1, strict_p)
3372 && power_of_two_operand (xiop0, SImode)));
3374 else if (code == LSHIFTRT || code == ASHIFTRT
3375 || code == ASHIFT || code == ROTATERT)
3377 rtx op = XEXP (index, 1);
3379 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3380 && GET_CODE (op) == CONST_INT
3381 && INTVAL (op) > 0
3382 && INTVAL (op) <= 31);
3386 /* For ARM v4 we may be doing a sign-extend operation during the
3387 load. */
3388 if (arm_arch4)
3390 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3391 range = 256;
3392 else
3393 range = 4096;
3395 else
3396 range = (mode == HImode) ? 4095 : 4096;
3398 return (code == CONST_INT
3399 && INTVAL (index) < range
3400 && INTVAL (index) > -range);
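/* Examples of indices arm_legitimate_index_p accepts for SImode:
   [rB, #4095], [rB, #-4095], [rB, rI] and shifted forms such as
   [rB, rI, lsl #2].  For HImode on ARMv4, where LDRH is used, the
   immediate range shrinks to +/-255.  */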
3403 /* Return nonzero if X is valid as a Thumb state base register. */
3404 static int
3405 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3407 int regno;
3409 if (GET_CODE (x) != REG)
3410 return 0;
3412 regno = REGNO (x);
3414 if (strict_p)
3415 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3417 return (regno <= LAST_LO_REGNUM
3418 || regno > LAST_VIRTUAL_REGISTER
3419 || regno == FRAME_POINTER_REGNUM
3420 || (GET_MODE_SIZE (mode) >= 4
3421 && (regno == STACK_POINTER_REGNUM
3422 || regno >= FIRST_PSEUDO_REGISTER
3423 || x == hard_frame_pointer_rtx
3424 || x == arg_pointer_rtx)));
3427 /* Return nonzero if x is a legitimate index register. This is the case
3428 for any base register that can access a QImode object. */
3429 inline static int
3430 thumb_index_register_rtx_p (rtx x, int strict_p)
3432 return thumb_base_register_rtx_p (x, QImode, strict_p);
3435 /* Return nonzero if x is a legitimate Thumb-state address.
3437 The AP may be eliminated to either the SP or the FP, so we use the
3438 least common denominator, i.e. SImode, and offsets from 0 to 64.
3440 ??? Verify whether the above is the right approach.
3442 ??? Also, the FP may be eliminated to the SP, so perhaps that
3443 needs special handling also.
3445 ??? Look at how the mips16 port solves this problem. It probably uses
3446 better ways to solve some of these problems.
3448 Although it is not incorrect, we don't accept QImode and HImode
3449 addresses based on the frame pointer or arg pointer until the
3450 reload pass starts. This is so that eliminating such addresses
3451 into stack based ones won't produce impossible code. */
3452 int
3453 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3455 /* ??? Not clear if this is right. Experiment. */
3456 if (GET_MODE_SIZE (mode) < 4
3457 && !(reload_in_progress || reload_completed)
3458 && (reg_mentioned_p (frame_pointer_rtx, x)
3459 || reg_mentioned_p (arg_pointer_rtx, x)
3460 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3461 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3462 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3463 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3464 return 0;
3466 /* Accept any base register. SP only in SImode or larger. */
3467 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3468 return 1;
3470 /* This is PC relative data before arm_reorg runs. */
3471 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3472 && GET_CODE (x) == SYMBOL_REF
3473 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3474 return 1;
3476 /* This is PC relative data after arm_reorg runs. */
3477 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3478 && (GET_CODE (x) == LABEL_REF
3479 || (GET_CODE (x) == CONST
3480 && GET_CODE (XEXP (x, 0)) == PLUS
3481 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3482 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3483 return 1;
3485 /* Post-inc indexing only supported for SImode and larger. */
3486 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3487 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3488 return 1;
3490 else if (GET_CODE (x) == PLUS)
3492 /* REG+REG address can be any two index registers. */
3493 /* We disallow FRAME+REG addressing since we know that FRAME
3494 will be replaced with STACK, and SP relative addressing only
3495 permits SP+OFFSET. */
3496 if (GET_MODE_SIZE (mode) <= 4
3497 && XEXP (x, 0) != frame_pointer_rtx
3498 && XEXP (x, 1) != frame_pointer_rtx
3499 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3500 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3501 return 1;
3503 /* REG+const has 5-7 bit offset for non-SP registers. */
3504 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3505 || XEXP (x, 0) == arg_pointer_rtx)
3506 && GET_CODE (XEXP (x, 1)) == CONST_INT
3507 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3508 return 1;
3510 /* REG+const has 10 bit offset for SP, but only SImode and
3511 larger is supported. */
3512 /* ??? Should probably check for DI/DFmode overflow here
3513 just like GO_IF_LEGITIMATE_OFFSET does. */
3514 else if (GET_CODE (XEXP (x, 0)) == REG
3515 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3516 && GET_MODE_SIZE (mode) >= 4
3517 && GET_CODE (XEXP (x, 1)) == CONST_INT
3518 && INTVAL (XEXP (x, 1)) >= 0
3519 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3520 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3521 return 1;
3523 else if (GET_CODE (XEXP (x, 0)) == REG
3524 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3525 && GET_MODE_SIZE (mode) >= 4
3526 && GET_CODE (XEXP (x, 1)) == CONST_INT
3527 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3528 return 1;
3531 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3532 && GET_MODE_SIZE (mode) == 4
3533 && GET_CODE (x) == SYMBOL_REF
3534 && CONSTANT_POOL_ADDRESS_P (x)
3535 && !(flag_pic
3536 && symbol_mentioned_p (get_pool_constant (x))))
3537 return 1;
3539 return 0;
3542 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3543 instruction of mode MODE. */
3544 int
3545 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3547 switch (GET_MODE_SIZE (mode))
3549 case 1:
3550 return val >= 0 && val < 32;
3552 case 2:
3553 return val >= 0 && val < 64 && (val & 1) == 0;
3555 default:
3556 return (val >= 0
3557 && (val + GET_MODE_SIZE (mode)) <= 128
3558 && (val & 3) == 0);
3562 /* Try machine-dependent ways of modifying an illegitimate address
3563 to be legitimate. If we find one, return the new, valid address. */
3564 rtx
3565 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3567 if (GET_CODE (x) == PLUS)
3569 rtx xop0 = XEXP (x, 0);
3570 rtx xop1 = XEXP (x, 1);
3572 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3573 xop0 = force_reg (SImode, xop0);
3575 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3576 xop1 = force_reg (SImode, xop1);
3578 if (ARM_BASE_REGISTER_RTX_P (xop0)
3579 && GET_CODE (xop1) == CONST_INT)
3581 HOST_WIDE_INT n, low_n;
3582 rtx base_reg, val;
3583 n = INTVAL (xop1);
3585 /* VFP addressing modes actually allow greater offsets, but for
3586 now we just stick with the lowest common denominator. */
3587 if (mode == DImode
3588 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3590 low_n = n & 0x0f;
3591 n &= ~0x0f;
3592 if (low_n > 4)
3594 n += 16;
3595 low_n -= 16;
3598 else
3600 low_n = ((mode) == TImode ? 0
3601 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3602 n -= low_n;
3605 base_reg = gen_reg_rtx (SImode);
3606 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3607 GEN_INT (n)), NULL_RTX);
3608 emit_move_insn (base_reg, val);
3609 x = (low_n == 0 ? base_reg
3610 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3612 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3613 x = gen_rtx_PLUS (SImode, xop0, xop1);
3616 /* XXX We don't allow MINUS any more -- see comment in
3617 arm_legitimate_address_p (). */
3618 else if (GET_CODE (x) == MINUS)
3620 rtx xop0 = XEXP (x, 0);
3621 rtx xop1 = XEXP (x, 1);
3623 if (CONSTANT_P (xop0))
3624 xop0 = force_reg (SImode, xop0);
3626 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3627 xop1 = force_reg (SImode, xop1);
3629 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3630 x = gen_rtx_MINUS (SImode, xop0, xop1);
3633 if (flag_pic)
3635 /* We need to find and carefully transform any SYMBOL and LABEL
3636 references; so go back to the original address expression. */
3637 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3639 if (new_x != orig_x)
3640 x = new_x;
3643 return x;
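/* As a sketch of the splitting in arm_legitimize_address: for an
   SImode access at rB + 0x12345 we get low_n == 0x345 and
   n == 0x12000, so the code might emit

	add	rT, rB, #0x12000
	ldr	rD, [rT, #0x345]

   keeping the residual offset within the 12 bit load/store range.  */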
3647 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3648 to be legitimate. If we find one, return the new, valid address. */
3649 rtx
3650 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3652 if (GET_CODE (x) == PLUS
3653 && GET_CODE (XEXP (x, 1)) == CONST_INT
3654 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3655 || INTVAL (XEXP (x, 1)) < 0))
3657 rtx xop0 = XEXP (x, 0);
3658 rtx xop1 = XEXP (x, 1);
3659 HOST_WIDE_INT offset = INTVAL (xop1);
3661 /* Try to fold the offset into a biasing of the base register and
3662 then offsetting that. Don't do this when optimizing for space
3663 since it can cause too many CSEs. */
3664 if (optimize_size && offset >= 0
3665 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3667 HOST_WIDE_INT delta;
3669 if (offset >= 256)
3670 delta = offset - (256 - GET_MODE_SIZE (mode));
3671 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3672 delta = 31 * GET_MODE_SIZE (mode);
3673 else
3674 delta = offset & (~31 * GET_MODE_SIZE (mode));
3676 xop0 = force_operand (plus_constant (xop0, offset - delta),
3677 NULL_RTX);
3678 x = plus_constant (xop0, delta);
3680 else if (offset < 0 && offset > -256)
3681 /* Small negative offsets are best done with a subtract before the
3682 dereference; forcing these into a register normally takes two
3683 instructions. */
3684 x = force_operand (x, NULL_RTX);
3685 else
3687 /* For the remaining cases, force the constant into a register. */
3688 xop1 = force_reg (SImode, xop1);
3689 x = gen_rtx_PLUS (SImode, xop0, xop1);
3692 else if (GET_CODE (x) == PLUS
3693 && s_register_operand (XEXP (x, 1), SImode)
3694 && !s_register_operand (XEXP (x, 0), SImode))
3696 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3698 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3701 if (flag_pic)
3703 /* We need to find and carefully transform any SYMBOL and LABEL
3704 references; so go back to the original address expression. */
3705 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3707 if (new_x != orig_x)
3708 x = new_x;
3711 return x;
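/* A worked example of the biasing in thumb_legitimize_address,
   when compiling for size: an SImode load at rB + 300 is out of
   range (word offsets stop at 124), so delta == 300 - (256 - 4)
   == 48 and we might get, roughly:

	mov	rT, rB
	add	rT, #252
	ldr	rD, [rT, #48]

   using the full 8 bit Thumb ADD immediate to bias the base.  */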
3716 #define REG_OR_SUBREG_REG(X) \
3717 (GET_CODE (X) == REG \
3718 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3720 #define REG_OR_SUBREG_RTX(X) \
3721 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3723 #ifndef COSTS_N_INSNS
3724 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3725 #endif
3726 static inline int
3727 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3729 enum machine_mode mode = GET_MODE (x);
3731 switch (code)
3733 case ASHIFT:
3734 case ASHIFTRT:
3735 case LSHIFTRT:
3736 case ROTATERT:
3737 case PLUS:
3738 case MINUS:
3739 case COMPARE:
3740 case NEG:
3741 case NOT:
3742 return COSTS_N_INSNS (1);
3744 case MULT:
3745 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3747 int cycles = 0;
3748 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3750 while (i)
3752 i >>= 2;
3753 cycles++;
3755 return COSTS_N_INSNS (2) + cycles;
3757 return COSTS_N_INSNS (1) + 16;
3759 case SET:
3760 return (COSTS_N_INSNS (1)
3761 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3762 + (GET_CODE (SET_DEST (x)) == MEM)));
3764 case CONST_INT:
3765 if (outer == SET)
3767 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3768 return 0;
3769 if (thumb_shiftable_const (INTVAL (x)))
3770 return COSTS_N_INSNS (2);
3771 return COSTS_N_INSNS (3);
3773 else if ((outer == PLUS || outer == COMPARE)
3774 && INTVAL (x) < 256 && INTVAL (x) > -256)
3775 return 0;
3776 else if (outer == AND
3777 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3778 return COSTS_N_INSNS (1);
3779 else if (outer == ASHIFT || outer == ASHIFTRT
3780 || outer == LSHIFTRT)
3781 return 0;
3782 return COSTS_N_INSNS (2);
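/* For example: in a SET, a constant under 256 is a single movs and
adds nothing; a value such as 0x1f00 (an 8-bit constant shifted into
place, assuming thumb_shiftable_const accepts such values) is costed
as a mov/lsl pair; anything else is costed as three insns, e.g. a
literal-pool load. */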
3784 case CONST:
3785 case CONST_DOUBLE:
3786 case LABEL_REF:
3787 case SYMBOL_REF:
3788 return COSTS_N_INSNS (3);
3790 case UDIV:
3791 case UMOD:
3792 case DIV:
3793 case MOD:
3794 return 100;
3796 case TRUNCATE:
3797 return 99;
3799 case AND:
3800 case XOR:
3801 case IOR:
3802 /* XXX guess. */
3803 return 8;
3805 case MEM:
3806 /* XXX another guess. */
3807 /* Memory costs quite a lot for the first word, but subsequent words
3808 load at the equivalent of a single insn each. */
3809 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3810 + ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3811 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0))) ? 4 : 0));
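/* E.g. an SImode load is costed at 10 and a DImode load at
10 + 4 * 1 = 14, with 4 more when the address is a constant-pool
reference. */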
3813 case IF_THEN_ELSE:
3814 /* XXX a guess. */
3815 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3816 return 14;
3817 return 2;
3819 case ZERO_EXTEND:
3820 /* XXX still guessing. */
3821 switch (GET_MODE (XEXP (x, 0)))
3823 case QImode:
3824 return (1 + (mode == DImode ? 4 : 0)
3825 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3827 case HImode:
3828 return (4 + (mode == DImode ? 4 : 0)
3829 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3831 case SImode:
3832 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3834 default:
3835 return 99;
3838 default:
3839 return 99;
3844 /* Worker routine for arm_rtx_costs. */
3845 static inline int
3846 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3848 enum machine_mode mode = GET_MODE (x);
3849 enum rtx_code subcode;
3850 int extra_cost;
3852 switch (code)
3854 case MEM:
3855 /* Memory costs quite a lot for the first word, but subsequent words
3856 load at the equivalent of a single insn each. */
3857 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3858 + (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3859 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)) ? 4 : 0));
3861 case DIV:
3862 case MOD:
3863 case UDIV:
3864 case UMOD:
3865 return optimize_size ? COSTS_N_INSNS (2) : 100;
3867 case ROTATE:
3868 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3869 return 4;
3870 /* Fall through */
3871 case ROTATERT:
3872 if (mode != SImode)
3873 return 8;
3874 /* Fall through */
3875 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3876 if (mode == DImode)
3877 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3878 + ((GET_CODE (XEXP (x, 0)) == REG
3879 || (GET_CODE (XEXP (x, 0)) == SUBREG
3880 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3881 ? 0 : 8));
3882 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3883 || (GET_CODE (XEXP (x, 0)) == SUBREG
3884 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3885 ? 0 : 4)
3886 + ((GET_CODE (XEXP (x, 1)) == REG
3887 || (GET_CODE (XEXP (x, 1)) == SUBREG
3888 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3889 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3890 ? 0 : 4));
3892 case MINUS:
3893 if (mode == DImode)
3894 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3895 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3896 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3897 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3898 ? 0 : 8));
3900 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3901 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3902 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3903 && arm_const_double_rtx (XEXP (x, 1))))
3904 ? 0 : 8)
3905 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3906 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3907 && arm_const_double_rtx (XEXP (x, 0))))
3908 ? 0 : 8));
3910 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3911 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3912 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3913 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3914 || subcode == ASHIFTRT || subcode == LSHIFTRT
3915 || subcode == ROTATE || subcode == ROTATERT
3916 || (subcode == MULT
3917 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3918 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3919 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3920 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3921 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3922 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3923 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3924 return 1;
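/* These forms fold into a single data-processing insn; e.g.
(minus (reg r1) (ashift (reg r2) (const_int 2))) can be emitted as
"sub r0, r1, r2, lsl #2", and a MULT by a power of two is costed the
same way since it can be done as a shift. */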
3925 /* Fall through */
3927 case PLUS:
3928 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3929 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3930 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3931 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3932 && arm_const_double_rtx (XEXP (x, 1))))
3933 ? 0 : 8));
3935 /* Fall through */
3936 case AND: case XOR: case IOR:
3937 extra_cost = 0;
3939 /* Normally the frame registers will be spilt into reg+const during
3940 reload, so it is a bad idea to combine them with other instructions,
3941 since then they might not be moved outside of loops. As a compromise
3942 we allow integration with ops that have a constant as their second
3943 operand. */
3944 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3945 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3946 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3947 || (REG_OR_SUBREG_REG (XEXP (x, 1))
3948 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
3949 extra_cost = 4;
3951 if (mode == DImode)
3952 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3953 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3954 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3955 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3956 ? 0 : 8));
3958 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3959 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3960 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3961 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3962 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3963 ? 0 : 4));
3965 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3966 return (1 + extra_cost
3967 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3968 || subcode == LSHIFTRT || subcode == ASHIFTRT
3969 || subcode == ROTATE || subcode == ROTATERT
3970 || (subcode == MULT
3971 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3972 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3973 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3974 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3975 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3976 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3977 ? 0 : 4));
3979 return 8;
3981 case MULT:
3982 /* This should have been handled by the CPU specific routines. */
3983 abort ();
3985 case TRUNCATE:
3986 if (arm_arch3m && mode == SImode
3987 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3988 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3989 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3990 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3991 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3992 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3993 return 8;
3994 return 99;
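/* The pattern accepted above is the high word of a widening
32x32->64 multiply, which arm_arch3m cores can do with a single
smull/umull-style instruction, hence the relatively cheap cost of 8;
any other TRUNCATE is effectively rejected. */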
3996 case NEG:
3997 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3998 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3999 /* Fall through */
4000 case NOT:
4001 if (mode == DImode)
4002 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4004 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4006 case IF_THEN_ELSE:
4007 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4008 return 14;
4009 return 2;
4011 case COMPARE:
4012 return 1;
4014 case ABS:
4015 return 4 + (mode == DImode ? 4 : 0);
4017 case SIGN_EXTEND:
4018 if (GET_MODE (XEXP (x, 0)) == QImode)
4019 return (4 + (mode == DImode ? 4 : 0)
4020 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4021 /* Fall through */
4022 case ZERO_EXTEND:
4023 switch (GET_MODE (XEXP (x, 0)))
4025 case QImode:
4026 return (1 + (mode == DImode ? 4 : 0)
4027 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4029 case HImode:
4030 return (4 + (mode == DImode ? 4 : 0)
4031 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4033 case SImode:
4034 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4036 case V8QImode:
4037 case V4HImode:
4038 case V2SImode:
4039 case V4QImode:
4040 case V2HImode:
4041 return 1;
4043 default:
4044 break;
4046 abort ();
4048 case CONST_INT:
4049 if (const_ok_for_arm (INTVAL (x)))
4050 return outer == SET ? 2 : -1;
4051 else if (outer == AND
4052 && const_ok_for_arm (~INTVAL (x)))
4053 return -1;
4054 else if ((outer == COMPARE
4055 || outer == PLUS || outer == MINUS)
4056 && const_ok_for_arm (-INTVAL (x)))
4057 return -1;
4058 else
4059 return 5;
4061 case CONST:
4062 case LABEL_REF:
4063 case SYMBOL_REF:
4064 return 6;
4066 case CONST_DOUBLE:
4067 if (arm_const_double_rtx (x))
4068 return outer == SET ? 2 : -1;
4069 else if ((outer == COMPARE || outer == PLUS)
4070 && neg_const_double_rtx_ok_for_fpa (x))
4071 return -1;
4072 return 7;
4074 default:
4075 return 99;
4079 /* RTX costs when optimizing for size. */
4080 static bool
4081 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4083 enum machine_mode mode = GET_MODE (x);
4085 if (TARGET_THUMB)
4087 /* XXX TBD. For now, use the standard costs. */
4088 *total = thumb_rtx_costs (x, code, outer_code);
4089 return true;
4092 switch (code)
4094 case MEM:
4095 /* A memory access costs 1 insn if the mode is small, or the address is
4096 a single register, otherwise it costs one insn per word. */
4097 if (REG_P (XEXP (x, 0)))
4098 *total = COSTS_N_INSNS (1);
4099 else
4100 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4101 return true;
4103 case DIV:
4104 case MOD:
4105 case UDIV:
4106 case UMOD:
4107 /* Needs a libcall, so it costs about this. */
4108 *total = COSTS_N_INSNS (2);
4109 return false;
4111 case ROTATE:
4112 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4114 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4115 return true;
4117 /* Fall through */
4118 case ROTATERT:
4119 case ASHIFT:
4120 case LSHIFTRT:
4121 case ASHIFTRT:
4122 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4124 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4125 return true;
4127 else if (mode == SImode)
4129 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4130 /* Slightly disparage register shifts, but not by much. */
4131 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4132 *total += 1 + rtx_cost (XEXP (x, 1), code);
4133 return true;
4136 /* Needs a libcall. */
4137 *total = COSTS_N_INSNS (2);
4138 return false;
4140 case MINUS:
4141 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4143 *total = COSTS_N_INSNS (1);
4144 return false;
4147 if (mode == SImode)
4149 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4150 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4152 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4153 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4154 || subcode1 == ROTATE || subcode1 == ROTATERT
4155 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4156 || subcode1 == ASHIFTRT)
4158 /* It's just the cost of the two operands. */
4159 *total = 0;
4160 return false;
4163 *total = COSTS_N_INSNS (1);
4164 return false;
4167 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4168 return false;
4170 case PLUS:
4171 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4173 *total = COSTS_N_INSNS (1);
4174 return false;
4177 /* Fall through */
4178 case AND: case XOR: case IOR:
4179 if (mode == SImode)
4181 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4183 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4184 || subcode == LSHIFTRT || subcode == ASHIFTRT
4185 || (code == AND && subcode == NOT))
4187 /* It's just the cost of the two operands. */
4188 *total = 0;
4189 return false;
4193 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4194 return false;
4196 case MULT:
4197 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4198 return false;
4200 case NEG:
4201 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4202 { *total = COSTS_N_INSNS (1); return false; }
4203 /* Fall through */
4204 case NOT:
4205 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4207 return false;
4209 case IF_THEN_ELSE:
4210 *total = 0;
4211 return false;
4213 case COMPARE:
4214 if (cc_register (XEXP (x, 0), VOIDmode))
4215 *total = 0;
4216 else
4217 *total = COSTS_N_INSNS (1);
4218 return false;
4220 case ABS:
4221 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4222 *total = COSTS_N_INSNS (1);
4223 else
4224 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4225 return false;
4227 case SIGN_EXTEND:
4228 *total = 0;
4229 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4231 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4232 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4234 if (mode == DImode)
4235 *total += COSTS_N_INSNS (1);
4236 return false;
4238 case ZERO_EXTEND:
4239 *total = 0;
4240 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4242 switch (GET_MODE (XEXP (x, 0)))
4244 case QImode:
4245 *total += COSTS_N_INSNS (1);
4246 break;
4248 case HImode:
4249 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4251 case SImode:
4252 break;
4254 default:
4255 *total += COSTS_N_INSNS (2);
4259 if (mode == DImode)
4260 *total += COSTS_N_INSNS (1);
4262 return false;
4264 case CONST_INT:
4265 if (const_ok_for_arm (INTVAL (x)))
4266 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4267 else if (const_ok_for_arm (~INTVAL (x)))
4268 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4269 else if (const_ok_for_arm (-INTVAL (x)))
4271 if (outer_code == COMPARE || outer_code == PLUS
4272 || outer_code == MINUS)
4273 *total = 0;
4274 else
4275 *total = COSTS_N_INSNS (1);
4277 else
4278 *total = COSTS_N_INSNS (2);
4279 return true;
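/* For example: 255 costs one insn (mov) in a SET and is free as an
operand; 0xffffff00 costs one insn via mvn (and is free as the second
operand of an AND, where it becomes bic); -255 is free under COMPARE,
PLUS or MINUS, where the negated form (cmn, sub) can be used; anything
else is costed at two insns. */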
4281 case CONST:
4282 case LABEL_REF:
4283 case SYMBOL_REF:
4284 *total = COSTS_N_INSNS (2);
4285 return true;
4287 case CONST_DOUBLE:
4288 *total = COSTS_N_INSNS (4);
4289 return true;
4291 default:
4292 if (mode != VOIDmode)
4293 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4294 else
4295 *total = COSTS_N_INSNS (4); /* Who knows? */
4296 return false;
4300 /* RTX costs for cores with a slow MUL implementation. */
4302 static bool
4303 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4305 enum machine_mode mode = GET_MODE (x);
4307 if (TARGET_THUMB)
4309 *total = thumb_rtx_costs (x, code, outer_code);
4310 return true;
4313 switch (code)
4315 case MULT:
4316 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4317 || mode == DImode)
4319 *total = 30;
4320 return true;
4323 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4325 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4326 & (unsigned HOST_WIDE_INT) 0xffffffff);
4327 int cost, const_ok = const_ok_for_arm (i);
4328 int j, booth_unit_size;
4330 /* Tune as appropriate. */
4331 cost = const_ok ? 4 : 8;
4332 booth_unit_size = 2;
4333 for (j = 0; i && j < 32; j += booth_unit_size)
4335 i >>= booth_unit_size;
4336 cost += 2;
4339 *total = cost;
4340 return true;
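/* With the two-bit Booth step above, a multiply by 100 (seven
significant bits) takes four iterations, so the cost works out as
4 + 4 * 2 = 12. */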
4343 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4344 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4345 return true;
4347 default:
4348 *total = arm_rtx_costs_1 (x, code, outer_code);
4349 return true;
4354 /* RTX cost for cores with a fast multiply unit (M variants). */
4356 static bool
4357 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4359 enum machine_mode mode = GET_MODE (x);
4361 if (TARGET_THUMB)
4363 *total = thumb_rtx_costs (x, code, outer_code);
4364 return true;
4367 switch (code)
4369 case MULT:
4370 /* There is no point basing this on the tuning, since it is always the
4371 fast variant if it exists at all. */
4372 if (mode == DImode
4373 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4374 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4375 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4377 *total = 8;
4378 return true;
4382 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4383 || mode == DImode)
4385 *total = 30;
4386 return true;
4389 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4391 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4392 & (unsigned HOST_WIDE_INT) 0xffffffff);
4393 int cost, const_ok = const_ok_for_arm (i);
4394 int j, booth_unit_size;
4396 /* Tune as appropriate. */
4397 cost = const_ok ? 4 : 8;
4398 booth_unit_size = 8;
4399 for (j = 0; i && j < 32; j += booth_unit_size)
4401 i >>= booth_unit_size;
4402 cost += 2;
4405 *total = cost;
4406 return true;
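/* Same formula as the slow-multiply case, but with an eight-bit
Booth step: a multiply by 100 now finishes in a single iteration,
costing 4 + 2 = 6. */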
4409 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4410 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4411 return true;
4413 default:
4414 *total = arm_rtx_costs_1 (x, code, outer_code);
4415 return true;
4420 /* RTX cost for XScale CPUs. */
4422 static bool
4423 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4425 enum machine_mode mode = GET_MODE (x);
4427 if (TARGET_THUMB)
4429 *total = thumb_rtx_costs (x, code, outer_code);
4430 return true;
4433 switch (code)
4435 case MULT:
4436 /* There is no point basing this on the tuning, since it is always the
4437 fast variant if it exists at all. */
4438 if (mode == DImode
4439 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4440 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4441 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4443 *total = 8;
4444 return true;
4448 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4449 || mode == DImode)
4451 *total = 30;
4452 return true;
4455 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4457 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4458 & (unsigned HOST_WIDE_INT) 0xffffffff);
4459 int cost, const_ok = const_ok_for_arm (i);
4460 unsigned HOST_WIDE_INT masked_const;
4462 /* The cost will be related to two insns.
4463 First a load of the constant (MOV or LDR), then a multiply. */
4464 cost = 2;
4465 if (! const_ok)
4466 cost += 1; /* LDR is probably more expensive because
4467 of longer result latency. */
4468 masked_const = i & 0xffff8000;
4469 if (masked_const != 0 && masked_const != 0xffff8000)
4471 masked_const = i & 0xf8000000;
4472 if (masked_const == 0 || masked_const == 0xf8000000)
4473 cost += 1;
4474 else
4475 cost += 2;
4477 *total = cost;
4478 return true;
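/* For example, 0x7f00 has no bits at or above bit 15, so only the
two base insns are counted; 0x40000000 has significant bits high in
the word, defeats the multiplier's early termination, and is charged
two extra cycles on top of them. */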
4481 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4482 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4483 return true;
4485 case COMPARE:
4486 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4487 will stall until the multiplication is complete. */
4488 if (GET_CODE (XEXP (x, 0)) == MULT)
4489 *total = 4 + rtx_cost (XEXP (x, 0), code);
4490 else
4491 *total = arm_rtx_costs_1 (x, code, outer_code);
4492 return true;
4494 default:
4495 *total = arm_rtx_costs_1 (x, code, outer_code);
4496 return true;
4501 /* RTX costs for 9e (and later) cores. */
4503 static bool
4504 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4506 enum machine_mode mode = GET_MODE (x);
4507 int nonreg_cost;
4508 int cost;
4510 if (TARGET_THUMB)
4512 switch (code)
4514 case MULT:
4515 *total = COSTS_N_INSNS (3);
4516 return true;
4518 default:
4519 *total = thumb_rtx_costs (x, code, outer_code);
4520 return true;
4524 switch (code)
4526 case MULT:
4527 /* There is no point basing this on the tuning, since it is always the
4528 fast variant if it exists at all. */
4529 if (mode == DImode
4530 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4531 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4532 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4534 *total = 3;
4535 return true;
4539 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4541 *total = 30;
4542 return true;
4544 if (mode == DImode)
4546 cost = 7;
4547 nonreg_cost = 8;
4549 else
4551 cost = 2;
4552 nonreg_cost = 4;
4556 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4557 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4558 return true;
4560 default:
4561 *total = arm_rtx_costs_1 (x, code, outer_code);
4562 return true;
4565 /* All address computations that can be done are free, but rtx cost returns
4566 the same for practically all of them. So we weight the different types
4567 of address here in the order (most preferred first):
4568 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4569 static inline int
4570 arm_arm_address_cost (rtx x)
4572 enum rtx_code c = GET_CODE (x);
4574 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4575 return 0;
4576 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4577 return 10;
4579 if (c == PLUS || c == MINUS)
4581 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4582 return 2;
4584 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4585 return 3;
4587 return 4;
4590 return 6;
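/* So, roughly: a post-increment address costs 0, [r1, r2, lsl #2]
costs 3, [r1, #8] costs 4, a plain [r1] costs 6, and a label or
symbol used directly costs 10. */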
4593 static inline int
4594 arm_thumb_address_cost (rtx x)
4596 enum rtx_code c = GET_CODE (x);
4598 if (c == REG)
4599 return 1;
4600 if (c == PLUS
4601 && GET_CODE (XEXP (x, 0)) == REG
4602 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4603 return 1;
4605 return 2;
4608 static int
4609 arm_address_cost (rtx x)
4611 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4614 static int
4615 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4617 rtx i_pat, d_pat;
4619 /* Some true dependencies can have a higher cost depending
4620 on precisely how certain input operands are used. */
4621 if (arm_tune_xscale
4622 && REG_NOTE_KIND (link) == 0
4623 && recog_memoized (insn) >= 0
4624 && recog_memoized (dep) >= 0)
4626 int shift_opnum = get_attr_shift (insn);
4627 enum attr_type attr_type = get_attr_type (dep);
4629 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4630 operand for INSN. If we have a shifted input operand and the
4631 instruction we depend on is another ALU instruction, then we may
4632 have to account for an additional stall. */
4633 if (shift_opnum != 0
4634 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4636 rtx shifted_operand;
4637 int opno;
4639 /* Get the shifted operand. */
4640 extract_insn (insn);
4641 shifted_operand = recog_data.operand[shift_opnum];
4643 /* Iterate over all the operands in DEP. If we write an operand
4644 that overlaps with SHIFTED_OPERAND, then we have to increase the
4645 cost of this dependency. */
4646 extract_insn (dep);
4647 preprocess_constraints ();
4648 for (opno = 0; opno < recog_data.n_operands; opno++)
4650 /* We can ignore strict inputs. */
4651 if (recog_data.operand_type[opno] == OP_IN)
4652 continue;
4654 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4655 shifted_operand))
4656 return 2;
4661 /* XXX This is not strictly true for the FPA. */
4662 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4663 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4664 return 0;
4666 /* Call insns don't incur a stall, even if they follow a load. */
4667 if (REG_NOTE_KIND (link) == 0
4668 && GET_CODE (insn) == CALL_INSN)
4669 return 1;
4671 if ((i_pat = single_set (insn)) != NULL
4672 && GET_CODE (SET_SRC (i_pat)) == MEM
4673 && (d_pat = single_set (dep)) != NULL
4674 && GET_CODE (SET_DEST (d_pat)) == MEM)
4676 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4677 /* This is a load after a store; there is no conflict if the load reads
4678 from a cached area. Assume that loads from the stack and from the
4679 constant pool are cached, and that others will miss. This is a
4680 hack. */
4682 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4683 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4684 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4685 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4686 return 1;
4689 return cost;
4692 static int fp_consts_inited = 0;
4694 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4695 static const char * const strings_fp[8] =
4697 "0", "1", "2", "3",
4698 "4", "5", "0.5", "10"
4701 static REAL_VALUE_TYPE values_fp[8];
4703 static void
4704 init_fp_table (void)
4706 int i;
4707 REAL_VALUE_TYPE r;
4709 if (TARGET_VFP)
4710 fp_consts_inited = 1;
4711 else
4712 fp_consts_inited = 8;
4714 for (i = 0; i < fp_consts_inited; i++)
4716 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4717 values_fp[i] = r;
4721 /* Return TRUE if rtx X is a valid immediate FP constant. */
4723 arm_const_double_rtx (rtx x)
4725 REAL_VALUE_TYPE r;
4726 int i;
4728 if (!fp_consts_inited)
4729 init_fp_table ();
4731 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4732 if (REAL_VALUE_MINUS_ZERO (r))
4733 return 0;
4735 for (i = 0; i < fp_consts_inited; i++)
4736 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4737 return 1;
4739 return 0;
4742 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4744 neg_const_double_rtx_ok_for_fpa (rtx x)
4746 REAL_VALUE_TYPE r;
4747 int i;
4749 if (!fp_consts_inited)
4750 init_fp_table ();
4752 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4753 r = REAL_VALUE_NEGATE (r);
4754 if (REAL_VALUE_MINUS_ZERO (r))
4755 return 0;
4757 for (i = 0; i < 8; i++)
4758 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4759 return 1;
4761 return 0;
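/* So, for example, -1.0 is accepted here: its negation, 1.0, is in
the table above, which lets the caller rewrite a comparison or
addition to use the negated constant. */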
4764 /* Predicates for `match_operand' and `match_operator'. */
4766 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4768 cirrus_memory_offset (rtx op)
4770 /* Reject eliminable registers. */
4771 if (! (reload_in_progress || reload_completed)
4772 && ( reg_mentioned_p (frame_pointer_rtx, op)
4773 || reg_mentioned_p (arg_pointer_rtx, op)
4774 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4775 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4776 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4777 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4778 return 0;
4780 if (GET_CODE (op) == MEM)
4782 rtx ind;
4784 ind = XEXP (op, 0);
4786 /* Match: (mem (reg)). */
4787 if (GET_CODE (ind) == REG)
4788 return 1;
4790 /* Match:
4791 (mem (plus (reg)
4792 (const))). */
4793 if (GET_CODE (ind) == PLUS
4794 && GET_CODE (XEXP (ind, 0)) == REG
4795 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4796 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4797 return 1;
4800 return 0;
4803 /* Return TRUE if OP is a valid VFP memory address pattern.
4804 WB is true if writeback address modes are allowed. */
4807 arm_coproc_mem_operand (rtx op, bool wb)
4809 rtx ind;
4811 /* Reject eliminable registers. */
4812 if (! (reload_in_progress || reload_completed)
4813 && ( reg_mentioned_p (frame_pointer_rtx, op)
4814 || reg_mentioned_p (arg_pointer_rtx, op)
4815 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4816 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4817 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4818 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4819 return FALSE;
4821 /* Constants are converted into offsets from labels. */
4822 if (GET_CODE (op) != MEM)
4823 return FALSE;
4825 ind = XEXP (op, 0);
4827 if (reload_completed
4828 && (GET_CODE (ind) == LABEL_REF
4829 || (GET_CODE (ind) == CONST
4830 && GET_CODE (XEXP (ind, 0)) == PLUS
4831 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4832 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4833 return TRUE;
4835 /* Match: (mem (reg)). */
4836 if (GET_CODE (ind) == REG)
4837 return arm_address_register_rtx_p (ind, 0);
4839 /* Autoincrement addressing modes. */
4840 if (wb
4841 && (GET_CODE (ind) == PRE_INC
4842 || GET_CODE (ind) == POST_INC
4843 || GET_CODE (ind) == PRE_DEC
4844 || GET_CODE (ind) == POST_DEC))
4845 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4847 if (wb
4848 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4849 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4850 && GET_CODE (XEXP (ind, 1)) == PLUS
4851 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4852 ind = XEXP (ind, 1);
4854 /* Match:
4855 (plus (reg)
4856 (const)). */
4857 if (GET_CODE (ind) == PLUS
4858 && GET_CODE (XEXP (ind, 0)) == REG
4859 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4860 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4861 && INTVAL (XEXP (ind, 1)) > -1024
4862 && INTVAL (XEXP (ind, 1)) < 1024
4863 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4864 return TRUE;
4866 return FALSE;
4869 /* Return true if X is a register that will be eliminated later on. */
4871 arm_eliminable_register (rtx x)
4873 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4874 || REGNO (x) == ARG_POINTER_REGNUM
4875 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4876 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4879 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4880 VFP registers. Otherwise return NO_REGS. */
4882 enum reg_class
4883 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4885 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4886 return NO_REGS;
4888 return GENERAL_REGS;
4892 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4893 Used by the Cirrus Maverick code, which has to work around
4894 a hardware bug triggered by such instructions. */
4895 static bool
4896 arm_memory_load_p (rtx insn)
4898 rtx body, lhs, rhs;
4900 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4901 return false;
4903 body = PATTERN (insn);
4905 if (GET_CODE (body) != SET)
4906 return false;
4908 lhs = XEXP (body, 0);
4909 rhs = XEXP (body, 1);
4911 lhs = REG_OR_SUBREG_RTX (lhs);
4913 /* If the destination is not a general purpose
4914 register we do not have to worry. */
4915 if (GET_CODE (lhs) != REG
4916 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4917 return false;
4919 /* As well as loads from memory we also have to react
4920 to loads of invalid constants which will be turned
4921 into loads from the minipool. */
4922 return (GET_CODE (rhs) == MEM
4923 || GET_CODE (rhs) == SYMBOL_REF
4924 || note_invalid_constants (insn, -1, false));
4927 /* Return TRUE if INSN is a Cirrus instruction. */
4928 static bool
4929 arm_cirrus_insn_p (rtx insn)
4931 enum attr_cirrus attr;
4933 /* get_attr aborts on USE and CLOBBER. */
4934 if (!insn
4935 || GET_CODE (insn) != INSN
4936 || GET_CODE (PATTERN (insn)) == USE
4937 || GET_CODE (PATTERN (insn)) == CLOBBER)
4938 return 0;
4940 attr = get_attr_cirrus (insn);
4942 return attr != CIRRUS_NOT;
4945 /* Cirrus reorg for invalid instruction combinations. */
4946 static void
4947 cirrus_reorg (rtx first)
4949 enum attr_cirrus attr;
4950 rtx body = PATTERN (first);
4951 rtx t;
4952 int nops;
4954 /* Any branch must be followed by two non-Cirrus instructions. */
4955 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4957 nops = 0;
4958 t = next_nonnote_insn (first);
4960 if (arm_cirrus_insn_p (t))
4961 ++ nops;
4963 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4964 ++ nops;
4966 while (nops --)
4967 emit_insn_after (gen_nop (), first);
4969 return;
4972 /* (float (blah)) is in parallel with a clobber. */
4973 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4974 body = XVECEXP (body, 0, 0);
4976 if (GET_CODE (body) == SET)
4978 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4980 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4981 be followed by a non-Cirrus insn. */
4982 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4984 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4985 emit_insn_after (gen_nop (), first);
4987 return;
4989 else if (arm_memory_load_p (first))
4991 unsigned int arm_regno;
4993 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4994 ldr/cfmv64hr combination where the Rd field is the same
4995 in both instructions must be split with a non Cirrus
4996 insn. Example:
4998 ldr r0, blah
5000 cfmvsr mvf0, r0. */
5002 /* Get Arm register number for ldr insn. */
5003 if (GET_CODE (lhs) == REG)
5004 arm_regno = REGNO (lhs);
5005 else if (GET_CODE (rhs) == REG)
5006 arm_regno = REGNO (rhs);
5007 else
5008 abort ();
5010 /* Next insn. */
5011 first = next_nonnote_insn (first);
5013 if (! arm_cirrus_insn_p (first))
5014 return;
5016 body = PATTERN (first);
5018 /* (float (blah)) is in parallel with a clobber. */
5019 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5020 body = XVECEXP (body, 0, 0);
5022 if (GET_CODE (body) == FLOAT)
5023 body = XEXP (body, 0);
5025 if (get_attr_cirrus (first) == CIRRUS_MOVE
5026 && GET_CODE (XEXP (body, 1)) == REG
5027 && arm_regno == REGNO (XEXP (body, 1)))
5028 emit_insn_after (gen_nop (), first);
5030 return;
5034 /* get_attr aborts on USE and CLOBBER. */
5035 if (!first
5036 || GET_CODE (first) != INSN
5037 || GET_CODE (PATTERN (first)) == USE
5038 || GET_CODE (PATTERN (first)) == CLOBBER)
5039 return;
5041 attr = get_attr_cirrus (first);
5043 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5044 must be followed by a non-coprocessor instruction. */
5045 if (attr == CIRRUS_COMPARE)
5047 nops = 0;
5049 t = next_nonnote_insn (first);
5051 if (arm_cirrus_insn_p (t))
5052 ++ nops;
5054 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5055 ++ nops;
5057 while (nops --)
5058 emit_insn_after (gen_nop (), first);
5060 return;
5064 /* Return TRUE if X references a SYMBOL_REF. */
5066 symbol_mentioned_p (rtx x)
5068 const char * fmt;
5069 int i;
5071 if (GET_CODE (x) == SYMBOL_REF)
5072 return 1;
5074 fmt = GET_RTX_FORMAT (GET_CODE (x));
5076 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5078 if (fmt[i] == 'E')
5080 int j;
5082 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5083 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5084 return 1;
5086 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5087 return 1;
5090 return 0;
5093 /* Return TRUE if X references a LABEL_REF. */
5095 label_mentioned_p (rtx x)
5097 const char * fmt;
5098 int i;
5100 if (GET_CODE (x) == LABEL_REF)
5101 return 1;
5103 fmt = GET_RTX_FORMAT (GET_CODE (x));
5104 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5106 if (fmt[i] == 'E')
5108 int j;
5110 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5111 if (label_mentioned_p (XVECEXP (x, i, j)))
5112 return 1;
5114 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5115 return 1;
5118 return 0;
5121 enum rtx_code
5122 minmax_code (rtx x)
5124 enum rtx_code code = GET_CODE (x);
5126 if (code == SMAX)
5127 return GE;
5128 else if (code == SMIN)
5129 return LE;
5130 else if (code == UMIN)
5131 return LEU;
5132 else if (code == UMAX)
5133 return GEU;
5135 abort ();
5138 /* Return 1 if memory locations are adjacent. */
5140 adjacent_mem_locations (rtx a, rtx b)
5142 /* We don't guarantee to preserve the order of these memory refs. */
5143 if (volatile_refs_p (a) || volatile_refs_p (b))
5144 return 0;
5146 if ((GET_CODE (XEXP (a, 0)) == REG
5147 || (GET_CODE (XEXP (a, 0)) == PLUS
5148 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5149 && (GET_CODE (XEXP (b, 0)) == REG
5150 || (GET_CODE (XEXP (b, 0)) == PLUS
5151 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5153 HOST_WIDE_INT val0 = 0, val1 = 0;
5154 rtx reg0, reg1;
5155 int val_diff;
5157 if (GET_CODE (XEXP (a, 0)) == PLUS)
5159 reg0 = XEXP (XEXP (a, 0), 0);
5160 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5162 else
5163 reg0 = XEXP (a, 0);
5165 if (GET_CODE (XEXP (b, 0)) == PLUS)
5167 reg1 = XEXP (XEXP (b, 0), 0);
5168 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5170 else
5171 reg1 = XEXP (b, 0);
5173 /* Don't accept any offset that will require multiple
5174 instructions to handle, since this would cause the
5175 arith_adjacentmem pattern to output an overlong sequence. */
5176 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5177 return 0;
5179 /* Don't allow an eliminable register: register elimination can make
5180 the offset too large. */
5181 if (arm_eliminable_register (reg0))
5182 return 0;
5184 val_diff = val1 - val0;
5186 if (arm_ld_sched)
5188 /* If the target has load delay slots, then there's no benefit
5189 to using an ldm instruction unless the offset is zero and
5190 we are optimizing for size. */
5191 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5192 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5193 && (val_diff == 4 || val_diff == -4));
5196 return ((REGNO (reg0) == REGNO (reg1))
5197 && (val_diff == 4 || val_diff == -4));
5200 return 0;
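/* E.g. (mem (reg r4)) and (mem (plus (reg r4) (const_int 4))) are
adjacent; on arm_ld_sched cores the pair is only accepted when
optimizing for size and one of the offsets is 0 or 4. */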
5204 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5205 HOST_WIDE_INT *load_offset)
5207 int unsorted_regs[4];
5208 HOST_WIDE_INT unsorted_offsets[4];
5209 int order[4];
5210 int base_reg = -1;
5211 int i;
5213 /* Can only handle 2, 3, or 4 insns at present,
5214 though could be easily extended if required. */
5215 if (nops < 2 || nops > 4)
5216 abort ();
5218 /* Loop over the operands and check that the memory references are
5219 suitable (i.e. immediate offsets from the same base register). At
5220 the same time, extract the target register, and the memory
5221 offsets. */
5222 for (i = 0; i < nops; i++)
5224 rtx reg;
5225 rtx offset;
5227 /* Convert a subreg of a mem into the mem itself. */
5228 if (GET_CODE (operands[nops + i]) == SUBREG)
5229 operands[nops + i] = alter_subreg (operands + (nops + i));
5231 if (GET_CODE (operands[nops + i]) != MEM)
5232 abort ();
5234 /* Don't reorder volatile memory references; it doesn't seem worth
5235 looking for the case where the order is ok anyway. */
5236 if (MEM_VOLATILE_P (operands[nops + i]))
5237 return 0;
5239 offset = const0_rtx;
5241 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5242 || (GET_CODE (reg) == SUBREG
5243 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5244 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5245 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5246 == REG)
5247 || (GET_CODE (reg) == SUBREG
5248 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5249 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5250 == CONST_INT)))
5252 if (i == 0)
5254 base_reg = REGNO (reg);
5255 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5256 ? REGNO (operands[i])
5257 : REGNO (SUBREG_REG (operands[i])));
5258 order[0] = 0;
5260 else
5262 if (base_reg != (int) REGNO (reg))
5263 /* Not addressed from the same base register. */
5264 return 0;
5266 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5267 ? REGNO (operands[i])
5268 : REGNO (SUBREG_REG (operands[i])));
5269 if (unsorted_regs[i] < unsorted_regs[order[0]])
5270 order[0] = i;
5273 /* If it isn't an integer register, or if it overwrites the
5274 base register but isn't the last insn in the list, then
5275 we can't do this. */
5276 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5277 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5278 return 0;
5280 unsorted_offsets[i] = INTVAL (offset);
5282 else
5283 /* Not a suitable memory address. */
5284 return 0;
5287 /* All the useful information has now been extracted from the
5288 operands into unsorted_regs and unsorted_offsets; additionally,
5289 order[0] has been set to the lowest numbered register in the
5290 list. Sort the registers into order, and check that the memory
5291 offsets are ascending and adjacent. */
5293 for (i = 1; i < nops; i++)
5295 int j;
5297 order[i] = order[i - 1];
5298 for (j = 0; j < nops; j++)
5299 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5300 && (order[i] == order[i - 1]
5301 || unsorted_regs[j] < unsorted_regs[order[i]]))
5302 order[i] = j;
5304 /* Have we found a suitable register? If not, one must be used more
5305 than once. */
5306 if (order[i] == order[i - 1])
5307 return 0;
5309 /* Is the memory address adjacent and ascending? */
5310 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5311 return 0;
5314 if (base)
5316 *base = base_reg;
5318 for (i = 0; i < nops; i++)
5319 regs[i] = unsorted_regs[order[i]];
5321 *load_offset = unsorted_offsets[order[0]];
5324 if (unsorted_offsets[order[0]] == 0)
5325 return 1; /* ldmia */
5327 if (unsorted_offsets[order[0]] == 4)
5328 return 2; /* ldmib */
5330 if (unsorted_offsets[order[nops - 1]] == 0)
5331 return 3; /* ldmda */
5333 if (unsorted_offsets[order[nops - 1]] == -4)
5334 return 4; /* ldmdb */
5336 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5337 if the offset isn't small enough. The reason 2 ldrs are faster
5338 is because these ARMs are able to do more than one cache access
5339 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5340 whilst the ARM8 has a double bandwidth cache. This means that
5341 these cores can do both an instruction fetch and a data fetch in
5342 a single cycle, so the trick of calculating the address into a
5343 scratch register (one of the result regs) and then doing a load
5344 multiple actually becomes slower (and no smaller in code size).
5345 That is the transformation
5347 ldr rd1, [rbase + offset]
5348 ldr rd2, [rbase + offset + 4]
5352 add rd1, rbase, offset
5353 ldmia rd1, {rd1, rd2}
5355 produces worse code -- '3 cycles + any stalls on rd2' instead of
5356 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5357 access per cycle, the first sequence could never complete in less
5358 than 6 cycles, whereas the ldm sequence would only take 5 and
5359 would make better use of sequential accesses if not hitting the
5360 cache.
5362 We cheat here and test 'arm_ld_sched' which we currently know to
5363 only be true for the ARM8, ARM9 and StrongARM. If this ever
5364 changes, then the test below needs to be reworked. */
5365 if (nops == 2 && arm_ld_sched)
5366 return 0;
5368 /* Can't do it without setting up the offset; only do this if it takes
5369 no more than one insn. */
5370 return (const_ok_for_arm (unsorted_offsets[order[0]])
5371 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
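/* The return value encodes the addressing mode; for two registers
based on r4: offsets {0, 4} give 1 (ldmia), {4, 8} give 2 (ldmib),
{-4, 0} give 3 (ldmda), {-8, -4} give 4 (ldmdb), and other offsets
reachable with a single add/sub give 5. */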
5374 const char *
5375 emit_ldm_seq (rtx *operands, int nops)
5377 int regs[4];
5378 int base_reg;
5379 HOST_WIDE_INT offset;
5380 char buf[100];
5381 int i;
5383 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5385 case 1:
5386 strcpy (buf, "ldm%?ia\t");
5387 break;
5389 case 2:
5390 strcpy (buf, "ldm%?ib\t");
5391 break;
5393 case 3:
5394 strcpy (buf, "ldm%?da\t");
5395 break;
5397 case 4:
5398 strcpy (buf, "ldm%?db\t");
5399 break;
5401 case 5:
5402 if (offset >= 0)
5403 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5404 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5405 (long) offset);
5406 else
5407 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5408 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5409 (long) -offset);
5410 output_asm_insn (buf, operands);
5411 base_reg = regs[0];
5412 strcpy (buf, "ldm%?ia\t");
5413 break;
5415 default:
5416 abort ();
5419 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5420 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5422 for (i = 1; i < nops; i++)
5423 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5424 reg_names[regs[i]]);
5426 strcat (buf, "}\t%@ phole ldm");
5428 output_asm_insn (buf, operands);
5429 return "";
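/* For example, with registers {r0, r1} at [r4, #16] (return code 5)
this emits, roughly:

add r0, r4, #16
ldmia r0, {r0, r1} @ phole ldm */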
5433 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5434 HOST_WIDE_INT * load_offset)
5436 int unsorted_regs[4];
5437 HOST_WIDE_INT unsorted_offsets[4];
5438 int order[4];
5439 int base_reg = -1;
5440 int i;
5442 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5443 extended if required. */
5444 if (nops < 2 || nops > 4)
5445 abort ();
5447 /* Loop over the operands and check that the memory references are
5448 suitable (i.e. immediate offsets from the same base register). At
5449 the same time, extract the target register, and the memory
5450 offsets. */
5451 for (i = 0; i < nops; i++)
5453 rtx reg;
5454 rtx offset;
5456 /* Convert a subreg of a mem into the mem itself. */
5457 if (GET_CODE (operands[nops + i]) == SUBREG)
5458 operands[nops + i] = alter_subreg (operands + (nops + i));
5460 if (GET_CODE (operands[nops + i]) != MEM)
5461 abort ();
5463 /* Don't reorder volatile memory references; it doesn't seem worth
5464 looking for the case where the order is ok anyway. */
5465 if (MEM_VOLATILE_P (operands[nops + i]))
5466 return 0;
5468 offset = const0_rtx;
5470 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5471 || (GET_CODE (reg) == SUBREG
5472 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5473 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5474 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5475 == REG)
5476 || (GET_CODE (reg) == SUBREG
5477 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5478 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5479 == CONST_INT)))
5481 if (i == 0)
5483 base_reg = REGNO (reg);
5484 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5485 ? REGNO (operands[i])
5486 : REGNO (SUBREG_REG (operands[i])));
5487 order[0] = 0;
5489 else
5491 if (base_reg != (int) REGNO (reg))
5492 /* Not addressed from the same base register. */
5493 return 0;
5495 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5496 ? REGNO (operands[i])
5497 : REGNO (SUBREG_REG (operands[i])));
5498 if (unsorted_regs[i] < unsorted_regs[order[0]])
5499 order[0] = i;
5502 /* If it isn't an integer register, then we can't do this. */
5503 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5504 return 0;
5506 unsorted_offsets[i] = INTVAL (offset);
5508 else
5509 /* Not a suitable memory address. */
5510 return 0;
5513 /* All the useful information has now been extracted from the
5514 operands into unsorted_regs and unsorted_offsets; additionally,
5515 order[0] has been set to the lowest numbered register in the
5516 list. Sort the registers into order, and check that the memory
5517 offsets are ascending and adjacent. */
5519 for (i = 1; i < nops; i++)
5521 int j;
5523 order[i] = order[i - 1];
5524 for (j = 0; j < nops; j++)
5525 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5526 && (order[i] == order[i - 1]
5527 || unsorted_regs[j] < unsorted_regs[order[i]]))
5528 order[i] = j;
5530 /* Have we found a suitable register? If not, one must be used more
5531 than once. */
5532 if (order[i] == order[i - 1])
5533 return 0;
5535 /* Is the memory address adjacent and ascending? */
5536 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5537 return 0;
5540 if (base)
5542 *base = base_reg;
5544 for (i = 0; i < nops; i++)
5545 regs[i] = unsorted_regs[order[i]];
5547 *load_offset = unsorted_offsets[order[0]];
5550 if (unsorted_offsets[order[0]] == 0)
5551 return 1; /* stmia */
5553 if (unsorted_offsets[order[0]] == 4)
5554 return 2; /* stmib */
5556 if (unsorted_offsets[order[nops - 1]] == 0)
5557 return 3; /* stmda */
5559 if (unsorted_offsets[order[nops - 1]] == -4)
5560 return 4; /* stmdb */
5562 return 0;
5565 const char *
5566 emit_stm_seq (rtx *operands, int nops)
5568 int regs[4];
5569 int base_reg;
5570 HOST_WIDE_INT offset;
5571 char buf[100];
5572 int i;
5574 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5576 case 1:
5577 strcpy (buf, "stm%?ia\t");
5578 break;
5580 case 2:
5581 strcpy (buf, "stm%?ib\t");
5582 break;
5584 case 3:
5585 strcpy (buf, "stm%?da\t");
5586 break;
5588 case 4:
5589 strcpy (buf, "stm%?db\t");
5590 break;
5592 default:
5593 abort ();
5596 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5597 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5599 for (i = 1; i < nops; i++)
5600 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5601 reg_names[regs[i]]);
5603 strcat (buf, "}\t%@ phole stm");
5605 output_asm_insn (buf, operands);
5606 return "";
5610 /* Routines for use in generating RTL. */
5613 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5614 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5616 HOST_WIDE_INT offset = *offsetp;
5617 int i = 0, j;
5618 rtx result;
5619 int sign = up ? 1 : -1;
5620 rtx mem, addr;
5622 /* XScale has load-store double instructions, but they have stricter
5623 alignment requirements than load-store multiple, so we cannot
5624 use them.
5626 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5627 the pipeline until completion.
5629 NREGS CYCLES
1 3
2 4
3 5
4 6
5635 An ldr instruction takes 1-3 cycles, but does not block the
5636 pipeline.
5638 NREGS CYCLES
5639 1 1-3
5640 2 2-6
5641 3 3-9
5642 4 4-12
5644 Best case ldr will always win. However, the more ldr instructions
5645 we issue, the less likely we are to be able to schedule them well.
5646 Using ldr instructions also increases code size.
5648 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5649 for counts of 3 or 4 regs. */
5650 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5652 rtx seq;
5654 start_sequence ();
5656 for (i = 0; i < count; i++)
5658 addr = plus_constant (from, i * 4 * sign);
5659 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5660 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5661 offset += 4 * sign;
5664 if (write_back)
5666 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5667 *offsetp = offset;
5670 seq = get_insns ();
5671 end_sequence ();
5673 return seq;
5676 result = gen_rtx_PARALLEL (VOIDmode,
5677 rtvec_alloc (count + (write_back ? 1 : 0)));
5678 if (write_back)
5680 XVECEXP (result, 0, 0)
5681 = gen_rtx_SET (GET_MODE (from), from,
5682 plus_constant (from, count * 4 * sign));
5683 i = 1;
5684 count++;
5687 for (j = 0; i < count; i++, j++)
5689 addr = plus_constant (from, j * 4 * sign);
5690 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5691 XVECEXP (result, 0, i)
5692 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5693 offset += 4 * sign;
5696 if (write_back)
5697 *offsetp = offset;
5699 return result;
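/* For count == 2 with write-back, the PARALLEL built above looks
roughly like:

(parallel [(set (reg from) (plus (reg from) (const_int 8)))
(set (reg 0) (mem (reg from)))
(set (reg 1) (mem (plus (reg from) (const_int 4))))]) */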
5703 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5704 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5706 HOST_WIDE_INT offset = *offsetp;
5707 int i = 0, j;
5708 rtx result;
5709 int sign = up ? 1 : -1;
5710 rtx mem, addr;
5712 /* See arm_gen_load_multiple for discussion of
5713 the pros/cons of ldm/stm usage for XScale. */
5714 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5716 rtx seq;
5718 start_sequence ();
5720 for (i = 0; i < count; i++)
5722 addr = plus_constant (to, i * 4 * sign);
5723 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5724 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5725 offset += 4 * sign;
5728 if (write_back)
5730 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5731 *offsetp = offset;
5734 seq = get_insns ();
5735 end_sequence ();
5737 return seq;
5740 result = gen_rtx_PARALLEL (VOIDmode,
5741 rtvec_alloc (count + (write_back ? 1 : 0)));
5742 if (write_back)
5744 XVECEXP (result, 0, 0)
5745 = gen_rtx_SET (GET_MODE (to), to,
5746 plus_constant (to, count * 4 * sign));
5747 i = 1;
5748 count++;
5751 for (j = 0; i < count; i++, j++)
5753 addr = plus_constant (to, j * 4 * sign);
5754 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5755 XVECEXP (result, 0, i)
5756 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5757 offset += 4 * sign;
5760 if (write_back)
5761 *offsetp = offset;
5763 return result;
5767 arm_gen_movmemqi (rtx *operands)
5769 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5770 HOST_WIDE_INT srcoffset, dstoffset;
5771 int i;
5772 rtx src, dst, srcbase, dstbase;
5773 rtx part_bytes_reg = NULL;
5774 rtx mem;
5776 if (GET_CODE (operands[2]) != CONST_INT
5777 || GET_CODE (operands[3]) != CONST_INT
5778 || INTVAL (operands[2]) > 64
5779 || INTVAL (operands[3]) & 3)
5780 return 0;
5782 dstbase = operands[0];
5783 srcbase = operands[1];
5785 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5786 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5788 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5789 out_words_to_go = INTVAL (operands[2]) / 4;
5790 last_bytes = INTVAL (operands[2]) & 3;
5791 dstoffset = srcoffset = 0;
5793 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5794 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5796 for (i = 0; in_words_to_go >= 2; i+=4)
5798 if (in_words_to_go > 4)
5799 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5800 srcbase, &srcoffset));
5801 else
5802 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5803 FALSE, srcbase, &srcoffset));
5805 if (out_words_to_go)
5807 if (out_words_to_go > 4)
5808 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5809 dstbase, &dstoffset));
5810 else if (out_words_to_go != 1)
5811 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5812 dst, TRUE,
5813 (last_bytes == 0
5814 ? FALSE : TRUE),
5815 dstbase, &dstoffset));
5816 else
5818 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5819 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5820 if (last_bytes != 0)
5822 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5823 dstoffset += 4;
5828 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5829 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5832 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5833 if (out_words_to_go)
5835 rtx sreg;
5837 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5838 sreg = copy_to_reg (mem);
5840 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5841 emit_move_insn (mem, sreg);
5842 in_words_to_go--;
5844 if (in_words_to_go) /* Sanity check */
5845 abort ();
5848 if (in_words_to_go)
5850 if (in_words_to_go < 0)
5851 abort ();
5853 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5854 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5857 if (last_bytes && part_bytes_reg == NULL)
5858 abort ();
5860 if (BYTES_BIG_ENDIAN && last_bytes)
5862 rtx tmp = gen_reg_rtx (SImode);
5864 /* The bytes we want are in the top end of the word. */
5865 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5866 GEN_INT (8 * (4 - last_bytes))));
5867 part_bytes_reg = tmp;
5869 while (last_bytes)
5871 mem = adjust_automodify_address (dstbase, QImode,
5872 plus_constant (dst, last_bytes - 1),
5873 dstoffset + last_bytes - 1);
5874 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5876 if (--last_bytes)
5878 tmp = gen_reg_rtx (SImode);
5879 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5880 part_bytes_reg = tmp;
5885 else
5887 if (last_bytes > 1)
5889 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5890 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5891 last_bytes -= 2;
5892 if (last_bytes)
5894 rtx tmp = gen_reg_rtx (SImode);
5895 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5896 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5897 part_bytes_reg = tmp;
5898 dstoffset += 2;
5902 if (last_bytes)
5904 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5905 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5909 return 1;
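/* As a sketch of the generated sequence: a 10-byte, word-aligned copy
on a little-endian target becomes a three-word ldmia, a two-word stmia
with write-back, and a trailing strh taken from the third (partially
used) word. */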
5912 /* Generate a memory reference for a half word, such that it will be loaded
5913 into the top 16 bits of the word. We can assume that the address is
5914 known to be alignable and of the form reg, or plus (reg, const). */
5917 arm_gen_rotated_half_load (rtx memref)
5919 HOST_WIDE_INT offset = 0;
5920 rtx base = XEXP (memref, 0);
5922 if (GET_CODE (base) == PLUS)
5924 offset = INTVAL (XEXP (base, 1));
5925 base = XEXP (base, 0);
5928 /* If we aren't allowed to generate unaligned addresses, then fail. */
5929 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5930 return NULL;
5932 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5934 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5935 return base;
5937 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5940 /* Select a dominance comparison mode if possible for a test of the general
5941 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5942 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5943 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5944 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5945 In all cases OP will be either EQ or NE, but we don't need to know which
5946 here. If we are unable to support a dominance comparison we return
5947 CC mode. This will then fail to match for the RTL expressions that
5948 generate this call. */
5949 enum machine_mode
5950 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5952 enum rtx_code cond1, cond2;
5953 int swapped = 0;
5955 /* Currently we will probably get the wrong result if the individual
5956 comparisons are not simple. This also ensures that it is safe to
5957 reverse a comparison if necessary. */
5958 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5959 != CCmode)
5960 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5961 != CCmode))
5962 return CCmode;
5964 /* The if_then_else variant of this tests the second condition if the
5965 first passes, but is true if the first fails. Reverse the first
5966 condition to get a true "inclusive-or" expression. */
5967 if (cond_or == DOM_CC_NX_OR_Y)
5968 cond1 = reverse_condition (cond1);
5970 /* If the comparisons are not equal, and one doesn't dominate the other,
5971 then we can't do this. */
5972 if (cond1 != cond2
5973 && !comparison_dominates_p (cond1, cond2)
5974 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5975 return CCmode;
5977 if (swapped)
5979 enum rtx_code temp = cond1;
5980 cond1 = cond2;
5981 cond2 = temp;
5984 switch (cond1)
5986 case EQ:
5987 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5988 return CC_DEQmode;
5990 switch (cond2)
5992 case LE: return CC_DLEmode;
5993 case LEU: return CC_DLEUmode;
5994 case GE: return CC_DGEmode;
5995 case GEU: return CC_DGEUmode;
5996 default: break;
5999 break;
6001 case LT:
6002 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6003 return CC_DLTmode;
6004 if (cond2 == LE)
6005 return CC_DLEmode;
6006 if (cond2 == NE)
6007 return CC_DNEmode;
6008 break;
6010 case GT:
6011 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6012 return CC_DGTmode;
6013 if (cond2 == GE)
6014 return CC_DGEmode;
6015 if (cond2 == NE)
6016 return CC_DNEmode;
6017 break;
6019 case LTU:
6020 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6021 return CC_DLTUmode;
6022 if (cond2 == LEU)
6023 return CC_DLEUmode;
6024 if (cond2 == NE)
6025 return CC_DNEmode;
6026 break;
6028 case GTU:
6029 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6030 return CC_DGTUmode;
6031 if (cond2 == GEU)
6032 return CC_DGEUmode;
6033 if (cond2 == NE)
6034 return CC_DNEmode;
6035 break;
6037 /* The remaining cases only occur when both comparisons are the
6038 same. */
6039 case NE:
6040 return CC_DNEmode;
6042 case LE:
6043 return CC_DLEmode;
6045 case GE:
6046 return CC_DGEmode;
6048 case LEU:
6049 return CC_DLEUmode;
6051 case GEU:
6052 return CC_DGEUmode;
6054 default:
6055 break;
6058 abort ();
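/* A worked example of the selection above: for a test of the form
   (a == b || a <= b), combine passes X = (eq a b), Y = (le a b) with
   COND_OR == DOM_CC_X_OR_Y.  No reversal or swap is needed since EQ
   dominates LE (equality implies less-than-or-equal), so the switch
   reaches the EQ case with cond2 == LE and returns CC_DLEmode.  */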
6061 enum machine_mode
6062 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6064 /* All floating point compares return CCFP if it is an equality
6065 comparison, and CCFPE otherwise. */
6066 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6068 switch (op)
6070 case EQ:
6071 case NE:
6072 case UNORDERED:
6073 case ORDERED:
6074 case UNLT:
6075 case UNLE:
6076 case UNGT:
6077 case UNGE:
6078 case UNEQ:
6079 case LTGT:
6080 return CCFPmode;
6082 case LT:
6083 case LE:
6084 case GT:
6085 case GE:
6086 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6087 return CCFPmode;
6088 return CCFPEmode;
6090 default:
6091 abort ();
6095 /* A compare with a shifted operand. Because of canonicalization, the
6096 comparison will have to be swapped when we emit the assembler. */
6097 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6098 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6099 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6100 || GET_CODE (x) == ROTATERT))
6101 return CC_SWPmode;
6103 /* This operation is performed swapped, but since we only rely on the Z
6104 flag we don't need an additional mode. */
6105 if (GET_MODE (y) == SImode && REG_P (y)
6106 && GET_CODE (x) == NEG
6107 && (op == EQ || op == NE))
6108 return CC_Zmode;
6110 /* This is a special case that is used by combine to allow a
6111 comparison of a shifted byte load to be split into a zero-extend
6112 followed by a comparison of the shifted integer (only valid for
6113 equalities and unsigned inequalities). */
6114 if (GET_MODE (x) == SImode
6115 && GET_CODE (x) == ASHIFT
6116 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6117 && GET_CODE (XEXP (x, 0)) == SUBREG
6118 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6119 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6120 && (op == EQ || op == NE
6121 || op == GEU || op == GTU || op == LTU || op == LEU)
6122 && GET_CODE (y) == CONST_INT)
6123 return CC_Zmode;
6125 /* A construct for a conditional compare: if the false arm contains
6126 0, then both conditions must be true; otherwise either condition
6127 must be true. Not all conditions are possible, so CCmode is
6128 returned if it can't be done. */
6129 if (GET_CODE (x) == IF_THEN_ELSE
6130 && (XEXP (x, 2) == const0_rtx
6131 || XEXP (x, 2) == const1_rtx)
6132 && COMPARISON_P (XEXP (x, 0))
6133 && COMPARISON_P (XEXP (x, 1)))
6134 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6135 INTVAL (XEXP (x, 2)));
6137 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6138 if (GET_CODE (x) == AND
6139 && COMPARISON_P (XEXP (x, 0))
6140 && COMPARISON_P (XEXP (x, 1)))
6141 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6142 DOM_CC_X_AND_Y);
6144 if (GET_CODE (x) == IOR
6145 && COMPARISON_P (XEXP (x, 0))
6146 && COMPARISON_P (XEXP (x, 1)))
6147 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6148 DOM_CC_X_OR_Y);
6150 /* An operation (on Thumb) where we want to test for a single bit.
6151 This is done by shifting that bit up into the top bit of a
6152 scratch register; we can then branch on the sign bit. */
6153 if (TARGET_THUMB
6154 && GET_MODE (x) == SImode
6155 && (op == EQ || op == NE)
6156 && (GET_CODE (x) == ZERO_EXTRACT))
6157 return CC_Nmode;
6159 /* For an operation that sets the condition codes as a side-effect, the
6160 V flag is not set correctly, so we can only use comparisons where
6161 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6162 instead.) */
6163 if (GET_MODE (x) == SImode
6164 && y == const0_rtx
6165 && (op == EQ || op == NE || op == LT || op == GE)
6166 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6167 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6168 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6169 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6170 || GET_CODE (x) == LSHIFTRT
6171 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6172 || GET_CODE (x) == ROTATERT
6173 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6174 return CC_NOOVmode;
6176 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6177 return CC_Zmode;
6179 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6180 && GET_CODE (x) == PLUS
6181 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6182 return CC_Cmode;
6184 return CCmode;
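/* For example, an SImode test of (eq (plus r0 r1) (const_int 0)) hits
   the side-effect case above and yields CC_NOOVmode, while
   (ltu (plus r0 r1) r0) -- the usual carry-out test for an addition --
   matches the final PLUS check and yields CC_Cmode.  */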
6187 /* X and Y are two things to compare using CODE. Emit the compare insn and
6188 return the rtx for register 0 in the proper mode. FP means this is a
6189 floating point compare: I don't think that it is needed on the arm. */
6191 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6193 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6194 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6196 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6197 gen_rtx_COMPARE (mode, x, y)));
6199 return cc_reg;
6202 /* Generate a sequence of insns that will generate the correct return
6203 address mask depending on the physical architecture that the program
6204 is running on. */
6206 arm_gen_return_addr_mask (void)
6208 rtx reg = gen_reg_rtx (Pmode);
6210 emit_insn (gen_return_addr_mask (reg));
6211 return reg;
6214 void
6215 arm_reload_in_hi (rtx *operands)
6217 rtx ref = operands[1];
6218 rtx base, scratch;
6219 HOST_WIDE_INT offset = 0;
6221 if (GET_CODE (ref) == SUBREG)
6223 offset = SUBREG_BYTE (ref);
6224 ref = SUBREG_REG (ref);
6227 if (GET_CODE (ref) == REG)
6229 /* We have a pseudo which has been spilt onto the stack; there
6230 are two cases here: the first where there is a simple
6231 stack-slot replacement and a second where the stack-slot is
6232 out of range, or is used as a subreg. */
6233 if (reg_equiv_mem[REGNO (ref)])
6235 ref = reg_equiv_mem[REGNO (ref)];
6236 base = find_replacement (&XEXP (ref, 0));
6238 else
6239 /* The slot is out of range, or was dressed up in a SUBREG. */
6240 base = reg_equiv_address[REGNO (ref)];
6242 else
6243 base = find_replacement (&XEXP (ref, 0));
6245 /* Handle the case where the address is too complex to be offset by 1. */
6246 if (GET_CODE (base) == MINUS
6247 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6249 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6251 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6252 base = base_plus;
6254 else if (GET_CODE (base) == PLUS)
6256 /* The addend must be CONST_INT, or we would have dealt with it above. */
6257 HOST_WIDE_INT hi, lo;
6259 offset += INTVAL (XEXP (base, 1));
6260 base = XEXP (base, 0);
6262 /* Rework the address into a legal sequence of insns. */
6263 /* Valid range for lo is -4095 -> 4095.  */
6264 lo = (offset >= 0
6265 ? (offset & 0xfff)
6266 : -((-offset) & 0xfff));
6268 /* Corner case: if lo is the max offset, then we would be out of range
6269 once we have added the additional 1 below, so bump the msb into the
6270 pre-loading insn(s). */
6271 if (lo == 4095)
6272 lo &= 0x7ff;
6274 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6275 ^ (HOST_WIDE_INT) 0x80000000)
6276 - (HOST_WIDE_INT) 0x80000000);
6278 if (hi + lo != offset)
6279 abort ();
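/* For instance, offset 0x1234 splits as lo = 0x234, hi = 0x1000,
   and offset -4097 splits as lo = -1, hi = -4096; in both cases
   hi + lo reconstructs the original offset while lo stays inside
   the -4095..4095 addressing range.  */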
6281 if (hi != 0)
6283 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6285 /* Get the base address; addsi3 knows how to handle constants
6286 that require more than one insn. */
6287 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6288 base = base_plus;
6289 offset = lo;
6293 /* Operands[2] may overlap operands[0] (though it won't overlap
6294 operands[1]); that's why we asked for a DImode reg -- so we can
6295 use the half that does not overlap. */
6296 if (REGNO (operands[2]) == REGNO (operands[0]))
6297 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6298 else
6299 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6301 emit_insn (gen_zero_extendqisi2 (scratch,
6302 gen_rtx_MEM (QImode,
6303 plus_constant (base,
6304 offset))));
6305 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6306 gen_rtx_MEM (QImode,
6307 plus_constant (base,
6308 offset + 1))));
6309 if (!BYTES_BIG_ENDIAN)
6310 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6311 gen_rtx_IOR (SImode,
6312 gen_rtx_ASHIFT
6313 (SImode,
6314 gen_rtx_SUBREG (SImode, operands[0], 0),
6315 GEN_INT (8)),
6316 scratch)));
6317 else
6318 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6319 gen_rtx_IOR (SImode,
6320 gen_rtx_ASHIFT (SImode, scratch,
6321 GEN_INT (8)),
6322 gen_rtx_SUBREG (SImode, operands[0],
6323 0))));
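/* On a little-endian target the insns emitted above correspond to
   (register names purely illustrative):

       ldrb    scratch, [base, #offset]
       ldrb    out, [base, #offset + 1]
       orr     out, scratch, out, lsl #8

   i.e. two zero-extended byte loads recombined into a half-word.  */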
6326 /* Handle storing a half-word to memory during reload by synthesizing as two
6327 byte stores. Take care not to clobber the input values until after we
6328 have moved them somewhere safe. This code assumes that if the DImode
6329 scratch in operands[2] overlaps either the input value or output address
6330 in some way, then that value must die in this insn (we absolutely need
6331 two scratch registers for some corner cases). */
6332 void
6333 arm_reload_out_hi (rtx *operands)
6335 rtx ref = operands[0];
6336 rtx outval = operands[1];
6337 rtx base, scratch;
6338 HOST_WIDE_INT offset = 0;
6340 if (GET_CODE (ref) == SUBREG)
6342 offset = SUBREG_BYTE (ref);
6343 ref = SUBREG_REG (ref);
6346 if (GET_CODE (ref) == REG)
6348 /* We have a pseudo which has been spilt onto the stack; there
6349 are two cases here: the first where there is a simple
6350 stack-slot replacement and a second where the stack-slot is
6351 out of range, or is used as a subreg. */
6352 if (reg_equiv_mem[REGNO (ref)])
6354 ref = reg_equiv_mem[REGNO (ref)];
6355 base = find_replacement (&XEXP (ref, 0));
6357 else
6358 /* The slot is out of range, or was dressed up in a SUBREG. */
6359 base = reg_equiv_address[REGNO (ref)];
6361 else
6362 base = find_replacement (&XEXP (ref, 0));
6364 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6366 /* Handle the case where the address is too complex to be offset by 1. */
6367 if (GET_CODE (base) == MINUS
6368 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6370 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6372 /* Be careful not to destroy OUTVAL. */
6373 if (reg_overlap_mentioned_p (base_plus, outval))
6375 /* Updating base_plus might destroy outval; see if we can
6376 swap the scratch and base_plus. */
6377 if (!reg_overlap_mentioned_p (scratch, outval))
6379 rtx tmp = scratch;
6380 scratch = base_plus;
6381 base_plus = tmp;
6383 else
6385 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6387 /* Be conservative and copy OUTVAL into the scratch now;
6388 this should only be necessary if outval is a subreg
6389 of something larger than a word. */
6390 /* XXX Might this clobber base? I can't see how it can,
6391 since scratch is known to overlap with OUTVAL, and
6392 must be wider than a word. */
6393 emit_insn (gen_movhi (scratch_hi, outval));
6394 outval = scratch_hi;
6398 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6399 base = base_plus;
6401 else if (GET_CODE (base) == PLUS)
6403 /* The addend must be CONST_INT, or we would have dealt with it above. */
6404 HOST_WIDE_INT hi, lo;
6406 offset += INTVAL (XEXP (base, 1));
6407 base = XEXP (base, 0);
6409 /* Rework the address into a legal sequence of insns. */
6410 /* Valid range for lo is -4095 -> 4095.  */
6411 lo = (offset >= 0
6412 ? (offset & 0xfff)
6413 : -((-offset) & 0xfff));
6415 /* Corner case: if lo is the max offset, then we would be out of range
6416 once we have added the additional 1 below, so bump the msb into the
6417 pre-loading insn(s). */
6418 if (lo == 4095)
6419 lo &= 0x7ff;
6421 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6422 ^ (HOST_WIDE_INT) 0x80000000)
6423 - (HOST_WIDE_INT) 0x80000000);
6425 if (hi + lo != offset)
6426 abort ();
6428 if (hi != 0)
6430 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6432 /* Be careful not to destroy OUTVAL. */
6433 if (reg_overlap_mentioned_p (base_plus, outval))
6435 /* Updating base_plus might destroy outval; see if we
6436 can swap the scratch and base_plus. */
6437 if (!reg_overlap_mentioned_p (scratch, outval))
6439 rtx tmp = scratch;
6440 scratch = base_plus;
6441 base_plus = tmp;
6443 else
6445 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6447 /* Be conservative and copy outval into scratch now;
6448 this should only be necessary if outval is a
6449 subreg of something larger than a word. */
6450 /* XXX Might this clobber base? I can't see how it
6451 can, since scratch is known to overlap with
6452 outval. */
6453 emit_insn (gen_movhi (scratch_hi, outval));
6454 outval = scratch_hi;
6458 /* Get the base address; addsi3 knows how to handle constants
6459 that require more than one insn. */
6460 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6461 base = base_plus;
6462 offset = lo;
6466 if (BYTES_BIG_ENDIAN)
6468 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6469 plus_constant (base, offset + 1)),
6470 gen_lowpart (QImode, outval)));
6471 emit_insn (gen_lshrsi3 (scratch,
6472 gen_rtx_SUBREG (SImode, outval, 0),
6473 GEN_INT (8)));
6474 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6475 gen_lowpart (QImode, scratch)));
6477 else
6479 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6480 gen_lowpart (QImode, outval)));
6481 emit_insn (gen_lshrsi3 (scratch,
6482 gen_rtx_SUBREG (SImode, outval, 0),
6483 GEN_INT (8)));
6484 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6485 plus_constant (base, offset + 1)),
6486 gen_lowpart (QImode, scratch)));
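/* The little-endian arm above synthesizes the half-word store as,
   roughly:

       strb    outval, [base, #offset]
       mov     scratch, outval, lsr #8
       strb    scratch, [base, #offset + 1]

   with the big-endian arm swapping which byte goes to which address.  */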
6490 /* Print a symbolic form of X to the debug file, F. */
6491 static void
6492 arm_print_value (FILE *f, rtx x)
6494 switch (GET_CODE (x))
6496 case CONST_INT:
6497 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6498 return;
6500 case CONST_DOUBLE:
6501 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6502 return;
6504 case CONST_VECTOR:
6506 int i;
6508 fprintf (f, "<");
6509 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6511 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6512 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6513 fputc (',', f);
6515 fprintf (f, ">");
6517 return;
6519 case CONST_STRING:
6520 fprintf (f, "\"%s\"", XSTR (x, 0));
6521 return;
6523 case SYMBOL_REF:
6524 fprintf (f, "`%s'", XSTR (x, 0));
6525 return;
6527 case LABEL_REF:
6528 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6529 return;
6531 case CONST:
6532 arm_print_value (f, XEXP (x, 0));
6533 return;
6535 case PLUS:
6536 arm_print_value (f, XEXP (x, 0));
6537 fprintf (f, "+");
6538 arm_print_value (f, XEXP (x, 1));
6539 return;
6541 case PC:
6542 fprintf (f, "pc");
6543 return;
6545 default:
6546 fprintf (f, "????");
6547 return;
6551 /* Routines for manipulation of the constant pool. */
6553 /* Arm instructions cannot load a large constant directly into a
6554 register; they have to come from a pc relative load. The constant
6555 must therefore be placed in the addressable range of the pc
6556 relative load. Depending on the precise pc relative load
6557 instruction the range is somewhere between 256 bytes and 4k. This
6558 means that we often have to dump a constant inside a function, and
6559 generate code to branch around it.
6561 It is important to minimize this, since the branches will slow
6562 things down and make the code larger.
6564 Normally we can hide the table after an existing unconditional
6565 branch so that there is no interruption of the flow, but in the
6566 worst case the code looks like this:
6568 ldr rn, L1
6570 b L2
6571 align
6572 L1: .long value
6576 ldr rn, L3
6578 b L4
6579 align
6580 L3: .long value
6584 We fix this by performing a scan after scheduling, which notices
6585 which instructions need to have their operands fetched from the
6586 constant table and builds the table.
6588 The algorithm starts by building a table of all the constants that
6589 need fixing up and all the natural barriers in the function (places
6590 where a constant table can be dropped without breaking the flow).
6591 For each fixup we note how far the pc-relative replacement will be
6592 able to reach and the offset of the instruction into the function.
6594 Having built the table we then group the fixes together to form
6595 tables that are as large as possible (subject to addressing
6596 constraints) and emit each table of constants after the last
6597 barrier that is within range of all the instructions in the group.
6598 If a group does not contain a barrier, then we forcibly create one
6599 by inserting a jump instruction into the flow. Once the table has
6600 been inserted, the insns are then modified to reference the
6601 relevant entry in the pool.
6603 Possible enhancements to the algorithm (not implemented) are:
6605 1) For some processors and object formats, there may be benefit in
6606 aligning the pools to the start of cache lines; this alignment
6607 would need to be taken into account when calculating addressability
6608 of a pool. */
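/* As a concrete example, an insn whose pool range is, say, 1020 bytes
   produces a fix with forwards = 1020; if a natural barrier occurs 600
   bytes later the constant is dumped there, otherwise a branch around
   a forced barrier is inserted somewhere within the 1020-byte window.  */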
6610 /* These typedefs are located at the start of this file, so that
6611 they can be used in the prototypes there. This comment is to
6612 remind readers of that fact so that the following structures
6613 can be understood more easily.
6615 typedef struct minipool_node Mnode;
6616 typedef struct minipool_fixup Mfix; */
6618 struct minipool_node
6620 /* Doubly linked chain of entries. */
6621 Mnode * next;
6622 Mnode * prev;
6623 /* The maximum offset into the code that this entry can be placed. While
6624 pushing fixes for forward references, all entries are sorted in order
6625 of increasing max_address. */
6626 HOST_WIDE_INT max_address;
6627 /* Similarly for an entry inserted for a backwards ref. */
6628 HOST_WIDE_INT min_address;
6629 /* The number of fixes referencing this entry. This can become zero
6630 if we "unpush" an entry. In this case we ignore the entry when we
6631 come to emit the code. */
6632 int refcount;
6633 /* The offset from the start of the minipool. */
6634 HOST_WIDE_INT offset;
6635 /* The value in the table. */
6636 rtx value;
6637 /* The mode of value. */
6638 enum machine_mode mode;
6639 /* The size of the value. With iWMMXt enabled,
6640 sizes > 4 also imply an alignment of 8 bytes. */
6641 int fix_size;
6644 struct minipool_fixup
6646 Mfix * next;
6647 rtx insn;
6648 HOST_WIDE_INT address;
6649 rtx * loc;
6650 enum machine_mode mode;
6651 int fix_size;
6652 rtx value;
6653 Mnode * minipool;
6654 HOST_WIDE_INT forwards;
6655 HOST_WIDE_INT backwards;
6658 /* Fixes less than a word need padding out to a word boundary. */
6659 #define MINIPOOL_FIX_SIZE(mode) \
6660 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
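/* Hence MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (HImode) are
   both 4 (padded up to a word), while SImode stays 4 and DImode or
   DFmode stay 8.  */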
6662 static Mnode * minipool_vector_head;
6663 static Mnode * minipool_vector_tail;
6664 static rtx minipool_vector_label;
6666 /* The linked list of all minipool fixes required for this function. */
6667 Mfix * minipool_fix_head;
6668 Mfix * minipool_fix_tail;
6669 /* The fix entry for the current minipool, once it has been placed. */
6670 Mfix * minipool_barrier;
6672 /* Determines if INSN is the start of a jump table. Returns the end
6673 of the TABLE or NULL_RTX. */
6674 static rtx
6675 is_jump_table (rtx insn)
6677 rtx table;
6679 if (GET_CODE (insn) == JUMP_INSN
6680 && JUMP_LABEL (insn) != NULL
6681 && ((table = next_real_insn (JUMP_LABEL (insn)))
6682 == next_real_insn (insn))
6683 && table != NULL
6684 && GET_CODE (table) == JUMP_INSN
6685 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6686 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6687 return table;
6689 return NULL_RTX;
6692 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6693 #define JUMP_TABLES_IN_TEXT_SECTION 0
6694 #endif
6696 static HOST_WIDE_INT
6697 get_jump_table_size (rtx insn)
6699 /* ADDR_VECs only take room if read-only data goes into the text
6700 section. */
6701 if (JUMP_TABLES_IN_TEXT_SECTION
6702 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6703 || 1
6704 #endif
6707 rtx body = PATTERN (insn);
6708 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6710 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6713 return 0;
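/* E.g. an ADDR_DIFF_VEC of 10 SImode offsets accounts for 4 * 10 = 40
   bytes when jump tables live in the text section, and for nothing at
   all when they are placed in a read-only data section.  */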
6716 /* Move a minipool fix MP from its current location to before MAX_MP.
6717 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6718 constraints may need updating. */
6719 static Mnode *
6720 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6721 HOST_WIDE_INT max_address)
6723 /* This should never be true and the code below assumes these are
6724 different. */
6725 if (mp == max_mp)
6726 abort ();
6728 if (max_mp == NULL)
6730 if (max_address < mp->max_address)
6731 mp->max_address = max_address;
6733 else
6735 if (max_address > max_mp->max_address - mp->fix_size)
6736 mp->max_address = max_mp->max_address - mp->fix_size;
6737 else
6738 mp->max_address = max_address;
6740 /* Unlink MP from its current position. Since max_mp is non-null,
6741 mp->prev must be non-null. */
6742 mp->prev->next = mp->next;
6743 if (mp->next != NULL)
6744 mp->next->prev = mp->prev;
6745 else
6746 minipool_vector_tail = mp->prev;
6748 /* Re-insert it before MAX_MP. */
6749 mp->next = max_mp;
6750 mp->prev = max_mp->prev;
6751 max_mp->prev = mp;
6753 if (mp->prev != NULL)
6754 mp->prev->next = mp;
6755 else
6756 minipool_vector_head = mp;
6759 /* Save the new entry. */
6760 max_mp = mp;
6762 /* Scan over the preceding entries and adjust their addresses as
6763 required. */
6764 while (mp->prev != NULL
6765 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6767 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6768 mp = mp->prev;
6771 return max_mp;
6774 /* Add a constant to the minipool for a forward reference. Returns the
6775 node added or NULL if the constant will not fit in this pool. */
6776 static Mnode *
6777 add_minipool_forward_ref (Mfix *fix)
6779 /* If set, max_mp is the first pool_entry that has a lower
6780 constraint than the one we are trying to add. */
6781 Mnode * max_mp = NULL;
6782 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6783 Mnode * mp;
6785 /* If this fix's address is greater than the address of the first
6786 entry, then we can't put the fix in this pool. We subtract the
6787 size of the current fix to ensure that if the table is fully
6788 packed we still have enough room to insert this value by shuffling
6789 the other fixes forwards. */
6790 if (minipool_vector_head &&
6791 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6792 return NULL;
6794 /* Scan the pool to see if a constant with the same value has
6795 already been added. While we are doing this, also note the
6796 location where we must insert the constant if it doesn't already
6797 exist. */
6798 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6800 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6801 && fix->mode == mp->mode
6802 && (GET_CODE (fix->value) != CODE_LABEL
6803 || (CODE_LABEL_NUMBER (fix->value)
6804 == CODE_LABEL_NUMBER (mp->value)))
6805 && rtx_equal_p (fix->value, mp->value))
6807 /* More than one fix references this entry. */
6808 mp->refcount++;
6809 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6812 /* Note the insertion point if necessary. */
6813 if (max_mp == NULL
6814 && mp->max_address > max_address)
6815 max_mp = mp;
6817 /* If we are inserting an 8-byte aligned quantity and
6818 we have not already found an insertion point, then
6819 make sure that all such 8-byte aligned quantities are
6820 placed at the start of the pool. */
6821 if (ARM_DOUBLEWORD_ALIGN
6822 && max_mp == NULL
6823 && fix->fix_size == 8
6824 && mp->fix_size != 8)
6826 max_mp = mp;
6827 max_address = mp->max_address;
6831 /* The value is not currently in the minipool, so we need to create
6832 a new entry for it. If MAX_MP is NULL, the entry will be put on
6833 the end of the list since the placement is less constrained than
6834 any existing entry. Otherwise, we insert the new fix before
6835 MAX_MP and, if necessary, adjust the constraints on the other
6836 entries. */
6837 mp = xmalloc (sizeof (* mp));
6838 mp->fix_size = fix->fix_size;
6839 mp->mode = fix->mode;
6840 mp->value = fix->value;
6841 mp->refcount = 1;
6842 /* Not yet required for a backwards ref. */
6843 mp->min_address = -65536;
6845 if (max_mp == NULL)
6847 mp->max_address = max_address;
6848 mp->next = NULL;
6849 mp->prev = minipool_vector_tail;
6851 if (mp->prev == NULL)
6853 minipool_vector_head = mp;
6854 minipool_vector_label = gen_label_rtx ();
6856 else
6857 mp->prev->next = mp;
6859 minipool_vector_tail = mp;
6861 else
6863 if (max_address > max_mp->max_address - mp->fix_size)
6864 mp->max_address = max_mp->max_address - mp->fix_size;
6865 else
6866 mp->max_address = max_address;
6868 mp->next = max_mp;
6869 mp->prev = max_mp->prev;
6870 max_mp->prev = mp;
6871 if (mp->prev != NULL)
6872 mp->prev->next = mp;
6873 else
6874 minipool_vector_head = mp;
6877 /* Save the new entry. */
6878 max_mp = mp;
6880 /* Scan over the preceding entries and adjust their addresses as
6881 required. */
6882 while (mp->prev != NULL
6883 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6885 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6886 mp = mp->prev;
6889 return max_mp;
6892 static Mnode *
6893 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6894 HOST_WIDE_INT min_address)
6896 HOST_WIDE_INT offset;
6898 /* This should never be true, and the code below assumes these are
6899 different. */
6900 if (mp == min_mp)
6901 abort ();
6903 if (min_mp == NULL)
6905 if (min_address > mp->min_address)
6906 mp->min_address = min_address;
6908 else
6910 /* We will adjust this below if it is too loose. */
6911 mp->min_address = min_address;
6913 /* Unlink MP from its current position. Since min_mp is non-null,
6914 mp->next must be non-null. */
6915 mp->next->prev = mp->prev;
6916 if (mp->prev != NULL)
6917 mp->prev->next = mp->next;
6918 else
6919 minipool_vector_head = mp->next;
6921 /* Reinsert it after MIN_MP. */
6922 mp->prev = min_mp;
6923 mp->next = min_mp->next;
6924 min_mp->next = mp;
6925 if (mp->next != NULL)
6926 mp->next->prev = mp;
6927 else
6928 minipool_vector_tail = mp;
6931 min_mp = mp;
6933 offset = 0;
6934 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6936 mp->offset = offset;
6937 if (mp->refcount > 0)
6938 offset += mp->fix_size;
6940 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6941 mp->next->min_address = mp->min_address + mp->fix_size;
6944 return min_mp;
6947 /* Add a constant to the minipool for a backward reference. Returns the
6948 node added or NULL if the constant will not fit in this pool.
6950 Note that the code for insertion for a backwards reference can be
6951 somewhat confusing because the calculated offsets for each fix do
6952 not take into account the size of the pool (which is still under
6953 construction). */
6954 static Mnode *
6955 add_minipool_backward_ref (Mfix *fix)
6957 /* If set, min_mp is the last pool_entry that has a lower constraint
6958 than the one we are trying to add. */
6959 Mnode *min_mp = NULL;
6960 /* This can be negative, since it is only a constraint. */
6961 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6962 Mnode *mp;
6964 /* If we can't reach the current pool from this insn, or if we can't
6965 insert this entry at the end of the pool without pushing other
6966 fixes out of range, then we don't try. This ensures that we
6967 can't fail later on. */
6968 if (min_address >= minipool_barrier->address
6969 || (minipool_vector_tail->min_address + fix->fix_size
6970 >= minipool_barrier->address))
6971 return NULL;
6973 /* Scan the pool to see if a constant with the same value has
6974 already been added. While we are doing this, also note the
6975 location where we must insert the constant if it doesn't already
6976 exist. */
6977 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6979 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6980 && fix->mode == mp->mode
6981 && (GET_CODE (fix->value) != CODE_LABEL
6982 || (CODE_LABEL_NUMBER (fix->value)
6983 == CODE_LABEL_NUMBER (mp->value)))
6984 && rtx_equal_p (fix->value, mp->value)
6985 /* Check that there is enough slack to move this entry to the
6986 end of the table (this is conservative). */
6987 && (mp->max_address
6988 > (minipool_barrier->address
6989 + minipool_vector_tail->offset
6990 + minipool_vector_tail->fix_size)))
6992 mp->refcount++;
6993 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6996 if (min_mp != NULL)
6997 mp->min_address += fix->fix_size;
6998 else
7000 /* Note the insertion point if necessary. */
7001 if (mp->min_address < min_address)
7003 /* For now, we do not allow the insertion of nodes requiring
7004 8-byte alignment anywhere but at the start of the pool. */
7005 if (ARM_DOUBLEWORD_ALIGN
7006 && fix->fix_size == 8 && mp->fix_size != 8)
7007 return NULL;
7008 else
7009 min_mp = mp;
7011 else if (mp->max_address
7012 < minipool_barrier->address + mp->offset + fix->fix_size)
7014 /* Inserting before this entry would push the fix beyond
7015 its maximum address (which can happen if we have
7016 re-located a forwards fix); force the new fix to come
7017 after it. */
7018 min_mp = mp;
7019 min_address = mp->min_address + fix->fix_size;
7021 /* If we are inserting an 8-byte aligned quantity and
7022 we have not already found an insertion point, then
7023 make sure that all such 8-byte aligned quantities are
7024 placed at the start of the pool. */
7025 else if (ARM_DOUBLEWORD_ALIGN
7026 && min_mp == NULL
7027 && fix->fix_size == 8
7028 && mp->fix_size < 8)
7030 min_mp = mp;
7031 min_address = mp->min_address + fix->fix_size;
7036 /* We need to create a new entry. */
7037 mp = xmalloc (sizeof (* mp));
7038 mp->fix_size = fix->fix_size;
7039 mp->mode = fix->mode;
7040 mp->value = fix->value;
7041 mp->refcount = 1;
7042 mp->max_address = minipool_barrier->address + 65536;
7044 mp->min_address = min_address;
7046 if (min_mp == NULL)
7048 mp->prev = NULL;
7049 mp->next = minipool_vector_head;
7051 if (mp->next == NULL)
7053 minipool_vector_tail = mp;
7054 minipool_vector_label = gen_label_rtx ();
7056 else
7057 mp->next->prev = mp;
7059 minipool_vector_head = mp;
7061 else
7063 mp->next = min_mp->next;
7064 mp->prev = min_mp;
7065 min_mp->next = mp;
7067 if (mp->next != NULL)
7068 mp->next->prev = mp;
7069 else
7070 minipool_vector_tail = mp;
7073 /* Save the new entry. */
7074 min_mp = mp;
7076 if (mp->prev)
7077 mp = mp->prev;
7078 else
7079 mp->offset = 0;
7081 /* Scan over the following entries and adjust their offsets. */
7082 while (mp->next != NULL)
7084 if (mp->next->min_address < mp->min_address + mp->fix_size)
7085 mp->next->min_address = mp->min_address + mp->fix_size;
7087 if (mp->refcount)
7088 mp->next->offset = mp->offset + mp->fix_size;
7089 else
7090 mp->next->offset = mp->offset;
7092 mp = mp->next;
7095 return min_mp;
7098 static void
7099 assign_minipool_offsets (Mfix *barrier)
7101 HOST_WIDE_INT offset = 0;
7102 Mnode *mp;
7104 minipool_barrier = barrier;
7106 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7108 mp->offset = offset;
7110 if (mp->refcount > 0)
7111 offset += mp->fix_size;
7115 /* Output the literal table.  */
7116 static void
7117 dump_minipool (rtx scan)
7119 Mnode * mp;
7120 Mnode * nmp;
7121 int align64 = 0;
7123 if (ARM_DOUBLEWORD_ALIGN)
7124 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7125 if (mp->refcount > 0 && mp->fix_size == 8)
7127 align64 = 1;
7128 break;
7131 if (dump_file)
7132 fprintf (dump_file,
7133 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7134 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7136 scan = emit_label_after (gen_label_rtx (), scan);
7137 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7138 scan = emit_label_after (minipool_vector_label, scan);
7140 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7142 if (mp->refcount > 0)
7144 if (dump_file)
7146 fprintf (dump_file,
7147 ";; Offset %u, min %ld, max %ld ",
7148 (unsigned) mp->offset, (unsigned long) mp->min_address,
7149 (unsigned long) mp->max_address);
7150 arm_print_value (dump_file, mp->value);
7151 fputc ('\n', dump_file);
7154 switch (mp->fix_size)
7156 #ifdef HAVE_consttable_1
7157 case 1:
7158 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7159 break;
7161 #endif
7162 #ifdef HAVE_consttable_2
7163 case 2:
7164 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7165 break;
7167 #endif
7168 #ifdef HAVE_consttable_4
7169 case 4:
7170 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7171 break;
7173 #endif
7174 #ifdef HAVE_consttable_8
7175 case 8:
7176 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7177 break;
7179 #endif
7180 default:
7181 abort ();
7182 break;
7186 nmp = mp->next;
7187 free (mp);
7190 minipool_vector_head = minipool_vector_tail = NULL;
7191 scan = emit_insn_after (gen_consttable_end (), scan);
7192 scan = emit_barrier_after (scan);
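/* The emitted pool looks roughly like this in the output (the label
   is invented for illustration):

       .align  2
   .LCP0:
       .word   0x12345678      @ a consttable_4 entry
       .word   0xdeadbeef      @ another consttable_4 entry

   followed by a barrier so no fall-through execution can reach it.  */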
7195 /* Return the cost of forcibly inserting a barrier after INSN. */
7196 static int
7197 arm_barrier_cost (rtx insn)
7199 /* Basing the location of the pool on the loop depth is preferable,
7200 but at the moment, the basic block information seems to be
7201 corrupt by this stage of the compilation. */
7202 int base_cost = 50;
7203 rtx next = next_nonnote_insn (insn);
7205 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7206 base_cost -= 20;
7208 switch (GET_CODE (insn))
7210 case CODE_LABEL:
7211 /* It will always be better to place the table before the label, rather
7212 than after it. */
7213 return 50;
7215 case INSN:
7216 case CALL_INSN:
7217 return base_cost;
7219 case JUMP_INSN:
7220 return base_cost - 10;
7222 default:
7223 return base_cost + 10;
7227 /* Find the best place in the insn stream in the range
7228 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7229 Create the barrier by inserting a jump and add a new fix entry for
7230 it. */
7231 static Mfix *
7232 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7234 HOST_WIDE_INT count = 0;
7235 rtx barrier;
7236 rtx from = fix->insn;
7237 rtx selected = from;
7238 int selected_cost;
7239 HOST_WIDE_INT selected_address;
7240 Mfix * new_fix;
7241 HOST_WIDE_INT max_count = max_address - fix->address;
7242 rtx label = gen_label_rtx ();
7244 selected_cost = arm_barrier_cost (from);
7245 selected_address = fix->address;
7247 while (from && count < max_count)
7249 rtx tmp;
7250 int new_cost;
7252 /* This code shouldn't have been called if there was a natural barrier
7253 within range. */
7254 if (GET_CODE (from) == BARRIER)
7255 abort ();
7257 /* Count the length of this insn. */
7258 count += get_attr_length (from);
7260 /* If there is a jump table, add its length. */
7261 tmp = is_jump_table (from);
7262 if (tmp != NULL)
7264 count += get_jump_table_size (tmp);
7266 /* Jump tables aren't in a basic block, so base the cost on
7267 the dispatch insn. If we select this location, we will
7268 still put the pool after the table. */
7269 new_cost = arm_barrier_cost (from);
7271 if (count < max_count && new_cost <= selected_cost)
7273 selected = tmp;
7274 selected_cost = new_cost;
7275 selected_address = fix->address + count;
7278 /* Continue after the dispatch table. */
7279 from = NEXT_INSN (tmp);
7280 continue;
7283 new_cost = arm_barrier_cost (from);
7285 if (count < max_count && new_cost <= selected_cost)
7287 selected = from;
7288 selected_cost = new_cost;
7289 selected_address = fix->address + count;
7292 from = NEXT_INSN (from);
7295 /* Create a new JUMP_INSN that branches around a barrier. */
7296 from = emit_jump_insn_after (gen_jump (label), selected);
7297 JUMP_LABEL (from) = label;
7298 barrier = emit_barrier_after (from);
7299 emit_label_after (label, barrier);
7301 /* Create a minipool barrier entry for the new barrier. */
7302 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7303 new_fix->insn = barrier;
7304 new_fix->address = selected_address;
7305 new_fix->next = fix->next;
7306 fix->next = new_fix;
7308 return new_fix;
7311 /* Record that there is a natural barrier in the insn stream at
7312 ADDRESS. */
7313 static void
7314 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7316 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7318 fix->insn = insn;
7319 fix->address = address;
7321 fix->next = NULL;
7322 if (minipool_fix_head != NULL)
7323 minipool_fix_tail->next = fix;
7324 else
7325 minipool_fix_head = fix;
7327 minipool_fix_tail = fix;
7330 /* Record INSN, which will need fixing up to load a value from the
7331 minipool. ADDRESS is the offset of the insn since the start of the
7332 function; LOC is a pointer to the part of the insn which requires
7333 fixing; VALUE is the constant that must be loaded, which is of type
7334 MODE. */
7335 static void
7336 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7337 enum machine_mode mode, rtx value)
7339 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7341 #ifdef AOF_ASSEMBLER
7342 /* PIC symbol references need to be converted into offsets into the
7343 based area. */
7344 /* XXX This shouldn't be done here. */
7345 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7346 value = aof_pic_entry (value);
7347 #endif /* AOF_ASSEMBLER */
7349 fix->insn = insn;
7350 fix->address = address;
7351 fix->loc = loc;
7352 fix->mode = mode;
7353 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7354 fix->value = value;
7355 fix->forwards = get_attr_pool_range (insn);
7356 fix->backwards = get_attr_neg_pool_range (insn);
7357 fix->minipool = NULL;
7359 /* If an insn doesn't have a range defined for it, then it isn't
7360 expecting to be reworked by this code. Better to abort now than
7361 to generate duff assembly code. */
7362 if (fix->forwards == 0 && fix->backwards == 0)
7363 abort ();
7365 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7366 So there might be an empty word before the start of the pool.
7367 Hence we reduce the forward range by 4 to allow for this
7368 possibility. */
7369 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7370 fix->forwards -= 4;
7372 if (dump_file)
7374 fprintf (dump_file,
7375 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7376 GET_MODE_NAME (mode),
7377 INSN_UID (insn), (unsigned long) address,
7378 -1 * (long)fix->backwards, (long)fix->forwards);
7379 arm_print_value (dump_file, fix->value);
7380 fprintf (dump_file, "\n");
7383 /* Add it to the chain of fixes. */
7384 fix->next = NULL;
7386 if (minipool_fix_head != NULL)
7387 minipool_fix_tail->next = fix;
7388 else
7389 minipool_fix_head = fix;
7391 minipool_fix_tail = fix;
7394 /* Return the cost of synthesizing the const_double VAL inline.
7395 Returns the number of insns needed, or 99 if we don't know how to
7396 do it. */
7398 arm_const_double_inline_cost (rtx val)
7400 long parts[2];
7402 if (GET_MODE (val) == DFmode)
7404 REAL_VALUE_TYPE r;
7405 if (!TARGET_SOFT_FLOAT)
7406 return 99;
7407 REAL_VALUE_FROM_CONST_DOUBLE (r, val);
7408 REAL_VALUE_TO_TARGET_DOUBLE (r, parts);
7410 else if (GET_MODE (val) != VOIDmode)
7411 return 99;
7412 else
7414 parts[0] = CONST_DOUBLE_LOW (val);
7415 parts[1] = CONST_DOUBLE_HIGH (val);
7418 return (arm_gen_constant (SET, SImode, NULL_RTX, parts[0],
7419 NULL_RTX, NULL_RTX, 0, 0)
7420 + arm_gen_constant (SET, SImode, NULL_RTX, parts[1],
7421 NULL_RTX, NULL_RTX, 0, 0));
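/* For example, a VOIDmode CONST_DOUBLE of zero costs 1 + 1 = 2 (one
   mov per word), while a value whose halves each need a mov plus
   three orr's costs 8 and would normally go to the minipool under
   the threshold applied below.  */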
7424 /* Determine if a CONST_DOUBLE should be pushed to the minipool.  */
7425 static bool
7426 const_double_needs_minipool (rtx val)
7428 /* Thumb only knows how to load a CONST_DOUBLE from memory at the moment. */
7429 if (TARGET_THUMB)
7430 return true;
7432 /* Don't push anything to the minipool if a CONST_DOUBLE can be built with
7433 a few ALU insns directly. On balance, the optimum is likely to be around
7434 3 insns, except when there are no load delay slots, where it should be 4.
7435 When optimizing for size, a limit of 3 allows saving at least one word
7436 except for cases where a single minipool entry could be shared more than
7437 2 times, which is rather unlikely to outweigh the overall savings. */
7438 return (arm_const_double_inline_cost (val)
7439 > ((optimize_size || arm_ld_sched) ? 3 : 4));
7442 /* Scan INSN and note any of its operands that need fixing.
7443 If DO_PUSHES is false we do not actually push any of the fixups
7444 needed. The function returns TRUE if any fixups were needed/pushed.
7445 This is used by arm_memory_load_p() which needs to know about loads
7446 of constants that will be converted into minipool loads. */
7447 static bool
7448 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7450 bool result = false;
7451 int opno;
7453 extract_insn (insn);
7455 if (!constrain_operands (1))
7456 fatal_insn_not_found (insn);
7458 if (recog_data.n_alternatives == 0)
7459 return false;
7461 /* Fill in recog_op_alt with information about the constraints of this insn. */
7462 preprocess_constraints ();
7464 for (opno = 0; opno < recog_data.n_operands; opno++)
7466 /* Things we need to fix can only occur in inputs. */
7467 if (recog_data.operand_type[opno] != OP_IN)
7468 continue;
7470 /* If this alternative is a memory reference, then any mention
7471 of constants in this alternative is really to fool reload
7472 into allowing us to accept one there. We need to fix them up
7473 now so that we output the right code. */
7474 if (recog_op_alt[opno][which_alternative].memory_ok)
7476 rtx op = recog_data.operand[opno];
7478 if (CONSTANT_P (op)
7479 && (GET_CODE (op) != CONST_DOUBLE
7480 || const_double_needs_minipool (op)))
7482 if (do_pushes)
7483 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7484 recog_data.operand_mode[opno], op);
7485 result = true;
7487 else if (GET_CODE (op) == MEM
7488 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7489 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7491 if (do_pushes)
7493 rtx cop = avoid_constant_pool_reference (op);
7495 /* Casting the address of something to a mode narrower
7496 than a word can cause avoid_constant_pool_reference()
7497 to return the pool reference itself. That's no good to
7498 us here. Let's just hope that we can use the
7499 constant pool value directly. */
7500 if (op == cop)
7501 cop = get_pool_constant (XEXP (op, 0));
7503 push_minipool_fix (insn, address,
7504 recog_data.operand_loc[opno],
7505 recog_data.operand_mode[opno], cop);
7508 result = true;
7513 return result;
7516 /* GCC puts the pool in the wrong place for ARM, since we can only
7517 load addresses a limited distance around the pc. We do some
7518 special munging to move the constant pool values to the correct
7519 point in the code. */
7520 static void
7521 arm_reorg (void)
7523 rtx insn;
7524 HOST_WIDE_INT address = 0;
7525 Mfix * fix;
7527 minipool_fix_head = minipool_fix_tail = NULL;
7529 /* The first insn must always be a note, or the code below won't
7530 scan it properly. */
7531 insn = get_insns ();
7532 if (GET_CODE (insn) != NOTE)
7533 abort ();
7535 /* Scan all the insns and record the operands that will need fixing. */
7536 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7538 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7539 && (arm_cirrus_insn_p (insn)
7540 || GET_CODE (insn) == JUMP_INSN
7541 || arm_memory_load_p (insn)))
7542 cirrus_reorg (insn);
7544 if (GET_CODE (insn) == BARRIER)
7545 push_minipool_barrier (insn, address);
7546 else if (INSN_P (insn))
7548 rtx table;
7550 note_invalid_constants (insn, address, true);
7551 address += get_attr_length (insn);
7553 /* If the insn is a vector jump, add the size of the table
7554 and skip the table. */
7555 if ((table = is_jump_table (insn)) != NULL)
7557 address += get_jump_table_size (table);
7558 insn = table;
7563 fix = minipool_fix_head;
7565 /* Now scan the fixups and perform the required changes. */
7566 while (fix)
7568 Mfix * ftmp;
7569 Mfix * fdel;
7570 Mfix * last_added_fix;
7571 Mfix * last_barrier = NULL;
7572 Mfix * this_fix;
7574 /* Skip any further barriers before the next fix. */
7575 while (fix && GET_CODE (fix->insn) == BARRIER)
7576 fix = fix->next;
7578 /* No more fixes. */
7579 if (fix == NULL)
7580 break;
7582 last_added_fix = NULL;
7584 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7586 if (GET_CODE (ftmp->insn) == BARRIER)
7588 if (ftmp->address >= minipool_vector_head->max_address)
7589 break;
7591 last_barrier = ftmp;
7593 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7594 break;
7596 last_added_fix = ftmp; /* Keep track of the last fix added. */
7599 /* If we found a barrier, drop back to that; any fixes that we
7600 could have reached but come after the barrier will now go in
7601 the next mini-pool. */
7602 if (last_barrier != NULL)
7604 /* Reduce the refcount for those fixes that won't go into this
7605 pool after all. */
7606 for (fdel = last_barrier->next;
7607 fdel && fdel != ftmp;
7608 fdel = fdel->next)
7610 fdel->minipool->refcount--;
7611 fdel->minipool = NULL;
7614 ftmp = last_barrier;
7616 else
7618 /* ftmp is the first fix that we can't fit into this pool and
7619 there are no natural barriers that we could use. Insert a
7620 new barrier in the code somewhere between the previous
7621 fix and this one, and arrange to jump around it. */
7622 HOST_WIDE_INT max_address;
7624 /* The last item on the list of fixes must be a barrier, so
7625 we can never run off the end of the list of fixes without
7626 last_barrier being set. */
7627 if (ftmp == NULL)
7628 abort ();
7630 max_address = minipool_vector_head->max_address;
7631 /* Check that there isn't another fix that is in range that
7632 we couldn't fit into this pool because the pool was
7633 already too large: we need to put the pool before such an
7634 instruction. */
7635 if (ftmp->address < max_address)
7636 max_address = ftmp->address;
7638 last_barrier = create_fix_barrier (last_added_fix, max_address);
7641 assign_minipool_offsets (last_barrier);
7643 while (ftmp)
7645 if (GET_CODE (ftmp->insn) != BARRIER
7646 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7647 == NULL))
7648 break;
7650 ftmp = ftmp->next;
7653 /* Scan over the fixes we have identified for this pool, fixing them
7654 up and adding the constants to the pool itself. */
7655 for (this_fix = fix; this_fix && ftmp != this_fix;
7656 this_fix = this_fix->next)
7657 if (GET_CODE (this_fix->insn) != BARRIER)
7659 rtx addr
7660 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7661 minipool_vector_label),
7662 this_fix->minipool->offset);
7663 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7666 dump_minipool (last_barrier->insn);
7667 fix = ftmp;
7670 /* From now on we must synthesize any constants that we can't handle
7671 directly. This can happen if the RTL gets split during final
7672 instruction generation. */
7673 after_arm_reorg = 1;
7675 /* Free the minipool memory. */
7676 obstack_free (&minipool_obstack, minipool_startobj);
7679 /* Routines to output assembly language. */
7681 /* If the rtx is the correct value then return the string of the number.
7682 In this way we can ensure that valid double constants are generated even
7683 when cross compiling. */
7684 const char *
7685 fp_immediate_constant (rtx x)
7687 REAL_VALUE_TYPE r;
7688 int i;
7690 if (!fp_consts_inited)
7691 init_fp_table ();
7693 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7694 for (i = 0; i < 8; i++)
7695 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7696 return strings_fp[i];
7698 abort ();
7701 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7702 static const char *
7703 fp_const_from_val (REAL_VALUE_TYPE *r)
7705 int i;
7707 if (!fp_consts_inited)
7708 init_fp_table ();
7710 for (i = 0; i < 8; i++)
7711 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7712 return strings_fp[i];
7714 abort ();
7717 /* Output the operands of a LDM/STM instruction to STREAM.
7718 MASK is the ARM register set mask of which only bits 0-15 are important.
7719 REG is the base register, either the frame pointer or the stack pointer;
7720 INSTR is the possibly suffixed load or store instruction. */
7722 static void
7723 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7724 unsigned long mask)
7726 unsigned i;
7727 bool not_first = FALSE;
7729 fputc ('\t', stream);
7730 asm_fprintf (stream, instr, reg);
7731 fputs (", {", stream);
7733 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7734 if (mask & (1 << i))
7736 if (not_first)
7737 fprintf (stream, ", ");
7739 asm_fprintf (stream, "%r", i);
7740 not_first = TRUE;
7743 fprintf (stream, "}\n");
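/* For example, with INSTR "stmfd\t%r!", REG the stack pointer and a
   MASK selecting r4, r5 and lr, this prints:

       stmfd   sp!, {r4, r5, lr}  */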
7747 /* Output a FLDMX instruction to STREAM.
7748 BASE is the register containing the address.
7749 REG and COUNT specify the register range.
7750 Extra registers may be added to avoid hardware bugs. */
7752 static void
7753 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7755 int i;
7757 /* Workaround ARM10 VFPr1 bug. */
7758 if (count == 2 && !arm_arch6)
7760 if (reg == 15)
7761 reg--;
7762 count++;
7765 fputc ('\t', stream);
7766 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7768 for (i = reg; i < reg + count; i++)
7770 if (i > reg)
7771 fputs (", ", stream);
7772 asm_fprintf (stream, "d%d", i);
7774 fputs ("}\n", stream);
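/* With BASE = sp, REG = 8 and COUNT = 2 on a pre-ARMv6 core the
   workaround above widens the transfer to three registers, emitting

       fldmfdx sp!, {d8, d9, d10}

   rather than a two-register load that could trigger the bug.  */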
7779 /* Output the assembly for a store multiple. */
7781 const char *
7782 vfp_output_fstmx (rtx * operands)
7784 char pattern[100];
7785 int p;
7786 int base;
7787 int i;
7789 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7790 p = strlen (pattern);
7792 if (GET_CODE (operands[1]) != REG)
7793 abort ();
7795 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7796 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7798 p += sprintf (&pattern[p], ", d%d", base + i);
7800 strcpy (&pattern[p], "}");
7802 output_asm_insn (pattern, operands);
7803 return "";
7807 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
7808 number of bytes pushed. */
7810 static int
7811 vfp_emit_fstmx (int base_reg, int count)
7813 rtx par;
7814 rtx dwarf;
7815 rtx tmp, reg;
7816 int i;
7818 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
7819 register pairs are stored by a store multiple insn. We avoid this
7820 by pushing an extra pair. */
7821 if (count == 2 && !arm_arch6)
7823 if (base_reg == LAST_VFP_REGNUM - 3)
7824 base_reg -= 2;
7825 count++;
7828 /* ??? The frame layout is implementation defined. We describe
7829 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7830 We really need some way of representing the whole block so that the
7831 unwinder can figure it out at runtime. */
7832 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7833 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7835 reg = gen_rtx_REG (DFmode, base_reg);
7836 base_reg += 2;
7838 XVECEXP (par, 0, 0)
7839 = gen_rtx_SET (VOIDmode,
7840 gen_rtx_MEM (BLKmode,
7841 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7842 gen_rtx_UNSPEC (BLKmode,
7843 gen_rtvec (1, reg),
7844 UNSPEC_PUSH_MULT));
7846 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7847 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7848 GEN_INT (-(count * 8 + 4))));
7849 RTX_FRAME_RELATED_P (tmp) = 1;
7850 XVECEXP (dwarf, 0, 0) = tmp;
7852 tmp = gen_rtx_SET (VOIDmode,
7853 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7854 reg);
7855 RTX_FRAME_RELATED_P (tmp) = 1;
7856 XVECEXP (dwarf, 0, 1) = tmp;
7858 for (i = 1; i < count; i++)
7860 reg = gen_rtx_REG (DFmode, base_reg);
7861 base_reg += 2;
7862 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7864 tmp = gen_rtx_SET (VOIDmode,
7865 gen_rtx_MEM (DFmode,
7866 gen_rtx_PLUS (SImode,
7867 stack_pointer_rtx,
7868 GEN_INT (i * 8))),
7869 reg);
7870 RTX_FRAME_RELATED_P (tmp) = 1;
7871 XVECEXP (dwarf, 0, i + 1) = tmp;
7874 par = emit_insn (par);
7875 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7876 REG_NOTES (par));
7877 RTX_FRAME_RELATED_P (par) = 1;
7879 return count * 8 + 4;
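/* Each register pair occupies 8 bytes and the FSTMX format adds one
   pad word, hence the count * 8 + 4 return value: saving three pairs,
   for instance, moves the stack pointer by 3 * 8 + 4 = 28 bytes.  */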
7883 /* Output a 'call' insn. */
7884 const char *
7885 output_call (rtx *operands)
7887 if (arm_arch5)
7888 abort (); /* Patterns should call blx <reg> directly. */
7890 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7891 if (REGNO (operands[0]) == LR_REGNUM)
7893 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7894 output_asm_insn ("mov%?\t%0, %|lr", operands);
7897 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7899 if (TARGET_INTERWORK || arm_arch4t)
7900 output_asm_insn ("bx%?\t%0", operands);
7901 else
7902 output_asm_insn ("mov%?\t%|pc, %0", operands);
7904 return "";
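/* On a pre-v5 interworking target a call through r3 therefore comes
   out as

       mov     lr, pc
       bx      r3

   relying on the pc reading as the current insn's address plus 8, so
   lr ends up pointing at the instruction after the bx.  */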
7907 /* Output a 'call' insn that is a reference in memory. */
7908 const char *
7909 output_call_mem (rtx *operands)
7911 if (TARGET_INTERWORK && !arm_arch5)
7913 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7914 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7915 output_asm_insn ("bx%?\t%|ip", operands);
7917 else if (regno_use_in (LR_REGNUM, operands[0]))
7919 /* LR is used in the memory address. We load the address in the
7920 first instruction. It's safe to use IP as the target of the
7921 load since the call will kill it anyway. */
7922 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7923 if (arm_arch5)
7924 output_asm_insn ("blx%?\t%|ip", operands);
7925 else
7927 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7928 if (arm_arch4t)
7929 output_asm_insn ("bx%?\t%|ip", operands);
7930 else
7931 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7934 else
7936 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7937 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7940 return "";
7944 /* Output a move from arm registers to an fpa register.
7945 OPERANDS[0] is an fpa register.
7946 OPERANDS[1] is the first register of an arm register pair. */
7947 const char *
7948 output_mov_long_double_fpa_from_arm (rtx *operands)
7950 int arm_reg0 = REGNO (operands[1]);
7951 rtx ops[3];
7953 if (arm_reg0 == IP_REGNUM)
7954 abort ();
7956 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7957 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7958 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7960 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7961 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7963 return "";
7966 /* Output a move from an fpa register to arm registers.
7967 OPERANDS[0] is the first register of an arm register pair.
7968 OPERANDS[1] is an fpa register. */
7969 const char *
7970 output_mov_long_double_arm_from_fpa (rtx *operands)
7972 int arm_reg0 = REGNO (operands[0]);
7973 rtx ops[3];
7975 if (arm_reg0 == IP_REGNUM)
7976 abort ();
7978 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7979 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7980 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7982 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7983 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7984 return "";
7987 /* Output a move from arm registers to arm registers of a long double.
7988 OPERANDS[0] is the destination.
7989 OPERANDS[1] is the source. */
7990 const char *
7991 output_mov_long_double_arm_from_arm (rtx *operands)
7993 /* We have to be careful here because the two might overlap. */
7994 int dest_start = REGNO (operands[0]);
7995 int src_start = REGNO (operands[1]);
7996 rtx ops[2];
7997 int i;
7999 if (dest_start < src_start)
8001 for (i = 0; i < 3; i++)
8003 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8004 ops[1] = gen_rtx_REG (SImode, src_start + i);
8005 output_asm_insn ("mov%?\t%0, %1", ops);
8008 else
8010 for (i = 2; i >= 0; i--)
8012 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8013 ops[1] = gen_rtx_REG (SImode, src_start + i);
8014 output_asm_insn ("mov%?\t%0, %1", ops);
8018 return "";
8022 /* Output a move from arm registers to an fpa register.
8023 OPERANDS[0] is an fpa register.
8024 OPERANDS[1] is the first register of an arm register pair. */
8025 const char *
8026 output_mov_double_fpa_from_arm (rtx *operands)
8028 int arm_reg0 = REGNO (operands[1]);
8029 rtx ops[2];
8031 if (arm_reg0 == IP_REGNUM)
8032 abort ();
8034 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8035 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8036 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8037 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8038 return "";
8041 /* Output a move from an FPA register to ARM registers.
8042 OPERANDS[0] is the first register of an ARM register pair.
8043 OPERANDS[1] is an FPA register. */
8044 const char *
8045 output_mov_double_arm_from_fpa (rtx *operands)
8047 int arm_reg0 = REGNO (operands[0]);
8048 rtx ops[2];
8050 if (arm_reg0 == IP_REGNUM)
8051 abort ();
8053 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8054 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8055 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8056 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8057 return "";
8060 /* Output a move between double words.
8061 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8062 or MEM<-REG and all MEMs must be offsettable addresses. */
8063 const char *
8064 output_move_double (rtx *operands)
8066 enum rtx_code code0 = GET_CODE (operands[0]);
8067 enum rtx_code code1 = GET_CODE (operands[1]);
8068 rtx otherops[3];
8070 if (code0 == REG)
8072 int reg0 = REGNO (operands[0]);
8074 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8076 if (code1 == REG)
8078 int reg1 = REGNO (operands[1]);
8079 if (reg1 == IP_REGNUM)
8080 abort ();
8082 /* Ensure the second source is not overwritten. */
8083 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8084 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8085 else
8086 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8088 else if (code1 == CONST_VECTOR)
8090 HOST_WIDE_INT hint = 0;
8092 switch (GET_MODE (operands[1]))
8094 case V2SImode:
8095 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8096 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8097 break;
8099 case V4HImode:
8100 if (BYTES_BIG_ENDIAN)
8102 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8103 hint <<= 16;
8104 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8106 else
8108 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8109 hint <<= 16;
8110 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8113 otherops[1] = GEN_INT (hint);
8114 hint = 0;
8116 if (BYTES_BIG_ENDIAN)
8118 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8119 hint <<= 16;
8120 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8122 else
8124 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8125 hint <<= 16;
8126 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8129 operands[1] = GEN_INT (hint);
8130 break;
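/* Illustrative example: on a little-endian target the V4HImode vector
{e0, e1, e2, e3} becomes the two SImode immediates (e3 << 16) | e2
(the high word, placed in OTHEROPS) and (e1 << 16) | e0 (the low word,
left in OPERANDS). */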
8132 case V8QImode:
8133 if (BYTES_BIG_ENDIAN)
8135 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8136 hint <<= 8;
8137 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8138 hint <<= 8;
8139 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8140 hint <<= 8;
8141 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8143 else
8145 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8146 hint <<= 8;
8147 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8148 hint <<= 8;
8149 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8150 hint <<= 8;
8151 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8154 otherops[1] = GEN_INT (hint);
8155 hint = 0;
8157 if (BYTES_BIG_ENDIAN)
8159 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8160 hint <<= 8;
8161 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8162 hint <<= 8;
8163 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8164 hint <<= 8;
8165 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8167 else
8169 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8170 hint <<= 8;
8171 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8172 hint <<= 8;
8173 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8174 hint <<= 8;
8175 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8178 operands[1] = GEN_INT (hint);
8179 break;
8181 default:
8182 abort ();
8184 output_mov_immediate (operands);
8185 output_mov_immediate (otherops);
8187 else if (code1 == CONST_DOUBLE)
8189 if (GET_MODE (operands[1]) == DFmode)
8191 REAL_VALUE_TYPE r;
8192 long l[2];
8194 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8195 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8196 otherops[1] = GEN_INT (l[1]);
8197 operands[1] = GEN_INT (l[0]);
8199 else if (GET_MODE (operands[1]) != VOIDmode)
8200 abort ();
8201 else if (WORDS_BIG_ENDIAN)
8203 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8204 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8206 else
8208 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8209 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8212 output_mov_immediate (operands);
8213 output_mov_immediate (otherops);
8215 else if (code1 == CONST_INT)
8217 #if HOST_BITS_PER_WIDE_INT > 32
8218 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8219 what the upper word is. */
8220 if (WORDS_BIG_ENDIAN)
8222 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8223 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8225 else
8227 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8228 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8230 #else
8231 /* Sign extend the intval into the high-order word. */
8232 if (WORDS_BIG_ENDIAN)
8234 otherops[1] = operands[1];
8235 operands[1] = (INTVAL (operands[1]) < 0
8236 ? constm1_rtx : const0_rtx);
8238 else
8239 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8240 #endif
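/* E.g. on a 32-bit host, moving the DImode constant -5 loads -5 into
the low word and -1 (all ones, the sign extension) into the high
word. */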
8241 output_mov_immediate (otherops);
8242 output_mov_immediate (operands);
8244 else if (code1 == MEM)
8246 switch (GET_CODE (XEXP (operands[1], 0)))
8248 case REG:
8249 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8250 break;
8252 case PRE_INC:
8253 if (!TARGET_LDRD)
8254 abort (); /* Should never happen now. */
8255 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8256 break;
8258 case PRE_DEC:
8259 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8260 break;
8262 case POST_INC:
8263 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8264 break;
8266 case POST_DEC:
8267 if (!TARGET_LDRD)
8268 abort (); /* Should never happen now. */
8269 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8270 break;
8272 case PRE_MODIFY:
8273 case POST_MODIFY:
8274 otherops[0] = operands[0];
8275 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8276 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8278 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8280 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8282 /* Registers overlap so split out the increment. */
8283 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8284 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8286 else
8287 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8289 else
8291 /* We only allow constant increments, so this is safe. */
8292 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8294 break;
8296 case LABEL_REF:
8297 case CONST:
8298 output_asm_insn ("adr%?\t%0, %1", operands);
8299 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8300 break;
8302 default:
8303 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8304 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8306 otherops[0] = operands[0];
8307 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8308 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8310 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8312 if (GET_CODE (otherops[2]) == CONST_INT)
8314 switch ((int) INTVAL (otherops[2]))
8316 case -8:
8317 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8318 return "";
8319 case -4:
8320 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8321 return "";
8322 case 4:
8323 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8324 return "";
8327 if (TARGET_LDRD
8328 && (GET_CODE (otherops[2]) == REG
8329 || (GET_CODE (otherops[2]) == CONST_INT
8330 && INTVAL (otherops[2]) > -256
8331 && INTVAL (otherops[2]) < 256)))
8333 if (reg_overlap_mentioned_p (otherops[0],
8334 otherops[2]))
8336 /* Swap base and index registers over to
8337 avoid a conflict. */
8338 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8339 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8342 /* If both registers conflict, it will usually
8343 have been fixed by a splitter. */
8344 if (reg_overlap_mentioned_p (otherops[0],
8345 otherops[2]))
8347 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8348 output_asm_insn ("ldr%?d\t%0, [%1]",
8349 otherops);
8350 return "";
8352 else
8354 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8355 otherops);
8356 return "";
8359 if (GET_CODE (otherops[2]) == CONST_INT)
8361 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8362 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8363 else
8364 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8366 else
8367 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8369 else
8370 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8372 return "ldm%?ia\t%0, %M0";
8374 else
8376 otherops[1] = adjust_address (operands[1], SImode, 4);
8377 /* Take care of overlapping base/data reg. */
8378 if (reg_mentioned_p (operands[0], operands[1]))
8380 output_asm_insn ("ldr%?\t%0, %1", otherops);
8381 output_asm_insn ("ldr%?\t%0, %1", operands);
8383 else
8385 output_asm_insn ("ldr%?\t%0, %1", operands);
8386 output_asm_insn ("ldr%?\t%0, %1", otherops);
8391 else
8392 abort (); /* Constraints should prevent this. */
8394 else if (code0 == MEM && code1 == REG)
8396 if (REGNO (operands[1]) == IP_REGNUM)
8397 abort ();
8399 switch (GET_CODE (XEXP (operands[0], 0)))
8401 case REG:
8402 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8403 break;
8405 case PRE_INC:
8406 if (!TARGET_LDRD)
8407 abort (); /* Should never happen now. */
8408 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8409 break;
8411 case PRE_DEC:
8412 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8413 break;
8415 case POST_INC:
8416 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8417 break;
8419 case POST_DEC:
8420 if (!TARGET_LDRD)
8421 abort (); /* Should never happen now. */
8422 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8423 break;
8425 case PRE_MODIFY:
8426 case POST_MODIFY:
8427 otherops[0] = operands[1];
8428 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8429 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8431 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8432 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8433 else
8434 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8435 break;
8437 case PLUS:
8438 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8439 if (GET_CODE (otherops[2]) == CONST_INT)
8441 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8443 case -8:
8444 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8445 return "";
8447 case -4:
8448 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8449 return "";
8451 case 4:
8452 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8453 return "";
8456 if (TARGET_LDRD
8457 && (GET_CODE (otherops[2]) == REG
8458 || (GET_CODE (otherops[2]) == CONST_INT
8459 && INTVAL (otherops[2]) > -256
8460 && INTVAL (otherops[2]) < 256)))
8462 otherops[0] = operands[1];
8463 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8464 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8465 return "";
8467 /* Fall through */
8469 default:
8470 otherops[0] = adjust_address (operands[0], SImode, 4);
8471 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8472 output_asm_insn ("str%?\t%1, %0", operands);
8473 output_asm_insn ("str%?\t%1, %0", otherops);
8476 else
8477 /* Constraints should prevent this. */
8478 abort ();
8480 return "";
8484 /* Output an arbitrary MOV reg, #n.
8485 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8486 const char *
8487 output_mov_immediate (rtx *operands)
8489 HOST_WIDE_INT n = INTVAL (operands[1]);
8491 /* Try to use one MOV. */
8492 if (const_ok_for_arm (n))
8493 output_asm_insn ("mov%?\t%0, %1", operands);
8495 /* Try to use one MVN. */
8496 else if (const_ok_for_arm (~n))
8498 operands[1] = GEN_INT (~n);
8499 output_asm_insn ("mvn%?\t%0, %1", operands);
8501 else
8503 int n_ones = 0;
8504 int i;
8506 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8507 for (i = 0; i < 32; i++)
8508 if (n & 1 << i)
8509 n_ones++;
8511 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8512 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8513 else
8514 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8517 return "";
8520 /* Output an ADD r, s, #n where n may be too big for one instruction.
8521 If adding zero to one register, output nothing. */
8522 const char *
8523 output_add_immediate (rtx *operands)
8525 HOST_WIDE_INT n = INTVAL (operands[2]);
8527 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8529 if (n < 0)
8530 output_multi_immediate (operands,
8531 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8532 -n);
8533 else
8534 output_multi_immediate (operands,
8535 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8539 return "";
8542 /* Output a multiple immediate operation.
8543 OPERANDS is the vector of operands referred to in the output patterns.
8544 INSTR1 is the output pattern to use for the first constant.
8545 INSTR2 is the output pattern to use for subsequent constants.
8546 IMMED_OP is the index of the constant slot in OPERANDS.
8547 N is the constant value. */
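/* A rough sketch of the loop below (illustrative only; emit () is a
stand-in for output_asm_insn with the chosen template, and N is
assumed to have been masked to 32 bits already):

for (i = 0; i < 32; i += 2)
if (n & (3 << i))
{
emit (n & (255 << i));   // one MOV/MVN or ORR/BIC immediate
i += 6;                  // skip the remainder of this byte
}

E.g. N = 0x00ff00ff splits into the chunks 0x000000ff and 0x00ff0000,
so output_mov_immediate produces roughly "mov rD, #255" followed by
"orr rD, rD, #16711680". */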
8548 static const char *
8549 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8550 int immed_op, HOST_WIDE_INT n)
8552 #if HOST_BITS_PER_WIDE_INT > 32
8553 n &= 0xffffffff;
8554 #endif
8556 if (n == 0)
8558 /* Quick and easy output. */
8559 operands[immed_op] = const0_rtx;
8560 output_asm_insn (instr1, operands);
8562 else
8564 int i;
8565 const char * instr = instr1;
8567 /* Note that n is never zero here (which would give no output). */
8568 for (i = 0; i < 32; i += 2)
8570 if (n & (3 << i))
8572 operands[immed_op] = GEN_INT (n & (255 << i));
8573 output_asm_insn (instr, operands);
8574 instr = instr2;
8575 i += 6;
8580 return "";
8583 /* Return the appropriate ARM instruction for the operation code.
8584 The returned result should not be overwritten. OP is the rtx of the
8585 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8586 was shifted. */
8587 const char *
8588 arithmetic_instr (rtx op, int shift_first_arg)
8590 switch (GET_CODE (op))
8592 case PLUS:
8593 return "add";
8595 case MINUS:
8596 return shift_first_arg ? "rsb" : "sub";
8598 case IOR:
8599 return "orr";
8601 case XOR:
8602 return "eor";
8604 case AND:
8605 return "and";
8607 default:
8608 abort ();
8612 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8613 for the operation code. The returned result should not be overwritten.
8614 OP is the rtx code of the shift.
8615 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8616 shift amount if the shift is by a constant. */
8617 static const char *
8618 shift_op (rtx op, HOST_WIDE_INT *amountp)
8620 const char * mnem;
8621 enum rtx_code code = GET_CODE (op);
8623 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8624 *amountp = -1;
8625 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8626 *amountp = INTVAL (XEXP (op, 1));
8627 else
8628 abort ();
8630 switch (code)
8632 case ASHIFT:
8633 mnem = "asl";
8634 break;
8636 case ASHIFTRT:
8637 mnem = "asr";
8638 break;
8640 case LSHIFTRT:
8641 mnem = "lsr";
8642 break;
8644 case ROTATE:
8645 if (*amountp == -1)
8646 abort ();
8647 *amountp = 32 - *amountp;
8649 /* Fall through. */
8651 case ROTATERT:
8652 mnem = "ror";
8653 break;
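/* E.g. (rotate X 10) is emitted as "ror #22", since rotating left by
N is equivalent to rotating right by 32 - N. */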
8655 case MULT:
8656 /* We never have to worry about the amount being other than a
8657 power of 2, since this case can never be reloaded from a reg. */
8658 if (*amountp != -1)
8659 *amountp = int_log2 (*amountp);
8660 else
8661 abort ();
8662 return "asl";
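/* E.g. (mult X 8) is emitted as "asl #3". */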
8664 default:
8665 abort ();
8668 if (*amountp != -1)
8670 /* This is not 100% correct, but follows from the desire to merge
8671 multiplication by a power of 2 with the recognizer for a
8672 shift. >=32 is not a valid shift for "asl", so we must try and
8673 output a shift that produces the correct arithmetical result.
8674 Using lsr #32 is identical except for the fact that the carry bit
8675 is not set correctly if we set the flags; but we never use the
8676 carry bit from such an operation, so we can ignore that. */
8677 if (code == ROTATERT)
8678 /* Rotate is just modulo 32. */
8679 *amountp &= 31;
8680 else if (*amountp != (*amountp & 31))
8682 if (code == ASHIFT)
8683 mnem = "lsr";
8684 *amountp = 32;
8687 /* Shifts of 0 are no-ops. */
8688 if (*amountp == 0)
8689 return NULL;
8692 return mnem;
8695 /* Obtain the shift count corresponding to the power of two POWER. */
8697 static HOST_WIDE_INT
8698 int_log2 (HOST_WIDE_INT power)
8700 HOST_WIDE_INT shift = 0;
8702 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8704 if (shift > 31)
8705 abort ();
8706 shift++;
8709 return shift;
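/* E.g. int_log2 (8) == 3. Strictly, the loop returns the index of the
least significant set bit, which equals log2 (POWER) only when POWER
is an exact power of two, as the callers guarantee. */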
8712 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8713 because /bin/as is horribly restrictive. The judgement about
8714 whether or not each character is 'printable' (and can be output as
8715 is) or not (and must be printed with an octal escape) must be made
8716 with reference to the *host* character set -- the situation is
8717 similar to that discussed in the comments above pp_c_char in
8718 c-pretty-print.c. */
8720 #define MAX_ASCII_LEN 51
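/* E.g. the bytes 'h', 'i', '\n' are emitted as
.ascii "hi\012"
with quote and backslash characters escaped, non-printable characters
octal-escaped, and a fresh .ascii directive started after every
MAX_ASCII_LEN characters. */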
8722 void
8723 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8725 int i;
8726 int len_so_far = 0;
8728 fputs ("\t.ascii\t\"", stream);
8730 for (i = 0; i < len; i++)
8732 int c = p[i];
8734 if (len_so_far >= MAX_ASCII_LEN)
8736 fputs ("\"\n\t.ascii\t\"", stream);
8737 len_so_far = 0;
8740 if (ISPRINT (c))
8742 if (c == '\\' || c == '\"')
8744 putc ('\\', stream);
8745 len_so_far++;
8747 putc (c, stream);
8748 len_so_far++;
8750 else
8752 fprintf (stream, "\\%03o", c);
8753 len_so_far += 4;
8757 fputs ("\"\n", stream);
8760 /* Compute the register save mask for registers 0 through 12
8761 inclusive. This code is used by arm_compute_save_reg_mask. */
8763 static unsigned long
8764 arm_compute_save_reg0_reg12_mask (void)
8766 unsigned long func_type = arm_current_func_type ();
8767 unsigned long save_reg_mask = 0;
8768 unsigned int reg;
8770 if (IS_INTERRUPT (func_type))
8772 unsigned int max_reg;
8773 /* Interrupt functions must not corrupt any registers,
8774 even call clobbered ones. If this is a leaf function
8775 we can just examine the registers used by the RTL, but
8776 otherwise we have to assume that whatever function is
8777 called might clobber anything, and so we have to save
8778 all the call-clobbered registers as well. */
8779 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8780 /* FIQ handlers have registers r8 - r12 banked, so
8781 we only need to check r0 - r7. Normal ISRs only
8782 bank r14 and r15, so for them we must check up to r12.
8783 r13 is the stack pointer, which is always preserved,
8784 so we do not need to consider it here. */
8785 max_reg = 7;
8786 else
8787 max_reg = 12;
8789 for (reg = 0; reg <= max_reg; reg++)
8790 if (regs_ever_live[reg]
8791 || (! current_function_is_leaf && call_used_regs [reg]))
8792 save_reg_mask |= (1 << reg);
8794 /* Also save the pic base register if necessary. */
8795 if (flag_pic
8796 && !TARGET_SINGLE_PIC_BASE
8797 && current_function_uses_pic_offset_table)
8798 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8800 else
8802 /* In the normal case we only need to save those registers
8803 which are call saved and which are used by this function. */
8804 for (reg = 0; reg <= 10; reg++)
8805 if (regs_ever_live[reg] && ! call_used_regs [reg])
8806 save_reg_mask |= (1 << reg);
8808 /* Handle the frame pointer as a special case. */
8809 if (! TARGET_APCS_FRAME
8810 && ! frame_pointer_needed
8811 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8812 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8813 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8815 /* If we aren't loading the PIC register,
8816 don't stack it even though it may be live. */
8817 if (flag_pic
8818 && !TARGET_SINGLE_PIC_BASE
8819 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8820 || current_function_uses_pic_offset_table))
8821 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8824 /* Save registers so the exception handler can modify them. */
8825 if (current_function_calls_eh_return)
8827 unsigned int i;
8829 for (i = 0; ; i++)
8831 reg = EH_RETURN_DATA_REGNO (i);
8832 if (reg == INVALID_REGNUM)
8833 break;
8834 save_reg_mask |= 1 << reg;
8838 return save_reg_mask;
8841 /* Compute a bit mask of which registers need to be
8842 saved on the stack for the current function. */
8844 static unsigned long
8845 arm_compute_save_reg_mask (void)
8847 unsigned int save_reg_mask = 0;
8848 unsigned long func_type = arm_current_func_type ();
8850 if (IS_NAKED (func_type))
8851 /* This should never really happen. */
8852 return 0;
8854 /* If we are creating a stack frame, then we must save the frame pointer,
8855 IP (which will hold the old stack pointer), LR and the PC. */
8856 if (frame_pointer_needed)
8857 save_reg_mask |=
8858 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8859 | (1 << IP_REGNUM)
8860 | (1 << LR_REGNUM)
8861 | (1 << PC_REGNUM);
8863 /* Volatile functions do not return, so there
8864 is no need to save any other registers. */
8865 if (IS_VOLATILE (func_type))
8866 return save_reg_mask;
8868 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8870 /* Decide if we need to save the link register.
8871 Interrupt routines have their own banked link register,
8872 so they never need to save it.
8873 Otherwise if we do not use the link register we do not need to save
8874 it. If we are pushing other registers onto the stack however, we
8875 can save an instruction in the epilogue by pushing the link register
8876 now and then popping it back into the PC. This incurs extra memory
8877 accesses though, so we only do it when optimizing for size, and only
8878 if we know that we will not need a fancy return sequence. */
8879 if (regs_ever_live [LR_REGNUM]
8880 || (save_reg_mask
8881 && optimize_size
8882 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8883 && !current_function_calls_eh_return))
8884 save_reg_mask |= 1 << LR_REGNUM;
8886 if (cfun->machine->lr_save_eliminated)
8887 save_reg_mask &= ~ (1 << LR_REGNUM);
8889 if (TARGET_REALLY_IWMMXT
8890 && ((bit_count (save_reg_mask)
8891 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8893 unsigned int reg;
8895 /* The total number of registers that are going to be pushed
8896 onto the stack is odd. We need to ensure that the stack
8897 is 64-bit aligned before we start to save iWMMXt registers,
8898 and also before we start to create locals. (A local variable
8899 might be a double or long long which we will load/store using
8900 an iWMMXt instruction). Therefore we need to push another
8901 ARM register, so that the stack will be 64-bit aligned. We
8902 try to avoid using the arg registers (r0 - r3) as they might be
8903 used to pass values in a tail call. */
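/* E.g. if r4, r5 and lr are already being saved (an odd count, with
no pretend args) then r6 is saved as well, purely as padding; if r4
through r12 were all taken, r3 would be used instead and sibling
calls blocked. */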
8904 for (reg = 4; reg <= 12; reg++)
8905 if ((save_reg_mask & (1 << reg)) == 0)
8906 break;
8908 if (reg <= 12)
8909 save_reg_mask |= (1 << reg);
8910 else
8912 cfun->machine->sibcall_blocked = 1;
8913 save_reg_mask |= (1 << 3);
8917 return save_reg_mask;
8921 /* Compute a bit mask of which registers need to be
8922 saved on the stack for the current function. */
8923 static unsigned long
8924 thumb_compute_save_reg_mask (void)
8926 unsigned long mask;
8927 unsigned reg;
8929 mask = 0;
8930 for (reg = 0; reg < 12; reg ++)
8931 if (regs_ever_live[reg] && !call_used_regs[reg])
8932 mask |= 1 << reg;
8934 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8935 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8937 if (TARGET_SINGLE_PIC_BASE)
8938 mask &= ~(1 << arm_pic_register);
8940 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8941 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8942 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8944 /* LR will also be pushed if any lo regs are pushed. */
8945 if (mask & 0xff || thumb_force_lr_save ())
8946 mask |= (1 << LR_REGNUM);
8948 /* Make sure we have a low work register if we need one.
8949 We will need one if we are going to push a high register,
8950 but we are not currently intending to push a low register. */
8951 if ((mask & 0xff) == 0
8952 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8954 /* Use thumb_find_work_register to choose which register
8955 we will use. If the register is live then we will
8956 have to push it. Use LAST_LO_REGNUM as our fallback
8957 choice for the register to select. */
8958 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8960 if (! call_used_regs[reg])
8961 mask |= 1 << reg;
8964 return mask;
8968 /* Return the number of bytes required to save VFP registers. */
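/* Each contiguous run of live double registers is saved with a single
FSTMX, costing 8 bytes per register plus 4 bytes for the format word;
as a workaround for the ARM10 VFPr1 bug, a run of exactly two
registers is counted as three on pre-v6 cores. */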
8969 static int
8970 arm_get_vfp_saved_size (void)
8972 unsigned int regno;
8973 int count;
8974 int saved;
8976 saved = 0;
8977 /* Space for saved VFP registers. */
8978 if (TARGET_HARD_FLOAT && TARGET_VFP)
8980 count = 0;
8981 for (regno = FIRST_VFP_REGNUM;
8982 regno < LAST_VFP_REGNUM;
8983 regno += 2)
8985 if ((!regs_ever_live[regno] || call_used_regs[regno])
8986 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8988 if (count > 0)
8990 /* Workaround ARM10 VFPr1 bug. */
8991 if (count == 2 && !arm_arch6)
8992 count++;
8993 saved += count * 8 + 4;
8995 count = 0;
8997 else
8998 count++;
9000 if (count > 0)
9002 if (count == 2 && !arm_arch6)
9003 count++;
9004 saved += count * 8 + 4;
9007 return saved;
9011 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9012 everything bar the final return instruction. */
9013 const char *
9014 output_return_instruction (rtx operand, int really_return, int reverse)
9016 char conditional[10];
9017 char instr[100];
9018 unsigned reg;
9019 unsigned long live_regs_mask;
9020 unsigned long func_type;
9021 arm_stack_offsets *offsets;
9023 func_type = arm_current_func_type ();
9025 if (IS_NAKED (func_type))
9026 return "";
9028 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9030 /* If this function was declared non-returning, and we have
9031 found a tail call, then we have to trust that the called
9032 function won't return. */
9033 if (really_return)
9035 rtx ops[2];
9037 /* Otherwise, trap an attempted return by aborting. */
9038 ops[0] = operand;
9039 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9040 : "abort");
9041 assemble_external_libcall (ops[1]);
9042 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9045 return "";
9048 if (current_function_calls_alloca && !really_return)
9049 abort ();
9051 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
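/* CONDITIONAL now holds "%?%d0" ("%?%D0" when the sense is reversed),
which output_asm_insn expands into the condition of operand 0, so a
return predicated on EQ produces e.g. "ldmeqfd sp!, {...}". */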
9053 return_used_this_function = 1;
9055 live_regs_mask = arm_compute_save_reg_mask ();
9057 if (live_regs_mask)
9059 const char * return_reg;
9061 /* If we do not have any special requirements for function exit
9062 (e.g. interworking, or ISR) then we can load the return address
9063 directly into the PC. Otherwise we must load it into LR. */
9064 if (really_return
9065 && ! TARGET_INTERWORK)
9066 return_reg = reg_names[PC_REGNUM];
9067 else
9068 return_reg = reg_names[LR_REGNUM];
9070 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9072 /* There are three possible reasons for the IP register
9073 being saved: 1) a stack frame was created, in which case
9074 IP contains the old stack pointer, or 2) an ISR routine
9075 corrupted it, or 3) it was saved to align the stack on
9076 iWMMXt. In case 1, restore IP into SP, otherwise just
9077 restore IP. */
9078 if (frame_pointer_needed)
9080 live_regs_mask &= ~ (1 << IP_REGNUM);
9081 live_regs_mask |= (1 << SP_REGNUM);
9083 else
9085 if (! IS_INTERRUPT (func_type)
9086 && ! TARGET_REALLY_IWMMXT)
9087 abort ();
9091 /* On some ARM architectures it is faster to use LDR rather than
9092 LDM to load a single register. On other architectures, the
9093 cost is the same. In 26 bit mode, or for exception handlers,
9094 we have to use LDM to load the PC so that the CPSR is also
9095 restored. */
9096 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9097 if (live_regs_mask == (1U << reg))
9098 break;
9100 if (reg <= LAST_ARM_REGNUM
9101 && (reg != LR_REGNUM
9102 || ! really_return
9103 || ! IS_INTERRUPT (func_type)))
9105 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9106 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9108 else
9110 char *p;
9111 int first = 1;
9113 /* Generate the load multiple instruction to restore the
9114 registers. Note we can get here, even if
9115 frame_pointer_needed is true, but only if sp already
9116 points to the base of the saved core registers. */
9117 if (live_regs_mask & (1 << SP_REGNUM))
9119 unsigned HOST_WIDE_INT stack_adjust;
9121 offsets = arm_get_frame_offsets ();
9122 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9123 if (stack_adjust != 0 && stack_adjust != 4)
9124 abort ();
9126 if (stack_adjust && arm_arch5)
9127 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9128 else
9130 /* If we can't use ldmib (SA110 bug),
9131 then try to pop r3 instead. */
9132 if (stack_adjust)
9133 live_regs_mask |= 1 << 3;
9134 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9137 else
9138 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9140 p = instr + strlen (instr);
9142 for (reg = 0; reg <= SP_REGNUM; reg++)
9143 if (live_regs_mask & (1 << reg))
9145 int l = strlen (reg_names[reg]);
9147 if (first)
9148 first = 0;
9149 else
9151 memcpy (p, ", ", 2);
9152 p += 2;
9155 memcpy (p, "%|", 2);
9156 memcpy (p + 2, reg_names[reg], l);
9157 p += l + 2;
9160 if (live_regs_mask & (1 << LR_REGNUM))
9162 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9163 /* If returning from an interrupt, restore the CPSR. */
9164 if (IS_INTERRUPT (func_type))
9165 strcat (p, "^");
9167 else
9168 strcpy (p, "}");
9171 output_asm_insn (instr, & operand);
9173 /* See if we need to generate an extra instruction to
9174 perform the actual function return. */
9175 if (really_return
9176 && func_type != ARM_FT_INTERWORKED
9177 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9179 /* The return has already been handled
9180 by loading the LR into the PC. */
9181 really_return = 0;
9185 if (really_return)
9187 switch ((int) ARM_FUNC_TYPE (func_type))
9189 case ARM_FT_ISR:
9190 case ARM_FT_FIQ:
9191 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9192 break;
9194 case ARM_FT_INTERWORKED:
9195 sprintf (instr, "bx%s\t%%|lr", conditional);
9196 break;
9198 case ARM_FT_EXCEPTION:
9199 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9200 break;
9202 default:
9203 /* Use bx if it's available. */
9204 if (arm_arch5 || arm_arch4t)
9205 sprintf (instr, "bx%s\t%%|lr", conditional);
9206 else
9207 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9208 break;
9211 output_asm_insn (instr, & operand);
9214 return "";
9217 /* Write the function name into the code section, directly preceding
9218 the function prologue.
9220 Code will be output similar to this:
9222 .ascii "arm_poke_function_name", 0
9223 .align
9225 .word 0xff000000 + (t1 - t0)
9226 arm_poke_function_name
9227 mov ip, sp
9228 stmfd sp!, {fp, ip, lr, pc}
9229 sub fp, ip, #4
9231 When performing a stack backtrace, code can inspect the value
9232 of 'pc' stored at 'fp' + 0. If the trace function then looks
9233 at location pc - 12 and the top 8 bits are set, then we know
9234 that there is a function name embedded immediately preceding this
9235 location, and that its length is ((pc[-3]) & ~0xff000000).
9237 We assume that pc is declared as a pointer to an unsigned long.
9239 It is of no benefit to output the function name if we are assembling
9240 a leaf function. These function types will not contain a stack
9241 backtrace structure; therefore it is not possible to determine the
9242 function name. */
9243 void
9244 arm_poke_function_name (FILE *stream, const char *name)
9246 unsigned long alignlength;
9247 unsigned long length;
9248 rtx x;
9250 length = strlen (name) + 1;
9251 alignlength = ROUND_UP_WORD (length);
9253 ASM_OUTPUT_ASCII (stream, name, length);
9254 ASM_OUTPUT_ALIGN (stream, 2);
9255 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9256 assemble_aligned_integer (UNITS_PER_WORD, x);
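/* E.g. for NAME = "foo": LENGTH is 4 (including the NUL), ALIGNLENGTH
is 4, and the function above outputs roughly (assuming ELF-style
directives)
.ascii "foo\000"
.align 2
.word 0xff000004
immediately before the function's label. */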
9259 /* Place some comments into the assembler stream
9260 describing the current function. */
9261 static void
9262 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9264 unsigned long func_type;
9266 if (!TARGET_ARM)
9268 thumb_output_function_prologue (f, frame_size);
9269 return;
9272 /* Sanity check. */
9273 if (arm_ccfsm_state || arm_target_insn)
9274 abort ();
9276 func_type = arm_current_func_type ();
9278 switch ((int) ARM_FUNC_TYPE (func_type))
9280 default:
9281 case ARM_FT_NORMAL:
9282 break;
9283 case ARM_FT_INTERWORKED:
9284 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9285 break;
9286 case ARM_FT_ISR:
9287 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9288 break;
9289 case ARM_FT_FIQ:
9290 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9291 break;
9292 case ARM_FT_EXCEPTION:
9293 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9294 break;
9297 if (IS_NAKED (func_type))
9298 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9300 if (IS_VOLATILE (func_type))
9301 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9303 if (IS_NESTED (func_type))
9304 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9306 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9307 current_function_args_size,
9308 current_function_pretend_args_size, frame_size);
9310 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9311 frame_pointer_needed,
9312 cfun->machine->uses_anonymous_args);
9314 if (cfun->machine->lr_save_eliminated)
9315 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9317 if (current_function_calls_eh_return)
9318 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9320 #ifdef AOF_ASSEMBLER
9321 if (flag_pic)
9322 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9323 #endif
9325 return_used_this_function = 0;
9328 const char *
9329 arm_output_epilogue (rtx sibling)
9331 int reg;
9332 unsigned long saved_regs_mask;
9333 unsigned long func_type;
9334 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9335 frame that is $fp + 4 for a non-variadic function. */
9336 int floats_offset = 0;
9337 rtx operands[3];
9338 FILE * f = asm_out_file;
9339 unsigned int lrm_count = 0;
9340 int really_return = (sibling == NULL);
9341 int start_reg;
9342 arm_stack_offsets *offsets;
9344 /* If we have already generated the return instruction
9345 then it is futile to generate anything else. */
9346 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9347 return "";
9349 func_type = arm_current_func_type ();
9351 if (IS_NAKED (func_type))
9352 /* Naked functions don't have epilogues. */
9353 return "";
9355 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9357 rtx op;
9359 /* A volatile function should never return. Call abort. */
9360 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9361 assemble_external_libcall (op);
9362 output_asm_insn ("bl\t%a0", &op);
9364 return "";
9367 if (current_function_calls_eh_return
9368 && ! really_return)
9369 /* If we are throwing an exception, then we really must
9370 be doing a return, so we can't tail-call. */
9371 abort ();
9373 offsets = arm_get_frame_offsets ();
9374 saved_regs_mask = arm_compute_save_reg_mask ();
9376 if (TARGET_IWMMXT)
9377 lrm_count = bit_count (saved_regs_mask);
9379 floats_offset = offsets->saved_args;
9380 /* Compute how far away the floats will be. */
9381 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9382 if (saved_regs_mask & (1 << reg))
9383 floats_offset += 4;
9385 if (frame_pointer_needed)
9387 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9388 int vfp_offset = offsets->frame;
9390 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9392 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9393 if (regs_ever_live[reg] && !call_used_regs[reg])
9395 floats_offset += 12;
9396 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9397 reg, FP_REGNUM, floats_offset - vfp_offset);
9400 else
9402 start_reg = LAST_FPA_REGNUM;
9404 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9406 if (regs_ever_live[reg] && !call_used_regs[reg])
9408 floats_offset += 12;
9410 /* We can't unstack more than four registers at once. */
9411 if (start_reg - reg == 3)
9413 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9414 reg, FP_REGNUM, floats_offset - vfp_offset);
9415 start_reg = reg - 1;
9418 else
9420 if (reg != start_reg)
9421 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9422 reg + 1, start_reg - reg,
9423 FP_REGNUM, floats_offset - vfp_offset);
9424 start_reg = reg - 1;
9428 /* Just in case the last register checked also needs unstacking. */
9429 if (reg != start_reg)
9430 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9431 reg + 1, start_reg - reg,
9432 FP_REGNUM, floats_offset - vfp_offset);
9435 if (TARGET_HARD_FLOAT && TARGET_VFP)
9437 int saved_size;
9439 /* The fldmx insn does not have base+offset addressing modes,
9440 so we use IP to hold the address. */
9441 saved_size = arm_get_vfp_saved_size ();
9443 if (saved_size > 0)
9445 floats_offset += saved_size;
9446 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9447 FP_REGNUM, floats_offset - vfp_offset);
9449 start_reg = FIRST_VFP_REGNUM;
9450 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9452 if ((!regs_ever_live[reg] || call_used_regs[reg])
9453 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9455 if (start_reg != reg)
9456 arm_output_fldmx (f, IP_REGNUM,
9457 (start_reg - FIRST_VFP_REGNUM) / 2,
9458 (reg - start_reg) / 2);
9459 start_reg = reg + 2;
9462 if (start_reg != reg)
9463 arm_output_fldmx (f, IP_REGNUM,
9464 (start_reg - FIRST_VFP_REGNUM) / 2,
9465 (reg - start_reg) / 2);
9468 if (TARGET_IWMMXT)
9470 /* The frame pointer is guaranteed to be non-double-word aligned.
9471 This is because it is set to (old_stack_pointer - 4) and the
9472 old_stack_pointer was double word aligned. Thus the offset to
9473 the iWMMXt registers to be loaded must also be non-double-word
9474 sized, so that the resultant address *is* double-word aligned.
9475 We can ignore floats_offset since that was already included in
9476 the live_regs_mask. */
9477 lrm_count += (lrm_count % 2 ? 2 : 1);
9479 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9480 if (regs_ever_live[reg] && !call_used_regs[reg])
9482 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9483 reg, FP_REGNUM, lrm_count * 4);
9484 lrm_count += 2;
9488 /* saved_regs_mask should contain the IP, which at the time of stack
9489 frame generation actually contains the old stack pointer. So a
9490 quick way to unwind the stack is just pop the IP register directly
9491 into the stack pointer. */
9492 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9493 abort ();
9494 saved_regs_mask &= ~ (1 << IP_REGNUM);
9495 saved_regs_mask |= (1 << SP_REGNUM);
9497 /* There are two registers left in saved_regs_mask - LR and PC. We
9498 only need to restore the LR register (the return address), but to
9499 save time we can load it directly into the PC, unless we need a
9500 special function exit sequence, or we are not really returning. */
9501 if (really_return
9502 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9503 && !current_function_calls_eh_return)
9504 /* Delete the LR from the register mask, so that the LR on
9505 the stack is loaded into the PC in the register mask. */
9506 saved_regs_mask &= ~ (1 << LR_REGNUM);
9507 else
9508 saved_regs_mask &= ~ (1 << PC_REGNUM);
9510 /* We must use SP as the base register, because SP is one of the
9511 registers being restored. If an interrupt or page fault
9512 happens in the ldm instruction, the SP might or might not
9513 have been restored. That would be bad, as then SP will no
9514 longer indicate the safe area of stack, and we can get stack
9515 corruption. Using SP as the base register means that it will
9516 be reset correctly to the original value, should an interrupt
9517 occur. If the stack pointer already points at the right
9518 place, then omit the subtraction. */
9519 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9520 || current_function_calls_alloca)
9521 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9522 4 * bit_count (saved_regs_mask));
9523 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9525 if (IS_INTERRUPT (func_type))
9526 /* Interrupt handlers will have pushed the
9527 IP onto the stack, so restore it now. */
9528 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9530 else
9532 /* Restore stack pointer if necessary. */
9533 if (offsets->outgoing_args != offsets->saved_regs)
9535 operands[0] = operands[1] = stack_pointer_rtx;
9536 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9537 output_add_immediate (operands);
9540 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9542 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9543 if (regs_ever_live[reg] && !call_used_regs[reg])
9544 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9545 reg, SP_REGNUM);
9547 else
9549 start_reg = FIRST_FPA_REGNUM;
9551 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9553 if (regs_ever_live[reg] && !call_used_regs[reg])
9555 if (reg - start_reg == 3)
9557 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9558 start_reg, SP_REGNUM);
9559 start_reg = reg + 1;
9562 else
9564 if (reg != start_reg)
9565 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9566 start_reg, reg - start_reg,
9567 SP_REGNUM);
9569 start_reg = reg + 1;
9573 /* Just in case the last register checked also needs unstacking. */
9574 if (reg != start_reg)
9575 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9576 start_reg, reg - start_reg, SP_REGNUM);
9579 if (TARGET_HARD_FLOAT && TARGET_VFP)
9581 start_reg = FIRST_VFP_REGNUM;
9582 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9584 if ((!regs_ever_live[reg] || call_used_regs[reg])
9585 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9587 if (start_reg != reg)
9588 arm_output_fldmx (f, SP_REGNUM,
9589 (start_reg - FIRST_VFP_REGNUM) / 2,
9590 (reg - start_reg) / 2);
9591 start_reg = reg + 2;
9594 if (start_reg != reg)
9595 arm_output_fldmx (f, SP_REGNUM,
9596 (start_reg - FIRST_VFP_REGNUM) / 2,
9597 (reg - start_reg) / 2);
9599 if (TARGET_IWMMXT)
9600 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9601 if (regs_ever_live[reg] && !call_used_regs[reg])
9602 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9604 /* If we can, restore the LR into the PC. */
9605 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9606 && really_return
9607 && current_function_pretend_args_size == 0
9608 && saved_regs_mask & (1 << LR_REGNUM)
9609 && !current_function_calls_eh_return)
9611 saved_regs_mask &= ~ (1 << LR_REGNUM);
9612 saved_regs_mask |= (1 << PC_REGNUM);
9615 /* Load the registers off the stack. If we only have one register
9616 to load use the LDR instruction - it is faster. */
9617 if (saved_regs_mask == (1 << LR_REGNUM))
9619 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9621 else if (saved_regs_mask)
9623 if (saved_regs_mask & (1 << SP_REGNUM))
9624 /* Note - write back to the stack register is not enabled
9625 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9626 in the list of registers and if we add writeback the
9627 instruction becomes UNPREDICTABLE. */
9628 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9629 else
9630 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9633 if (current_function_pretend_args_size)
9635 /* Unwind the pre-pushed regs. */
9636 operands[0] = operands[1] = stack_pointer_rtx;
9637 operands[2] = GEN_INT (current_function_pretend_args_size);
9638 output_add_immediate (operands);
9642 /* We may have already restored PC directly from the stack. */
9643 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9644 return "";
9646 /* Stack adjustment for exception handler. */
9647 if (current_function_calls_eh_return)
9648 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9649 ARM_EH_STACKADJ_REGNUM);
9651 /* Generate the return instruction. */
9652 switch ((int) ARM_FUNC_TYPE (func_type))
9654 case ARM_FT_ISR:
9655 case ARM_FT_FIQ:
9656 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9657 break;
9659 case ARM_FT_EXCEPTION:
9660 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9661 break;
9663 case ARM_FT_INTERWORKED:
9664 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9665 break;
9667 default:
9668 if (arm_arch5 || arm_arch4t)
9669 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9670 else
9671 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9672 break;
9675 return "";
9678 static void
9679 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9680 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9682 arm_stack_offsets *offsets;
9684 if (TARGET_THUMB)
9686 int regno;
9688 /* Emit any call-via-reg trampolines that are needed for v4t support
9689 of call_reg and call_value_reg type insns. */
9690 for (regno = 0; regno < LR_REGNUM; regno++)
9692 rtx label = cfun->machine->call_via[regno];
9694 if (label != NULL)
9696 function_section (current_function_decl);
9697 targetm.asm_out.internal_label (asm_out_file, "L",
9698 CODE_LABEL_NUMBER (label));
9699 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9703 /* ??? Probably not safe to set this here, since it assumes that a
9704 function will be emitted as assembly immediately after we generate
9705 RTL for it. This does not happen for inline functions. */
9706 return_used_this_function = 0;
9708 else
9710 /* We need to take into account any stack-frame rounding. */
9711 offsets = arm_get_frame_offsets ();
9713 if (use_return_insn (FALSE, NULL)
9714 && return_used_this_function
9715 && offsets->saved_regs != offsets->outgoing_args
9716 && !frame_pointer_needed)
9717 abort ();
9719 /* Reset the ARM-specific per-function variables. */
9720 after_arm_reorg = 0;
9724 /* Generate and emit an insn that we will recognize as a push_multi.
9725 Unfortunately, since this insn does not reflect very well the actual
9726 semantics of the operation, we need to annotate the insn for the benefit
9727 of DWARF2 frame unwind information. */
9728 static rtx
9729 emit_multi_reg_push (unsigned long mask)
9731 int num_regs = 0;
9732 int num_dwarf_regs;
9733 int i, j;
9734 rtx par;
9735 rtx dwarf;
9736 int dwarf_par_index;
9737 rtx tmp, reg;
9739 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9740 if (mask & (1 << i))
9741 num_regs++;
9743 if (num_regs == 0 || num_regs > 16)
9744 abort ();
9746 /* We don't record the PC in the dwarf frame information. */
9747 num_dwarf_regs = num_regs;
9748 if (mask & (1 << PC_REGNUM))
9749 num_dwarf_regs--;
9751 /* For the body of the insn we are going to generate an UNSPEC in
9752 parallel with several USEs. This allows the insn to be recognized
9753 by the push_multi pattern in the arm.md file. The insn looks
9754 something like this:
9756 (parallel [
9757 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9758 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9759 (use (reg:SI 11 fp))
9760 (use (reg:SI 12 ip))
9761 (use (reg:SI 14 lr))
9762 (use (reg:SI 15 pc))
9765 For the frame note however, we try to be more explicit and actually
9766 show each register being stored into the stack frame, plus a (single)
9767 decrement of the stack pointer. We do it this way in order to be
9768 friendly to the stack unwinding code, which only wants to see a single
9769 stack decrement per instruction. The RTL we generate for the note looks
9770 something like this:
9772 (sequence [
9773 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9774 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9775 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9776 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9777 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9780 This sequence is used both by the code to support stack unwinding for
9781 exceptions handlers and the code to generate dwarf2 frame debugging. */
9783 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9784 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9785 dwarf_par_index = 1;
9787 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9789 if (mask & (1 << i))
9791 reg = gen_rtx_REG (SImode, i);
9793 XVECEXP (par, 0, 0)
9794 = gen_rtx_SET (VOIDmode,
9795 gen_rtx_MEM (BLKmode,
9796 gen_rtx_PRE_DEC (BLKmode,
9797 stack_pointer_rtx)),
9798 gen_rtx_UNSPEC (BLKmode,
9799 gen_rtvec (1, reg),
9800 UNSPEC_PUSH_MULT));
9802 if (i != PC_REGNUM)
9804 tmp = gen_rtx_SET (VOIDmode,
9805 gen_rtx_MEM (SImode, stack_pointer_rtx),
9806 reg);
9807 RTX_FRAME_RELATED_P (tmp) = 1;
9808 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9809 dwarf_par_index++;
9812 break;
9816 for (j = 1, i++; j < num_regs; i++)
9818 if (mask & (1 << i))
9820 reg = gen_rtx_REG (SImode, i);
9822 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9824 if (i != PC_REGNUM)
9826 tmp = gen_rtx_SET (VOIDmode,
9827 gen_rtx_MEM (SImode,
9828 plus_constant (stack_pointer_rtx,
9829 4 * j)),
9830 reg);
9831 RTX_FRAME_RELATED_P (tmp) = 1;
9832 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9835 j++;
9839 par = emit_insn (par);
9841 tmp = gen_rtx_SET (SImode,
9842 stack_pointer_rtx,
9843 gen_rtx_PLUS (SImode,
9844 stack_pointer_rtx,
9845 GEN_INT (-4 * num_regs)));
9846 RTX_FRAME_RELATED_P (tmp) = 1;
9847 XVECEXP (dwarf, 0, 0) = tmp;
9849 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9850 REG_NOTES (par));
9851 return par;
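/* Similarly, emit an SFM insn pushing COUNT FPA registers starting at
BASE_REG, annotated so that the DWARF unwinder sees the individual
12-byte stores and a single stack decrement of 12 * COUNT. */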
9854 static rtx
9855 emit_sfm (int base_reg, int count)
9857 rtx par;
9858 rtx dwarf;
9859 rtx tmp, reg;
9860 int i;
9862 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9863 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9865 reg = gen_rtx_REG (XFmode, base_reg++);
9867 XVECEXP (par, 0, 0)
9868 = gen_rtx_SET (VOIDmode,
9869 gen_rtx_MEM (BLKmode,
9870 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9871 gen_rtx_UNSPEC (BLKmode,
9872 gen_rtvec (1, reg),
9873 UNSPEC_PUSH_MULT));
9874 tmp = gen_rtx_SET (VOIDmode,
9875 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9876 RTX_FRAME_RELATED_P (tmp) = 1;
9877 XVECEXP (dwarf, 0, 1) = tmp;
9879 for (i = 1; i < count; i++)
9881 reg = gen_rtx_REG (XFmode, base_reg++);
9882 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9884 tmp = gen_rtx_SET (VOIDmode,
9885 gen_rtx_MEM (XFmode,
9886 plus_constant (stack_pointer_rtx,
9887 i * 12)),
9888 reg);
9889 RTX_FRAME_RELATED_P (tmp) = 1;
9890 XVECEXP (dwarf, 0, i + 1) = tmp;
9893 tmp = gen_rtx_SET (VOIDmode,
9894 stack_pointer_rtx,
9895 gen_rtx_PLUS (SImode,
9896 stack_pointer_rtx,
9897 GEN_INT (-12 * count)));
9898 RTX_FRAME_RELATED_P (tmp) = 1;
9899 XVECEXP (dwarf, 0, 0) = tmp;
9901 par = emit_insn (par);
9902 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9903 REG_NOTES (par));
9904 return par;
9908 /* Return true if the current function needs to save/restore LR. */
9910 static bool
9911 thumb_force_lr_save (void)
9913 return !cfun->machine->lr_save_eliminated
9914 && (!leaf_function_p ()
9915 || thumb_far_jump_used_p ()
9916 || regs_ever_live [LR_REGNUM]);
9920 /* Compute the distance from register FROM to register TO.
9921 These can be the arg pointer (26), the soft frame pointer (25),
9922 the stack pointer (13) or the hard frame pointer (11).
9923 In thumb mode r7 is used as the soft frame pointer, if needed.
9924 Typical stack layout looks like this:
9926 old stack pointer -> | |
9927 ----
9928 | | \
9929 | | saved arguments for
9930 | | vararg functions
9931 | | /
9933 hard FP & arg pointer -> | | \
9934 | | stack
9935 | | frame
9936 | | /
9938 | | \
9939 | | call saved
9940 | | registers
9941 soft frame pointer -> | | /
9943 | | \
9944 | | local
9945 | | variables
9946 | | /
9948 | | \
9949 | | outgoing
9950 | | arguments
9951 current stack pointer -> | | /
9954 For a given function some or all of these stack components
9955 may not be needed, giving rise to the possibility of
9956 eliminating some of the registers.
9958 The values returned by this function must reflect the behavior
9959 of arm_expand_prologue() and arm_compute_save_reg_mask().
9961 The sign of the number returned reflects the direction of stack
9962 growth, so the values are positive for all eliminations except
9963 from the soft frame pointer to the hard frame pointer.
9965 SFP may point just inside the local variables block to ensure correct
9966 alignment. */
9969 /* Calculate stack offsets. These are used to calculate register elimination
9970 offsets and in prologue/epilogue code. */
9972 static arm_stack_offsets *
9973 arm_get_frame_offsets (void)
9975 struct arm_stack_offsets *offsets;
9976 unsigned long func_type;
9977 int leaf;
9978 int saved;
9979 HOST_WIDE_INT frame_size;
9981 offsets = &cfun->machine->stack_offsets;
9983 /* We need to know if we are a leaf function. Unfortunately, it
9984 is possible to be called after start_sequence has been called,
9985 which causes get_insns to return the insns for the sequence,
9986 not the function, which will cause leaf_function_p to return
9987 the incorrect result.
9988 To work around this, we cache the computed frame offsets. We only need
9989 to know about leaf functions once reload has completed, and the
9990 frame size cannot be changed after that time, so we can safely
9991 use the cached value. */
9993 if (reload_completed)
9994 return offsets;
9996 /* Initially this is the size of the local variables. It will be translated
9997 into an offset once we have determined the size of preceding data. */
9998 frame_size = ROUND_UP_WORD (get_frame_size ());
10000 leaf = leaf_function_p ();
10002 /* Space for variadic functions. */
10003 offsets->saved_args = current_function_pretend_args_size;
10005 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10007 if (TARGET_ARM)
10009 unsigned int regno;
10011 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10013 /* We know that SP will be doubleword aligned on entry, and we must
10014 preserve that condition at any subroutine call. We also require the
10015 soft frame pointer to be doubleword aligned. */
10017 if (TARGET_REALLY_IWMMXT)
10019 /* Check for the call-saved iWMMXt registers. */
10020 for (regno = FIRST_IWMMXT_REGNUM;
10021 regno <= LAST_IWMMXT_REGNUM;
10022 regno++)
10023 if (regs_ever_live [regno] && ! call_used_regs [regno])
10024 saved += 8;
10027 func_type = arm_current_func_type ();
10028 if (! IS_VOLATILE (func_type))
10030 /* Space for saved FPA registers. */
10031 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10032 if (regs_ever_live[regno] && ! call_used_regs[regno])
10033 saved += 12;
10035 /* Space for saved VFP registers. */
10036 if (TARGET_HARD_FLOAT && TARGET_VFP)
10037 saved += arm_get_vfp_saved_size ();
10040 else /* TARGET_THUMB */
10042 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10043 if (TARGET_BACKTRACE)
10044 saved += 16;
10047 /* Saved registers include the stack frame. */
10048 offsets->saved_regs = offsets->saved_args + saved;
10049 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10050 /* A leaf function does not need any stack alignment if it has nothing
10051 on the stack. */
10052 if (leaf && frame_size == 0)
10054 offsets->outgoing_args = offsets->soft_frame;
10055 return offsets;
10058 /* Ensure SFP has the correct alignment. */
10059 if (ARM_DOUBLEWORD_ALIGN
10060 && (offsets->soft_frame & 7))
10061 offsets->soft_frame += 4;
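/* E.g. with 4 bytes of pretend args and 16 bytes of saved registers,
saved_regs is 20; with no interworking slot, soft_frame would then be
20, which is not doubleword aligned, so it is padded to 24. */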
10063 offsets->outgoing_args = offsets->soft_frame + frame_size
10064 + current_function_outgoing_args_size;
10066 if (ARM_DOUBLEWORD_ALIGN)
10068 /* Ensure SP remains doubleword aligned. */
10069 if (offsets->outgoing_args & 7)
10070 offsets->outgoing_args += 4;
10071 if (offsets->outgoing_args & 7)
10072 abort ();
10075 return offsets;
10079 /* Calculate the relative offsets for the different stack pointers. Positive
10080 offsets are in the direction of stack growth. */
10082 HOST_WIDE_INT
10083 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10085 arm_stack_offsets *offsets;
10087 offsets = arm_get_frame_offsets ();
10089 /* OK, now we have enough information to compute the distances.
10090 There must be an entry in these switch tables for each pair
10091 of registers in ELIMINABLE_REGS, even if some of the entries
10092 seem to be redundant or useless. */
10093 switch (from)
10095 case ARG_POINTER_REGNUM:
10096 switch (to)
10098 case THUMB_HARD_FRAME_POINTER_REGNUM:
10099 return 0;
10101 case FRAME_POINTER_REGNUM:
10102 /* This is the reverse of the soft frame pointer
10103 to hard frame pointer elimination below. */
10104 return offsets->soft_frame - offsets->saved_args;
10106 case ARM_HARD_FRAME_POINTER_REGNUM:
10107 /* If there is no stack frame then the hard
10108 frame pointer and the arg pointer coincide. */
10109 if (offsets->frame == offsets->saved_regs)
10110 return 0;
10111 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10112 return (frame_pointer_needed
10113 && cfun->static_chain_decl != NULL
10114 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10116 case STACK_POINTER_REGNUM:
10117 /* If nothing has been pushed on the stack at all
10118 then this will return -4. This *is* correct! */
10119 return offsets->outgoing_args - (offsets->saved_args + 4);
10121 default:
10122 abort ();
10124 break;
10126 case FRAME_POINTER_REGNUM:
10127 switch (to)
10129 case THUMB_HARD_FRAME_POINTER_REGNUM:
10130 return 0;
10132 case ARM_HARD_FRAME_POINTER_REGNUM:
10133 /* The hard frame pointer points to the top entry in the
10134 stack frame. The soft frame pointer to the bottom entry
10135 in the stack frame. If there is no stack frame at all,
10136 then they are identical. */
10138 return offsets->frame - offsets->soft_frame;
10140 case STACK_POINTER_REGNUM:
10141 return offsets->outgoing_args - offsets->soft_frame;
10143 default:
10144 abort ();
10146 break;
10148 default:
10149 /* You cannot eliminate from the stack pointer.
10150 In theory you could eliminate from the hard frame
10151 pointer to the stack pointer, but this will never
10152 happen, since if a stack frame is not needed the
10153 hard frame pointer will never be used. */
10154 abort ();
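/* Tying this to the worked example above: with saved_args == 0 and
   outgoing_args == 32, eliminating ARG_POINTER_REGNUM to
   STACK_POINTER_REGNUM gives 32 - (0 + 4) = 28; the "-4 when nothing
   has been pushed" remark falls out of the same formula with
   outgoing_args == 0.  */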
10159 /* Generate the prologue instructions for entry into an ARM function. */
10160 void
10161 arm_expand_prologue (void)
10163 int reg;
10164 rtx amount;
10165 rtx insn;
10166 rtx ip_rtx;
10167 unsigned long live_regs_mask;
10168 unsigned long func_type;
10169 int fp_offset = 0;
10170 int saved_pretend_args = 0;
10171 int saved_regs = 0;
10172 unsigned HOST_WIDE_INT args_to_push;
10173 arm_stack_offsets *offsets;
10175 func_type = arm_current_func_type ();
10177 /* Naked functions don't have prologues. */
10178 if (IS_NAKED (func_type))
10179 return;
10181 /* Make a copy of c_f_p_a_s (current_function_pretend_args_size) as we may need to modify it locally. */
10182 args_to_push = current_function_pretend_args_size;
10184 /* Compute which registers we will have to save onto the stack. */
10185 live_regs_mask = arm_compute_save_reg_mask ();
10187 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10189 if (frame_pointer_needed)
10191 if (IS_INTERRUPT (func_type))
10193 /* Interrupt functions must not corrupt any registers.
10194 Creating a frame pointer however, corrupts the IP
10195 register, so we must push it first. */
10196 insn = emit_multi_reg_push (1 << IP_REGNUM);
10198 /* Do not set RTX_FRAME_RELATED_P on this insn.
10199 The dwarf stack unwinding code only wants to see one
10200 stack decrement per function, and this is not it. If
10201 this instruction is labeled as being part of the frame
10202 creation sequence then dwarf2out_frame_debug_expr will
10203 abort when it encounters the assignment of IP to FP
10204 later on, since the use of SP here establishes SP as
10205 the CFA register and not IP.
10207 Anyway this instruction is not really part of the stack
10208 frame creation although it is part of the prologue. */
10210 else if (IS_NESTED (func_type))
10212 /* The static chain register is the same as the IP register
10213 used as a scratch register during stack frame creation.
10214 To get around this, we need to find somewhere to store IP
10215 whilst the frame is being created. We try the following
10216 places in order:
10218 1. The last argument register.
10219 2. A slot on the stack above the frame. (This only
10220 works if the function is not a varargs function).
10221 3. Register r3, after pushing the argument registers
10222 onto the stack.
10224 Note - we only need to tell the dwarf2 backend about the SP
10225 adjustment in the second variant; the static chain register
10226 doesn't need to be unwound, as it doesn't contain a value
10227 inherited from the caller. */
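/* For instance, the second variant below stores IP with a
   pre-decrement of SP, which assembles to something like
   (illustrative):

     str ip, [sp, #-4]!

   hence the fp_offset of 4 and the SP-adjust note handed to the
   dwarf backend.  */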
10229 if (regs_ever_live[3] == 0)
10231 insn = gen_rtx_REG (SImode, 3);
10232 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10233 insn = emit_insn (insn);
10235 else if (args_to_push == 0)
10237 rtx dwarf;
10238 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10239 insn = gen_rtx_MEM (SImode, insn);
10240 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10241 insn = emit_insn (insn);
10243 fp_offset = 4;
10245 /* Just tell the dwarf backend that we adjusted SP. */
10246 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10247 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10248 GEN_INT (-fp_offset)));
10249 RTX_FRAME_RELATED_P (insn) = 1;
10250 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10251 dwarf, REG_NOTES (insn));
10253 else
10255 /* Store the args on the stack. */
10256 if (cfun->machine->uses_anonymous_args)
10257 insn = emit_multi_reg_push
10258 ((0xf0 >> (args_to_push / 4)) & 0xf);
10259 else
10260 insn = emit_insn
10261 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10262 GEN_INT (- args_to_push)));
10264 RTX_FRAME_RELATED_P (insn) = 1;
10266 saved_pretend_args = 1;
10267 fp_offset = args_to_push;
10268 args_to_push = 0;
10270 /* Now reuse r3 to preserve IP. */
10271 insn = gen_rtx_REG (SImode, 3);
10272 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10273 (void) emit_insn (insn);
10277 if (fp_offset)
10279 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10280 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10282 else
10283 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10285 insn = emit_insn (insn);
10286 RTX_FRAME_RELATED_P (insn) = 1;
10289 if (args_to_push)
10291 /* Push the argument registers, or reserve space for them. */
10292 if (cfun->machine->uses_anonymous_args)
10293 insn = emit_multi_reg_push
10294 ((0xf0 >> (args_to_push / 4)) & 0xf);
10295 else
10296 insn = emit_insn
10297 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10298 GEN_INT (- args_to_push)));
10299 RTX_FRAME_RELATED_P (insn) = 1;
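/* The mask arithmetic above is worth unpacking: with
   args_to_push == 8, (0xf0 >> (8 / 4)) & 0xf == 0x3c & 0xf == 0xc,
   a mask selecting r2 and r3, the two argument registers backing an
   8-byte pretend-args area.  */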
10302 /* If this is an interrupt service routine, and the link register
10303 is going to be pushed, and we are not creating a stack frame,
10304 (which would involve an extra push of IP and a pop in the epilogue)
10305 subtracting four from LR now will mean that the function return
10306 can be done with a single instruction. */
10307 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10308 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10309 && ! frame_pointer_needed)
10310 emit_insn (gen_rtx_SET (SImode,
10311 gen_rtx_REG (SImode, LR_REGNUM),
10312 gen_rtx_PLUS (SImode,
10313 gen_rtx_REG (SImode, LR_REGNUM),
10314 GEN_INT (-4))));
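/* The intent, as a sketch: an ISR normally has to return with
   SUBS PC, LR, #4; by folding the -4 into LR before it is pushed,
   the epilogue can instead pop straight into PC with a single
   LDM.  */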
10316 if (live_regs_mask)
10318 insn = emit_multi_reg_push (live_regs_mask);
10319 saved_regs += bit_count (live_regs_mask) * 4;
10320 RTX_FRAME_RELATED_P (insn) = 1;
10323 if (TARGET_IWMMXT)
10324 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10325 if (regs_ever_live[reg] && ! call_used_regs [reg])
10327 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10328 insn = gen_rtx_MEM (V2SImode, insn);
10329 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10330 gen_rtx_REG (V2SImode, reg)));
10331 RTX_FRAME_RELATED_P (insn) = 1;
10332 saved_regs += 8;
10335 if (! IS_VOLATILE (func_type))
10337 int start_reg;
10339 /* Save any floating point call-saved registers used by this
10340 function. */
10341 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10343 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10344 if (regs_ever_live[reg] && !call_used_regs[reg])
10346 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10347 insn = gen_rtx_MEM (XFmode, insn);
10348 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10349 gen_rtx_REG (XFmode, reg)));
10350 RTX_FRAME_RELATED_P (insn) = 1;
10351 saved_regs += 12;
10354 else
10356 start_reg = LAST_FPA_REGNUM;
10358 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10360 if (regs_ever_live[reg] && !call_used_regs[reg])
10362 if (start_reg - reg == 3)
10364 insn = emit_sfm (reg, 4);
10365 RTX_FRAME_RELATED_P (insn) = 1;
10366 saved_regs += 48;
10367 start_reg = reg - 1;
10370 else
10372 if (start_reg != reg)
10374 insn = emit_sfm (reg + 1, start_reg - reg);
10375 RTX_FRAME_RELATED_P (insn) = 1;
10376 saved_regs += (start_reg - reg) * 12;
10378 start_reg = reg - 1;
10382 if (start_reg != reg)
10384 insn = emit_sfm (reg + 1, start_reg - reg);
10385 saved_regs += (start_reg - reg) * 12;
10386 RTX_FRAME_RELATED_P (insn) = 1;
10389 if (TARGET_HARD_FLOAT && TARGET_VFP)
10391 start_reg = FIRST_VFP_REGNUM;
10393 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10395 if ((!regs_ever_live[reg] || call_used_regs[reg])
10396 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10398 if (start_reg != reg)
10399 saved_regs += vfp_emit_fstmx (start_reg,
10400 (reg - start_reg) / 2);
10401 start_reg = reg + 2;
10404 if (start_reg != reg)
10405 saved_regs += vfp_emit_fstmx (start_reg,
10406 (reg - start_reg) / 2);
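/* Thus maximal runs of live VFP register pairs collapse into single
   fstmx instructions: for example, if d8-d10 (s16-s21) are live and
   d11 is dead, reaching the dead pair flushes one fstmx covering the
   three preceding double registers.  */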
10410 if (frame_pointer_needed)
10412 /* Create the new frame pointer. */
10413 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10414 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10415 RTX_FRAME_RELATED_P (insn) = 1;
10417 if (IS_NESTED (func_type))
10419 /* Recover the static chain register. */
10420 if (regs_ever_live [3] == 0
10421 || saved_pretend_args)
10422 insn = gen_rtx_REG (SImode, 3);
10423 else /* if (current_function_pretend_args_size == 0) */
10425 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10426 GEN_INT (4));
10427 insn = gen_rtx_MEM (SImode, insn);
10430 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10431 /* Add a USE to stop propagate_one_insn() from barfing. */
10432 emit_insn (gen_prologue_use (ip_rtx));
10436 offsets = arm_get_frame_offsets ();
10437 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10439 /* This add can produce multiple insns for a large constant, so we
10440 need to get tricky. */
10441 rtx last = get_last_insn ();
10443 amount = GEN_INT (offsets->saved_args + saved_regs
10444 - offsets->outgoing_args);
10446 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10447 amount));
10450 last = last ? NEXT_INSN (last) : get_insns ();
10451 RTX_FRAME_RELATED_P (last) = 1;
10453 while (last != insn);
10455 /* If the frame pointer is needed, emit a special barrier that
10456 will prevent the scheduler from moving stores to the frame
10457 before the stack adjustment. */
10458 if (frame_pointer_needed)
10459 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10460 hard_frame_pointer_rtx));
10464 if (flag_pic)
10465 arm_load_pic_register (INVALID_REGNUM);
10467 /* If we are profiling, make sure no instructions are scheduled before
10468 the call to mcount. Similarly if the user has requested no
10469 scheduling in the prolog. */
10470 if (current_function_profile || TARGET_NO_SCHED_PRO)
10471 emit_insn (gen_blockage ());
10473 /* If the link register is being kept alive, with the return address in it,
10474 then make sure that it does not get reused by the ce2 pass. */
10475 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10477 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10478 cfun->machine->lr_save_eliminated = 1;
10482 /* If CODE is 'd', then the X is a condition operand and the instruction
10483 should only be executed if the condition is true.
10484 If CODE is 'D', then the X is a condition operand and the instruction
10485 should only be executed if the condition is false: however, if the mode
10486 of the comparison is CCFPEmode, then always execute the instruction -- we
10487 do this because in these circumstances !GE does not necessarily imply LT;
10488 in these cases the instruction pattern will take care to make sure that
10489 an instruction containing %d will follow, thereby undoing the effects of
10490 doing this instruction unconditionally.
10491 If CODE is 'N' then X is a floating point operand that must be negated
10492 before output.
10493 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10494 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10495 void
10496 arm_print_operand (FILE *stream, rtx x, int code)
10498 switch (code)
10500 case '@':
10501 fputs (ASM_COMMENT_START, stream);
10502 return;
10504 case '_':
10505 fputs (user_label_prefix, stream);
10506 return;
10508 case '|':
10509 fputs (REGISTER_PREFIX, stream);
10510 return;
10512 case '?':
10513 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10515 if (TARGET_THUMB)
10517 output_operand_lossage ("predicated Thumb instruction");
10518 break;
10520 if (current_insn_predicate != NULL)
10522 output_operand_lossage
10523 ("predicated instruction in conditional sequence");
10524 break;
10527 fputs (arm_condition_codes[arm_current_cc], stream);
10529 else if (current_insn_predicate)
10531 enum arm_cond_code code;
10533 if (TARGET_THUMB)
10535 output_operand_lossage ("predicated Thumb instruction");
10536 break;
10539 code = get_arm_condition_code (current_insn_predicate);
10540 fputs (arm_condition_codes[code], stream);
10542 return;
10544 case 'N':
10546 REAL_VALUE_TYPE r;
10547 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10548 r = REAL_VALUE_NEGATE (r);
10549 fprintf (stream, "%s", fp_const_from_val (&r));
10551 return;
10553 case 'B':
10554 if (GET_CODE (x) == CONST_INT)
10556 HOST_WIDE_INT val;
10557 val = ARM_SIGN_EXTEND (~INTVAL (x));
10558 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10560 else
10562 putc ('~', stream);
10563 output_addr_const (stream, x);
10565 return;
10567 case 'i':
10568 fprintf (stream, "%s", arithmetic_instr (x, 1));
10569 return;
10571 /* Truncate Cirrus shift counts. */
10572 case 's':
10573 if (GET_CODE (x) == CONST_INT)
10575 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10576 return;
10578 arm_print_operand (stream, x, 0);
10579 return;
10581 case 'I':
10582 fprintf (stream, "%s", arithmetic_instr (x, 0));
10583 return;
10585 case 'S':
10587 HOST_WIDE_INT val;
10588 const char * shift = shift_op (x, &val);
10590 if (shift)
10592 fprintf (stream, ", %s ", shift);
10593 if (val == -1)
10594 arm_print_operand (stream, XEXP (x, 1), 0);
10595 else
10596 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10599 return;
10601 /* An explanation of the 'Q', 'R' and 'H' register operands:
10603 In a pair of registers containing a DI or DF value the 'Q'
10604 operand returns the register number of the register containing
10605 the least significant part of the value. The 'R' operand returns
10606 the register number of the register containing the most
10607 significant part of the value.
10609 The 'H' operand returns the higher of the two register numbers.
10610 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10611 same as the 'Q' operand, since the most significant part of the
10612 value is held in the lower number register. The reverse is true
10613 on systems where WORDS_BIG_ENDIAN is false.
10615 The purpose of these operands is to distinguish between cases
10616 where the endian-ness of the values is important (for example
10617 when they are added together), and cases where the endian-ness
10618 is irrelevant, but the order of register operations is important.
10619 For example when loading a value from memory into a register
10620 pair, the endian-ness does not matter. Provided that the value
10621 from the lower memory address is put into the lower numbered
10622 register, and the value from the higher address is put into the
10623 higher numbered register, the load will work regardless of whether
10624 the value being loaded is big-wordian or little-wordian. The
10625 order of the two register loads can matter however, if the address
10626 of the memory location is actually held in one of the registers
10627 being overwritten by the load. */
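/* Concretely, for a DImode value held in r0/r1 with
   WORDS_BIG_ENDIAN false: %Q prints r0 (the least significant
   word), %R prints r1 (the most significant word), and %H prints r1
   (the higher-numbered register) regardless of endianness.  */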
10628 case 'Q':
10629 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10631 output_operand_lossage ("invalid operand for code '%c'", code);
10632 return;
10635 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10636 return;
10638 case 'R':
10639 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10641 output_operand_lossage ("invalid operand for code '%c'", code);
10642 return;
10645 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10646 return;
10648 case 'H':
10649 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10651 output_operand_lossage ("invalid operand for code '%c'", code);
10652 return;
10655 asm_fprintf (stream, "%r", REGNO (x) + 1);
10656 return;
10658 case 'm':
10659 asm_fprintf (stream, "%r",
10660 GET_CODE (XEXP (x, 0)) == REG
10661 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10662 return;
10664 case 'M':
10665 asm_fprintf (stream, "{%r-%r}",
10666 REGNO (x),
10667 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10668 return;
10670 case 'd':
10671 /* CONST_TRUE_RTX means always -- that's the default. */
10672 if (x == const_true_rtx)
10673 return;
10675 if (!COMPARISON_P (x))
10677 output_operand_lossage ("invalid operand for code '%c'", code);
10678 return;
10681 fputs (arm_condition_codes[get_arm_condition_code (x)],
10682 stream);
10683 return;
10685 case 'D':
10686 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10687 want to do that. */
10688 if (x == const_true_rtx)
10690 output_operand_lossage ("instruction never executed");
10691 return;
10693 if (!COMPARISON_P (x))
10695 output_operand_lossage ("invalid operand for code '%c'", code);
10696 return;
10699 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10700 (get_arm_condition_code (x))],
10701 stream);
10702 return;
10704 /* Cirrus registers can be accessed in a variety of ways:
10705 single floating point (f)
10706 double floating point (d)
10707 32bit integer (fx)
10708 64bit integer (dx). */
10709 case 'W': /* Cirrus register in F mode. */
10710 case 'X': /* Cirrus register in D mode. */
10711 case 'Y': /* Cirrus register in FX mode. */
10712 case 'Z': /* Cirrus register in DX mode. */
10713 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10714 abort ();
10716 fprintf (stream, "mv%s%s",
10717 code == 'W' ? "f"
10718 : code == 'X' ? "d"
10719 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10721 return;
10723 /* Print a Cirrus register in the mode specified by the register's mode. */
10724 case 'V':
10726 int mode = GET_MODE (x);
10728 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10730 output_operand_lossage ("invalid operand for code '%c'", code);
10731 return;
10734 fprintf (stream, "mv%s%s",
10735 mode == DFmode ? "d"
10736 : mode == SImode ? "fx"
10737 : mode == DImode ? "dx"
10738 : "f", reg_names[REGNO (x)] + 2);
10740 return;
10743 case 'U':
10744 if (GET_CODE (x) != REG
10745 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10746 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10747 /* Bad value for wCG register number. */
10749 output_operand_lossage ("invalid operand for code '%c'", code);
10750 return;
10753 else
10754 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10755 return;
10757 /* Print an iWMMXt control register name. */
10758 case 'w':
10759 if (GET_CODE (x) != CONST_INT
10760 || INTVAL (x) < 0
10761 || INTVAL (x) >= 16)
10762 /* Bad value for wC register number. */
10764 output_operand_lossage ("invalid operand for code '%c'", code);
10765 return;
10768 else
10770 static const char * wc_reg_names [16] =
10772 "wCID", "wCon", "wCSSF", "wCASF",
10773 "wC4", "wC5", "wC6", "wC7",
10774 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10775 "wC12", "wC13", "wC14", "wC15"
10778 fprintf (stream, "%s", wc_reg_names [INTVAL (x)]);
10780 return;
10782 /* Print a VFP double precision register name. */
10783 case 'P':
10785 int mode = GET_MODE (x);
10786 int num;
10788 if (mode != DImode && mode != DFmode)
10790 output_operand_lossage ("invalid operand for code '%c'", code);
10791 return;
10794 if (GET_CODE (x) != REG
10795 || !IS_VFP_REGNUM (REGNO (x)))
10797 output_operand_lossage ("invalid operand for code '%c'", code);
10798 return;
10801 num = REGNO(x) - FIRST_VFP_REGNUM;
10802 if (num & 1)
10804 output_operand_lossage ("invalid operand for code '%c'", code);
10805 return;
10808 fprintf (stream, "d%d", num >> 1);
10810 return;
10812 default:
10813 if (x == 0)
10815 output_operand_lossage ("missing operand");
10816 return;
10819 if (GET_CODE (x) == REG)
10820 asm_fprintf (stream, "%r", REGNO (x));
10821 else if (GET_CODE (x) == MEM)
10823 output_memory_reference_mode = GET_MODE (x);
10824 output_address (XEXP (x, 0));
10826 else if (GET_CODE (x) == CONST_DOUBLE)
10827 fprintf (stream, "#%s", fp_immediate_constant (x));
10828 else if (GET_CODE (x) == NEG)
10829 abort (); /* This should never happen now. */
10830 else
10832 fputc ('#', stream);
10833 output_addr_const (stream, x);
10838 #ifndef AOF_ASSEMBLER
10839 /* Target hook for assembling integer objects. The ARM version needs to
10840 handle word-sized values specially. */
10841 static bool
10842 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10844 if (size == UNITS_PER_WORD && aligned_p)
10846 fputs ("\t.word\t", asm_out_file);
10847 output_addr_const (asm_out_file, x);
10849 /* Mark symbols as position independent. We only do this in the
10850 .text segment, not in the .data segment. */
10851 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10852 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10854 if (GET_CODE (x) == SYMBOL_REF
10855 && (CONSTANT_POOL_ADDRESS_P (x)
10856 || SYMBOL_REF_LOCAL_P (x)))
10857 fputs ("(GOTOFF)", asm_out_file);
10858 else if (GET_CODE (x) == LABEL_REF)
10859 fputs ("(GOTOFF)", asm_out_file);
10860 else
10861 fputs ("(GOT)", asm_out_file);
10863 fputc ('\n', asm_out_file);
10864 return true;
10867 if (arm_vector_mode_supported_p (GET_MODE (x)))
10869 int i, units;
10871 if (GET_CODE (x) != CONST_VECTOR)
10872 abort ();
10874 units = CONST_VECTOR_NUNITS (x);
10876 switch (GET_MODE (x))
10878 case V2SImode: size = 4; break;
10879 case V4HImode: size = 2; break;
10880 case V8QImode: size = 1; break;
10881 default:
10882 abort ();
10885 for (i = 0; i < units; i++)
10887 rtx elt;
10889 elt = CONST_VECTOR_ELT (x, i);
10890 assemble_integer
10891 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10894 return true;
10897 return default_assemble_integer (x, size, aligned_p);
10899 #endif
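/* For example, when emitting a constant-table entry for a local
   symbol (call it foo) under flag_pic, the output is along the
   lines of:

     .word foo(GOTOFF)

   whereas a non-local symbol gets the (GOT) suffix instead.  */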
10901 /* A finite state machine takes care of noticing whether or not instructions
10902 can be conditionally executed, and thus decreases execution time and code
10903 size by deleting branch instructions. The fsm is controlled by
10904 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10906 /* The states of the fsm controlling condition codes are:
10907 0: normal, do nothing special
10908 1: make ASM_OUTPUT_OPCODE not output this instruction
10909 2: make ASM_OUTPUT_OPCODE not output this instruction
10910 3: make instructions conditional
10911 4: make instructions conditional
10913 State transitions (state->state by whom under condition):
10914 0 -> 1 final_prescan_insn if the `target' is a label
10915 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10916 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10917 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10918 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10919 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10920 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10921 (the target insn is arm_target_insn).
10923 If the jump clobbers the conditions then we use states 2 and 4.
10925 A similar thing can be done with conditional return insns.
10927 XXX In case the `target' is an unconditional branch, this conditionalising
10928 of the instructions always reduces code size, but not always execution
10929 time. But then, I want to reduce the code size to somewhere near what
10930 /bin/cc produces. */
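/* As a concrete illustration of the 0 -> 1 -> 3 -> 0 path, a branch
   over a single instruction:

       cmp   r0, #0            cmp   r0, #0
       beq   .L1         =>    movne r1, #1
       mov   r1, #1
     .L1:

   The beq is suppressed (state 1), the mov is predicated with the
   inverse condition (state 3), and reaching the label ends the
   run.  */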
10932 /* Returns the index of the ARM condition code string in
10933 `arm_condition_codes'. COMPARISON should be an rtx like
10934 `(eq (...) (...))'. */
10935 static enum arm_cond_code
10936 get_arm_condition_code (rtx comparison)
10938 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10939 int code;
10940 enum rtx_code comp_code = GET_CODE (comparison);
10942 if (GET_MODE_CLASS (mode) != MODE_CC)
10943 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10944 XEXP (comparison, 1));
10946 switch (mode)
10948 case CC_DNEmode: code = ARM_NE; goto dominance;
10949 case CC_DEQmode: code = ARM_EQ; goto dominance;
10950 case CC_DGEmode: code = ARM_GE; goto dominance;
10951 case CC_DGTmode: code = ARM_GT; goto dominance;
10952 case CC_DLEmode: code = ARM_LE; goto dominance;
10953 case CC_DLTmode: code = ARM_LT; goto dominance;
10954 case CC_DGEUmode: code = ARM_CS; goto dominance;
10955 case CC_DGTUmode: code = ARM_HI; goto dominance;
10956 case CC_DLEUmode: code = ARM_LS; goto dominance;
10957 case CC_DLTUmode: code = ARM_CC;
10959 dominance:
10960 if (comp_code != EQ && comp_code != NE)
10961 abort ();
10963 if (comp_code == EQ)
10964 return ARM_INVERSE_CONDITION_CODE (code);
10965 return code;
10967 case CC_NOOVmode:
10968 switch (comp_code)
10970 case NE: return ARM_NE;
10971 case EQ: return ARM_EQ;
10972 case GE: return ARM_PL;
10973 case LT: return ARM_MI;
10974 default: abort ();
10977 case CC_Zmode:
10978 switch (comp_code)
10980 case NE: return ARM_NE;
10981 case EQ: return ARM_EQ;
10982 default: abort ();
10985 case CC_Nmode:
10986 switch (comp_code)
10988 case NE: return ARM_MI;
10989 case EQ: return ARM_PL;
10990 default: abort ();
10993 case CCFPEmode:
10994 case CCFPmode:
10995 /* These encodings assume that AC=1 in the FPA system control
10996 byte. This allows us to handle all cases except UNEQ and
10997 LTGT. */
10998 switch (comp_code)
11000 case GE: return ARM_GE;
11001 case GT: return ARM_GT;
11002 case LE: return ARM_LS;
11003 case LT: return ARM_MI;
11004 case NE: return ARM_NE;
11005 case EQ: return ARM_EQ;
11006 case ORDERED: return ARM_VC;
11007 case UNORDERED: return ARM_VS;
11008 case UNLT: return ARM_LT;
11009 case UNLE: return ARM_LE;
11010 case UNGT: return ARM_HI;
11011 case UNGE: return ARM_PL;
11012 /* UNEQ and LTGT do not have a representation. */
11013 case UNEQ: /* Fall through. */
11014 case LTGT: /* Fall through. */
11015 default: abort ();
11018 case CC_SWPmode:
11019 switch (comp_code)
11021 case NE: return ARM_NE;
11022 case EQ: return ARM_EQ;
11023 case GE: return ARM_LE;
11024 case GT: return ARM_LT;
11025 case LE: return ARM_GE;
11026 case LT: return ARM_GT;
11027 case GEU: return ARM_LS;
11028 case GTU: return ARM_CC;
11029 case LEU: return ARM_CS;
11030 case LTU: return ARM_HI;
11031 default: abort ();
11034 case CC_Cmode:
11035 switch (comp_code)
11037 case LTU: return ARM_CS;
11038 case GEU: return ARM_CC;
11039 default: abort ();
11042 case CCmode:
11043 switch (comp_code)
11045 case NE: return ARM_NE;
11046 case EQ: return ARM_EQ;
11047 case GE: return ARM_GE;
11048 case GT: return ARM_GT;
11049 case LE: return ARM_LE;
11050 case LT: return ARM_LT;
11051 case GEU: return ARM_CS;
11052 case GTU: return ARM_HI;
11053 case LEU: return ARM_LS;
11054 case LTU: return ARM_CC;
11055 default: abort ();
11058 default: abort ();
11061 abort ();
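/* Note how CC_SWPmode undoes a swap of the compared operands: a GT
   test whose operands were exchanged when the comparison was
   canonicalized must be emitted as LT, hence the GT -> ARM_LT entry
   above, and likewise for the other signed and unsigned pairs.  */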
11064 void
11065 arm_final_prescan_insn (rtx insn)
11067 /* BODY will hold the body of INSN. */
11068 rtx body = PATTERN (insn);
11070 /* This will be 1 if we are trying to repeat the trick, and things need to be
11071 reversed if it appears to fail. */
11072 int reverse = 0;
11074 /* JUMP_CLOBBERS being one implies that the condition codes are clobbered
11075 if the branch is taken, even if the rtl suggests otherwise. It also
11076 means that we have to grub around within the jump expression to find
11077 out what the conditions are when the jump isn't taken. */
11078 int jump_clobbers = 0;
11080 /* If we start with a return insn, we only succeed if we find another one. */
11081 int seeking_return = 0;
11083 /* START_INSN will hold the insn from where we start looking. This is the
11084 first insn after the following code_label if REVERSE is true. */
11085 rtx start_insn = insn;
11087 /* If in state 4, check if the target branch is reached, in order to
11088 change back to state 0. */
11089 if (arm_ccfsm_state == 4)
11091 if (insn == arm_target_insn)
11093 arm_target_insn = NULL;
11094 arm_ccfsm_state = 0;
11096 return;
11099 /* If in state 3, it is possible to repeat the trick, if this insn is an
11100 unconditional branch to a label, and immediately following this branch
11101 is the previous target label which is only used once, and the label this
11102 branch jumps to is not too far off. */
11103 if (arm_ccfsm_state == 3)
11105 if (simplejump_p (insn))
11107 start_insn = next_nonnote_insn (start_insn);
11108 if (GET_CODE (start_insn) == BARRIER)
11110 /* XXX Isn't this always a barrier? */
11111 start_insn = next_nonnote_insn (start_insn);
11113 if (GET_CODE (start_insn) == CODE_LABEL
11114 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11115 && LABEL_NUSES (start_insn) == 1)
11116 reverse = TRUE;
11117 else
11118 return;
11120 else if (GET_CODE (body) == RETURN)
11122 start_insn = next_nonnote_insn (start_insn);
11123 if (GET_CODE (start_insn) == BARRIER)
11124 start_insn = next_nonnote_insn (start_insn);
11125 if (GET_CODE (start_insn) == CODE_LABEL
11126 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11127 && LABEL_NUSES (start_insn) == 1)
11129 reverse = TRUE;
11130 seeking_return = 1;
11132 else
11133 return;
11135 else
11136 return;
11139 if (arm_ccfsm_state != 0 && !reverse)
11140 abort ();
11141 if (GET_CODE (insn) != JUMP_INSN)
11142 return;
11144 /* This jump might be paralleled with a clobber of the condition codes;
11145 the jump should always come first. */
11146 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11147 body = XVECEXP (body, 0, 0);
11149 if (reverse
11150 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11151 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11153 int insns_skipped;
11154 int fail = FALSE, succeed = FALSE;
11155 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11156 int then_not_else = TRUE;
11157 rtx this_insn = start_insn, label = 0;
11159 /* If the jump cannot be done with one instruction, we cannot
11160 conditionally execute the instruction in the inverse case. */
11161 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11163 jump_clobbers = 1;
11164 return;
11167 /* Register the insn jumped to. */
11168 if (reverse)
11170 if (!seeking_return)
11171 label = XEXP (SET_SRC (body), 0);
11173 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11174 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11175 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11177 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11178 then_not_else = FALSE;
11180 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11181 seeking_return = 1;
11182 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11184 seeking_return = 1;
11185 then_not_else = FALSE;
11187 else
11188 abort ();
11190 /* See how many insns this branch skips, and what kind of insns. If all
11191 insns are okay, and the label or unconditional branch to the same
11192 label is not too far away, succeed. */
11193 for (insns_skipped = 0;
11194 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11196 rtx scanbody;
11198 this_insn = next_nonnote_insn (this_insn);
11199 if (!this_insn)
11200 break;
11202 switch (GET_CODE (this_insn))
11204 case CODE_LABEL:
11205 /* Succeed if it is the target label, otherwise fail since
11206 control falls in from somewhere else. */
11207 if (this_insn == label)
11209 if (jump_clobbers)
11211 arm_ccfsm_state = 2;
11212 this_insn = next_nonnote_insn (this_insn);
11214 else
11215 arm_ccfsm_state = 1;
11216 succeed = TRUE;
11218 else
11219 fail = TRUE;
11220 break;
11222 case BARRIER:
11223 /* Succeed if the following insn is the target label.
11224 Otherwise fail.
11225 If return insns are used then the last insn in a function
11226 will be a barrier. */
11227 this_insn = next_nonnote_insn (this_insn);
11228 if (this_insn && this_insn == label)
11230 if (jump_clobbers)
11232 arm_ccfsm_state = 2;
11233 this_insn = next_nonnote_insn (this_insn);
11235 else
11236 arm_ccfsm_state = 1;
11237 succeed = TRUE;
11239 else
11240 fail = TRUE;
11241 break;
11243 case CALL_INSN:
11244 /* The AAPCS says that conditional calls should not be
11245 used since they make interworking inefficient (the
11246 linker can't transform BL<cond> into BLX). That's
11247 only a problem if the machine has BLX. */
11248 if (arm_arch5)
11250 fail = TRUE;
11251 break;
11254 /* Succeed if the following insn is the target label, or
11255 if the following two insns are a barrier and the
11256 target label. */
11257 this_insn = next_nonnote_insn (this_insn);
11258 if (this_insn && GET_CODE (this_insn) == BARRIER)
11259 this_insn = next_nonnote_insn (this_insn);
11261 if (this_insn && this_insn == label
11262 && insns_skipped < max_insns_skipped)
11264 if (jump_clobbers)
11266 arm_ccfsm_state = 2;
11267 this_insn = next_nonnote_insn (this_insn);
11269 else
11270 arm_ccfsm_state = 1;
11271 succeed = TRUE;
11273 else
11274 fail = TRUE;
11275 break;
11277 case JUMP_INSN:
11278 /* If this is an unconditional branch to the same label, succeed.
11279 If it is to another label, do nothing. If it is conditional,
11280 fail. */
11281 /* XXX Probably, the tests for SET and the PC are
11282 unnecessary. */
11284 scanbody = PATTERN (this_insn);
11285 if (GET_CODE (scanbody) == SET
11286 && GET_CODE (SET_DEST (scanbody)) == PC)
11288 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11289 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11291 arm_ccfsm_state = 2;
11292 succeed = TRUE;
11294 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11295 fail = TRUE;
11297 /* Fail if a conditional return is undesirable (e.g. on a
11298 StrongARM), but still allow this if optimizing for size. */
11299 else if (GET_CODE (scanbody) == RETURN
11300 && !use_return_insn (TRUE, NULL)
11301 && !optimize_size)
11302 fail = TRUE;
11303 else if (GET_CODE (scanbody) == RETURN
11304 && seeking_return)
11306 arm_ccfsm_state = 2;
11307 succeed = TRUE;
11309 else if (GET_CODE (scanbody) == PARALLEL)
11311 switch (get_attr_conds (this_insn))
11313 case CONDS_NOCOND:
11314 break;
11315 default:
11316 fail = TRUE;
11317 break;
11320 else
11321 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11323 break;
11325 case INSN:
11326 /* Instructions using or affecting the condition codes make it
11327 fail. */
11328 scanbody = PATTERN (this_insn);
11329 if (!(GET_CODE (scanbody) == SET
11330 || GET_CODE (scanbody) == PARALLEL)
11331 || get_attr_conds (this_insn) != CONDS_NOCOND)
11332 fail = TRUE;
11334 /* A conditional Cirrus instruction must be followed by
11335 a non-Cirrus instruction. However, since this function
11336 conditionalizes instructions, and since by the time we
11337 get here we can no longer add instructions (nops),
11338 because shorten_branches() has already been
11339 called, we simply disable the conditionalizing of Cirrus
11340 instructions to be safe. */
11341 if (GET_CODE (scanbody) != USE
11342 && GET_CODE (scanbody) != CLOBBER
11343 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11344 fail = TRUE;
11345 break;
11347 default:
11348 break;
11351 if (succeed)
11353 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11354 arm_target_label = CODE_LABEL_NUMBER (label);
11355 else if (seeking_return || arm_ccfsm_state == 2)
11357 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11359 this_insn = next_nonnote_insn (this_insn);
11360 if (this_insn && (GET_CODE (this_insn) == BARRIER
11361 || GET_CODE (this_insn) == CODE_LABEL))
11362 abort ();
11364 if (!this_insn)
11366 /* Oh, dear! We ran off the end... give up. */
11367 recog (PATTERN (insn), insn, NULL);
11368 arm_ccfsm_state = 0;
11369 arm_target_insn = NULL;
11370 return;
11372 arm_target_insn = this_insn;
11374 else
11375 abort ();
11376 if (jump_clobbers)
11378 if (reverse)
11379 abort ();
11380 arm_current_cc =
11381 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11382 0), 0), 1));
11383 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11384 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11385 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11386 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11388 else
11390 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11391 what it was. */
11392 if (!reverse)
11393 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11394 0));
11397 if (reverse || then_not_else)
11398 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11401 /* Restore recog_data (getting the attributes of other insns can
11402 destroy this array, but final.c assumes that it remains intact
11403 across this call; since the insn has been recognized already we
11404 call recog directly). */
11405 recog (PATTERN (insn), insn, NULL);
11409 /* Returns true if REGNO is a valid register
11410 for holding a quantity of type MODE. */
11411 int
11412 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11414 if (GET_MODE_CLASS (mode) == MODE_CC)
11415 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11417 if (TARGET_THUMB)
11418 /* For the Thumb we only allow values bigger than SImode in
11419 registers 0 - 6, so that there is always a second low
11420 register available to hold the upper part of the value.
11421 We probably ought to ensure that the register is the
11422 start of an even numbered register pair. */
11423 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11425 if (IS_CIRRUS_REGNUM (regno))
11426 /* We have outlawed SI values in Cirrus registers because they
11427 reside in the lower 32 bits, but SF values reside in the
11428 upper 32 bits. This causes gcc all sorts of grief. We can't
11429 even split the registers into pairs because Cirrus SI values
11430 get sign extended to 64 bits. -- aldyh. */
11431 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11433 if (IS_VFP_REGNUM (regno))
11435 if (mode == SFmode || mode == SImode)
11436 return TRUE;
11438 /* DFmode values are only valid in even register pairs. */
11439 if (mode == DFmode)
11440 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11441 return FALSE;
11444 if (IS_IWMMXT_GR_REGNUM (regno))
11445 return mode == SImode;
11447 if (IS_IWMMXT_REGNUM (regno))
11448 return VALID_IWMMXT_REG_MODE (mode);
11450 /* We allow any value to be stored in the general registers.
11451 Restrict doubleword quantities to even register pairs so that we can
11452 use ldrd. */
11453 if (regno <= LAST_ARM_REGNUM)
11454 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11456 if ( regno == FRAME_POINTER_REGNUM
11457 || regno == ARG_POINTER_REGNUM)
11458 /* We only allow integers in the fake hard registers. */
11459 return GET_MODE_CLASS (mode) == MODE_INT;
11461 /* The only registers left are the FPA registers
11462 which we only allow to hold FP values. */
11463 return GET_MODE_CLASS (mode) == MODE_FLOAT
11464 && regno >= FIRST_FPA_REGNUM
11465 && regno <= LAST_FPA_REGNUM;
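/* For example, under this scheme a DFmode value is allowed in VFP
   register s4 (an even pair, printed as d2 by the 'P' operand code
   above) but not in s5; and with TARGET_LDRD a DImode value is
   rejected in an odd-numbered core register such as r1, keeping
   ldrd/strd usable.  */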
11468 int
11469 arm_regno_class (int regno)
11471 if (TARGET_THUMB)
11473 if (regno == STACK_POINTER_REGNUM)
11474 return STACK_REG;
11475 if (regno == CC_REGNUM)
11476 return CC_REG;
11477 if (regno < 8)
11478 return LO_REGS;
11479 return HI_REGS;
11482 if ( regno <= LAST_ARM_REGNUM
11483 || regno == FRAME_POINTER_REGNUM
11484 || regno == ARG_POINTER_REGNUM)
11485 return GENERAL_REGS;
11487 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11488 return NO_REGS;
11490 if (IS_CIRRUS_REGNUM (regno))
11491 return CIRRUS_REGS;
11493 if (IS_VFP_REGNUM (regno))
11494 return VFP_REGS;
11496 if (IS_IWMMXT_REGNUM (regno))
11497 return IWMMXT_REGS;
11499 if (IS_IWMMXT_GR_REGNUM (regno))
11500 return IWMMXT_GR_REGS;
11502 return FPA_REGS;
11505 /* Handle a special case when computing the offset
11506 of an argument from the frame pointer. */
11507 int
11508 arm_debugger_arg_offset (int value, rtx addr)
11510 rtx insn;
11512 /* We are only interested if dbxout_parms() failed to compute the offset. */
11513 if (value != 0)
11514 return 0;
11516 /* We can only cope with the case where the address is held in a register. */
11517 if (GET_CODE (addr) != REG)
11518 return 0;
11520 /* If we are using the frame pointer to point at the argument, then
11521 an offset of 0 is correct. */
11522 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11523 return 0;
11525 /* If we are using the stack pointer to point at the
11526 argument, then an offset of 0 is correct. */
11527 if ((TARGET_THUMB || !frame_pointer_needed)
11528 && REGNO (addr) == SP_REGNUM)
11529 return 0;
11531 /* Oh dear. The argument is pointed to by a register rather
11532 than being held in a register, or being stored at a known
11533 offset from the frame pointer. Since GDB only understands
11534 those two kinds of argument we must translate the address
11535 held in the register into an offset from the frame pointer.
11536 We do this by searching through the insns for the function
11537 looking to see where this register gets its value. If the
11538 register is initialized from the frame pointer plus an offset
11539 then we are in luck and we can continue, otherwise we give up.
11541 This code is exercised by producing debugging information
11542 for a function with arguments like this:
11544 double func (double a, double b, int c, double d) {return d;}
11546 Without this code the stab for parameter 'd' will be set to
11547 an offset of 0 from the frame pointer, rather than 8. */
11549 /* The if() statement says:
11551 If the insn is a normal instruction
11552 and if the insn is setting the value in a register
11553 and if the register being set is the register holding the address of the argument
11554 and if the address is computed by an addition
11555 that involves adding to a register
11556 which is the frame pointer
11557 a constant integer
11559 then... */
11561 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11563 if ( GET_CODE (insn) == INSN
11564 && GET_CODE (PATTERN (insn)) == SET
11565 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11566 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11567 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11568 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11569 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11572 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11574 break;
11578 if (value == 0)
11580 debug_rtx (addr);
11581 warning ("unable to compute real location of stacked parameter");
11582 value = 8; /* XXX magic hack */
11585 return value;
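/* In other words, the loop above pattern-matches insns of the shape
   (illustrative):

     (set (reg Rn) (plus (reg fp) (const_int 8)))

   and, on a match, reports 8 as the argument's offset from the frame
   pointer.  */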
11588 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11589 do \
11591 if ((MASK) & insn_flags) \
11592 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11593 BUILT_IN_MD, NULL, NULL_TREE); \
11595 while (0)
11597 struct builtin_description
11599 const unsigned int mask;
11600 const enum insn_code icode;
11601 const char * const name;
11602 const enum arm_builtins code;
11603 const enum rtx_code comparison;
11604 const unsigned int flag;
11607 static const struct builtin_description bdesc_2arg[] =
11609 #define IWMMXT_BUILTIN(code, string, builtin) \
11610 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11611 ARM_BUILTIN_##builtin, 0, 0 },
11613 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11614 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11615 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11616 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11617 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11618 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11619 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11620 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11621 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11622 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11623 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11624 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11625 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11626 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11627 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11628 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11629 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11630 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11631 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11632 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11633 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11634 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11635 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11636 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11637 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11638 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11639 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11640 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11641 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11642 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11643 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11644 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11645 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11646 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11647 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11648 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11649 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11650 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11651 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11652 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11653 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11654 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11655 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11656 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11657 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11658 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11659 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11660 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11661 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11662 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11663 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11664 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11665 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11666 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11667 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11668 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11669 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11670 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11672 #define IWMMXT_BUILTIN2(code, builtin) \
11673 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11675 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11676 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11677 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11678 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11679 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11680 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11681 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11682 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11683 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11684 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11685 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11686 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11687 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11688 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11689 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11690 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11691 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11692 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11693 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11694 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11695 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11696 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11697 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11698 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11699 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11700 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11701 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11702 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11703 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11704 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11705 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11706 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11709 static const struct builtin_description bdesc_1arg[] =
11711 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11712 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11713 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11714 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11715 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11716 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11717 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11718 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11719 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11720 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11721 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11722 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11723 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11724 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11725 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11726 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11727 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11728 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11731 /* Set up all the iWMMXt builtins. This is
11732 not called if TARGET_IWMMXT is zero. */
11734 static void
11735 arm_init_iwmmxt_builtins (void)
11737 const struct builtin_description * d;
11738 size_t i;
11739 tree endlink = void_list_node;
11741 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11742 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11743 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11745 tree int_ftype_int
11746 = build_function_type (integer_type_node,
11747 tree_cons (NULL_TREE, integer_type_node, endlink));
11748 tree v8qi_ftype_v8qi_v8qi_int
11749 = build_function_type (V8QI_type_node,
11750 tree_cons (NULL_TREE, V8QI_type_node,
11751 tree_cons (NULL_TREE, V8QI_type_node,
11752 tree_cons (NULL_TREE,
11753 integer_type_node,
11754 endlink))));
11755 tree v4hi_ftype_v4hi_int
11756 = build_function_type (V4HI_type_node,
11757 tree_cons (NULL_TREE, V4HI_type_node,
11758 tree_cons (NULL_TREE, integer_type_node,
11759 endlink)));
11760 tree v2si_ftype_v2si_int
11761 = build_function_type (V2SI_type_node,
11762 tree_cons (NULL_TREE, V2SI_type_node,
11763 tree_cons (NULL_TREE, integer_type_node,
11764 endlink)));
11765 tree v2si_ftype_di_di
11766 = build_function_type (V2SI_type_node,
11767 tree_cons (NULL_TREE, long_long_integer_type_node,
11768 tree_cons (NULL_TREE, long_long_integer_type_node,
11769 endlink)));
11770 tree di_ftype_di_int
11771 = build_function_type (long_long_integer_type_node,
11772 tree_cons (NULL_TREE, long_long_integer_type_node,
11773 tree_cons (NULL_TREE, integer_type_node,
11774 endlink)));
11775 tree di_ftype_di_int_int
11776 = build_function_type (long_long_integer_type_node,
11777 tree_cons (NULL_TREE, long_long_integer_type_node,
11778 tree_cons (NULL_TREE, integer_type_node,
11779 tree_cons (NULL_TREE,
11780 integer_type_node,
11781 endlink))));
11782 tree int_ftype_v8qi
11783 = build_function_type (integer_type_node,
11784 tree_cons (NULL_TREE, V8QI_type_node,
11785 endlink));
11786 tree int_ftype_v4hi
11787 = build_function_type (integer_type_node,
11788 tree_cons (NULL_TREE, V4HI_type_node,
11789 endlink));
11790 tree int_ftype_v2si
11791 = build_function_type (integer_type_node,
11792 tree_cons (NULL_TREE, V2SI_type_node,
11793 endlink));
11794 tree int_ftype_v8qi_int
11795 = build_function_type (integer_type_node,
11796 tree_cons (NULL_TREE, V8QI_type_node,
11797 tree_cons (NULL_TREE, integer_type_node,
11798 endlink)));
11799 tree int_ftype_v4hi_int
11800 = build_function_type (integer_type_node,
11801 tree_cons (NULL_TREE, V4HI_type_node,
11802 tree_cons (NULL_TREE, integer_type_node,
11803 endlink)));
11804 tree int_ftype_v2si_int
11805 = build_function_type (integer_type_node,
11806 tree_cons (NULL_TREE, V2SI_type_node,
11807 tree_cons (NULL_TREE, integer_type_node,
11808 endlink)));
11809 tree v8qi_ftype_v8qi_int_int
11810 = build_function_type (V8QI_type_node,
11811 tree_cons (NULL_TREE, V8QI_type_node,
11812 tree_cons (NULL_TREE, integer_type_node,
11813 tree_cons (NULL_TREE,
11814 integer_type_node,
11815 endlink))));
11816 tree v4hi_ftype_v4hi_int_int
11817 = build_function_type (V4HI_type_node,
11818 tree_cons (NULL_TREE, V4HI_type_node,
11819 tree_cons (NULL_TREE, integer_type_node,
11820 tree_cons (NULL_TREE,
11821 integer_type_node,
11822 endlink))));
11823 tree v2si_ftype_v2si_int_int
11824 = build_function_type (V2SI_type_node,
11825 tree_cons (NULL_TREE, V2SI_type_node,
11826 tree_cons (NULL_TREE, integer_type_node,
11827 tree_cons (NULL_TREE,
11828 integer_type_node,
11829 endlink))));
11830 /* Miscellaneous. */
11831 tree v8qi_ftype_v4hi_v4hi
11832 = build_function_type (V8QI_type_node,
11833 tree_cons (NULL_TREE, V4HI_type_node,
11834 tree_cons (NULL_TREE, V4HI_type_node,
11835 endlink)));
11836 tree v4hi_ftype_v2si_v2si
11837 = build_function_type (V4HI_type_node,
11838 tree_cons (NULL_TREE, V2SI_type_node,
11839 tree_cons (NULL_TREE, V2SI_type_node,
11840 endlink)));
11841 tree v2si_ftype_v4hi_v4hi
11842 = build_function_type (V2SI_type_node,
11843 tree_cons (NULL_TREE, V4HI_type_node,
11844 tree_cons (NULL_TREE, V4HI_type_node,
11845 endlink)));
11846 tree v2si_ftype_v8qi_v8qi
11847 = build_function_type (V2SI_type_node,
11848 tree_cons (NULL_TREE, V8QI_type_node,
11849 tree_cons (NULL_TREE, V8QI_type_node,
11850 endlink)));
11851 tree v4hi_ftype_v4hi_di
11852 = build_function_type (V4HI_type_node,
11853 tree_cons (NULL_TREE, V4HI_type_node,
11854 tree_cons (NULL_TREE,
11855 long_long_integer_type_node,
11856 endlink)));
11857 tree v2si_ftype_v2si_di
11858 = build_function_type (V2SI_type_node,
11859 tree_cons (NULL_TREE, V2SI_type_node,
11860 tree_cons (NULL_TREE,
11861 long_long_integer_type_node,
11862 endlink)));
11863 tree void_ftype_int_int
11864 = build_function_type (void_type_node,
11865 tree_cons (NULL_TREE, integer_type_node,
11866 tree_cons (NULL_TREE, integer_type_node,
11867 endlink)));
11868 tree di_ftype_void
11869 = build_function_type (long_long_unsigned_type_node, endlink);
11870 tree di_ftype_v8qi
11871 = build_function_type (long_long_integer_type_node,
11872 tree_cons (NULL_TREE, V8QI_type_node,
11873 endlink));
11874 tree di_ftype_v4hi
11875 = build_function_type (long_long_integer_type_node,
11876 tree_cons (NULL_TREE, V4HI_type_node,
11877 endlink));
11878 tree di_ftype_v2si
11879 = build_function_type (long_long_integer_type_node,
11880 tree_cons (NULL_TREE, V2SI_type_node,
11881 endlink));
11882 tree v2si_ftype_v4hi
11883 = build_function_type (V2SI_type_node,
11884 tree_cons (NULL_TREE, V4HI_type_node,
11885 endlink));
11886 tree v4hi_ftype_v8qi
11887 = build_function_type (V4HI_type_node,
11888 tree_cons (NULL_TREE, V8QI_type_node,
11889 endlink));
11891 tree di_ftype_di_v4hi_v4hi
11892 = build_function_type (long_long_unsigned_type_node,
11893 tree_cons (NULL_TREE,
11894 long_long_unsigned_type_node,
11895 tree_cons (NULL_TREE, V4HI_type_node,
11896 tree_cons (NULL_TREE,
11897 V4HI_type_node,
11898 endlink))));
11900 tree di_ftype_v4hi_v4hi
11901 = build_function_type (long_long_unsigned_type_node,
11902 tree_cons (NULL_TREE, V4HI_type_node,
11903 tree_cons (NULL_TREE, V4HI_type_node,
11904 endlink)));
11906 /* Normal vector binops. */
11907 tree v8qi_ftype_v8qi_v8qi
11908 = build_function_type (V8QI_type_node,
11909 tree_cons (NULL_TREE, V8QI_type_node,
11910 tree_cons (NULL_TREE, V8QI_type_node,
11911 endlink)));
11912 tree v4hi_ftype_v4hi_v4hi
11913 = build_function_type (V4HI_type_node,
11914 tree_cons (NULL_TREE, V4HI_type_node,
11915 tree_cons (NULL_TREE, V4HI_type_node,
11916 endlink)));
11917 tree v2si_ftype_v2si_v2si
11918 = build_function_type (V2SI_type_node,
11919 tree_cons (NULL_TREE, V2SI_type_node,
11920 tree_cons (NULL_TREE, V2SI_type_node,
11921 endlink)));
11922 tree di_ftype_di_di
11923 = build_function_type (long_long_unsigned_type_node,
11924 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11925 tree_cons (NULL_TREE,
11926 long_long_unsigned_type_node,
11927 endlink)));
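/* Each of these trees is just a spelling of a C prototype: for
   instance, v4hi_ftype_v4hi_v4hi above denotes

     V4HI f (V4HI, V4HI);

   built as a tree_cons chain of argument types terminated by
   void_list_node (endlink).  */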
11929 /* Add all builtins that are more or less simple operations on two
11930 operands. */
11931 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11933 /* Use one of the operands; the target can have a different mode for
11934 mask-generating compares. */
11935 enum machine_mode mode;
11936 tree type;
11938 if (d->name == 0)
11939 continue;
11941 mode = insn_data[d->icode].operand[1].mode;
11943 switch (mode)
11945 case V8QImode:
11946 type = v8qi_ftype_v8qi_v8qi;
11947 break;
11948 case V4HImode:
11949 type = v4hi_ftype_v4hi_v4hi;
11950 break;
11951 case V2SImode:
11952 type = v2si_ftype_v2si_v2si;
11953 break;
11954 case DImode:
11955 type = di_ftype_di_di;
11956 break;
11958 default:
11959 abort ();
11962 def_mbuiltin (d->mask, d->name, type, d->code);
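/* For example, an entry in bdesc_2arg whose insn operates on V8QImode
   values is registered here with the v8qi_ftype_v8qi_v8qi signature
   built above, so a byte-wise builtin can be written in C roughly as

       v8qi c = __builtin_arm_waddb (a, b);

   (builtin name shown for illustration; the actual set comes from the
   bdesc_2arg table).  */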
11965 /* Add the remaining MMX insns with somewhat more complicated types. */
11966 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11967 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11974 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11981 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11991 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11995 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
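/* Usage sketch: the register-count shift/rotate forms take a 64-bit
   count (the *_di types), while the ...i forms take an immediate (the
   *_int types).  Assuming the usual iWMMXt vector typedefs, one might
   write

       v4hi y = __builtin_arm_wsllhi (x, 3);    each halfword << 3   */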
11998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12002 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12005 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12006 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12009 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12010 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12011 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12013 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12015 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12016 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12019 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12020 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12023 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12024 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12025 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12026 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12027 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12028 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12030 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12031 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12032 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12033 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12034 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12035 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12036 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12037 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12038 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12039 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12040 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12041 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12043 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12044 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12045 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12046 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12048 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12049 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12050 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12051 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12052 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12053 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12054 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
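/* The TMIA family are multiply-accumulate operations: e.g.
   __builtin_arm_tmiabb multiplies the bottom halfwords of its two int
   arguments and accumulates the product into the 64-bit first
   argument, hence the di_ftype_di_int_int signature.  */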
12057 static void
12058 arm_init_builtins (void)
12060 if (TARGET_REALLY_IWMMXT)
12061 arm_init_iwmmxt_builtins ();
12064 /* Errors in the source file can cause expand_expr to return const0_rtx
12065 where we expect a vector. To avoid crashing, use one of the vector
12066 clear instructions. */
12068 static rtx
12069 safe_vector_operand (rtx x, enum machine_mode mode)
12071 if (x != const0_rtx)
12072 return x;
12073 x = gen_reg_rtx (mode);
12075 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12076 : gen_rtx_SUBREG (DImode, x, 0)));
12077 return x;
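/* For example, if a source error left a V4HImode operand as const0_rtx,
   the code above substitutes a fresh V4HImode pseudo and clears it
   through its DImode subreg, so expansion can continue safely.  */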
12080 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12082 static rtx
12083 arm_expand_binop_builtin (enum insn_code icode,
12084 tree arglist, rtx target)
12086 rtx pat;
12087 tree arg0 = TREE_VALUE (arglist);
12088 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12089 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12090 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12091 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12092 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12093 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12095 if (VECTOR_MODE_P (mode0))
12096 op0 = safe_vector_operand (op0, mode0);
12097 if (VECTOR_MODE_P (mode1))
12098 op1 = safe_vector_operand (op1, mode1);
12100 if (! target
12101 || GET_MODE (target) != tmode
12102 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12103 target = gen_reg_rtx (tmode);
12105 /* In case the insn wants input operands in modes different from
12106 the result, abort. */
12107 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12108 abort ();
12110 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12111 op0 = copy_to_mode_reg (mode0, op0);
12112 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12113 op1 = copy_to_mode_reg (mode1, op1);
12115 pat = GEN_FCN (icode) (target, op0, op1);
12116 if (! pat)
12117 return 0;
12118 emit_insn (pat);
12119 return target;
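/* Flow example: expanding a two-operand builtin such as
   __builtin_arm_wsadb through this helper expands both arguments,
   copies any that fail the insn's operand predicates into registers of
   the required mode, allocates a correctly-moded target if the
   caller's suggestion is unusable, and emits the single insn.  */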
12122 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12124 static rtx
12125 arm_expand_unop_builtin (enum insn_code icode,
12126 tree arglist, rtx target, int do_load)
12128 rtx pat;
12129 tree arg0 = TREE_VALUE (arglist);
12130 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12131 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12132 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12134 if (! target
12135 || GET_MODE (target) != tmode
12136 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12137 target = gen_reg_rtx (tmode);
12138 if (do_load)
12139 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12140 else
12142 if (VECTOR_MODE_P (mode0))
12143 op0 = safe_vector_operand (op0, mode0);
12145 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12146 op0 = copy_to_mode_reg (mode0, op0);
12149 pat = GEN_FCN (icode) (target, op0);
12150 if (! pat)
12151 return 0;
12152 emit_insn (pat);
12153 return target;
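/* DO_LOAD handles builtins whose single argument is really an address:
   the expanded argument is forced into a Pmode register and wrapped in
   a MEM of the insn's input mode instead of being used as a value.  */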
12156 /* Expand an expression EXP that calls a built-in function,
12157 with result going to TARGET if that's convenient
12158 (and in mode MODE if that's convenient).
12159 SUBTARGET may be used as the target for computing one of EXP's operands.
12160 IGNORE is nonzero if the value is to be ignored. */
12162 static rtx
12163 arm_expand_builtin (tree exp,
12164 rtx target,
12165 rtx subtarget ATTRIBUTE_UNUSED,
12166 enum machine_mode mode ATTRIBUTE_UNUSED,
12167 int ignore ATTRIBUTE_UNUSED)
12169 const struct builtin_description * d;
12170 enum insn_code icode;
12171 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12172 tree arglist = TREE_OPERAND (exp, 1);
12173 tree arg0;
12174 tree arg1;
12175 tree arg2;
12176 rtx op0;
12177 rtx op1;
12178 rtx op2;
12179 rtx pat;
12180 int fcode = DECL_FUNCTION_CODE (fndecl);
12181 size_t i;
12182 enum machine_mode tmode;
12183 enum machine_mode mode0;
12184 enum machine_mode mode1;
12185 enum machine_mode mode2;
12187 switch (fcode)
12189 case ARM_BUILTIN_TEXTRMSB:
12190 case ARM_BUILTIN_TEXTRMUB:
12191 case ARM_BUILTIN_TEXTRMSH:
12192 case ARM_BUILTIN_TEXTRMUH:
12193 case ARM_BUILTIN_TEXTRMSW:
12194 case ARM_BUILTIN_TEXTRMUW:
12195 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12196 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12197 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12198 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12199 : CODE_FOR_iwmmxt_textrmw);
12201 arg0 = TREE_VALUE (arglist);
12202 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12203 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12204 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12205 tmode = insn_data[icode].operand[0].mode;
12206 mode0 = insn_data[icode].operand[1].mode;
12207 mode1 = insn_data[icode].operand[2].mode;
12209 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12210 op0 = copy_to_mode_reg (mode0, op0);
12211 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12213 /* @@@ better error message */
12214 error ("selector must be an immediate");
12215 return gen_reg_rtx (tmode);
12217 if (target == 0
12218 || GET_MODE (target) != tmode
12219 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12220 target = gen_reg_rtx (tmode);
12221 pat = GEN_FCN (icode) (target, op0, op1);
12222 if (! pat)
12223 return 0;
12224 emit_insn (pat);
12225 return target;
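/* Example: __builtin_arm_textrmsb (v, 3) extracts element 3 of an
   eight-byte vector with sign extension; the selector operand must
   fold to a compile-time constant, hence the check above.  */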
12227 case ARM_BUILTIN_TINSRB:
12228 case ARM_BUILTIN_TINSRH:
12229 case ARM_BUILTIN_TINSRW:
12230 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12231 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12232 : CODE_FOR_iwmmxt_tinsrw);
12233 arg0 = TREE_VALUE (arglist);
12234 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12235 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12236 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12237 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12238 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12239 tmode = insn_data[icode].operand[0].mode;
12240 mode0 = insn_data[icode].operand[1].mode;
12241 mode1 = insn_data[icode].operand[2].mode;
12242 mode2 = insn_data[icode].operand[3].mode;
12244 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12245 op0 = copy_to_mode_reg (mode0, op0);
12246 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12247 op1 = copy_to_mode_reg (mode1, op1);
12248 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12250 /* @@@ better error message */
12251 error ("selector must be an immediate");
12252 return const0_rtx;
12254 if (target == 0
12255 || GET_MODE (target) != tmode
12256 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12257 target = gen_reg_rtx (tmode);
12258 pat = GEN_FCN (icode) (target, op0, op1, op2);
12259 if (! pat)
12260 return 0;
12261 emit_insn (pat);
12262 return target;
12264 case ARM_BUILTIN_SETWCX:
12265 arg0 = TREE_VALUE (arglist);
12266 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12267 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12268 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12269 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12270 return 0;
12272 case ARM_BUILTIN_GETWCX:
12273 arg0 = TREE_VALUE (arglist);
12274 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12275 target = gen_reg_rtx (SImode);
12276 emit_insn (gen_iwmmxt_tmrc (target, op0));
12277 return target;
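/* This reads the requested coprocessor control register into a fresh
   SImode pseudo via TMRC; SETWCX above is the matching TMCR write.  */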
12279 case ARM_BUILTIN_WSHUFH:
12280 icode = CODE_FOR_iwmmxt_wshufh;
12281 arg0 = TREE_VALUE (arglist);
12282 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12283 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12284 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12285 tmode = insn_data[icode].operand[0].mode;
12286 mode1 = insn_data[icode].operand[1].mode;
12287 mode2 = insn_data[icode].operand[2].mode;
12289 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12290 op0 = copy_to_mode_reg (mode1, op0);
12291 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12293 /* @@@ better error message */
12294 error ("mask must be an immediate");
12295 return const0_rtx;
12297 if (target == 0
12298 || GET_MODE (target) != tmode
12299 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12300 target = gen_reg_rtx (tmode);
12301 pat = GEN_FCN (icode) (target, op0, op1);
12302 if (! pat)
12303 return 0;
12304 emit_insn (pat);
12305 return target;
12307 case ARM_BUILTIN_WSADB:
12308 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12309 case ARM_BUILTIN_WSADH:
12310 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12311 case ARM_BUILTIN_WSADBZ:
12312 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12313 case ARM_BUILTIN_WSADHZ:
12314 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12316 /* Several three-argument builtins. */
12317 case ARM_BUILTIN_WMACS:
12318 case ARM_BUILTIN_WMACU:
12319 case ARM_BUILTIN_WALIGN:
12320 case ARM_BUILTIN_TMIA:
12321 case ARM_BUILTIN_TMIAPH:
12322 case ARM_BUILTIN_TMIATT:
12323 case ARM_BUILTIN_TMIATB:
12324 case ARM_BUILTIN_TMIABT:
12325 case ARM_BUILTIN_TMIABB:
12326 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12327 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12328 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12329 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12330 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12331 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12332 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12333 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12334 : CODE_FOR_iwmmxt_walign);
12335 arg0 = TREE_VALUE (arglist);
12336 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12337 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12338 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12339 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12340 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12341 tmode = insn_data[icode].operand[0].mode;
12342 mode0 = insn_data[icode].operand[1].mode;
12343 mode1 = insn_data[icode].operand[2].mode;
12344 mode2 = insn_data[icode].operand[3].mode;
12346 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12347 op0 = copy_to_mode_reg (mode0, op0);
12348 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12349 op1 = copy_to_mode_reg (mode1, op1);
12350 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12351 op2 = copy_to_mode_reg (mode2, op2);
12352 if (target == 0
12353 || GET_MODE (target) != tmode
12354 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12355 target = gen_reg_rtx (tmode);
12356 pat = GEN_FCN (icode) (target, op0, op1, op2);
12357 if (! pat)
12358 return 0;
12359 emit_insn (pat);
12360 return target;
12362 case ARM_BUILTIN_WZERO:
12363 target = gen_reg_rtx (DImode);
12364 emit_insn (gen_iwmmxt_clrdi (target));
12365 return target;
12367 default:
12368 break;
12371 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12372 if (d->code == (const enum arm_builtins) fcode)
12373 return arm_expand_binop_builtin (d->icode, arglist, target);
12375 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12376 if (d->code == (const enum arm_builtins) fcode)
12377 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12379 /* @@@ Should really do something sensible here. */
12380 return NULL_RTX;
12383 /* Recursively search through all of the blocks in a function
12384 checking to see if any of the variables created in that
12385 function match the RTX called 'orig'. If they do then
12386 replace them with the RTX called 'new'. */
12387 static void
12388 replace_symbols_in_block (tree block, rtx orig, rtx new)
12390 for (; block; block = BLOCK_CHAIN (block))
12392 tree sym;
12394 if (!TREE_USED (block))
12395 continue;
12397 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12399 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12400 || DECL_IGNORED_P (sym)
12401 || TREE_CODE (sym) != VAR_DECL
12402 || DECL_EXTERNAL (sym)
12403 || !rtx_equal_p (DECL_RTL (sym), orig)
12405 continue;
12407 SET_DECL_RTL (sym, new);
12410 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12414 /* Return the number (counting from 0) of
12415 the least significant set bit in MASK. */
12417 inline static int
12418 number_of_first_bit_set (unsigned mask)
12420 int bit;
12422 for (bit = 0;
12423 (mask & (1 << bit)) == 0;
12424 ++bit)
12425 continue;
12427 return bit;
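/* Example: number_of_first_bit_set (0x18) tests bits 0, 1 and 2 (all
   clear) and returns 3.  MASK must be nonzero, otherwise the loop
   would run off the end of the word.  */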
12430 /* Emit code to push or pop registers to or from the stack. F is the
12431 assembly file. MASK is the registers to push or pop. PUSH is
12432 nonzero if we should push, and zero if we should pop. For debugging
12433 output, if pushing, adjust CFA_OFFSET by the amount of space added
12434 to the stack. REAL_REGS should have the same number of bits set as
12435 MASK, and will be used instead (in the same order) to describe which
12436 registers were saved - this is used to mark the save slots when we
12437 push high registers after moving them to low registers. */
12438 static void
12439 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12440 unsigned long real_regs)
12442 int regno;
12443 int lo_mask = mask & 0xFF;
12444 int pushed_words = 0;
12446 if (mask == 0)
12447 abort ();
12449 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12451 /* Special case. Do not generate a POP PC statement here, do it in
12452 thumb_exit ().  */
12453 thumb_exit (f, -1);
12454 return;
12457 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12459 /* Look at the low registers first. */
12460 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12462 if (lo_mask & 1)
12464 asm_fprintf (f, "%r", regno);
12466 if ((lo_mask & ~1) != 0)
12467 fprintf (f, ", ");
12469 pushed_words++;
12473 if (push && (mask & (1 << LR_REGNUM)))
12475 /* Catch pushing the LR. */
12476 if (mask & 0xFF)
12477 fprintf (f, ", ");
12479 asm_fprintf (f, "%r", LR_REGNUM);
12481 pushed_words++;
12483 else if (!push && (mask & (1 << PC_REGNUM)))
12485 /* Catch popping the PC. */
12486 if (TARGET_INTERWORK || TARGET_BACKTRACE
12487 || current_function_calls_eh_return)
12489 /* The PC is never popped directly; instead
12490 it is popped into r3 and then BX is used. */
12491 fprintf (f, "}\n");
12493 thumb_exit (f, -1);
12495 return;
12497 else
12499 if (mask & 0xFF)
12500 fprintf (f, ", ");
12502 asm_fprintf (f, "%r", PC_REGNUM);
12506 fprintf (f, "}\n");
12508 if (push && pushed_words && dwarf2out_do_frame ())
12510 char *l = dwarf2out_cfi_label ();
12511 int pushed_mask = real_regs;
12513 *cfa_offset += pushed_words * 4;
12514 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12516 pushed_words = 0;
12517 pushed_mask = real_regs;
12518 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12520 if (pushed_mask & 1)
12521 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
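/* Example: thumb_pushpop (f, 0x40f0, 1, &offset, 0x40f0) emits

       push {r4, r5, r6, r7, lr}

   and, when dwarf2 frame info is wanted, advances the CFA offset by
   20 bytes and records a save slot for each of the five registers.  */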
12526 /* Generate code to return from a thumb function.
12527 If 'reg_containing_return_addr' is -1, then the return address is
12528 actually on the stack, at the stack pointer. */
12529 static void
12530 thumb_exit (FILE *f, int reg_containing_return_addr)
12532 unsigned regs_available_for_popping;
12533 unsigned regs_to_pop;
12534 int pops_needed;
12535 unsigned available;
12536 unsigned required;
12537 int mode;
12538 int size;
12539 int restore_a4 = FALSE;
12541 /* Compute the registers we need to pop. */
12542 regs_to_pop = 0;
12543 pops_needed = 0;
12545 if (reg_containing_return_addr == -1)
12547 regs_to_pop |= 1 << LR_REGNUM;
12548 ++pops_needed;
12551 if (TARGET_BACKTRACE)
12553 /* Restore the (ARM) frame pointer and stack pointer. */
12554 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12555 pops_needed += 2;
12558 /* If there is nothing to pop then just emit the BX instruction and
12559 return. */
12560 if (pops_needed == 0)
12562 if (current_function_calls_eh_return)
12563 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12565 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12566 return;
12568 /* Otherwise if we are not supporting interworking and we have not created
12569 a backtrace structure and the function was not entered in ARM mode then
12570 just pop the return address straight into the PC. */
12571 else if (!TARGET_INTERWORK
12572 && !TARGET_BACKTRACE
12573 && !is_called_in_ARM_mode (current_function_decl)
12574 && !current_function_calls_eh_return)
12576 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12577 return;
12580 /* Find out how many of the (return) argument registers we can corrupt. */
12581 regs_available_for_popping = 0;
12583 /* If returning via __builtin_eh_return, the bottom three registers
12584 all contain information needed for the return. */
12585 if (current_function_calls_eh_return)
12586 size = 12;
12587 else
12589 /* We can deduce the registers used from the function's
12590 return value. This is more reliable than examining
12591 regs_ever_live[] because that will be set if the register is
12592 ever used in the function, not just if the register is used
12593 to hold a return value. */
12595 if (current_function_return_rtx != 0)
12596 mode = GET_MODE (current_function_return_rtx);
12597 else
12598 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12600 size = GET_MODE_SIZE (mode);
12602 if (size == 0)
12604 /* In a void function we can use any argument register.
12605 In a function that returns a structure on the stack
12606 we can use the second and third argument registers. */
12607 if (mode == VOIDmode)
12608 regs_available_for_popping =
12609 (1 << ARG_REGISTER (1))
12610 | (1 << ARG_REGISTER (2))
12611 | (1 << ARG_REGISTER (3));
12612 else
12613 regs_available_for_popping =
12614 (1 << ARG_REGISTER (2))
12615 | (1 << ARG_REGISTER (3));
12617 else if (size <= 4)
12618 regs_available_for_popping =
12619 (1 << ARG_REGISTER (2))
12620 | (1 << ARG_REGISTER (3));
12621 else if (size <= 8)
12622 regs_available_for_popping =
12623 (1 << ARG_REGISTER (3));
12626 /* Match registers to be popped with registers into which we pop them. */
12627 for (available = regs_available_for_popping,
12628 required = regs_to_pop;
12629 required != 0 && available != 0;
12630 available &= ~(available & - available),
12631 required &= ~(required & - required))
12632 -- pops_needed;
12634 /* If we have any popping registers left over, remove them. */
12635 if (available > 0)
12636 regs_available_for_popping &= ~available;
12638 /* Otherwise if we need another popping register we can use
12639 the fourth argument register. */
12640 else if (pops_needed)
12642 /* If we have not found any free argument registers and
12643 reg a4 contains the return address, we must move it. */
12644 if (regs_available_for_popping == 0
12645 && reg_containing_return_addr == LAST_ARG_REGNUM)
12647 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12648 reg_containing_return_addr = LR_REGNUM;
12650 else if (size > 12)
12652 /* Register a4 is being used to hold part of the return value,
12653 but we have dire need of a free, low register. */
12654 restore_a4 = TRUE;
12656 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12659 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12661 /* The fourth argument register is available. */
12662 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12664 --pops_needed;
12668 /* Pop as many registers as we can. */
12669 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12670 regs_available_for_popping);
12672 /* Process the registers we popped. */
12673 if (reg_containing_return_addr == -1)
12675 /* The return address was popped into the lowest numbered register. */
12676 regs_to_pop &= ~(1 << LR_REGNUM);
12678 reg_containing_return_addr =
12679 number_of_first_bit_set (regs_available_for_popping);
12681 /* Remove this register from the mask of available registers, so that
12682 the return address will not be corrupted by further pops. */
12683 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12686 /* If we popped other registers then handle them here. */
12687 if (regs_available_for_popping)
12689 int frame_pointer;
12691 /* Work out which register currently contains the frame pointer. */
12692 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12694 /* Move it into the correct place. */
12695 asm_fprintf (f, "\tmov\t%r, %r\n",
12696 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12698 /* (Temporarily) remove it from the mask of popped registers. */
12699 regs_available_for_popping &= ~(1 << frame_pointer);
12700 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12702 if (regs_available_for_popping)
12704 int stack_pointer;
12706 /* We popped the stack pointer as well;
12707 find the register that contains it. */
12708 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12710 /* Move it into the stack register. */
12711 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12713 /* At this point we have popped all necessary registers, so
12714 do not worry about restoring regs_available_for_popping
12715 to its correct value:
12717 assert (pops_needed == 0)
12718 assert (regs_available_for_popping == (1 << frame_pointer))
12719 assert (regs_to_pop == (1 << STACK_POINTER)) */
12721 else
12723 /* Since we have just moved the popped value into the frame
12724 pointer, the popping register is available for reuse, and
12725 we know that we still have the stack pointer left to pop. */
12726 regs_available_for_popping |= (1 << frame_pointer);
12730 /* If we still have registers left on the stack, but we no longer have
12731 any registers into which we can pop them, then we must move the return
12732 address into the link register and make available the register that
12733 contained it. */
12734 if (regs_available_for_popping == 0 && pops_needed > 0)
12736 regs_available_for_popping |= 1 << reg_containing_return_addr;
12738 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12739 reg_containing_return_addr);
12741 reg_containing_return_addr = LR_REGNUM;
12744 /* If we have registers left on the stack then pop some more.
12745 We know that at most we will want to pop FP and SP. */
12746 if (pops_needed > 0)
12748 int popped_into;
12749 int move_to;
12751 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12752 regs_available_for_popping);
12754 /* We have popped either FP or SP.
12755 Move whichever one it is into the correct register. */
12756 popped_into = number_of_first_bit_set (regs_available_for_popping);
12757 move_to = number_of_first_bit_set (regs_to_pop);
12759 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12761 regs_to_pop &= ~(1 << move_to);
12763 --pops_needed;
12766 /* If we still have not popped everything then we must have only
12767 had one register available to us and we are now popping the SP. */
12768 if (pops_needed > 0)
12770 int popped_into;
12772 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12773 regs_available_for_popping);
12775 popped_into = number_of_first_bit_set (regs_available_for_popping);
12777 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12778 /*
12779 assert (regs_to_pop == (1 << STACK_POINTER))
12780 assert (pops_needed == 1)
12781 */
12784 /* If necessary restore the a4 register. */
12785 if (restore_a4)
12787 if (reg_containing_return_addr != LR_REGNUM)
12789 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12790 reg_containing_return_addr = LR_REGNUM;
12793 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12796 if (current_function_calls_eh_return)
12797 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12799 /* Return to caller. */
12800 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
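/* In the simplest case (return address still in LR, nothing left to
   pop) all of the above collapses to a single "bx lr"; the register
   shuffling is only needed when the return address, FP or SP must be
   fished back out of the stack through the argument registers.  */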
12804 void
12805 thumb_final_prescan_insn (rtx insn)
12807 if (flag_print_asm_name)
12808 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12809 INSN_ADDRESSES (INSN_UID (insn)));
12812 int
12813 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12815 unsigned HOST_WIDE_INT mask = 0xff;
12816 int i;
12818 if (val == 0) /* XXX */
12819 return 0;
12821 for (i = 0; i < 25; i++)
12822 if ((val & (mask << i)) == val)
12823 return 1;
12825 return 0;
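/* Example: 0xFF0 is shiftable (0xFF << 4), but 0x101 is not, since its
   set bits do not fit inside any single 8-bit window.  */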
12828 /* Returns nonzero if the current function contains,
12829 or might contain a far jump. */
12830 static int
12831 thumb_far_jump_used_p (void)
12833 rtx insn;
12835 /* This test is only important for leaf functions. */
12836 /* assert (!leaf_function_p ()); */
12838 /* If we have already decided that far jumps may be used,
12839 do not bother checking again, and always return true even if
12840 it turns out that they are not being used. Once we have made
12841 the decision that far jumps are present (and that hence the link
12842 register will be pushed onto the stack) we cannot go back on it. */
12843 if (cfun->machine->far_jump_used)
12844 return 1;
12846 /* If this function is not being called from the prologue/epilogue
12847 generation code then it must be being called from the
12848 INITIAL_ELIMINATION_OFFSET macro. */
12849 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12851 /* In this case we know that we are being asked about the elimination
12852 of the arg pointer register. If that register is not being used,
12853 then there are no arguments on the stack, and we do not have to
12854 worry that a far jump might force the prologue to push the link
12855 register, changing the stack offsets. In this case we can just
12856 return false, since the presence of far jumps in the function will
12857 not affect stack offsets.
12859 If the arg pointer is live (or if it was live, but has now been
12860 eliminated and so set to dead) then we do have to test to see if
12861 the function might contain a far jump. This test can lead to some
12862 false negatives, since before reload is completed, the length of
12863 branch instructions is not known, so gcc defaults to returning their
12864 longest length, which in turn sets the far jump attribute to true.
12866 A false negative will not result in bad code being generated, but it
12867 will result in a needless push and pop of the link register. We
12868 hope that this does not occur too often.
12870 If we need doubleword stack alignment this could affect the other
12871 elimination offsets so we can't risk getting it wrong. */
12872 if (regs_ever_live [ARG_POINTER_REGNUM])
12873 cfun->machine->arg_pointer_live = 1;
12874 else if (!cfun->machine->arg_pointer_live)
12875 return 0;
12878 /* Check to see if the function contains a branch
12879 insn with the far jump attribute set. */
12880 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12882 if (GET_CODE (insn) == JUMP_INSN
12883 /* Ignore tablejump patterns. */
12884 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12885 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12886 && get_attr_far_jump (insn) == FAR_JUMP_YES
12889 /* Record the fact that we have decided that
12890 the function does use far jumps. */
12891 cfun->machine->far_jump_used = 1;
12892 return 1;
12896 return 0;
12899 /* Return nonzero if FUNC must be entered in ARM mode. */
12900 int
12901 is_called_in_ARM_mode (tree func)
12903 if (TREE_CODE (func) != FUNCTION_DECL)
12904 abort ();
12906 /* Ignore the problem about functions whose address is taken. */
12907 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12908 return TRUE;
12910 #ifdef ARM_PE
12911 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12912 #else
12913 return FALSE;
12914 #endif
12917 /* The bits which aren't usefully expanded as rtl. */
12918 const char *
12919 thumb_unexpanded_epilogue (void)
12921 int regno;
12922 unsigned long live_regs_mask = 0;
12923 int high_regs_pushed = 0;
12924 int had_to_push_lr;
12925 int size;
12926 int mode;
12928 if (return_used_this_function)
12929 return "";
12931 if (IS_NAKED (arm_current_func_type ()))
12932 return "";
12934 live_regs_mask = thumb_compute_save_reg_mask ();
12935 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12937 /* We can deduce the registers used from the function's return value.
12938 This is more reliable than examining regs_ever_live[] because that
12939 will be set if the register is ever used in the function, not just if
12940 the register is used to hold a return value. */
12942 if (current_function_return_rtx != 0)
12943 mode = GET_MODE (current_function_return_rtx);
12944 else
12945 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12947 size = GET_MODE_SIZE (mode);
12949 /* The prologue may have pushed some high registers to use as
12950 work registers. e.g. the testsuite file:
12951 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12952 compiles to produce:
12953 push {r4, r5, r6, r7, lr}
12954 mov r7, r9
12955 mov r6, r8
12956 push {r6, r7}
12957 as part of the prologue. We have to undo that pushing here. */
12959 if (high_regs_pushed)
12961 unsigned long mask = live_regs_mask & 0xff;
12962 int next_hi_reg;
12964 /* The available low registers depend on the size of the value we are
12965 returning. */
12966 if (size <= 12)
12967 mask |= 1 << 3;
12968 if (size <= 8)
12969 mask |= 1 << 2;
12971 if (mask == 0)
12972 /* Oh dear! We have no low registers into which we can pop
12973 high registers! */
12974 internal_error
12975 ("no low registers available for popping high registers");
12977 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12978 if (live_regs_mask & (1 << next_hi_reg))
12979 break;
12981 while (high_regs_pushed)
12983 /* Find lo register(s) into which the high register(s) can
12984 be popped. */
12985 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12987 if (mask & (1 << regno))
12988 high_regs_pushed--;
12989 if (high_regs_pushed == 0)
12990 break;
12993 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12995 /* Pop the values into the low register(s). */
12996 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12998 /* Move the value(s) into the high registers. */
12999 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13001 if (mask & (1 << regno))
13003 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13004 regno);
13006 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13007 if (live_regs_mask & (1 << next_hi_reg))
13008 break;
13012 live_regs_mask &= ~0x0f00;
13015 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13016 live_regs_mask &= 0xff;
13018 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13020 /* Pop the return address into the PC. */
13021 if (had_to_push_lr)
13022 live_regs_mask |= 1 << PC_REGNUM;
13024 /* Either no argument registers were pushed or a backtrace
13025 structure was created which includes an adjusted stack
13026 pointer, so just pop everything. */
13027 if (live_regs_mask)
13028 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13029 live_regs_mask);
13031 /* We have either just popped the return address into the
13032 PC or it was kept in LR for the entire function. */
13033 if (!had_to_push_lr)
13034 thumb_exit (asm_out_file, LR_REGNUM);
13036 else
13038 /* Pop everything but the return address. */
13039 if (live_regs_mask)
13040 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13041 live_regs_mask);
13043 if (had_to_push_lr)
13045 if (size > 12)
13047 /* We have no free low regs, so save one. */
13048 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13049 LAST_ARG_REGNUM);
13052 /* Get the return address into a temporary register. */
13053 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13054 1 << LAST_ARG_REGNUM);
13056 if (size > 12)
13058 /* Move the return address to lr. */
13059 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13060 LAST_ARG_REGNUM);
13061 /* Restore the low register. */
13062 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13063 IP_REGNUM);
13064 regno = LR_REGNUM;
13066 else
13067 regno = LAST_ARG_REGNUM;
13069 else
13070 regno = LR_REGNUM;
13072 /* Remove the argument registers that were pushed onto the stack. */
13073 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13074 SP_REGNUM, SP_REGNUM,
13075 current_function_pretend_args_size);
13077 thumb_exit (asm_out_file, regno);
13080 return "";
13083 /* Functions to save and restore machine-specific function data. */
13084 static struct machine_function *
13085 arm_init_machine_status (void)
13087 struct machine_function *machine;
13088 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13090 #if ARM_FT_UNKNOWN != 0
13091 machine->func_type = ARM_FT_UNKNOWN;
13092 #endif
13093 return machine;
13096 /* Return an RTX indicating where the return address to the
13097 calling function can be found. */
13098 rtx
13099 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13101 if (count != 0)
13102 return NULL_RTX;
13104 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
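/* Hence __builtin_return_address (0) sees the LR value captured on
   entry to the function, while any outer frame (COUNT != 0) is simply
   unsupported.  */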
13107 /* Do anything needed before RTL is emitted for each function. */
13108 void
13109 arm_init_expanders (void)
13111 /* Arrange to initialize and mark the machine per-function status. */
13112 init_machine_status = arm_init_machine_status;
13114 /* This is to stop the combine pass optimizing away the alignment
13115 adjustment of va_arg. */
13116 /* ??? It is claimed that this should not be necessary. */
13117 if (cfun)
13118 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13122 /* Like arm_compute_initial_elimination_offset. Simpler because
13123 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13125 HOST_WIDE_INT
13126 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13128 arm_stack_offsets *offsets;
13130 offsets = arm_get_frame_offsets ();
13132 switch (from)
13134 case ARG_POINTER_REGNUM:
13135 switch (to)
13137 case STACK_POINTER_REGNUM:
13138 return offsets->outgoing_args - offsets->saved_args;
13140 case FRAME_POINTER_REGNUM:
13141 return offsets->soft_frame - offsets->saved_args;
13143 case THUMB_HARD_FRAME_POINTER_REGNUM:
13144 case ARM_HARD_FRAME_POINTER_REGNUM:
13145 return offsets->saved_regs - offsets->saved_args;
13147 default:
13148 abort();
13150 break;
13152 case FRAME_POINTER_REGNUM:
13153 switch (to)
13155 case STACK_POINTER_REGNUM:
13156 return offsets->outgoing_args - offsets->soft_frame;
13158 case THUMB_HARD_FRAME_POINTER_REGNUM:
13159 case ARM_HARD_FRAME_POINTER_REGNUM:
13160 return offsets->saved_regs - offsets->soft_frame;
13162 default:
13163 abort();
13165 break;
13167 default:
13168 abort ();
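/* Worked example: eliminating ARG_POINTER into STACK_POINTER must
   yield the full distance from the incoming-argument area down to the
   outgoing-argument area, i.e. outgoing_args - saved_args; the other
   cases return the saved-register and soft-frame slices of the same
   frame layout.  */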
13173 /* Generate the rest of a function's prologue. */
13174 void
13175 thumb_expand_prologue (void)
13177 rtx insn, dwarf;
13179 HOST_WIDE_INT amount;
13180 arm_stack_offsets *offsets;
13181 unsigned long func_type;
13182 int regno;
13183 unsigned long live_regs_mask;
13185 func_type = arm_current_func_type ();
13187 /* Naked functions don't have prologues. */
13188 if (IS_NAKED (func_type))
13189 return;
13191 if (IS_INTERRUPT (func_type))
13193 error ("interrupt Service Routines cannot be coded in Thumb mode");
13194 return;
13197 live_regs_mask = thumb_compute_save_reg_mask ();
13198 /* Load the pic register before setting the frame pointer,
13199 so we can use r7 as a temporary work register. */
13200 if (flag_pic)
13201 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13203 offsets = arm_get_frame_offsets ();
13205 if (frame_pointer_needed)
13207 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13208 stack_pointer_rtx));
13209 RTX_FRAME_RELATED_P (insn) = 1;
13211 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13212 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13213 stack_pointer_rtx);
13215 amount = offsets->outgoing_args - offsets->saved_regs;
13216 if (amount)
13218 if (amount < 512)
13220 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13221 GEN_INT (- amount)));
13222 RTX_FRAME_RELATED_P (insn) = 1;
13224 else
13226 rtx reg;
13228 /* The stack decrement is too big for an immediate value in a single
13229 insn. In theory we could issue multiple subtracts, but after
13230 three of them it becomes more space efficient to place the full
13231 value in the constant pool and load into a register. (Also the
13232 ARM debugger really likes to see only one stack decrement per
13233 function). So instead we look for a scratch register into which
13234 we can load the decrement, and then we subtract this from the
13235 stack pointer. Unfortunately on the thumb the only available
13236 scratch registers are the argument registers, and we cannot use
13237 these as they may hold arguments to the function. Instead we
13238 attempt to locate a call preserved register which is used by this
13239 function. If we can find one, then we know that it will have
13240 been pushed at the start of the prologue and so we can corrupt
13241 it now. */
13242 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13243 if (live_regs_mask & (1 << regno)
13244 && !(frame_pointer_needed
13245 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13246 break;
13248 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13250 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13252 /* Choose an arbitrary, non-argument low register. */
13253 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13255 /* Save it by copying it into a high, scratch register. */
13256 emit_insn (gen_movsi (spare, reg));
13257 /* Add a USE to stop propagate_one_insn() from barfing. */
13258 emit_insn (gen_prologue_use (spare));
13260 /* Decrement the stack. */
13261 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13262 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13263 stack_pointer_rtx, reg));
13264 RTX_FRAME_RELATED_P (insn) = 1;
13265 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13266 plus_constant (stack_pointer_rtx,
13267 -amount));
13268 RTX_FRAME_RELATED_P (dwarf) = 1;
13269 REG_NOTES (insn)
13270 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13271 REG_NOTES (insn));
13273 /* Restore the low register's original value. */
13274 emit_insn (gen_movsi (reg, spare));
13276 /* Emit a USE of the restored scratch register, so that flow
13277 analysis will not consider the restore redundant. The
13278 register won't be used again in this function and isn't
13279 restored by the epilogue. */
13280 emit_insn (gen_prologue_use (reg));
13282 else
13284 reg = gen_rtx_REG (SImode, regno);
13286 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13288 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13289 stack_pointer_rtx, reg));
13290 RTX_FRAME_RELATED_P (insn) = 1;
13291 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13292 plus_constant (stack_pointer_rtx,
13293 -amount));
13294 RTX_FRAME_RELATED_P (dwarf) = 1;
13295 REG_NOTES (insn)
13296 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13297 REG_NOTES (insn));
13300 /* If the frame pointer is needed, emit a special barrier that
13301 will prevent the scheduler from moving stores to the frame
13302 before the stack adjustment. */
13303 if (frame_pointer_needed)
13304 emit_insn (gen_stack_tie (stack_pointer_rtx,
13305 hard_frame_pointer_rtx));
13308 if (current_function_profile || TARGET_NO_SCHED_PRO)
13309 emit_insn (gen_blockage ());
13311 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13312 if (live_regs_mask & 0xff)
13313 cfun->machine->lr_save_eliminated = 0;
13315 /* If the link register is being kept alive, with the return address in it,
13316 then make sure that it does not get reused by the ce2 pass. */
13317 if (cfun->machine->lr_save_eliminated)
13318 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13322 void
13323 thumb_expand_epilogue (void)
13325 HOST_WIDE_INT amount;
13326 arm_stack_offsets *offsets;
13327 int regno;
13329 /* Naked functions don't have epilogues. */
13330 if (IS_NAKED (arm_current_func_type ()))
13331 return;
13333 offsets = arm_get_frame_offsets ();
13334 amount = offsets->outgoing_args - offsets->saved_regs;
13336 if (frame_pointer_needed)
13337 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13338 else if (amount)
13340 if (amount < 512)
13341 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13342 GEN_INT (amount)));
13343 else
13345 /* r3 is always free in the epilogue. */
13346 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13348 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13349 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13353 /* Emit a USE (stack_pointer_rtx), so that
13354 the stack adjustment will not be deleted. */
13355 emit_insn (gen_prologue_use (stack_pointer_rtx));
13357 if (current_function_profile || TARGET_NO_SCHED_PRO)
13358 emit_insn (gen_blockage ());
13360 /* Emit a clobber for each insn that will be restored in the epilogue,
13361 so that flow2 will get register lifetimes correct. */
13362 for (regno = 0; regno < 13; regno++)
13363 if (regs_ever_live[regno] && !call_used_regs[regno])
13364 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13366 if (! regs_ever_live[LR_REGNUM])
13367 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13370 static void
13371 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13373 unsigned long live_regs_mask = 0;
13374 unsigned long l_mask;
13375 unsigned high_regs_pushed = 0;
13376 int cfa_offset = 0;
13377 int regno;
13379 if (IS_NAKED (arm_current_func_type ()))
13380 return;
13382 if (is_called_in_ARM_mode (current_function_decl))
13384 const char * name;
13386 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13387 abort ();
13388 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13389 abort ();
13390 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13392 /* Generate code sequence to switch us into Thumb mode. */
13393 /* The .code 32 directive has already been emitted by
13394 ASM_DECLARE_FUNCTION_NAME. */
13395 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13396 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13398 /* Generate a label, so that the debugger will notice the
13399 change in instruction sets. This label is also used by
13400 the assembler to bypass the ARM code when this function
13401 is called from a Thumb encoded function elsewhere in the
13402 same file. Hence the definition of STUB_NAME here must
13403 agree with the definition in gas/config/tc-arm.c. */
13405 #define STUB_NAME ".real_start_of"
13407 fprintf (f, "\t.code\t16\n");
13408 #ifdef ARM_PE
13409 if (arm_dllexport_name_p (name))
13410 name = arm_strip_name_encoding (name);
13411 #endif
13412 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13413 fprintf (f, "\t.thumb_func\n");
13414 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13417 if (current_function_pretend_args_size)
13419 if (cfun->machine->uses_anonymous_args)
13421 int num_pushes;
13423 fprintf (f, "\tpush\t{");
13425 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13427 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13428 regno <= LAST_ARG_REGNUM;
13429 regno++)
13430 asm_fprintf (f, "%r%s", regno,
13431 regno == LAST_ARG_REGNUM ? "" : ", ");
13433 fprintf (f, "}\n");
13435 else
13436 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13437 SP_REGNUM, SP_REGNUM,
13438 current_function_pretend_args_size);
13440 /* We don't need to record the stores for unwinding (would it
13441 help the debugger any if we did?), but record the change in
13442 the stack pointer. */
13443 if (dwarf2out_do_frame ())
13445 char *l = dwarf2out_cfi_label ();
13447 cfa_offset = cfa_offset + current_function_pretend_args_size;
13448 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13452 /* Get the registers we are going to push. */
13453 live_regs_mask = thumb_compute_save_reg_mask ();
13454 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13455 l_mask = live_regs_mask & 0x40ff;
13456 /* Then count how many other high registers will need to be pushed. */
13457 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13459 if (TARGET_BACKTRACE)
13461 unsigned offset;
13462 unsigned work_register;
13464 /* We have been asked to create a stack backtrace structure.
13465 The code looks like this:
13467 0 .align 2
13468 0 func:
13469 0 sub SP, #16 Reserve space for 4 registers.
13470 2 push {R7} Push low registers.
13471 4 add R7, SP, #20 Get the stack pointer before the push.
13472 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13473 8 mov R7, PC Get hold of the start of this code plus 12.
13474 10 str R7, [SP, #16] Store it.
13475 12 mov R7, FP Get hold of the current frame pointer.
13476 14 str R7, [SP, #4] Store it.
13477 16 mov R7, LR Get hold of the current return address.
13478 18 str R7, [SP, #12] Store it.
13479 20 add R7, SP, #16 Point at the start of the backtrace structure.
13480 22 mov FP, R7 Put this value into the frame pointer. */
13482 work_register = thumb_find_work_register (live_regs_mask);
13484 asm_fprintf
13485 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13486 SP_REGNUM, SP_REGNUM);
13488 if (dwarf2out_do_frame ())
13490 char *l = dwarf2out_cfi_label ();
13492 cfa_offset = cfa_offset + 16;
13493 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13496 if (l_mask)
13498 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13499 offset = bit_count (l_mask);
13501 else
13502 offset = 0;
13504 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13505 offset + 16 + current_function_pretend_args_size);
13507 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13508 offset + 4);
13510 /* Make sure that the instruction fetching the PC is in the right place
13511 to calculate "start of backtrace creation code + 12". */
13512 if (l_mask)
13514 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13515 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13516 offset + 12);
13517 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13518 ARM_HARD_FRAME_POINTER_REGNUM);
13519 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13520 offset);
13522 else
13524 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13525 ARM_HARD_FRAME_POINTER_REGNUM);
13526 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13527 offset);
13528 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13529 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13530 offset + 12);
13533 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13534 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13535 offset + 8);
13536 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13537 offset + 12);
13538 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13539 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13541 /* Optimisation: If we are not pushing any low registers but we are going
13542 to push some high registers then delay our first push. This will just
13543 be a push of LR and we can combine it with the push of the first high
13544 register. */
13545 else if ((l_mask & 0xff) != 0
13546 || (high_regs_pushed == 0 && l_mask))
13547 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13549 if (high_regs_pushed)
13551 unsigned pushable_regs;
13552 unsigned next_hi_reg;
13554 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13555 if (live_regs_mask & (1 << next_hi_reg))
13556 break;
13558 pushable_regs = l_mask & 0xff;
13560 if (pushable_regs == 0)
13561 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13563 while (high_regs_pushed > 0)
13565 unsigned long real_regs_mask = 0;
13567 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13569 if (pushable_regs & (1 << regno))
13571 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13573 high_regs_pushed --;
13574 real_regs_mask |= (1 << next_hi_reg);
13576 if (high_regs_pushed)
13578 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13579 next_hi_reg --)
13580 if (live_regs_mask & (1 << next_hi_reg))
13581 break;
13583 else
13585 pushable_regs &= ~((1 << regno) - 1);
13586 break;
13591 /* If we had to find a work register and we have not yet
13592 saved the LR then add it to the list of regs to push. */
13593 if (l_mask == (1 << LR_REGNUM))
13595 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13596 1, &cfa_offset,
13597 real_regs_mask | (1 << LR_REGNUM));
13598 l_mask = 0;
13600 else
13601 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
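/* High registers cannot appear in a Thumb push instruction, so each
   one is first copied into a free low register and pushed from there.
   REAL_REGS_MASK tells thumb_pushpop which high registers the pushed
   slots actually hold, keeping the dwarf2 save-slot notes accurate.  */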

/* Handle the case of a double word load into a low register from
   a computed memory address.  The computed address may involve a
   register which is overwritten by the load.  */
const char *
thumb_load_double_from_address (rtx *operands)
{
  rtx addr;
  rtx base;
  rtx offset;
  rtx arg1;
  rtx arg2;

  if (GET_CODE (operands[0]) != REG)
    abort ();

  if (GET_CODE (operands[1]) != MEM)
    abort ();

  /* Get the memory address.  */
  addr = XEXP (operands[1], 0);

  /* Work out how the memory address is computed.  */
  switch (GET_CODE (addr))
    {
    case REG:
      operands[2] = gen_rtx_MEM (SImode,
				 plus_constant (XEXP (operands[1], 0), 4));

      if (REGNO (operands[0]) == REGNO (addr))
	{
	  output_asm_insn ("ldr\t%H0, %2", operands);
	  output_asm_insn ("ldr\t%0, %1", operands);
	}
      else
	{
	  output_asm_insn ("ldr\t%0, %1", operands);
	  output_asm_insn ("ldr\t%H0, %2", operands);
	}
      break;

    case CONST:
      /* Compute <address> + 4 for the high order load.  */
      operands[2] = gen_rtx_MEM (SImode,
				 plus_constant (XEXP (operands[1], 0), 4));

      output_asm_insn ("ldr\t%0, %1", operands);
      output_asm_insn ("ldr\t%H0, %2", operands);
      break;

    case PLUS:
      arg1 = XEXP (addr, 0);
      arg2 = XEXP (addr, 1);

      if (CONSTANT_P (arg1))
	base = arg2, offset = arg1;
      else
	base = arg1, offset = arg2;

      if (GET_CODE (base) != REG)
	abort ();

      /* Catch the case of <address> = <reg> + <reg>.  */
      if (GET_CODE (offset) == REG)
	{
	  int reg_offset = REGNO (offset);
	  int reg_base = REGNO (base);
	  int reg_dest = REGNO (operands[0]);

	  /* Add the base and offset registers together into the
	     higher destination register.  */
	  asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r\n",
		       reg_dest + 1, reg_base, reg_offset);

	  /* Load the lower destination register from the address in
	     the higher destination register.  */
	  asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]\n",
		       reg_dest, reg_dest + 1);

	  /* Load the higher destination register from its own address
	     plus 4.  */
	  asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]\n",
		       reg_dest + 1, reg_dest + 1);
	}
      else
	{
	  /* Compute <address> + 4 for the high order load.  */
	  operands[2] = gen_rtx_MEM (SImode,
				     plus_constant (XEXP (operands[1], 0), 4));

	  /* If the computed address is held in the low order register
	     then load the high order register first, otherwise always
	     load the low order register first.  */
	  if (REGNO (operands[0]) == REGNO (base))
	    {
	      output_asm_insn ("ldr\t%H0, %2", operands);
	      output_asm_insn ("ldr\t%0, %1", operands);
	    }
	  else
	    {
	      output_asm_insn ("ldr\t%0, %1", operands);
	      output_asm_insn ("ldr\t%H0, %2", operands);
	    }
	}
      break;

    case LABEL_REF:
      /* With no registers to worry about we can just load the value
	 directly.  */
      operands[2] = gen_rtx_MEM (SImode,
				 plus_constant (XEXP (operands[1], 0), 4));

      output_asm_insn ("ldr\t%H0, %2", operands);
      output_asm_insn ("ldr\t%0, %1", operands);
      break;

    default:
      abort ();
      break;
    }

  return "";
}
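
/* Worked example (illustrative, not from the original source): for a
   DImode load into r2/r3 from the address r0 + r1, the reg + reg case
   above emits

	add	r3, r0, r1	@ build the address in the high half
	ldr	r2, [r3, #0]	@ low word first; address still live
	ldr	r3, [r3, #4]	@ high word clobbers the address last

   so the register holding the computed address is only overwritten by
   the final load.  */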

const char *
thumb_output_move_mem_multiple (int n, rtx *operands)
{
  rtx tmp;

  switch (n)
    {
    case 2:
      if (REGNO (operands[4]) > REGNO (operands[5]))
	{
	  tmp = operands[4];
	  operands[4] = operands[5];
	  operands[5] = tmp;
	}
      output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
      output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
      break;

    case 3:
      /* Sort the three scratch registers into ascending order;
	 ldmia/stmia require ascending register lists.  */
      if (REGNO (operands[4]) > REGNO (operands[5]))
	{
	  tmp = operands[4];
	  operands[4] = operands[5];
	  operands[5] = tmp;
	}
      if (REGNO (operands[5]) > REGNO (operands[6]))
	{
	  tmp = operands[5];
	  operands[5] = operands[6];
	  operands[6] = tmp;
	}
      if (REGNO (operands[4]) > REGNO (operands[5]))
	{
	  tmp = operands[4];
	  operands[4] = operands[5];
	  operands[5] = tmp;
	}

      output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
      output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
      break;

    default:
      abort ();
    }

  return "";
}
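
/* Illustrative example (not from the original source): for n == 3 with
   scratch registers arriving as r3, r5, r4, the swaps above reorder
   them to r3, r4, r5 before printing

	ldmia	r1!, {r3, r4, r5}
	stmia	r0!, {r3, r4, r5}

   since the register lists must be in ascending order.  */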

/* Output a call-via instruction for thumb state.  */
const char *
thumb_call_via_reg (rtx reg)
{
  int regno = REGNO (reg);
  rtx *labelp;

  gcc_assert (regno < LR_REGNUM);

  /* If we are in the normal text section we can use a single instance
     per compilation unit.  If we are doing function sections, then we need
     an entry per section, since we can't rely on reachability.  */
  if (in_text_section ())
    {
      thumb_call_reg_needed = 1;

      if (thumb_call_via_label[regno] == NULL)
	thumb_call_via_label[regno] = gen_label_rtx ();
      labelp = thumb_call_via_label + regno;
    }
  else
    {
      if (cfun->machine->call_via[regno] == NULL)
	cfun->machine->call_via[regno] = gen_label_rtx ();
      labelp = cfun->machine->call_via + regno;
    }

  output_asm_insn ("bl\t%a0", labelp);
  return "";
}
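
/* Sketch of the resulting code (illustrative; the label name is
   hypothetical): a call through r5 in the text section becomes

	bl	.L99		@ at every call site
	...
   .L99:
	bx	r5		@ single stub, emitted by arm_file_end

   The callee returns directly to the original call site via the LR
   value set up by the BL.  */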

/* Routines for generating rtl.  */
void
thumb_expand_movmemqi (rtx *operands)
{
  rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
  rtx in  = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
  HOST_WIDE_INT len = INTVAL (operands[2]);
  HOST_WIDE_INT offset = 0;

  /* Copy 12-byte and 8-byte blocks using multi-register moves that
     post-increment both pointers.  */
  while (len >= 12)
    {
      emit_insn (gen_movmem12b (out, in, out, in));
      len -= 12;
    }

  if (len >= 8)
    {
      emit_insn (gen_movmem8b (out, in, out, in));
      len -= 8;
    }

  if (len >= 4)
    {
      rtx reg = gen_reg_rtx (SImode);
      emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
      emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
      len -= 4;
      offset += 4;
    }

  if (len >= 2)
    {
      rtx reg = gen_reg_rtx (HImode);
      emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
					      plus_constant (in, offset))));
      emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
			    reg));
      len -= 2;
      offset += 2;
    }

  if (len)
    {
      rtx reg = gen_reg_rtx (QImode);
      emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
					      plus_constant (in, offset))));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
			    reg));
    }
}
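
/* Worked example (illustrative, not from the original source): a
   15-byte copy is lowered as 12 + 2 + 1.  The movmem12b pattern copies
   12 bytes and post-increments both pointers, so the tail is copied as
   a halfword at offset 0 and a byte at offset 2 from the updated
   pointers; OFFSET only tracks tail bytes copied after the last
   pointer update.  */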

void
thumb_reload_out_hi (rtx *operands)
{
  emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}

/* Handle reading a half-word from memory during reload.  */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
  abort ();
}

/* Return the length of a function name prefix
   that starts with the character 'c'.  */
static int
arm_get_strip_length (int c)
{
  switch (c)
    {
    ARM_NAME_ENCODING_LENGTHS
      default: return 0;
    }
}

/* Return a pointer to a function's name with any
   and all prefix encodings stripped from it.  */
const char *
arm_strip_name_encoding (const char *name)
{
  int skip;

  while ((skip = arm_get_strip_length (* name)))
    name += skip;

  return name;
}
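
/* Example (illustrative): the prefixes recognized here come from
   ARM_NAME_ENCODING_LENGTHS; '*' (the verbatim-output marker tested by
   arm_asm_output_labelref below) is one of them, so a name such as
   "*foo" strips to "foo".  The other prefix characters are
   target-specific.  */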

/* If there is a '*' anywhere in the name's prefix, then
   emit the stripped name verbatim, otherwise prepend an
   underscore if leading underscores are being used.  */
void
arm_asm_output_labelref (FILE *stream, const char *name)
{
  int skip;
  int verbatim = 0;

  while ((skip = arm_get_strip_length (* name)))
    {
      verbatim |= (*name == '*');
      name += skip;
    }

  if (verbatim)
    fputs (name, stream);
  else
    asm_fprintf (stream, "%U%s", name);
}

static void
arm_file_end (void)
{
  int regno;

  if (! thumb_call_reg_needed)
    return;

  text_section ();
  asm_fprintf (asm_out_file, "\t.code 16\n");
  ASM_OUTPUT_ALIGN (asm_out_file, 1);

  for (regno = 0; regno < LR_REGNUM; regno++)
    {
      rtx label = thumb_call_via_label[regno];

      if (label != 0)
	{
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (label));
	  asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
	}
    }
}

rtx aof_pic_label;

#ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.  */

struct pic_chain
{
  struct pic_chain * next;
  const char * symname;
};

static struct pic_chain * aof_pic_chain = NULL;

rtx
aof_pic_entry (rtx x)
{
  struct pic_chain ** chainp;
  int offset;

  if (aof_pic_label == NULL_RTX)
    aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");

  for (offset = 0, chainp = &aof_pic_chain; *chainp;
       offset += 4, chainp = &(*chainp)->next)
    if ((*chainp)->symname == XSTR (x, 0))
      return plus_constant (aof_pic_label, offset);

  *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
  (*chainp)->next = NULL;
  (*chainp)->symname = XSTR (x, 0);
  return plus_constant (aof_pic_label, offset);
}
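
/* Illustrative note (not from the original source): each distinct
   symbol handed to aof_pic_entry is assigned the next 4-byte slot
   (offsets 0, 4, 8, ...) in the "x$adcons" pool that
   aof_dump_pic_table emits below, so every reference to the symbol
   becomes x$adcons + <offset>.  */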

void
aof_dump_pic_table (FILE *f)
{
  struct pic_chain * chain;

  if (aof_pic_chain == NULL)
    return;

  asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
	       PIC_OFFSET_TABLE_REGNUM,
	       PIC_OFFSET_TABLE_REGNUM);
  fputs ("|x$adcons|\n", f);

  for (chain = aof_pic_chain; chain; chain = chain->next)
    {
      fputs ("\tDCD\t", f);
      assemble_name (f, chain->symname);
      fputs ("\n", f);
    }
}

int arm_text_section_count = 1;

char *
aof_text_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
	   arm_text_section_count++);
  if (flag_pic)
    strcat (buf, ", PIC, REENTRANT");
  return buf;
}

static int arm_data_section_count = 1;

char *
aof_data_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}

/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import * next;
  const char * name;
};

static struct import * imports_list = NULL;

void
aof_add_import (const char *name)
{
  struct import * new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  imports_list = new;
  new->name = name;
}

void
aof_delete_import (const char *name)
{
  struct import ** old;

  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
	{
	  *old = (*old)->next;
	  return;
	}
    }
}

int arm_main_function = 0;

static void
aof_dump_imports (FILE *f)
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      text_section ();
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}

static void
aof_globalize_label (FILE *stream, const char *name)
{
  default_globalize_label (stream, name);
  if (! strcmp (name, "main"))
    arm_main_function = 1;
}

static void
aof_file_start (void)
{
  fputs ("__r0\tRN\t0\n", asm_out_file);
  fputs ("__a1\tRN\t0\n", asm_out_file);
  fputs ("__a2\tRN\t1\n", asm_out_file);
  fputs ("__a3\tRN\t2\n", asm_out_file);
  fputs ("__a4\tRN\t3\n", asm_out_file);
  fputs ("__v1\tRN\t4\n", asm_out_file);
  fputs ("__v2\tRN\t5\n", asm_out_file);
  fputs ("__v3\tRN\t6\n", asm_out_file);
  fputs ("__v4\tRN\t7\n", asm_out_file);
  fputs ("__v5\tRN\t8\n", asm_out_file);
  fputs ("__v6\tRN\t9\n", asm_out_file);
  fputs ("__sl\tRN\t10\n", asm_out_file);
  fputs ("__fp\tRN\t11\n", asm_out_file);
  fputs ("__ip\tRN\t12\n", asm_out_file);
  fputs ("__sp\tRN\t13\n", asm_out_file);
  fputs ("__lr\tRN\t14\n", asm_out_file);
  fputs ("__pc\tRN\t15\n", asm_out_file);
  fputs ("__f0\tFN\t0\n", asm_out_file);
  fputs ("__f1\tFN\t1\n", asm_out_file);
  fputs ("__f2\tFN\t2\n", asm_out_file);
  fputs ("__f3\tFN\t3\n", asm_out_file);
  fputs ("__f4\tFN\t4\n", asm_out_file);
  fputs ("__f5\tFN\t5\n", asm_out_file);
  fputs ("__f6\tFN\t6\n", asm_out_file);
  fputs ("__f7\tFN\t7\n", asm_out_file);
  text_section ();
}

static void
aof_file_end (void)
{
  if (flag_pic)
    aof_dump_pic_table (asm_out_file);
  arm_file_end ();
  aof_dump_imports (asm_out_file);
  fputs ("\tEND\n", asm_out_file);
}
#endif /* AOF_ASSEMBLER */

#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && DECL_P (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
	arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
	arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }
}
#endif /* !ARM_PE */

static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}

/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		     HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
		     tree function)
{
  static int thunk_label = 0;
  char label[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
		    ? 1 : 0);

  if (mi_delta < 0)
    mi_delta = - mi_delta;
  if (TARGET_THUMB)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
    }
  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
	shift += 2;
      else
	{
	  asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
		       mi_op, this_regno, this_regno,
		       mi_delta & (0xff << shift));
	  mi_delta &= ~(0xff << shift);
	  shift += 8;
	}
    }
  if (TARGET_THUMB)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
	fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}
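
/* Illustrative output (not from the original source): for a Thumb
   thunk with delta == 4 the code above produces roughly

	ldr	r12, .LTHUMBFUNC0
	add	r0, r0, #4		@ adjust the this pointer
	bx	r12
	.align	2
   .LTHUMBFUNC0:
	.word	function

   while the ARM version just adjusts the this pointer and emits
   "b function(PLT)" (the PLT suffix only when NEED_PLT_RELOC).  */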

int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  if (GET_CODE (x) != CONST_VECTOR)
    abort ();

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       abort ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}
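
/* Example (illustrative): a V4HImode constant with elements 1, 2, 3
   and 4 (element 0 first) is printed high element first as the single
   literal 0x0004000300020001.  */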

const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}

static rtx
arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		      int incoming ATTRIBUTE_UNUSED)
{
#if 0
  /* FIXME: The ARM backend has special code to handle structure
     returns, and will reserve its own hidden first argument.  So
     if this macro is enabled a *second* hidden argument will be
     reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */
  return 0;
#else
  /* Register in which address to store a structure value
     is passed to a function.  */
  return gen_rtx_REG (Pmode, ARG_REGISTER (1));
#endif
}

/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */

static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
			    enum machine_mode mode ATTRIBUTE_UNUSED,
			    tree type ATTRIBUTE_UNUSED,
			    int *pretend_size,
			    int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}
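
/* Worked example (illustrative): if the named arguments of a varargs
   function consume two of the four argument registers (cum->nregs ==
   2), then *pretend_size becomes 2 * UNITS_PER_WORD == 8 and the
   prologue pushes r2 and r3, making the anonymous arguments contiguous
   with any stack-passed ones.  */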

/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */

int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */

int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);
  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */

int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */

int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
	  && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}
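
/* Illustrative example (not from the original source): for a
   multiply-accumulate consumer whose source is
   (plus (mult ...) (reg ACC)), the test above allows

	mul	r1, r2, r3
	mla	r4, r5, r6, r1	@ r1 feeds only the accumulator

   to be treated as free of an early multiply dependency, since the
   producer's result is not one of the multiplied operands.  */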

/* We can't rely on the caller doing the proper promotion when
   using APCS or ATPCS.  */

static bool
arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
{
  return !TARGET_AAPCS_BASED;
}

/* AAPCS based ABIs use short enums by default.  */

static bool
arm_default_short_enums (void)
{
  return TARGET_AAPCS_BASED;
}

/* AAPCS requires that anonymous bitfields affect structure alignment.  */

static bool
arm_align_anon_bitfield (void)
{
  return TARGET_AAPCS_BASED;
}

/* The generic C++ ABI says 64-bit (long long).  The EABI says 32-bit.  */

static tree
arm_cxx_guard_type (void)
{
  return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
}

/* The EABI says test the least significant bit of a guard variable.  */

static bool
arm_cxx_guard_mask_bit (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI specifies that all array cookies are 8 bytes long.  */

static tree
arm_get_cookie_size (tree type)
{
  tree size;

  if (!TARGET_AAPCS_BASED)
    return default_cxx_get_cookie_size (type);

  size = build_int_cst (sizetype, 8);
  return size;
}

/* The EABI says that array cookies should also contain the element size.  */

static bool
arm_cookie_has_size (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI says constructors and destructors should return a pointer to
   the object constructed/destroyed.  */

static bool
arm_cxx_cdtor_returns_this (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI says that an inline function may never be the key
   method.  */

static bool
arm_cxx_key_method_may_be_inline (void)
{
  return !TARGET_AAPCS_BASED;
}

/* The EABI says that the virtual table, etc., for a class must be
   exported if it has a key method.  The EABI does not specify the
   behavior if there is no key method, but there is no harm in
   exporting the class data in that case too.  */

static bool
arm_cxx_export_class_data (void)
{
  return TARGET_AAPCS_BASED;
}

void
arm_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  rtx addr;
  unsigned long saved_regs;

  saved_regs = arm_compute_save_reg_mask ();

  if ((saved_regs & (1 << LR_REGNUM)) == 0)
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
  else
    {
      if (frame_pointer_needed)
	addr = plus_constant (hard_frame_pointer_rtx, -4);
      else
	{
	  /* LR will be the first saved register.  */
	  offsets = arm_get_frame_offsets ();
	  delta = offsets->outgoing_args - (offsets->frame + 4);

	  /* Offsets of 4096 or more need a scratch register to build
	     the address in two steps.  */
	  if (delta >= 4096)
	    {
	      emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
				     GEN_INT (delta & ~4095)));
	      addr = scratch;
	      delta &= 4095;
	    }
	  else
	    addr = stack_pointer_rtx;

	  addr = plus_constant (addr, delta);
	}
      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
}
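
/* Worked example (illustrative): if the saved LR lies 4100 bytes above
   the stack pointer, the offset range is exceeded, so the code above
   emits "add scratch, sp, #4096" and then stores SOURCE at
   [scratch, #4].  */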

void
thumb_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  int reg;
  rtx addr;
  unsigned long mask;

  emit_insn (gen_rtx_USE (VOIDmode, source));

  mask = thumb_compute_save_reg_mask ();
  if (mask & (1 << LR_REGNUM))
    {
      offsets = arm_get_frame_offsets ();

      /* Find the saved regs.  */
      if (frame_pointer_needed)
	{
	  delta = offsets->soft_frame - offsets->saved_args;
	  reg = THUMB_HARD_FRAME_POINTER_REGNUM;
	}
      else
	{
	  delta = offsets->outgoing_args - offsets->saved_args;
	  reg = SP_REGNUM;
	}
      /* Allow for the stack frame.  */
      if (TARGET_BACKTRACE)
	delta -= 16;
      /* The link register is always the first saved register.  */
      delta -= 4;

      /* Construct the address.  */
      addr = gen_rtx_REG (SImode, reg);
      if ((reg != SP_REGNUM && delta >= 128)
	  || delta >= 1024)
	{
	  emit_insn (gen_movsi (scratch, GEN_INT (delta)));
	  emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
	  addr = scratch;
	}
      else
	addr = plus_constant (addr, delta);

      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
}

/* Implements target hook vector_mode_supported_p.  */
bool
arm_vector_mode_supported_p (enum machine_mode mode)
{
  if ((mode == V2SImode)
      || (mode == V4HImode)
      || (mode == V8QImode))
    return true;

  return false;
}

/* Implement TARGET_SHIFT_TRUNCATION_MASK.  SImode shifts use normal
   ARM insns and therefore guarantee that the shift count is modulo 256.
   DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
   guarantee no particular behavior for out-of-range counts.  */

static unsigned HOST_WIDE_INT
arm_shift_truncation_mask (enum machine_mode mode)
{
  return mode == SImode ? 255 : 0;
}
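
/* Example (illustrative): returning 255 for SImode lets the middle-end
   fold "x << (n & 255)" into "x << n", matching the barrel shifter's
   use of only the bottom byte of a register shift count; returning 0
   for DImode makes no such promise for the libcall/optabs
   implementations.  */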

/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
arm_dbx_register_number (unsigned int regno)
{
  if (regno < 16)
    return regno;

  /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
     compatibility.  The EABI defines them as registers 96-103.  */
  if (IS_FPA_REGNUM (regno))
    return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;

  if (IS_VFP_REGNUM (regno))
    return 64 + regno - FIRST_VFP_REGNUM;

  if (IS_IWMMXT_GR_REGNUM (regno))
    return 104 + regno - FIRST_IWMMXT_GR_REGNUM;

  if (IS_IWMMXT_REGNUM (regno))
    return 112 + regno - FIRST_IWMMXT_REGNUM;

  abort ();
}
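
/* Example (illustrative): the third FPA register (FIRST_FPA_REGNUM + 2)
   is emitted as DWARF register 18 on legacy targets but 98 on
   AAPCS-based targets; VFP, iWMMXt GR and iWMMXt registers map onto
   the 64+, 104+ and 112+ ranges in the same linear fashion.  */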