/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];
/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void replace_symbols_in_block (tree, rtx, rtx);
static void thumb_exit (FILE *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);

#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static bool arm_cxx_export_class_data (void);
static void arm_init_libfuncs (void);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_EXPORT_CLASS_DATA
#define TARGET_CXX_EXPORT_CLASS_DATA arm_cxx_export_class_data

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Set by the legacy -mhard-float and -msoft-float options.  */
const char * target_float_switch = NULL;

/* Set by the -mabi=... option.  */
const char * target_abi_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[13];
static int thumb_call_reg_needed;
/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   FL_FOR_ARCH6
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6
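
/* As an illustration of how the architecture masks accumulate,
   FL_FOR_ARCH5TE expands to
   (FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB),
   i.e. everything an ARMv5TE core is assumed to support.  */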
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)
/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};
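
/* To illustrate the X-macro above (the exact entries live in
   arm-cores.def and may differ from this sketch), a line such as

     ARM_CORE("arm7tdmi", arm7tdmi, 4T, FL_CO_PROC, slowmul)

   would expand to

     {"arm7tdmi", arm_none, "4T", FL_CO_PROC | FL_FOR_ARCH4T,
      arm_slowmul_rtx_costs},

   Note that IDENT is unused in this expansion; the core field is
   always arm_none here, and arm_tune is derived from the entry's
   index into all_cores instead.  */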
static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};
/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";
struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};


/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};


/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};
struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};


struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};


/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",  ARM_ABI_APCS},
  {"atpcs",     ARM_ABI_ATPCS},
  {"aapcs",     ARM_ABI_AAPCS},
  {"iwmmxt",    ARM_ABI_IWMMXT}
};
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
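
/* For example, bit_count (0x29) == 3, since 0x29 is 101001 in binary;
   each iteration of the loop above clears exactly one set bit, so the
   loop runs once per set bit rather than once per bit position.  */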
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
}
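
/* As a concrete example of the divmod trick above: a plain SImode
   `a / b' can be compiled as a call to __aeabi_idivmod.  The quotient
   comes back in r0, exactly where a dedicated division routine would
   put it, and the remainder left in r1 is simply ignored, since r1 is
   assumed clobbered by any call anyway.  */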
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != 2)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == 0
                    /* If -march= is used, and -mcpu= has not been used,
                       assume that we should tune for a representative
                       CPU from that architecture.  */
                    || i == 1
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == 2)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                if (i != 2)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning ("switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }
  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");
  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
                    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~ARM_FLAG_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");
  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        abort ();
    }
  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else if (target_float_switch)
    {
      /* This is a bit of a hack to avoid needing target flags for these.  */
      if (target_float_switch[0] == 'h')
        arm_float_abi = ARM_FLOAT_ABI_HARD;
      else
        arm_float_abi = ARM_FLOAT_ABI_SOFT;
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning ("structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }
  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_is_strong)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const  arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
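
/* A sketch of how this table is reached from user code (the declaration
   below is an example of user source, not part of this file):

     void handler (void) __attribute__ ((interrupt ("IRQ")));

   The "IRQ" string is matched against the table above by arm_isr_value,
   which here yields ARM_FT_ISR, so prologue/epilogue code suitable for
   an IRQ handler is generated for `handler'.  */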
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value (always true
         until we implement return-in-regs), or for a tail-call
         argument ...  */
      if (sibling)
        {
          if (GET_CODE (sibling) != CALL_INSN)
            abort ();

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2.  */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
        return TRUE;
      mask =
          (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
                         >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
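
/* The rotating-mask loop above implements the ARM rule that a
   data-processing immediate is an 8-bit value rotated right by an even
   amount.  For example, 0x000000ff, 0x0000ff00 and 0xff000000 are all
   valid immediates, as is 0xf000000f (0xff rotated right by 4), while
   0x00000101 is not: its set bits cannot fit in any even-aligned 8-bit
   window, so it must be synthesized from more than one instruction.  */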
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are dyadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source,
                                                        temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}
1602 /* As above, but extra parameter GENERATE which, if clear, suppresses
1603 RTL generation. */
1605 static int
1606 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1607 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1608 int generate)
1610 int can_invert = 0;
1611 int can_negate = 0;
1612 int can_negate_initial = 0;
1613 int can_shift = 0;
1614 int i;
1615 int num_bits_set = 0;
1616 int set_sign_bit_copies = 0;
1617 int clear_sign_bit_copies = 0;
1618 int clear_zero_bit_copies = 0;
1619 int set_zero_bit_copies = 0;
1620 int insns = 0;
1621 unsigned HOST_WIDE_INT temp1, temp2;
1622 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1624 /* Find out which operations are safe for a given CODE. Also do a quick
1625 check for degenerate cases; these can occur when DImode operations
1626 are split. */
1627 switch (code)
1629 case SET:
1630 can_invert = 1;
1631 can_shift = 1;
1632 can_negate = 1;
1633 break;
1635 case PLUS:
1636 can_negate = 1;
1637 can_negate_initial = 1;
1638 break;
1640 case IOR:
1641 if (remainder == 0xffffffff)
1643 if (generate)
1644 emit_constant_insn (cond,
1645 gen_rtx_SET (VOIDmode, target,
1646 GEN_INT (ARM_SIGN_EXTEND (val))));
1647 return 1;
1649 if (remainder == 0)
1651 if (reload_completed && rtx_equal_p (target, source))
1652 return 0;
1653 if (generate)
1654 emit_constant_insn (cond,
1655 gen_rtx_SET (VOIDmode, target, source));
1656 return 1;
1658 break;
1660 case AND:
1661 if (remainder == 0)
1663 if (generate)
1664 emit_constant_insn (cond,
1665 gen_rtx_SET (VOIDmode, target, const0_rtx));
1666 return 1;
1668 if (remainder == 0xffffffff)
1670 if (reload_completed && rtx_equal_p (target, source))
1671 return 0;
1672 if (generate)
1673 emit_constant_insn (cond,
1674 gen_rtx_SET (VOIDmode, target, source));
1675 return 1;
1677 can_invert = 1;
1678 break;
1680 case XOR:
1681 if (remainder == 0)
1683 if (reload_completed && rtx_equal_p (target, source))
1684 return 0;
1685 if (generate)
1686 emit_constant_insn (cond,
1687 gen_rtx_SET (VOIDmode, target, source));
1688 return 1;
1690 if (remainder == 0xffffffff)
1692 if (generate)
1693 emit_constant_insn (cond,
1694 gen_rtx_SET (VOIDmode, target,
1695 gen_rtx_NOT (mode, source)));
1696 return 1;
1699 /* We don't know how to handle this yet below. */
1700 abort ();
1702 case MINUS:
1703 /* We treat MINUS as (val - source), since (source - val) is always
1704 passed as (source + (-val)). */
1705 if (remainder == 0)
1707 if (generate)
1708 emit_constant_insn (cond,
1709 gen_rtx_SET (VOIDmode, target,
1710 gen_rtx_NEG (mode, source)));
1711 return 1;
1713 if (const_ok_for_arm (val))
1715 if (generate)
1716 emit_constant_insn (cond,
1717 gen_rtx_SET (VOIDmode, target,
1718 gen_rtx_MINUS (mode, GEN_INT (val),
1719 source)));
1720 return 1;
1722 can_negate = 1;
1724 break;
1726 default:
1727 abort ();
1730 /* If we can do it in one insn get out quickly. */
1731 if (const_ok_for_arm (val)
1732 || (can_negate_initial && const_ok_for_arm (-val))
1733 || (can_invert && const_ok_for_arm (~val)))
1735 if (generate)
1736 emit_constant_insn (cond,
1737 gen_rtx_SET (VOIDmode, target,
1738 (source
1739 ? gen_rtx_fmt_ee (code, mode, source,
1740 GEN_INT (val))
1741 : GEN_INT (val))));
1742 return 1;
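/* const_ok_for_arm accepts any 8-bit value rotated right by an even
   amount within the word: e.g. 0x000003fc (0xff ror 30) is a single
   insn, while 0x000001fe must fall through to the multi-insn code
   below because its 8 set bits sit at an odd rotation.  */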
1745 /* Calculate a few attributes that may be useful for specific
1746 optimizations. */
1747 for (i = 31; i >= 0; i--)
1749 if ((remainder & (1 << i)) == 0)
1750 clear_sign_bit_copies++;
1751 else
1752 break;
1755 for (i = 31; i >= 0; i--)
1757 if ((remainder & (1 << i)) != 0)
1758 set_sign_bit_copies++;
1759 else
1760 break;
1763 for (i = 0; i <= 31; i++)
1765 if ((remainder & (1 << i)) == 0)
1766 clear_zero_bit_copies++;
1767 else
1768 break;
1771 for (i = 0; i <= 31; i++)
1773 if ((remainder & (1 << i)) != 0)
1774 set_zero_bit_copies++;
1775 else
1776 break;
1779 switch (code)
1781 case SET:
1782 /* See if we can do this by sign_extending a constant that is known
1783 to be negative. This is a good way of doing it, since the shift
1784 may well merge into a subsequent insn. */
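/* E.g. 0xfffffe00 has 23 copies of the sign bit; shifting it left by
   22 gives 0x80000000, a valid immediate, so "mov rD, #0x80000000"
   followed by "asr rD, rD, #22" rebuilds the value in two insns.  */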
1785 if (set_sign_bit_copies > 1)
1787 if (const_ok_for_arm
1788 (temp1 = ARM_SIGN_EXTEND (remainder
1789 << (set_sign_bit_copies - 1))))
1791 if (generate)
1793 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1794 emit_constant_insn (cond,
1795 gen_rtx_SET (VOIDmode, new_src,
1796 GEN_INT (temp1)));
1797 emit_constant_insn (cond,
1798 gen_ashrsi3 (target, new_src,
1799 GEN_INT (set_sign_bit_copies - 1)));
1801 return 2;
1803 /* For an inverted constant, we will need to set the low bits,
1804 these will be shifted out of harm's way. */
1805 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1806 if (const_ok_for_arm (~temp1))
1808 if (generate)
1810 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1811 emit_constant_insn (cond,
1812 gen_rtx_SET (VOIDmode, new_src,
1813 GEN_INT (temp1)));
1814 emit_constant_insn (cond,
1815 gen_ashrsi3 (target, new_src,
1816 GEN_INT (set_sign_bit_copies - 1)));
1818 return 2;
1822 /* See if we can generate this by setting the bottom (or the top)
1823 16 bits, and then shifting these into the other half of the
1824 word. We only look for the simplest cases; to do more would cost
1825 too much. Be careful, however, not to generate this when the
1826 alternative would take fewer insns. */
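/* E.g. for 0x01fe01fe the low half 0x1fe is not a valid immediate,
   so it is synthesized once into a temporary and a single ORR with a
   shifted operand ("orr rD, rT, rT, lsl #16") fills in the top half,
   instead of building each half from scratch.  */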
1827 if (val & 0xffff0000)
1829 temp1 = remainder & 0xffff0000;
1830 temp2 = remainder & 0x0000ffff;
1832 /* Overlaps outside this range are best done using other methods. */
1833 for (i = 9; i < 24; i++)
1835 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1836 && !const_ok_for_arm (temp2))
1838 rtx new_src = (subtargets
1839 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1840 : target);
1841 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1842 source, subtargets, generate);
1843 source = new_src;
1844 if (generate)
1845 emit_constant_insn
1846 (cond,
1847 gen_rtx_SET
1848 (VOIDmode, target,
1849 gen_rtx_IOR (mode,
1850 gen_rtx_ASHIFT (mode, source,
1851 GEN_INT (i)),
1852 source)));
1853 return insns + 1;
1857 /* Don't duplicate cases already considered. */
1858 for (i = 17; i < 24; i++)
1860 if (((temp1 | (temp1 >> i)) == remainder)
1861 && !const_ok_for_arm (temp1))
1863 rtx new_src = (subtargets
1864 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1865 : target);
1866 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1867 source, subtargets, generate);
1868 source = new_src;
1869 if (generate)
1870 emit_constant_insn
1871 (cond,
1872 gen_rtx_SET (VOIDmode, target,
1873 gen_rtx_IOR
1874 (mode,
1875 gen_rtx_LSHIFTRT (mode, source,
1876 GEN_INT (i)),
1877 source)));
1878 return insns + 1;
1882 break;
1884 case IOR:
1885 case XOR:
1886 /* If we have IOR or XOR, and the constant can be loaded in a
1887 single instruction, and we can find a temporary to put it in,
1888 then this can be done in two instructions instead of 3-4. */
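/* E.g. for (x | 0xffff00ff): ~0xffff00ff == 0x0000ff00 is a valid
   immediate, so "mvn rT, #0x0000ff00" loads the constant in one insn
   and "orr target, x, rT" finishes the job.  */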
1889 if (subtargets
1890 /* TARGET can't be NULL if SUBTARGETS is 0.  */
1891 || (reload_completed && !reg_mentioned_p (target, source)))
1893 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1895 if (generate)
1897 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1899 emit_constant_insn (cond,
1900 gen_rtx_SET (VOIDmode, sub,
1901 GEN_INT (val)));
1902 emit_constant_insn (cond,
1903 gen_rtx_SET (VOIDmode, target,
1904 gen_rtx_fmt_ee (code, mode,
1905 source, sub)));
1907 return 2;
1911 if (code == XOR)
1912 break;
1914 if (set_sign_bit_copies > 8
1915 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1917 if (generate)
1919 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1920 rtx shift = GEN_INT (set_sign_bit_copies);
1922 emit_constant_insn
1923 (cond,
1924 gen_rtx_SET (VOIDmode, sub,
1925 gen_rtx_NOT (mode,
1926 gen_rtx_ASHIFT (mode,
1927 source,
1928 shift))));
1929 emit_constant_insn
1930 (cond,
1931 gen_rtx_SET (VOIDmode, target,
1932 gen_rtx_NOT (mode,
1933 gen_rtx_LSHIFTRT (mode, sub,
1934 shift))));
1936 return 2;
1939 if (set_zero_bit_copies > 8
1940 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1942 if (generate)
1944 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1945 rtx shift = GEN_INT (set_zero_bit_copies);
1947 emit_constant_insn
1948 (cond,
1949 gen_rtx_SET (VOIDmode, sub,
1950 gen_rtx_NOT (mode,
1951 gen_rtx_LSHIFTRT (mode,
1952 source,
1953 shift))));
1954 emit_constant_insn
1955 (cond,
1956 gen_rtx_SET (VOIDmode, target,
1957 gen_rtx_NOT (mode,
1958 gen_rtx_ASHIFT (mode, sub,
1959 shift))));
1961 return 2;
1964 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1966 if (generate)
1968 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1969 emit_constant_insn (cond,
1970 gen_rtx_SET (VOIDmode, sub,
1971 gen_rtx_NOT (mode, source)));
1972 source = sub;
1973 if (subtargets)
1974 sub = gen_reg_rtx (mode);
1975 emit_constant_insn (cond,
1976 gen_rtx_SET (VOIDmode, sub,
1977 gen_rtx_AND (mode, source,
1978 GEN_INT (temp1))));
1979 emit_constant_insn (cond,
1980 gen_rtx_SET (VOIDmode, target,
1981 gen_rtx_NOT (mode, sub)));
1983 return 3;
1985 break;
1987 case AND:
1988 /* See if two shifts will do 2 or more insns' worth of work. */
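/* E.g. (x & 0x0000ffff): 0xffff is not a valid immediate, but with
   clear_sign_bit_copies == 16 the pair "mov rD, x, lsl #16" then
   "mov rD, rD, lsr #16" clears the top half in two insns with no
   constant load at all.  */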
1989 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1991 HOST_WIDE_INT shift_mask = ((0xffffffff
1992 << (32 - clear_sign_bit_copies))
1993 & 0xffffffff);
1995 if ((remainder | shift_mask) != 0xffffffff)
1997 if (generate)
1999 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2000 insns = arm_gen_constant (AND, mode, cond,
2001 remainder | shift_mask,
2002 new_src, source, subtargets, 1);
2003 source = new_src;
2005 else
2007 rtx targ = subtargets ? NULL_RTX : target;
2008 insns = arm_gen_constant (AND, mode, cond,
2009 remainder | shift_mask,
2010 targ, source, subtargets, 0);
2014 if (generate)
2016 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2017 rtx shift = GEN_INT (clear_sign_bit_copies);
2019 emit_insn (gen_ashlsi3 (new_src, source, shift));
2020 emit_insn (gen_lshrsi3 (target, new_src, shift));
2023 return insns + 2;
2026 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2028 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2030 if ((remainder | shift_mask) != 0xffffffff)
2032 if (generate)
2034 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2036 insns = arm_gen_constant (AND, mode, cond,
2037 remainder | shift_mask,
2038 new_src, source, subtargets, 1);
2039 source = new_src;
2041 else
2043 rtx targ = subtargets ? NULL_RTX : target;
2045 insns = arm_gen_constant (AND, mode, cond,
2046 remainder | shift_mask,
2047 targ, source, subtargets, 0);
2051 if (generate)
2053 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2054 rtx shift = GEN_INT (clear_zero_bit_copies);
2056 emit_insn (gen_lshrsi3 (new_src, source, shift));
2057 emit_insn (gen_ashlsi3 (target, new_src, shift));
2060 return insns + 2;
2063 break;
2065 default:
2066 break;
2069 for (i = 0; i < 32; i++)
2070 if (remainder & (1 << i))
2071 num_bits_set++;
2073 if (code == AND || (can_invert && num_bits_set > 16))
2074 remainder = (~remainder) & 0xffffffff;
2075 else if (code == PLUS && num_bits_set > 16)
2076 remainder = (-remainder) & 0xffffffff;
2077 else
2079 can_invert = 0;
2080 can_negate = 0;
2083 /* Now try and find a way of doing the job in either two or three
2084 instructions.
2085 We start by looking for the largest block of zeros that are aligned on
2086 a 2-bit boundary; we then fill up the temps, wrapping around to the
2087 top of the word when we drop off the bottom.
2088 In the worst case this code should produce no more than four insns. */
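/* E.g. for 0xff0000ff the largest aligned run of zeros covers bits
   8-23, and the value is emitted as two 8-bit chunks:
   "mov rD, #0xff000000" then "orr rD, rD, #0xff".  */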
2090 int best_start = 0;
2091 int best_consecutive_zeros = 0;
2093 for (i = 0; i < 32; i += 2)
2095 int consecutive_zeros = 0;
2097 if (!(remainder & (3 << i)))
2099 while ((i < 32) && !(remainder & (3 << i)))
2101 consecutive_zeros += 2;
2102 i += 2;
2104 if (consecutive_zeros > best_consecutive_zeros)
2106 best_consecutive_zeros = consecutive_zeros;
2107 best_start = i - consecutive_zeros;
2109 i -= 2;
2113 /* So long as it won't require any more insns to do so, it's
2114 desirable to emit a small constant (in bits 0...9) in the last
2115 insn. This way there is more chance that it can be combined with
2116 a later addressing insn to form a pre-indexed load or store
2117 operation. Consider:
2119 *((volatile int *)0xe0000100) = 1;
2120 *((volatile int *)0xe0000110) = 2;
2122 We want this to wind up as:
2124 mov rA, #0xe0000000
2125 mov rB, #1
2126 str rB, [rA, #0x100]
2127 mov rB, #2
2128 str rB, [rA, #0x110]
2130 rather than having to synthesize both large constants from scratch.
2132 Therefore, we calculate how many insns would be required to emit
2133 the constant starting from `best_start', and also starting from
2134 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2135 yield a shorter sequence, we may as well use zero. */
2136 if (best_start != 0
2137 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2138 && (count_insns_for_constant (remainder, 0) <=
2139 count_insns_for_constant (remainder, best_start)))
2140 best_start = 0;
2142 /* Now start emitting the insns. */
2143 i = best_start;
2146 int end;
2148 if (i <= 0)
2149 i += 32;
2150 if (remainder & (3 << (i - 2)))
2152 end = i - 8;
2153 if (end < 0)
2154 end += 32;
2155 temp1 = remainder & ((0x0ff << end)
2156 | ((i < end) ? (0xff >> (32 - end)) : 0));
2157 remainder &= ~temp1;
2159 if (generate)
2161 rtx new_src, temp1_rtx;
2163 if (code == SET || code == MINUS)
2165 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2166 if (can_invert && code != MINUS)
2167 temp1 = ~temp1;
2169 else
2171 if (remainder && subtargets)
2172 new_src = gen_reg_rtx (mode);
2173 else
2174 new_src = target;
2175 if (can_invert)
2176 temp1 = ~temp1;
2177 else if (can_negate)
2178 temp1 = -temp1;
2181 temp1 = trunc_int_for_mode (temp1, mode);
2182 temp1_rtx = GEN_INT (temp1);
2184 if (code == SET)
2186 else if (code == MINUS)
2187 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2188 else
2189 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2191 emit_constant_insn (cond,
2192 gen_rtx_SET (VOIDmode, new_src,
2193 temp1_rtx));
2194 source = new_src;
2197 if (code == SET)
2199 can_invert = 0;
2200 code = PLUS;
2202 else if (code == MINUS)
2203 code = PLUS;
2205 insns++;
2206 i -= 6;
2208 i -= 2;
2210 while (remainder);
2213 return insns;
2216 /* Canonicalize a comparison so that we are more likely to recognize it.
2217 This can be done for a few constant compares, where we can make the
2218 immediate value easier to load. */
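/* E.g. (x <= 0x3ff): 0x3ff is not a valid immediate, but 0x400 is,
   so the comparison is rewritten below as (x < 0x400).  */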
2220 enum rtx_code
2221 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2223 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2225 switch (code)
2227 case EQ:
2228 case NE:
2229 return code;
2231 case GT:
2232 case LE:
2233 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2234 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2236 *op1 = GEN_INT (i + 1);
2237 return code == GT ? GE : LT;
2239 break;
2241 case GE:
2242 case LT:
2243 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2244 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2246 *op1 = GEN_INT (i - 1);
2247 return code == GE ? GT : LE;
2249 break;
2251 case GTU:
2252 case LEU:
2253 if (i != ~((unsigned HOST_WIDE_INT) 0)
2254 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2256 *op1 = GEN_INT (i + 1);
2257 return code == GTU ? GEU : LTU;
2259 break;
2261 case GEU:
2262 case LTU:
2263 if (i != 0
2264 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2266 *op1 = GEN_INT (i - 1);
2267 return code == GEU ? GTU : LEU;
2269 break;
2271 default:
2272 abort ();
2275 return code;
2279 /* Define how to find the value returned by a function. */
2282 arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2284 enum machine_mode mode;
2285 int unsignedp ATTRIBUTE_UNUSED;
2286 rtx r ATTRIBUTE_UNUSED;
2289 mode = TYPE_MODE (type);
2290 /* Promote integer types. */
2291 if (INTEGRAL_TYPE_P (type))
2292 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2293 return LIBCALL_VALUE (mode);
2296 /* Determine the amount of memory needed to store the possible return
2297 registers of an untyped call. */
2299 arm_apply_result_size (void)
2301 int size = 16;
2303 if (TARGET_ARM)
2305 if (TARGET_HARD_FLOAT_ABI)
2307 if (TARGET_FPA)
2308 size += 12;
2309 if (TARGET_MAVERICK)
2310 size += 8;
2312 if (TARGET_IWMMXT_ABI)
2313 size += 8;
2316 return size;
2319 /* Decide whether a type should be returned in memory (true)
2320 or in a register (false). This is called by the macro
2321 RETURN_IN_MEMORY. */
2323 arm_return_in_memory (tree type)
2325 HOST_WIDE_INT size;
2327 if (!AGGREGATE_TYPE_P (type)
2328 && !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2329 /* All simple types are returned in registers.
2330 For AAPCS, complex types are treated the same as aggregates. */
2331 return 0;
2333 size = int_size_in_bytes (type);
2335 if (arm_abi != ARM_ABI_APCS)
2337 /* ATPCS and later return aggregate types in memory only if they are
2338 larger than a word (or are variable size). */
2339 return (size < 0 || size > UNITS_PER_WORD);
2342 /* For the arm-wince targets we choose to be compatible with Microsoft's
2343 ARM and Thumb compilers, which always return aggregates in memory. */
2344 #ifndef ARM_WINCE
2345 /* All structures/unions bigger than one word are returned in memory.
2346 Also catch the case where int_size_in_bytes returns -1. In this case
2347 the aggregate is either huge or of variable size, and in either case
2348 we will want to return it via memory and not in a register. */
2349 if (size < 0 || size > UNITS_PER_WORD)
2350 return 1;
2352 if (TREE_CODE (type) == RECORD_TYPE)
2354 tree field;
2356 /* For a struct the APCS says that we only return in a register
2357 if the type is 'integer like' and every addressable element
2358 has an offset of zero. For practical purposes this means
2359 that the structure can have at most one non bit-field element
2360 and that this element must be the first one in the structure. */
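/* So, under these APCS rules, "struct { int i; }" comes back in r0,
   while "struct { float f; }" and any struct with a second
   addressable member are returned in memory.  */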
2362 /* Find the first field, ignoring non FIELD_DECL things which will
2363 have been created by C++. */
2364 for (field = TYPE_FIELDS (type);
2365 field && TREE_CODE (field) != FIELD_DECL;
2366 field = TREE_CHAIN (field))
2367 continue;
2369 if (field == NULL)
2370 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2372 /* Check that the first field is valid for returning in a register. */
2374 /* ... Floats are not allowed. */
2375 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2376 return 1;
2378 /* ... Aggregates that are not themselves valid for returning in
2379 a register are not allowed. */
2380 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2381 return 1;
2383 /* Now check the remaining fields, if any. Only bitfields are allowed,
2384 since they are not addressable. */
2385 for (field = TREE_CHAIN (field);
2386 field;
2387 field = TREE_CHAIN (field))
2389 if (TREE_CODE (field) != FIELD_DECL)
2390 continue;
2392 if (!DECL_BIT_FIELD_TYPE (field))
2393 return 1;
2396 return 0;
2399 if (TREE_CODE (type) == UNION_TYPE)
2401 tree field;
2403 /* Unions can be returned in registers if every element is
2404 integral, or can be returned in an integer register. */
2405 for (field = TYPE_FIELDS (type);
2406 field;
2407 field = TREE_CHAIN (field))
2409 if (TREE_CODE (field) != FIELD_DECL)
2410 continue;
2412 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2413 return 1;
2415 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2416 return 1;
2419 return 0;
2421 #endif /* not ARM_WINCE */
2423 /* Return all other types in memory. */
2424 return 1;
2427 /* Indicate whether or not words of a double are in big-endian order. */
2430 arm_float_words_big_endian (void)
2432 if (TARGET_MAVERICK)
2433 return 0;
2435 /* For FPA, float words are always big-endian. For VFP, floats words
2436 follow the memory system mode. */
2438 if (TARGET_FPA)
2440 return 1;
2443 if (TARGET_VFP)
2444 return (TARGET_BIG_END ? 1 : 0);
2446 return 1;
2449 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2450 for a call to a function whose data type is FNTYPE.
2451 For a library call, FNTYPE is NULL. */
2452 void
2453 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2454 rtx libname ATTRIBUTE_UNUSED,
2455 tree fndecl ATTRIBUTE_UNUSED)
2457 /* On the ARM, the offset starts at 0. */
2458 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2459 pcum->iwmmxt_nregs = 0;
2460 pcum->can_split = true;
2462 pcum->call_cookie = CALL_NORMAL;
2464 if (TARGET_LONG_CALLS)
2465 pcum->call_cookie = CALL_LONG;
2467 /* Check for long call/short call attributes. The attributes
2468 override any command line option. */
2469 if (fntype)
2471 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2472 pcum->call_cookie = CALL_SHORT;
2473 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2474 pcum->call_cookie = CALL_LONG;
2477 /* Varargs vectors are treated the same as long long.
2478 named_count avoids having to change the way arm handles 'named'. */
2479 pcum->named_count = 0;
2480 pcum->nargs = 0;
2482 if (TARGET_REALLY_IWMMXT && fntype)
2484 tree fn_arg;
2486 for (fn_arg = TYPE_ARG_TYPES (fntype);
2487 fn_arg;
2488 fn_arg = TREE_CHAIN (fn_arg))
2489 pcum->named_count += 1;
2491 if (! pcum->named_count)
2492 pcum->named_count = INT_MAX;
2497 /* Return true if mode/type need doubleword alignment. */
2498 bool
2499 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2501 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2502 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2506 /* Determine where to put an argument to a function.
2507 Value is zero to push the argument on the stack,
2508 or a hard register in which to store the argument.
2510 MODE is the argument's machine mode.
2511 TYPE is the data type of the argument (as a tree).
2512 This is null for libcalls where that information may
2513 not be available.
2514 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2515 the preceding args and about the function being called.
2516 NAMED is nonzero if this argument is a named parameter
2517 (otherwise it is an extra parameter matching an ellipsis). */
2520 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2521 tree type, int named)
2523 int nregs;
2525 /* Varargs vectors are treated the same as long long.
2526 named_count avoids having to change the way arm handles 'named'. */
2527 if (TARGET_IWMMXT_ABI
2528 && arm_vector_mode_supported_p (mode)
2529 && pcum->named_count > pcum->nargs + 1)
2531 if (pcum->iwmmxt_nregs <= 9)
2532 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2533 else
2535 pcum->can_split = false;
2536 return NULL_RTX;
2540 /* Put doubleword aligned quantities in even register pairs. */
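/* E.g. under the AAPCS, for f (int, long long) the int lands in r0,
   r1 is skipped, and the long long is passed in the r2/r3 pair.  */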
2541 if (pcum->nregs & 1
2542 && ARM_DOUBLEWORD_ALIGN
2543 && arm_needs_doubleword_align (mode, type))
2544 pcum->nregs++;
2546 if (mode == VOIDmode)
2547 /* Compute operand 2 of the call insn. */
2548 return GEN_INT (pcum->call_cookie);
2550 /* Only allow splitting an arg between regs and memory if all preceding
2551 args were allocated to regs. For args passed by reference we only count
2552 the reference pointer. */
2553 if (pcum->can_split)
2554 nregs = 1;
2555 else
2556 nregs = ARM_NUM_REGS2 (mode, type);
2558 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2559 return NULL_RTX;
2561 return gen_rtx_REG (mode, pcum->nregs);
2564 static int
2565 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2566 tree type, bool named ATTRIBUTE_UNUSED)
2568 int nregs = pcum->nregs;
2570 if (arm_vector_mode_supported_p (mode))
2571 return 0;
2573 if (NUM_ARG_REGS > nregs
2574 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2575 && pcum->can_split)
2576 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2578 return 0;
2581 /* Variable sized types are passed by reference. This is a GCC
2582 extension to the ARM ABI. */
2584 static bool
2585 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2586 enum machine_mode mode ATTRIBUTE_UNUSED,
2587 tree type, bool named ATTRIBUTE_UNUSED)
2589 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2592 /* Encode the current state of the #pragma [no_]long_calls. */
2593 typedef enum
2595 OFF, /* No #pragma [no_]long_calls is in effect. */
2596 LONG, /* #pragma long_calls is in effect. */
2597 SHORT /* #pragma no_long_calls is in effect. */
2598 } arm_pragma_enum;
2600 static arm_pragma_enum arm_pragma_long_calls = OFF;
2602 void
2603 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2605 arm_pragma_long_calls = LONG;
2608 void
2609 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2611 arm_pragma_long_calls = SHORT;
2614 void
2615 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2617 arm_pragma_long_calls = OFF;
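/* The pragmas these three handlers implement are used like this:

       #pragma long_calls
       void far_away (void);      -- gets the long_call treatment
       #pragma long_calls_off
       void nearby (void);        -- back to plain BL calls
*/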
2620 /* Table of machine attributes. */
2621 const struct attribute_spec arm_attribute_table[] =
2623 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2624 /* Function calls made to this symbol must be done indirectly, because
2625 it may lie outside of the 26 bit addressing range of a normal function
2626 call. */
2627 { "long_call", 0, 0, false, true, true, NULL },
2628 /* Whereas these functions are always known to reside within the 26 bit
2629 addressing range. */
2630 { "short_call", 0, 0, false, true, true, NULL },
2631 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2632 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2633 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2634 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2635 #ifdef ARM_PE
2636 /* ARM/PE has three new attributes:
2637 interfacearm - ?
2638 dllexport - for exporting a function/variable that will live in a dll
2639 dllimport - for importing a function/variable from a dll
2641 Microsoft allows multiple declspecs in one __declspec, separating
2642 them with spaces. We do NOT support this. Instead, use __declspec
2643 multiple times.
2645 { "dllimport", 0, 0, true, false, false, NULL },
2646 { "dllexport", 0, 0, true, false, false, NULL },
2647 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2648 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2649 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2650 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2651 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2652 #endif
2653 { NULL, 0, 0, false, false, false, NULL }
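/* Typical uses of the attributes in the table above:

       void f (void) __attribute__ ((long_call));
       void irq_handler (void) __attribute__ ((interrupt ("IRQ")));
       void glue (void) __attribute__ ((naked));
*/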
2656 /* Handle an attribute requiring a FUNCTION_DECL;
2657 arguments as in struct attribute_spec.handler. */
2658 static tree
2659 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2660 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2662 if (TREE_CODE (*node) != FUNCTION_DECL)
2664 warning ("%qs attribute only applies to functions",
2665 IDENTIFIER_POINTER (name));
2666 *no_add_attrs = true;
2669 return NULL_TREE;
2672 /* Handle an "interrupt" or "isr" attribute;
2673 arguments as in struct attribute_spec.handler. */
2674 static tree
2675 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2676 bool *no_add_attrs)
2678 if (DECL_P (*node))
2680 if (TREE_CODE (*node) != FUNCTION_DECL)
2682 warning ("%qs attribute only applies to functions",
2683 IDENTIFIER_POINTER (name));
2684 *no_add_attrs = true;
2686 /* FIXME: the argument if any is checked for type attributes;
2687 should it be checked for decl ones? */
2689 else
2691 if (TREE_CODE (*node) == FUNCTION_TYPE
2692 || TREE_CODE (*node) == METHOD_TYPE)
2694 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2696 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2697 *no_add_attrs = true;
2700 else if (TREE_CODE (*node) == POINTER_TYPE
2701 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2702 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2703 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2705 *node = build_variant_type_copy (*node);
2706 TREE_TYPE (*node) = build_type_attribute_variant
2707 (TREE_TYPE (*node),
2708 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2709 *no_add_attrs = true;
2711 else
2713 /* Possibly pass this attribute on from the type to a decl. */
2714 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2715 | (int) ATTR_FLAG_FUNCTION_NEXT
2716 | (int) ATTR_FLAG_ARRAY_NEXT))
2718 *no_add_attrs = true;
2719 return tree_cons (name, args, NULL_TREE);
2721 else
2723 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2728 return NULL_TREE;
2731 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2732 /* Handle the "notshared" attribute. This attribute is another way of
2733 requesting hidden visibility. ARM's compiler supports
2734 "__declspec(notshared)"; we support the same thing via an
2735 attribute. */
2737 static tree
2738 arm_handle_notshared_attribute (tree *node,
2739 tree name ATTRIBUTE_UNUSED,
2740 tree args ATTRIBUTE_UNUSED,
2741 int flags ATTRIBUTE_UNUSED,
2742 bool *no_add_attrs)
2744 tree decl = TYPE_NAME (*node);
2746 if (decl)
2748 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2749 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2750 *no_add_attrs = false;
2752 return NULL_TREE;
2754 #endif
2756 /* Return 0 if the attributes for two types are incompatible, 1 if they
2757 are compatible, and 2 if they are nearly compatible (which causes a
2758 warning to be generated). */
2759 static int
2760 arm_comp_type_attributes (tree type1, tree type2)
2762 int l1, l2, s1, s2;
2764 /* Check for mismatch of non-default calling convention. */
2765 if (TREE_CODE (type1) != FUNCTION_TYPE)
2766 return 1;
2768 /* Check for mismatched call attributes. */
2769 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2770 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2771 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2772 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2774 /* Only bother to check if an attribute is defined. */
2775 if (l1 | l2 | s1 | s2)
2777 /* If one type has an attribute, the other must have the same attribute. */
2778 if ((l1 != l2) || (s1 != s2))
2779 return 0;
2781 /* Disallow mixed attributes. */
2782 if ((l1 & s2) || (l2 & s1))
2783 return 0;
2786 /* Check for mismatched ISR attribute. */
2787 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2788 if (! l1)
2789 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2790 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2791 if (! l2)
2792 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2793 if (l1 != l2)
2794 return 0;
2796 return 1;
2799 /* Encode long_call or short_call attribute by prefixing
2800 symbol name in DECL with a special character FLAG. */
2801 void
2802 arm_encode_call_attribute (tree decl, int flag)
2804 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2805 int len = strlen (str);
2806 char * newstr;
2808 /* Do not allow weak functions to be treated as short call. */
2809 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2810 return;
2812 newstr = alloca (len + 2);
2813 newstr[0] = flag;
2814 strcpy (newstr + 1, str);
2816 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2817 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
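/* E.g. with a flag character of '*' (purely illustrative -- the real
   characters come from SHORT_CALL_FLAG_CHAR / LONG_CALL_FLAG_CHAR),
   "foo" becomes "*foo", and ENCODED_SHORT_CALL_ATTR_P simply tests
   for that leading character later on.  */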
2820 /* Assigns default attributes to a newly defined type. This is used to
2821 set short_call/long_call attributes for function types of
2822 functions defined inside corresponding #pragma scopes. */
2823 static void
2824 arm_set_default_type_attributes (tree type)
2826 /* Add __attribute__ ((long_call)) to all functions when inside
2827 #pragma long_calls, or __attribute__ ((short_call)) when inside
2828 #pragma no_long_calls. */
2829 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2831 tree type_attr_list, attr_name;
2832 type_attr_list = TYPE_ATTRIBUTES (type);
2834 if (arm_pragma_long_calls == LONG)
2835 attr_name = get_identifier ("long_call");
2836 else if (arm_pragma_long_calls == SHORT)
2837 attr_name = get_identifier ("short_call");
2838 else
2839 return;
2841 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2842 TYPE_ATTRIBUTES (type) = type_attr_list;
2846 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2847 defined within the current compilation unit. If this cannot be
2848 determined, then 0 is returned. */
2849 static int
2850 current_file_function_operand (rtx sym_ref)
2852 /* This is a bit of a fib. A function will have a short call flag
2853 applied to its name if it has the short call attribute, or it has
2854 already been defined within the current compilation unit. */
2855 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2856 return 1;
2858 /* The current function is always defined within the current compilation
2859 unit. If it is a weak definition, however, then this may not be the real
2860 definition of the function, and so we have to say no. */
2861 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2862 && !DECL_WEAK (current_function_decl))
2863 return 1;
2865 /* We cannot make the determination - default to returning 0. */
2866 return 0;
2869 /* Return nonzero if a 32 bit "long_call" should be generated for
2870 this call. We generate a long_call if the function:
2872 a. has an __attribute__ ((long_call))
2873 or b. is within the scope of a #pragma long_calls
2874 or c. the -mlong-calls command line switch has been specified
2875 and either:
2876 1. -ffunction-sections is in effect
2877 or 2. the current function has __attribute__ ((section))
2878 or 3. the target function has __attribute__ ((section))
2880 However we do not generate a long call if the function:
2882 d. has an __attribute__ ((short_call))
2883 or e. is inside the scope of a #pragma no_long_calls
2884 or f. is defined within the current compilation unit.
2886 This function will be called by C fragments contained in the machine
2887 description file. SYM_REF and CALL_COOKIE correspond to the matched
2888 rtl operands. CALL_SYMBOL is used to distinguish between
2889 two different callers of the function. It is set to 1 in the
2890 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2891 and "call_value" patterns. This is because of the difference in the
2892 SYM_REFs passed by these patterns. */
2894 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2896 if (!call_symbol)
2898 if (GET_CODE (sym_ref) != MEM)
2899 return 0;
2901 sym_ref = XEXP (sym_ref, 0);
2904 if (GET_CODE (sym_ref) != SYMBOL_REF)
2905 return 0;
2907 if (call_cookie & CALL_SHORT)
2908 return 0;
2910 if (TARGET_LONG_CALLS)
2912 if (flag_function_sections
2913 || DECL_SECTION_NAME (current_function_decl))
2914 /* c.3 is handled by the definition of the
2915 ARM_DECLARE_FUNCTION_SIZE macro. */
2916 return 1;
2919 if (current_file_function_operand (sym_ref))
2920 return 0;
2922 return (call_cookie & CALL_LONG)
2923 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2924 || TARGET_LONG_CALLS;
2927 /* Return nonzero if it is ok to make a tail-call to DECL. */
2928 static bool
2929 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2931 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2933 if (cfun->machine->sibcall_blocked)
2934 return false;
2936 /* Never tailcall something for which we have no decl, or if we
2937 are in Thumb mode. */
2938 if (decl == NULL || TARGET_THUMB)
2939 return false;
2941 /* Get the calling method. */
2942 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2943 call_type = CALL_SHORT;
2944 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2945 call_type = CALL_LONG;
2947 /* Cannot tail-call to long calls, since these are out of range of
2948 a branch instruction. However, if not compiling PIC, we know
2949 we can reach the symbol if it is in this compilation unit. */
2950 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2951 return false;
2953 /* If we are interworking and the function is not declared static
2954 then we can't tail-call it unless we know that it exists in this
2955 compilation unit (since it might be a Thumb routine). */
2956 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2957 return false;
2959 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2960 if (IS_INTERRUPT (arm_current_func_type ()))
2961 return false;
2963 /* Everything else is ok. */
2964 return true;
2968 /* Addressing mode support functions. */
2970 /* Return nonzero if X is a legitimate immediate operand when compiling
2971 for PIC. */
2973 legitimate_pic_operand_p (rtx x)
2975 if (CONSTANT_P (x)
2976 && flag_pic
2977 && (GET_CODE (x) == SYMBOL_REF
2978 || (GET_CODE (x) == CONST
2979 && GET_CODE (XEXP (x, 0)) == PLUS
2980 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2981 return 0;
2983 return 1;
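/* E.g. when compiling with -fpic, "p = &some_global" must not use the
   bare SYMBOL_REF as an immediate; it is rejected here and the address
   is instead loaded via the GOT by legitimize_pic_address below.  */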
2987 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2989 if (GET_CODE (orig) == SYMBOL_REF
2990 || GET_CODE (orig) == LABEL_REF)
2992 #ifndef AOF_ASSEMBLER
2993 rtx pic_ref, address;
2994 #endif
2995 rtx insn;
2996 int subregs = 0;
2998 if (reg == 0)
3000 if (no_new_pseudos)
3001 abort ();
3002 else
3003 reg = gen_reg_rtx (Pmode);
3005 subregs = 1;
3008 #ifdef AOF_ASSEMBLER
3009 /* The AOF assembler can generate relocations for these directly, and
3010 understands that the PIC register has to be added into the offset. */
3011 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3012 #else
3013 if (subregs)
3014 address = gen_reg_rtx (Pmode);
3015 else
3016 address = reg;
3018 if (TARGET_ARM)
3019 emit_insn (gen_pic_load_addr_arm (address, orig));
3020 else
3021 emit_insn (gen_pic_load_addr_thumb (address, orig));
3023 if ((GET_CODE (orig) == LABEL_REF
3024 || (GET_CODE (orig) == SYMBOL_REF &&
3025 SYMBOL_REF_LOCAL_P (orig)))
3026 && NEED_GOT_RELOC)
3027 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3028 else
3030 pic_ref = gen_const_mem (Pmode,
3031 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3032 address));
3035 insn = emit_move_insn (reg, pic_ref);
3036 #endif
3037 current_function_uses_pic_offset_table = 1;
3038 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3039 by the loop optimizer. */
3040 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3041 REG_NOTES (insn));
3042 return reg;
3044 else if (GET_CODE (orig) == CONST)
3046 rtx base, offset;
3048 if (GET_CODE (XEXP (orig, 0)) == PLUS
3049 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3050 return orig;
3052 if (reg == 0)
3054 if (no_new_pseudos)
3055 abort ();
3056 else
3057 reg = gen_reg_rtx (Pmode);
3060 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3062 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3063 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3064 base == reg ? 0 : reg);
3066 else
3067 abort ();
3069 if (GET_CODE (offset) == CONST_INT)
3071 /* The base register doesn't really matter, we only want to
3072 test the index for the appropriate mode. */
3073 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3075 if (!no_new_pseudos)
3076 offset = force_reg (Pmode, offset);
3077 else
3078 abort ();
3081 if (GET_CODE (offset) == CONST_INT)
3082 return plus_constant (base, INTVAL (offset));
3085 if (GET_MODE_SIZE (mode) > 4
3086 && (GET_MODE_CLASS (mode) == MODE_INT
3087 || TARGET_SOFT_FLOAT))
3089 emit_insn (gen_addsi3 (reg, base, offset));
3090 return reg;
3093 return gen_rtx_PLUS (Pmode, base, offset);
3096 return orig;
3100 /* Find a spare low register to use during the prolog of a function. */
3102 static int
3103 thumb_find_work_register (unsigned long pushed_regs_mask)
3105 int reg;
3107 /* Check the argument registers first as these are call-used. The
3108 register allocation order means that sometimes r3 might be used
3109 but earlier argument registers might not, so check them all. */
3110 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3111 if (!regs_ever_live[reg])
3112 return reg;
3114 /* Before going on to check the call-saved registers we can try a couple
3115 more ways of deducing that r3 is available. The first is when we are
3116 pushing anonymous arguments onto the stack and we have less than 4
3117 registers worth of fixed arguments (*). In this case r3 will be part of
3118 the variable argument list and so we can be sure that it will be
3119 pushed right at the start of the function. Hence it will be available
3120 for the rest of the prologue.
3121 (*): i.e. current_function_pretend_args_size is greater than 0. */
3122 if (cfun->machine->uses_anonymous_args
3123 && current_function_pretend_args_size > 0)
3124 return LAST_ARG_REGNUM;
3126 /* The other case is when we have fixed arguments but less than 4 registers
3127 worth. In this case r3 might be used in the body of the function, but
3128 it is not being used to convey an argument into the function. In theory
3129 we could just check current_function_args_size to see how many bytes are
3130 being passed in argument registers, but it seems that it is unreliable.
3131 Sometimes it will have the value 0 when in fact arguments are being
3132 passed. (See testcase execute/20021111-1.c for an example). So we also
3133 check the args_info.nregs field as well. The problem with this field is
3134 that it makes no allowances for arguments that are passed to the
3135 function but which are not used. Hence we could miss an opportunity
3136 when a function has an unused argument in r3. But it is better to be
3137 safe than to be sorry. */
3138 if (! cfun->machine->uses_anonymous_args
3139 && current_function_args_size >= 0
3140 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3141 && cfun->args_info.nregs < 4)
3142 return LAST_ARG_REGNUM;
3144 /* Otherwise look for a call-saved register that is going to be pushed. */
3145 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3146 if (pushed_regs_mask & (1 << reg))
3147 return reg;
3149 /* Something went wrong - thumb_compute_save_reg_mask()
3150 should have arranged for a suitable register to be pushed. */
3151 abort ();
3155 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3156 low register. */
3158 void
3159 arm_load_pic_register (unsigned int scratch)
3161 #ifndef AOF_ASSEMBLER
3162 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3163 rtx global_offset_table;
3165 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3166 return;
3168 if (!flag_pic)
3169 abort ();
3171 l1 = gen_label_rtx ();
3173 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3174 /* On the ARM the PC register contains 'dot + 8' at the time of the
3175 addition, on the Thumb it is 'dot + 4'. */
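/* Roughly, the ARM-state sequence built here is (labels illustrative,
   rPIC standing for the PIC register):

       ldr   rPIC, .LP1       @ .LP1: .word _GLOBAL_OFFSET_TABLE_-(.LP0+8)
   .LP0:
       add   rPIC, pc, rPIC

   with '+4' replacing '+8' in the Thumb version.  */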
3176 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3177 if (GOT_PCREL)
3178 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3179 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3180 else
3181 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3183 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3185 if (TARGET_ARM)
3187 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3188 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3190 else
3192 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3194 /* We will have pushed the pic register, so should always be
3195 able to find a work register. */
3196 pic_tmp = gen_rtx_REG (SImode, scratch);
3197 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3198 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3200 else
3201 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3202 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3205 /* Need to emit this whether or not we obey regdecls,
3206 since setjmp/longjmp can cause life info to screw up. */
3207 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3208 #endif /* AOF_ASSEMBLER */
3212 /* Return nonzero if X is valid as an ARM state addressing register. */
3213 static int
3214 arm_address_register_rtx_p (rtx x, int strict_p)
3216 int regno;
3218 if (GET_CODE (x) != REG)
3219 return 0;
3221 regno = REGNO (x);
3223 if (strict_p)
3224 return ARM_REGNO_OK_FOR_BASE_P (regno);
3226 return (regno <= LAST_ARM_REGNUM
3227 || regno >= FIRST_PSEUDO_REGISTER
3228 || regno == FRAME_POINTER_REGNUM
3229 || regno == ARG_POINTER_REGNUM);
3232 /* Return nonzero if X is a valid ARM state address operand. */
3234 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3235 int strict_p)
3237 bool use_ldrd;
3238 enum rtx_code code = GET_CODE (x);
3240 if (arm_address_register_rtx_p (x, strict_p))
3241 return 1;
3243 use_ldrd = (TARGET_LDRD
3244 && (mode == DImode
3245 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3247 if (code == POST_INC || code == PRE_DEC
3248 || ((code == PRE_INC || code == POST_DEC)
3249 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3250 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3252 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3253 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3254 && GET_CODE (XEXP (x, 1)) == PLUS
3255 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3257 rtx addend = XEXP (XEXP (x, 1), 1);
3259 /* Don't allow ldrd post increment by register because it's hard
3260 to fixup invalid register choices. */
3261 if (use_ldrd
3262 && GET_CODE (x) == POST_MODIFY
3263 && GET_CODE (addend) == REG)
3264 return 0;
3266 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3267 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3270 /* After reload constants split into minipools will have addresses
3271 from a LABEL_REF. */
3272 else if (reload_completed
3273 && (code == LABEL_REF
3274 || (code == CONST
3275 && GET_CODE (XEXP (x, 0)) == PLUS
3276 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3277 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3278 return 1;
3280 else if (mode == TImode)
3281 return 0;
3283 else if (code == PLUS)
3285 rtx xop0 = XEXP (x, 0);
3286 rtx xop1 = XEXP (x, 1);
3288 return ((arm_address_register_rtx_p (xop0, strict_p)
3289 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3290 || (arm_address_register_rtx_p (xop1, strict_p)
3291 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3294 #if 0
3295 /* Reload currently can't handle MINUS, so disable this for now */
3296 else if (GET_CODE (x) == MINUS)
3298 rtx xop0 = XEXP (x, 0);
3299 rtx xop1 = XEXP (x, 1);
3301 return (arm_address_register_rtx_p (xop0, strict_p)
3302 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3304 #endif
3306 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3307 && code == SYMBOL_REF
3308 && CONSTANT_POOL_ADDRESS_P (x)
3309 && ! (flag_pic
3310 && symbol_mentioned_p (get_pool_constant (x))))
3311 return 1;
3313 return 0;
3316 /* Return nonzero if INDEX is valid for an address index operand in
3317 ARM state. */
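/* E.g. for SImode, "ldr rD, [rN, #4095]" is accepted while #4096 is
   out of range; on ARMv4, HImode goes through ldrh and the limit
   drops to +/-255, as computed at the bottom of this function.  */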
3318 static int
3319 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3320 int strict_p)
3322 HOST_WIDE_INT range;
3323 enum rtx_code code = GET_CODE (index);
3325 /* Standard coprocessor addressing modes. */
3326 if (TARGET_HARD_FLOAT
3327 && (TARGET_FPA || TARGET_MAVERICK)
3328 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3329 || (TARGET_MAVERICK && mode == DImode)))
3330 return (code == CONST_INT && INTVAL (index) < 1024
3331 && INTVAL (index) > -1024
3332 && (INTVAL (index) & 3) == 0);
3334 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3335 return (code == CONST_INT
3336 && INTVAL (index) < 1024
3337 && INTVAL (index) > -1024
3338 && (INTVAL (index) & 3) == 0);
3340 if (arm_address_register_rtx_p (index, strict_p)
3341 && (GET_MODE_SIZE (mode) <= 4))
3342 return 1;
3344 if (mode == DImode || mode == DFmode)
3346 if (code == CONST_INT)
3348 HOST_WIDE_INT val = INTVAL (index);
3350 if (TARGET_LDRD)
3351 return val > -256 && val < 256;
3352 else
3353 return val > -4096 && val < 4092;
3356 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3359 if (GET_MODE_SIZE (mode) <= 4
3360 && ! (arm_arch4
3361 && (mode == HImode
3362 || (mode == QImode && outer == SIGN_EXTEND))))
3364 if (code == MULT)
3366 rtx xiop0 = XEXP (index, 0);
3367 rtx xiop1 = XEXP (index, 1);
3369 return ((arm_address_register_rtx_p (xiop0, strict_p)
3370 && power_of_two_operand (xiop1, SImode))
3371 || (arm_address_register_rtx_p (xiop1, strict_p)
3372 && power_of_two_operand (xiop0, SImode)));
3374 else if (code == LSHIFTRT || code == ASHIFTRT
3375 || code == ASHIFT || code == ROTATERT)
3377 rtx op = XEXP (index, 1);
3379 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3380 && GET_CODE (op) == CONST_INT
3381 && INTVAL (op) > 0
3382 && INTVAL (op) <= 31);
3386 /* For ARM v4 we may be doing a sign-extend operation during the
3387 load. */
3388 if (arm_arch4)
3390 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3391 range = 256;
3392 else
3393 range = 4096;
3395 else
3396 range = (mode == HImode) ? 4095 : 4096;
3398 return (code == CONST_INT
3399 && INTVAL (index) < range
3400 && INTVAL (index) > -range);
3403 /* Return nonzero if X is valid as a Thumb state base register. */
3404 static int
3405 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3407 int regno;
3409 if (GET_CODE (x) != REG)
3410 return 0;
3412 regno = REGNO (x);
3414 if (strict_p)
3415 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3417 return (regno <= LAST_LO_REGNUM
3418 || regno > LAST_VIRTUAL_REGISTER
3419 || regno == FRAME_POINTER_REGNUM
3420 || (GET_MODE_SIZE (mode) >= 4
3421 && (regno == STACK_POINTER_REGNUM
3422 || regno >= FIRST_PSEUDO_REGISTER
3423 || x == hard_frame_pointer_rtx
3424 || x == arg_pointer_rtx)));
3427 /* Return nonzero if x is a legitimate index register. This is the case
3428 for any base register that can access a QImode object. */
3429 inline static int
3430 thumb_index_register_rtx_p (rtx x, int strict_p)
3432 return thumb_base_register_rtx_p (x, QImode, strict_p);
3435 /* Return nonzero if x is a legitimate Thumb-state address.
3437 The AP may be eliminated to either the SP or the FP, so we use the
3438 least common denominator, e.g. SImode, and offsets from 0 to 64.
3440 ??? Verify whether the above is the right approach.
3442 ??? Also, the FP may be eliminated to the SP, so perhaps that
3443 needs special handling also.
3445 ??? Look at how the mips16 port solves this problem. It probably uses
3446 better ways to solve some of these problems.
3448 Although it is not incorrect, we don't accept QImode and HImode
3449 addresses based on the frame pointer or arg pointer until the
3450 reload pass starts. This is so that eliminating such addresses
3451 into stack based ones won't produce impossible code. */
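/* E.g. "ldr r0, [r1, r2]" and "ldr r0, [sp, #1020]" are legitimate
   Thumb addresses, while SP-plus-register and byte loads with large
   offsets are not and must be synthesized.  */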
3453 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3455 /* ??? Not clear if this is right. Experiment. */
3456 if (GET_MODE_SIZE (mode) < 4
3457 && !(reload_in_progress || reload_completed)
3458 && (reg_mentioned_p (frame_pointer_rtx, x)
3459 || reg_mentioned_p (arg_pointer_rtx, x)
3460 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3461 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3462 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3463 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3464 return 0;
3466 /* Accept any base register. SP only in SImode or larger. */
3467 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3468 return 1;
3470 /* This is PC relative data before arm_reorg runs. */
3471 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3472 && GET_CODE (x) == SYMBOL_REF
3473 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3474 return 1;
3476 /* This is PC relative data after arm_reorg runs. */
3477 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3478 && (GET_CODE (x) == LABEL_REF
3479 || (GET_CODE (x) == CONST
3480 && GET_CODE (XEXP (x, 0)) == PLUS
3481 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3482 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3483 return 1;
3485 /* Post-inc indexing only supported for SImode and larger. */
3486 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3487 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3488 return 1;
3490 else if (GET_CODE (x) == PLUS)
3492 /* REG+REG address can be any two index registers. */
3493 /* We disallow FRAME+REG addressing since we know that FRAME
3494 will be replaced with STACK, and SP relative addressing only
3495 permits SP+OFFSET. */
3496 if (GET_MODE_SIZE (mode) <= 4
3497 && XEXP (x, 0) != frame_pointer_rtx
3498 && XEXP (x, 1) != frame_pointer_rtx
3499 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3500 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3501 return 1;
3503 /* REG+const has 5-7 bit offset for non-SP registers. */
3504 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3505 || XEXP (x, 0) == arg_pointer_rtx)
3506 && GET_CODE (XEXP (x, 1)) == CONST_INT
3507 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3508 return 1;
3510 /* REG+const has 10 bit offset for SP, but only SImode and
3511 larger is supported. */
3512 /* ??? Should probably check for DI/DFmode overflow here
3513 just like GO_IF_LEGITIMATE_OFFSET does. */
3514 else if (GET_CODE (XEXP (x, 0)) == REG
3515 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3516 && GET_MODE_SIZE (mode) >= 4
3517 && GET_CODE (XEXP (x, 1)) == CONST_INT
3518 && INTVAL (XEXP (x, 1)) >= 0
3519 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3520 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3521 return 1;
3523 else if (GET_CODE (XEXP (x, 0)) == REG
3524 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3525 && GET_MODE_SIZE (mode) >= 4
3526 && GET_CODE (XEXP (x, 1)) == CONST_INT
3527 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3528 return 1;
3531 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3532 && GET_MODE_SIZE (mode) == 4
3533 && GET_CODE (x) == SYMBOL_REF
3534 && CONSTANT_POOL_ADDRESS_P (x)
3535 && !(flag_pic
3536 && symbol_mentioned_p (get_pool_constant (x))))
3537 return 1;
3539 return 0;
3542 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3543 instruction of mode MODE. */
3545 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3547 switch (GET_MODE_SIZE (mode))
3549 case 1:
3550 return val >= 0 && val < 32;
3552 case 2:
3553 return val >= 0 && val < 64 && (val & 1) == 0;
3555 default:
3556 return (val >= 0
3557 && (val + GET_MODE_SIZE (mode)) <= 128
3558 && (val & 3) == 0);
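/* So byte accesses reach offsets 0-31, halfwords 0-62 (even only),
   and words 0-124 (word-aligned); larger modes such as DImode must
   also fit entirely below offset 128.  */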
3562 /* Try machine-dependent ways of modifying an illegitimate address
3563 to be legitimate. If we find one, return the new, valid address. */
3565 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3567 if (GET_CODE (x) == PLUS)
3569 rtx xop0 = XEXP (x, 0);
3570 rtx xop1 = XEXP (x, 1);
3572 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3573 xop0 = force_reg (SImode, xop0);
3575 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3576 xop1 = force_reg (SImode, xop1);
3578 if (ARM_BASE_REGISTER_RTX_P (xop0)
3579 && GET_CODE (xop1) == CONST_INT)
3581 HOST_WIDE_INT n, low_n;
3582 rtx base_reg, val;
3583 n = INTVAL (xop1);
3585 /* VFP addressing modes actually allow greater offsets, but for
3586 now we just stick with the lowest common denominator. */
3587 if (mode == DImode
3588 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3590 low_n = n & 0x0f;
3591 n &= ~0x0f;
3592 if (low_n > 4)
3594 n += 16;
3595 low_n -= 16;
3598 else
3600 low_n = ((mode) == TImode ? 0
3601 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3602 n -= low_n;
3605 base_reg = gen_reg_rtx (SImode);
3606 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3607 GEN_INT (n)), NULL_RTX);
3608 emit_move_insn (base_reg, val);
3609 x = (low_n == 0 ? base_reg
3610 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3612 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3613 x = gen_rtx_PLUS (SImode, xop0, xop1);
3616 /* XXX We don't allow MINUS any more -- see comment in
3617 arm_legitimate_address_p (). */
3618 else if (GET_CODE (x) == MINUS)
3620 rtx xop0 = XEXP (x, 0);
3621 rtx xop1 = XEXP (x, 1);
3623 if (CONSTANT_P (xop0))
3624 xop0 = force_reg (SImode, xop0);
3626 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3627 xop1 = force_reg (SImode, xop1);
3629 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3630 x = gen_rtx_MINUS (SImode, xop0, xop1);
3633 if (flag_pic)
3635 /* We need to find and carefully transform any SYMBOL and LABEL
3636 references; so go back to the original address expression. */
3637 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3639 if (new_x != orig_x)
3640 x = new_x;
3643 return x;
3647 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3648 to be legitimate. If we find one, return the new, valid address. */
3650 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3652 if (GET_CODE (x) == PLUS
3653 && GET_CODE (XEXP (x, 1)) == CONST_INT
3654 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3655 || INTVAL (XEXP (x, 1)) < 0))
3657 rtx xop0 = XEXP (x, 0);
3658 rtx xop1 = XEXP (x, 1);
3659 HOST_WIDE_INT offset = INTVAL (xop1);
3661 /* Try and fold the offset into a biasing of the base register and
3662 then offsetting that. Don't do this when optimizing for space
3663 since it can cause too many CSEs. */
3664 if (optimize_size && offset >= 0
3665 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3667 HOST_WIDE_INT delta;
3669 if (offset >= 256)
3670 delta = offset - (256 - GET_MODE_SIZE (mode));
3671 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3672 delta = 31 * GET_MODE_SIZE (mode);
3673 else
3674 delta = offset & (~31 * GET_MODE_SIZE (mode));
3676 xop0 = force_operand (plus_constant (xop0, offset - delta),
3677 NULL_RTX);
3678 x = plus_constant (xop0, delta);
3680 else if (offset < 0 && offset > -256)
3681 /* Small negative offsets are best done with a subtract before the
3682 dereference; forcing these into a register normally takes two
3683 instructions. */
3684 x = force_operand (x, NULL_RTX);
3685 else
3687 /* For the remaining cases, force the constant into a register. */
3688 xop1 = force_reg (SImode, xop1);
3689 x = gen_rtx_PLUS (SImode, xop0, xop1);
3692 else if (GET_CODE (x) == PLUS
3693 && s_register_operand (XEXP (x, 1), SImode)
3694 && !s_register_operand (XEXP (x, 0), SImode))
3696 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3698 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3701 if (flag_pic)
3703 /* We need to find and carefully transform any SYMBOL and LABEL
3704 references; so go back to the original address expression. */
3705 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3707 if (new_x != orig_x)
3708 x = new_x;
3711 return x;
3716 #define REG_OR_SUBREG_REG(X) \
3717 (GET_CODE (X) == REG \
3718 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3720 #define REG_OR_SUBREG_RTX(X) \
3721 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3723 #ifndef COSTS_N_INSNS
3724 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3725 #endif
3726 static inline int
3727 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3729 enum machine_mode mode = GET_MODE (x);
3731 switch (code)
3733 case ASHIFT:
3734 case ASHIFTRT:
3735 case LSHIFTRT:
3736 case ROTATERT:
3737 case PLUS:
3738 case MINUS:
3739 case COMPARE:
3740 case NEG:
3741 case NOT:
3742 return COSTS_N_INSNS (1);
3744 case MULT:
3745 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3747 int cycles = 0;
3748 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3750 while (i)
3752 i >>= 2;
3753 cycles++;
3755 return COSTS_N_INSNS (2) + cycles;
3757 return COSTS_N_INSNS (1) + 16;
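/* E.g. a constant multiplier of 0x64 has four base-4 digits, so the
   loop above yields COSTS_N_INSNS (2) + 4, presumably modelling the
   early-terminating Thumb multiply.  */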
3759 case SET:
3760 return (COSTS_N_INSNS (1)
3761 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3762 + (GET_CODE (SET_DEST (x)) == MEM)));
3764 case CONST_INT:
3765 if (outer == SET)
3767 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3768 return 0;
3769 if (thumb_shiftable_const (INTVAL (x)))
3770 return COSTS_N_INSNS (2);
3771 return COSTS_N_INSNS (3);
3773 else if ((outer == PLUS || outer == COMPARE)
3774 && INTVAL (x) < 256 && INTVAL (x) > -256)
3775 return 0;
3776 else if (outer == AND
3777 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3778 return COSTS_N_INSNS (1);
3779 else if (outer == ASHIFT || outer == ASHIFTRT
3780 || outer == LSHIFTRT)
3781 return 0;
3782 return COSTS_N_INSNS (2);
3784 case CONST:
3785 case CONST_DOUBLE:
3786 case LABEL_REF:
3787 case SYMBOL_REF:
3788 return COSTS_N_INSNS (3);
3790 case UDIV:
3791 case UMOD:
3792 case DIV:
3793 case MOD:
3794 return 100;
3796 case TRUNCATE:
3797 return 99;
3799 case AND:
3800 case XOR:
3801 case IOR:
3802 /* XXX guess. */
3803 return 8;
3805 case MEM:
3806 /* XXX another guess. */
3807 /* Memory costs quite a lot for the first word, but subsequent words
3808 load at the equivalent of a single insn each. */
3809 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3810 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3811 ? 4 : 0));
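/* [Editorial note: with UNITS_PER_WORD == 4 this charges 10 for the first
   word and 4 per further word -- an SImode load costs 10, a DImode load
   10 + 4 * ((8 - 1) / 4) = 14 -- plus 4 more when the address is a
   constant-pool reference.]  */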
3813 case IF_THEN_ELSE:
3814 /* XXX a guess. */
3815 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3816 return 14;
3817 return 2;
3819 case ZERO_EXTEND:
3820 /* XXX still guessing. */
3821 switch (GET_MODE (XEXP (x, 0)))
3823 case QImode:
3824 return (1 + (mode == DImode ? 4 : 0)
3825 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3827 case HImode:
3828 return (4 + (mode == DImode ? 4 : 0)
3829 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3831 case SImode:
3832 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3834 default:
3835 return 99;
3838 default:
3839 return 99;
3844 /* Worker routine for arm_rtx_costs. */
3845 static inline int
3846 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3848 enum machine_mode mode = GET_MODE (x);
3849 enum rtx_code subcode;
3850 int extra_cost;
3852 switch (code)
3854 case MEM:
3855 /* Memory costs quite a lot for the first word, but subsequent words
3856 load at the equivalent of a single insn each. */
3857 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3858 + (GET_CODE (x) == SYMBOL_REF
3859 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3861 case DIV:
3862 case MOD:
3863 case UDIV:
3864 case UMOD:
3865 return optimize_size ? COSTS_N_INSNS (2) : 100;
3867 case ROTATE:
3868 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3869 return 4;
3870 /* Fall through */
3871 case ROTATERT:
3872 if (mode != SImode)
3873 return 8;
3874 /* Fall through */
3875 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3876 if (mode == DImode)
3877 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3878 + ((GET_CODE (XEXP (x, 0)) == REG
3879 || (GET_CODE (XEXP (x, 0)) == SUBREG
3880 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3881 ? 0 : 8));
3882 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3883 || (GET_CODE (XEXP (x, 0)) == SUBREG
3884 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3885 ? 0 : 4)
3886 + ((GET_CODE (XEXP (x, 1)) == REG
3887 || (GET_CODE (XEXP (x, 1)) == SUBREG
3888 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3889 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3890 ? 0 : 4));
3892 case MINUS:
3893 if (mode == DImode)
3894 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3895 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3896 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3897 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3898 ? 0 : 8));
3900 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3901 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3902 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3903 && arm_const_double_rtx (XEXP (x, 1))))
3904 ? 0 : 8)
3905 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3906 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3907 && arm_const_double_rtx (XEXP (x, 0))))
3908 ? 0 : 8));
3910 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3911 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3912 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3913 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3914 || subcode == ASHIFTRT || subcode == LSHIFTRT
3915 || subcode == ROTATE || subcode == ROTATERT
3916 || (subcode == MULT
3917 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3918 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3919 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3920 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3921 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3922 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3923 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3924 return 1;
3925 /* Fall through */
3927 case PLUS:
3928 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3929 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3930 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3931 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3932 && arm_const_double_rtx (XEXP (x, 1))))
3933 ? 0 : 8));
3935 /* Fall through */
3936 case AND: case XOR: case IOR:
3937 extra_cost = 0;
3939 /* Normally the frame registers will be spilt into reg+const during
3940 reload, so it is a bad idea to combine them with other instructions,
3941 since then they might not be moved outside of loops. As a compromise
3942 we allow integration with ops that have a constant as their second
3943 operand. */
3944 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3945 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3946 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3947 || (REG_OR_SUBREG_REG (XEXP (x, 1))
3948 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
3949 extra_cost = 4;
3951 if (mode == DImode)
3952 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3953 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3954 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3955 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3956 ? 0 : 8));
3958 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3959 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3960 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3961 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3962 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3963 ? 0 : 4));
3965 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3966 return (1 + extra_cost
3967 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3968 || subcode == LSHIFTRT || subcode == ASHIFTRT
3969 || subcode == ROTATE || subcode == ROTATERT
3970 || (subcode == MULT
3971 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3972 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3973 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3974 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3975 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3976 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3977 ? 0 : 4));
3979 return 8;
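/* [Editorial note: with the penalty above, (plus frame_pointer (reg))
   costs 1 + 4 while (plus frame_pointer (const_int 8)) stays at 1; the
   reg+const form is exactly what reload will create, so combining it is
   harmless.]  */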
3981 case MULT:
3982 /* This should have been handled by the CPU specific routines. */
3983 abort ();
3985 case TRUNCATE:
3986 if (arm_arch3m && mode == SImode
3987 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3988 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3989 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3990 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3991 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3992 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3993 return 8;
3994 return 99;
3996 case NEG:
3997 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3998 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3999 /* Fall through */
4000 case NOT:
4001 if (mode == DImode)
4002 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4004 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4006 case IF_THEN_ELSE:
4007 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4008 return 14;
4009 return 2;
4011 case COMPARE:
4012 return 1;
4014 case ABS:
4015 return 4 + (mode == DImode ? 4 : 0);
4017 case SIGN_EXTEND:
4018 if (GET_MODE (XEXP (x, 0)) == QImode)
4019 return (4 + (mode == DImode ? 4 : 0)
4020 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4021 /* Fall through */
4022 case ZERO_EXTEND:
4023 switch (GET_MODE (XEXP (x, 0)))
4025 case QImode:
4026 return (1 + (mode == DImode ? 4 : 0)
4027 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4029 case HImode:
4030 return (4 + (mode == DImode ? 4 : 0)
4031 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4033 case SImode:
4034 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4036 case V8QImode:
4037 case V4HImode:
4038 case V2SImode:
4039 case V4QImode:
4040 case V2HImode:
4041 return 1;
4043 default:
4044 break;
4046 abort ();
4048 case CONST_INT:
4049 if (const_ok_for_arm (INTVAL (x)))
4050 return outer == SET ? 2 : -1;
4051 else if (outer == AND
4052 && const_ok_for_arm (~INTVAL (x)))
4053 return -1;
4054 else if ((outer == COMPARE
4055 || outer == PLUS || outer == MINUS)
4056 && const_ok_for_arm (-INTVAL (x)))
4057 return -1;
4058 else
4059 return 5;
4061 case CONST:
4062 case LABEL_REF:
4063 case SYMBOL_REF:
4064 return 6;
4066 case CONST_DOUBLE:
4067 if (arm_const_double_rtx (x))
4068 return outer == SET ? 2 : -1;
4069 else if ((outer == COMPARE || outer == PLUS)
4070 && neg_const_double_rtx_ok_for_fpa (x))
4071 return -1;
4072 return 7;
4074 default:
4075 return 99;
4079 /* RTX costs when optimizing for size. */
4080 static bool
4081 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4083 enum machine_mode mode = GET_MODE (x);
4085 if (TARGET_THUMB)
4087 /* XXX TBD. For now, use the standard costs. */
4088 *total = thumb_rtx_costs (x, code, outer_code);
4089 return true;
4092 switch (code)
4094 case MEM:
4095 /* A memory access costs 1 insn if the mode is small or the address is
4096 a single register; otherwise it costs one insn per word. */
4097 if (REG_P (XEXP (x, 0)))
4098 *total = COSTS_N_INSNS (1);
4099 else
4100 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4101 return true;
4103 case DIV:
4104 case MOD:
4105 case UDIV:
4106 case UMOD:
4107 /* Needs a libcall, so it costs about this. */
4108 *total = COSTS_N_INSNS (2);
4109 return false;
4111 case ROTATE:
4112 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4114 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4115 return true;
4117 /* Fall through */
4118 case ROTATERT:
4119 case ASHIFT:
4120 case LSHIFTRT:
4121 case ASHIFTRT:
4122 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4124 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4125 return true;
4127 else if (mode == SImode)
4129 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4130 /* Slightly disparage register shifts, but not by much. */
4131 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4132 *total += 1 + rtx_cost (XEXP (x, 1), code);
4133 return true;
4136 /* Needs a libcall. */
4137 *total = COSTS_N_INSNS (2);
4138 return false;
4140 case MINUS:
4141 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4143 *total = COSTS_N_INSNS (1);
4144 return false;
4147 if (mode == SImode)
4149 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4150 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4152 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4153 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4154 || subcode1 == ROTATE || subcode1 == ROTATERT
4155 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4156 || subcode1 == ASHIFTRT)
4158 /* It's just the cost of the two operands. */
4159 *total = 0;
4160 return false;
4163 *total = COSTS_N_INSNS (1);
4164 return false;
4167 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4168 return false;
4170 case PLUS:
4171 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4173 *total = COSTS_N_INSNS (1);
4174 return false;
4177 /* Fall through */
4178 case AND: case XOR: case IOR:
4179 if (mode == SImode)
4181 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4183 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4184 || subcode == LSHIFTRT || subcode == ASHIFTRT
4185 || (code == AND && subcode == NOT))
4187 /* It's just the cost of the two operands. */
4188 *total = 0;
4189 return false;
4193 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4194 return false;
4196 case MULT:
4197 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4198 return false;
4200 case NEG:
4201 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4202 { *total = COSTS_N_INSNS (1); return false; }
4203 /* Fall through */
4204 case NOT:
4205 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4207 return false;
4209 case IF_THEN_ELSE:
4210 *total = 0;
4211 return false;
4213 case COMPARE:
4214 if (cc_register (XEXP (x, 0), VOIDmode))
4215 *total = 0;
4216 else
4217 *total = COSTS_N_INSNS (1);
4218 return false;
4220 case ABS:
4221 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4222 *total = COSTS_N_INSNS (1);
4223 else
4224 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4225 return false;
4227 case SIGN_EXTEND:
4228 *total = 0;
4229 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4231 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4232 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4234 if (mode == DImode)
4235 *total += COSTS_N_INSNS (1);
4236 return false;
4238 case ZERO_EXTEND:
4239 *total = 0;
4240 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4242 switch (GET_MODE (XEXP (x, 0)))
4244 case QImode:
4245 *total += COSTS_N_INSNS (1);
4246 break;
4248 case HImode:
4249 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4251 case SImode:
4252 break;
4254 default:
4255 *total += COSTS_N_INSNS (2);
4259 if (mode == DImode)
4260 *total += COSTS_N_INSNS (1);
4262 return false;
4264 case CONST_INT:
4265 if (const_ok_for_arm (INTVAL (x)))
4266 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4267 else if (const_ok_for_arm (~INTVAL (x)))
4268 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4269 else if (const_ok_for_arm (-INTVAL (x)))
4271 if (outer_code == COMPARE || outer_code == PLUS
4272 || outer_code == MINUS)
4273 *total = 0;
4274 else
4275 *total = COSTS_N_INSNS (1);
4277 else
4278 *total = COSTS_N_INSNS (2);
4279 return true;
4281 case CONST:
4282 case LABEL_REF:
4283 case SYMBOL_REF:
4284 *total = COSTS_N_INSNS (2);
4285 return true;
4287 case CONST_DOUBLE:
4288 *total = COSTS_N_INSNS (4);
4289 return true;
4291 default:
4292 if (mode != VOIDmode)
4293 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4294 else
4295 *total = COSTS_N_INSNS (4); /* Who knows? */
4296 return false;
4300 /* RTX costs for cores with a slow MUL implementation. */
4302 static bool
4303 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4305 enum machine_mode mode = GET_MODE (x);
4307 if (TARGET_THUMB)
4309 *total = thumb_rtx_costs (x, code, outer_code);
4310 return true;
4313 switch (code)
4315 case MULT:
4316 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4317 || mode == DImode)
4319 *total = 30;
4320 return true;
4323 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4325 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4326 & (unsigned HOST_WIDE_INT) 0xffffffff);
4327 int cost, const_ok = const_ok_for_arm (i);
4328 int j, booth_unit_size;
4330 /* Tune as appropriate. */
4331 cost = const_ok ? 4 : 8;
4332 booth_unit_size = 2;
4333 for (j = 0; i && j < 32; j += booth_unit_size)
4335 i >>= booth_unit_size;
4336 cost += 2;
4339 *total = cost;
4340 return true;
4343 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4344 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4345 return true;
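/* [Editorial note: the loop above models a 2-bit-per-cycle Booth
   multiplier, e.g. (mult r0 0x55): 0x55 -> 0x15 -> 0x5 -> 0x1 -> 0 is
   four steps, so the cost is 4 + 4 * 2 = 12.  The fast-multiply variant
   below retires 8 bits per step and prices the same multiply at
   4 + 2 = 6.]  */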
4347 default:
4348 *total = arm_rtx_costs_1 (x, code, outer_code);
4349 return true;
4354 /* RTX cost for cores with a fast multiply unit (M variants). */
4356 static bool
4357 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4359 enum machine_mode mode = GET_MODE (x);
4361 if (TARGET_THUMB)
4363 *total = thumb_rtx_costs (x, code, outer_code);
4364 return true;
4367 switch (code)
4369 case MULT:
4370 /* There is no point basing this on the tuning, since it is always the
4371 fast variant if it exists at all. */
4372 if (mode == DImode
4373 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4374 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4375 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4377 *total = 8;
4378 return true;
4382 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4383 || mode == DImode)
4385 *total = 30;
4386 return true;
4389 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4391 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4392 & (unsigned HOST_WIDE_INT) 0xffffffff);
4393 int cost, const_ok = const_ok_for_arm (i);
4394 int j, booth_unit_size;
4396 /* Tune as appropriate. */
4397 cost = const_ok ? 4 : 8;
4398 booth_unit_size = 8;
4399 for (j = 0; i && j < 32; j += booth_unit_size)
4401 i >>= booth_unit_size;
4402 cost += 2;
4405 *total = cost;
4406 return true;
4409 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4410 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4411 return true;
4413 default:
4414 *total = arm_rtx_costs_1 (x, code, outer_code);
4415 return true;
4420 /* RTX cost for XScale CPUs. */
4422 static bool
4423 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4425 enum machine_mode mode = GET_MODE (x);
4427 if (TARGET_THUMB)
4429 *total = thumb_rtx_costs (x, code, outer_code);
4430 return true;
4433 switch (code)
4435 case MULT:
4436 /* There is no point basing this on the tuning, since it is always the
4437 fast variant if it exists at all. */
4438 if (mode == DImode
4439 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4440 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4441 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4443 *total = 8;
4444 return true;
4448 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4449 || mode == DImode)
4451 *total = 30;
4452 return true;
4455 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4457 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4458 & (unsigned HOST_WIDE_INT) 0xffffffff);
4459 int cost, const_ok = const_ok_for_arm (i);
4460 unsigned HOST_WIDE_INT masked_const;
4462 /* The cost will be related to two insns.
4463 First a load of the constant (MOV or LDR), then a multiply. */
4464 cost = 2;
4465 if (! const_ok)
4466 cost += 1; /* LDR is probably more expensive because
4467 of longer result latency. */
4468 masked_const = i & 0xffff8000;
4469 if (masked_const != 0 && masked_const != 0xffff8000)
4471 masked_const = i & 0xf8000000;
4472 if (masked_const == 0 || masked_const == 0xf8000000)
4473 cost += 1;
4474 else
4475 cost += 2;
4477 *total = cost;
4478 return true;
4481 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4482 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4483 return true;
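/* [Editorial note: the two masks above test how far the multiplier's
   significant bits extend, since XScale terminates the multiply early for
   small (or small negative) values.  E.g. 0x12345678 has significant bits
   beyond both cut-offs, so the multiply terminates late and the cost is
   2 + 1 (LDR for the non-immediate constant) + 2 = 5.]  */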
4485 case COMPARE:
4486 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4487 will stall until the multiplication is complete. */
4488 if (GET_CODE (XEXP (x, 0)) == MULT)
4489 *total = 4 + rtx_cost (XEXP (x, 0), code);
4490 else
4491 *total = arm_rtx_costs_1 (x, code, outer_code);
4492 return true;
4494 default:
4495 *total = arm_rtx_costs_1 (x, code, outer_code);
4496 return true;
4501 /* RTX costs for 9e (and later) cores. */
4503 static bool
4504 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4506 enum machine_mode mode = GET_MODE (x);
4507 int nonreg_cost;
4508 int cost;
4510 if (TARGET_THUMB)
4512 switch (code)
4514 case MULT:
4515 *total = COSTS_N_INSNS (3);
4516 return true;
4518 default:
4519 *total = thumb_rtx_costs (x, code, outer_code);
4520 return true;
4524 switch (code)
4526 case MULT:
4527 /* There is no point basing this on the tuning, since it is always the
4528 fast variant if it exists at all. */
4529 if (mode == DImode
4530 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4531 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4532 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4534 *total = 3;
4535 return true;
4539 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4541 *total = 30;
4542 return true;
4544 if (mode == DImode)
4546 cost = 7;
4547 nonreg_cost = 8;
4549 else
4551 cost = 2;
4552 nonreg_cost = 4;
4556 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4557 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4558 return true;
4560 default:
4561 *total = arm_rtx_costs_1 (x, code, outer_code);
4562 return true;
4565 /* All address computations that can be done are free, but rtx cost returns
4566 the same for practically all of them. So we weight the different types
4567 of address here in the order (most preferred first):
4568 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4569 static inline int
4570 arm_arm_address_cost (rtx x)
4572 enum rtx_code c = GET_CODE (x);
4574 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4575 return 0;
4576 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4577 return 10;
4579 if (c == PLUS || c == MINUS)
4581 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4582 return 2;
4584 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4585 return 3;
4587 return 4;
4590 return 6;
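/* [Editorial sketch of the resulting weights as the code reads them,
   lower being preferred: pre/post inc/dec 0; (plus (const_int ...) (reg))
   2; a sum involving a shift or multiply 3; other reg+reg or reg+const
   sums 4; a plain (reg) 6; mem/label/symbol 10.]  */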
4593 static inline int
4594 arm_thumb_address_cost (rtx x)
4596 enum rtx_code c = GET_CODE (x);
4598 if (c == REG)
4599 return 1;
4600 if (c == PLUS
4601 && GET_CODE (XEXP (x, 0)) == REG
4602 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4603 return 1;
4605 return 2;
4608 static int
4609 arm_address_cost (rtx x)
4611 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4614 static int
4615 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4617 rtx i_pat, d_pat;
4619 /* Some true dependencies can have a higher cost depending
4620 on precisely how certain input operands are used. */
4621 if (arm_tune_xscale
4622 && REG_NOTE_KIND (link) == 0
4623 && recog_memoized (insn) >= 0
4624 && recog_memoized (dep) >= 0)
4626 int shift_opnum = get_attr_shift (insn);
4627 enum attr_type attr_type = get_attr_type (dep);
4629 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4630 operand for INSN. If we have a shifted input operand and the
4631 instruction we depend on is another ALU instruction, then we may
4632 have to account for an additional stall. */
4633 if (shift_opnum != 0
4634 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4636 rtx shifted_operand;
4637 int opno;
4639 /* Get the shifted operand. */
4640 extract_insn (insn);
4641 shifted_operand = recog_data.operand[shift_opnum];
4643 /* Iterate over all the operands in DEP. If we write an operand
4644 that overlaps with SHIFTED_OPERAND, then we have to increase the
4645 cost of this dependency. */
4646 extract_insn (dep);
4647 preprocess_constraints ();
4648 for (opno = 0; opno < recog_data.n_operands; opno++)
4650 /* We can ignore strict inputs. */
4651 if (recog_data.operand_type[opno] == OP_IN)
4652 continue;
4654 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4655 shifted_operand))
4656 return 2;
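/* [Editorial example of the bump above, assuming XScale tuning:

	DEP:	add	r1, r2, r3
	INSN:	add	r4, r5, r1, lsl #2

   r1 is written by DEP and used as INSN's shifted operand, so the
   dependency cost is raised to 2 to model the extra shifter stall.]  */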
4661 /* XXX This is not strictly true for the FPA. */
4662 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4663 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4664 return 0;
4666 /* Call insns don't incur a stall, even if they follow a load. */
4667 if (REG_NOTE_KIND (link) == 0
4668 && GET_CODE (insn) == CALL_INSN)
4669 return 1;
4671 if ((i_pat = single_set (insn)) != NULL
4672 && GET_CODE (SET_SRC (i_pat)) == MEM
4673 && (d_pat = single_set (dep)) != NULL
4674 && GET_CODE (SET_DEST (d_pat)) == MEM)
4676 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4677 /* This is a load after a store; there is no conflict if the load reads
4678 from a cached area. Assume that loads from the stack, and from the
4679 constant pool are cached, and that others will miss. This is a
4680 hack. */
4682 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4683 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4684 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4685 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4686 return 1;
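/* [Editorial note: e.g. a store followed by a load from [sp, #8] returns
   cost 1 here (stack traffic is assumed to hit the cache), while a load
   through an arbitrary pointer keeps the full COST as a likely miss.]  */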
4689 return cost;
4692 static int fp_consts_inited = 0;
4694 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4695 static const char * const strings_fp[8] =
4697 "0", "1", "2", "3",
4698 "4", "5", "0.5", "10"
4701 static REAL_VALUE_TYPE values_fp[8];
4703 static void
4704 init_fp_table (void)
4706 int i;
4707 REAL_VALUE_TYPE r;
4709 if (TARGET_VFP)
4710 fp_consts_inited = 1;
4711 else
4712 fp_consts_inited = 8;
4714 for (i = 0; i < fp_consts_inited; i++)
4716 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4717 values_fp[i] = r;
4721 /* Return TRUE if rtx X is a valid immediate FP constant. */
4722 int
4723 arm_const_double_rtx (rtx x)
4725 REAL_VALUE_TYPE r;
4726 int i;
4728 if (!fp_consts_inited)
4729 init_fp_table ();
4731 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4732 if (REAL_VALUE_MINUS_ZERO (r))
4733 return 0;
4735 for (i = 0; i < fp_consts_inited; i++)
4736 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4737 return 1;
4739 return 0;
4742 /* Return TRUE if the negation of rtx X is a valid immediate FPA constant. */
4743 int
4744 neg_const_double_rtx_ok_for_fpa (rtx x)
4746 REAL_VALUE_TYPE r;
4747 int i;
4749 if (!fp_consts_inited)
4750 init_fp_table ();
4752 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4753 r = REAL_VALUE_NEGATE (r);
4754 if (REAL_VALUE_MINUS_ZERO (r))
4755 return 0;
4757 for (i = 0; i < 8; i++)
4758 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4759 return 1;
4761 return 0;
4764 /* Predicates for `match_operand' and `match_operator'. */
4766 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4767 int
4768 cirrus_memory_offset (rtx op)
4770 /* Reject eliminable registers. */
4771 if (! (reload_in_progress || reload_completed)
4772 && ( reg_mentioned_p (frame_pointer_rtx, op)
4773 || reg_mentioned_p (arg_pointer_rtx, op)
4774 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4775 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4776 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4777 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4778 return 0;
4780 if (GET_CODE (op) == MEM)
4782 rtx ind;
4784 ind = XEXP (op, 0);
4786 /* Match: (mem (reg)). */
4787 if (GET_CODE (ind) == REG)
4788 return 1;
4790 /* Match:
4791 (mem (plus (reg)
4792 (const))). */
4793 if (GET_CODE (ind) == PLUS
4794 && GET_CODE (XEXP (ind, 0)) == REG
4795 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4796 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4797 return 1;
4800 return 0;
4803 /* Return TRUE if OP is a valid VFP memory address pattern.
4804 WB is true if writeback address modes are allowed. */
4806 int
4807 arm_coproc_mem_operand (rtx op, bool wb)
4809 rtx ind;
4811 /* Reject eliminable registers. */
4812 if (! (reload_in_progress || reload_completed)
4813 && ( reg_mentioned_p (frame_pointer_rtx, op)
4814 || reg_mentioned_p (arg_pointer_rtx, op)
4815 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4816 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4817 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4818 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4819 return FALSE;
4821 /* Constants are converted into offsets from labels. */
4822 if (GET_CODE (op) != MEM)
4823 return FALSE;
4825 ind = XEXP (op, 0);
4827 if (reload_completed
4828 && (GET_CODE (ind) == LABEL_REF
4829 || (GET_CODE (ind) == CONST
4830 && GET_CODE (XEXP (ind, 0)) == PLUS
4831 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4832 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4833 return TRUE;
4835 /* Match: (mem (reg)). */
4836 if (GET_CODE (ind) == REG)
4837 return arm_address_register_rtx_p (ind, 0);
4839 /* Autoincrement addressing modes. */
4840 if (wb
4841 && (GET_CODE (ind) == PRE_INC
4842 || GET_CODE (ind) == POST_INC
4843 || GET_CODE (ind) == PRE_DEC
4844 || GET_CODE (ind) == POST_DEC))
4845 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4847 if (wb
4848 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4849 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4850 && GET_CODE (XEXP (ind, 1)) == PLUS
4851 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4852 ind = XEXP (ind, 1);
4854 /* Match:
4855 (plus (reg)
4856 (const)). */
4857 if (GET_CODE (ind) == PLUS
4858 && GET_CODE (XEXP (ind, 0)) == REG
4859 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4860 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4861 && INTVAL (XEXP (ind, 1)) > -1024
4862 && INTVAL (XEXP (ind, 1)) < 1024
4863 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4864 return TRUE;
4866 return FALSE;
4869 /* Return true if X is a register that will be eliminated later on. */
4870 bool
4871 arm_eliminable_register (rtx x)
4873 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4874 || REGNO (x) == ARG_POINTER_REGNUM
4875 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4876 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4879 /* Return GENERAL_REGS if a scratch register required to reload x to/from
4880 VFP registers. Otherwise return NO_REGS. */
4882 enum reg_class
4883 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4885 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4886 return NO_REGS;
4888 return GENERAL_REGS;
4892 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4893 Used by the Cirrus Maverick code, which has to work around
4894 a hardware bug triggered by such instructions. */
4895 static bool
4896 arm_memory_load_p (rtx insn)
4898 rtx body, lhs, rhs;
4900 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4901 return false;
4903 body = PATTERN (insn);
4905 if (GET_CODE (body) != SET)
4906 return false;
4908 lhs = XEXP (body, 0);
4909 rhs = XEXP (body, 1);
4911 lhs = REG_OR_SUBREG_RTX (lhs);
4913 /* If the destination is not a general purpose
4914 register we do not have to worry. */
4915 if (GET_CODE (lhs) != REG
4916 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4917 return false;
4919 /* As well as loads from memory we also have to react
4920 to loads of invalid constants which will be turned
4921 into loads from the minipool. */
4922 return (GET_CODE (rhs) == MEM
4923 || GET_CODE (rhs) == SYMBOL_REF
4924 || note_invalid_constants (insn, -1, false));
4927 /* Return TRUE if INSN is a Cirrus instruction. */
4928 static bool
4929 arm_cirrus_insn_p (rtx insn)
4931 enum attr_cirrus attr;
4933 /* get_attr aborts on USE and CLOBBER. */
4934 if (!insn
4935 || GET_CODE (insn) != INSN
4936 || GET_CODE (PATTERN (insn)) == USE
4937 || GET_CODE (PATTERN (insn)) == CLOBBER)
4938 return 0;
4940 attr = get_attr_cirrus (insn);
4942 return attr != CIRRUS_NOT;
4945 /* Cirrus reorg for invalid instruction combinations. */
4946 static void
4947 cirrus_reorg (rtx first)
4949 enum attr_cirrus attr;
4950 rtx body = PATTERN (first);
4951 rtx t;
4952 int nops;
4954 /* Any branch must be followed by 2 non-Cirrus instructions. */
4955 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4957 nops = 0;
4958 t = next_nonnote_insn (first);
4960 if (arm_cirrus_insn_p (t))
4961 ++ nops;
4963 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4964 ++ nops;
4966 while (nops --)
4967 emit_insn_after (gen_nop (), first);
4969 return;
4972 /* (float (blah)) is in parallel with a clobber. */
4973 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4974 body = XVECEXP (body, 0, 0);
4976 if (GET_CODE (body) == SET)
4978 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4980 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4981 be followed by a non-Cirrus insn. */
4982 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4984 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4985 emit_insn_after (gen_nop (), first);
4987 return;
4989 else if (arm_memory_load_p (first))
4991 unsigned int arm_regno;
4993 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4994 ldr/cfmv64hr combination where the Rd field is the same
4995 in both instructions must be split with a non Cirrus
4996 insn. Example:
4998 ldr r0, blah
5000 cfmvsr mvf0, r0. */
5002 /* Get Arm register number for ldr insn. */
5003 if (GET_CODE (lhs) == REG)
5004 arm_regno = REGNO (lhs);
5005 else if (GET_CODE (rhs) == REG)
5006 arm_regno = REGNO (rhs);
5007 else
5008 abort ();
5010 /* Next insn. */
5011 first = next_nonnote_insn (first);
5013 if (! arm_cirrus_insn_p (first))
5014 return;
5016 body = PATTERN (first);
5018 /* (float (blah)) is in parallel with a clobber. */
5019 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5020 body = XVECEXP (body, 0, 0);
5022 if (GET_CODE (body) == FLOAT)
5023 body = XEXP (body, 0);
5025 if (get_attr_cirrus (first) == CIRRUS_MOVE
5026 && GET_CODE (XEXP (body, 1)) == REG
5027 && arm_regno == REGNO (XEXP (body, 1)))
5028 emit_insn_after (gen_nop (), first);
5030 return;
5034 /* get_attr aborts on USE and CLOBBER. */
5035 if (!first
5036 || GET_CODE (first) != INSN
5037 || GET_CODE (PATTERN (first)) == USE
5038 || GET_CODE (PATTERN (first)) == CLOBBER)
5039 return;
5041 attr = get_attr_cirrus (first);
5043 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5044 must be followed by a non-coprocessor instruction. */
5045 if (attr == CIRRUS_COMPARE)
5047 nops = 0;
5049 t = next_nonnote_insn (first);
5051 if (arm_cirrus_insn_p (t))
5052 ++ nops;
5054 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5055 ++ nops;
5057 while (nops --)
5058 emit_insn_after (gen_nop (), first);
5060 return;
5064 /* Return TRUE if X references a SYMBOL_REF. */
5065 int
5066 symbol_mentioned_p (rtx x)
5068 const char * fmt;
5069 int i;
5071 if (GET_CODE (x) == SYMBOL_REF)
5072 return 1;
5074 fmt = GET_RTX_FORMAT (GET_CODE (x));
5076 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5078 if (fmt[i] == 'E')
5080 int j;
5082 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5083 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5084 return 1;
5086 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5087 return 1;
5090 return 0;
5093 /* Return TRUE if X references a LABEL_REF. */
5094 int
5095 label_mentioned_p (rtx x)
5097 const char * fmt;
5098 int i;
5100 if (GET_CODE (x) == LABEL_REF)
5101 return 1;
5103 fmt = GET_RTX_FORMAT (GET_CODE (x));
5104 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5106 if (fmt[i] == 'E')
5108 int j;
5110 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5111 if (label_mentioned_p (XVECEXP (x, i, j)))
5112 return 1;
5114 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5115 return 1;
5118 return 0;
5121 enum rtx_code
5122 minmax_code (rtx x)
5124 enum rtx_code code = GET_CODE (x);
5126 if (code == SMAX)
5127 return GE;
5128 else if (code == SMIN)
5129 return LE;
5130 else if (code == UMIN)
5131 return LEU;
5132 else if (code == UMAX)
5133 return GEU;
5135 abort ();
5138 /* Return 1 if memory locations are adjacent. */
5139 int
5140 adjacent_mem_locations (rtx a, rtx b)
5142 /* We don't guarantee to preserve the order of these memory refs. */
5143 if (volatile_refs_p (a) || volatile_refs_p (b))
5144 return 0;
5146 if ((GET_CODE (XEXP (a, 0)) == REG
5147 || (GET_CODE (XEXP (a, 0)) == PLUS
5148 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5149 && (GET_CODE (XEXP (b, 0)) == REG
5150 || (GET_CODE (XEXP (b, 0)) == PLUS
5151 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5153 HOST_WIDE_INT val0 = 0, val1 = 0;
5154 rtx reg0, reg1;
5155 int val_diff;
5157 if (GET_CODE (XEXP (a, 0)) == PLUS)
5159 reg0 = XEXP (XEXP (a, 0), 0);
5160 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5162 else
5163 reg0 = XEXP (a, 0);
5165 if (GET_CODE (XEXP (b, 0)) == PLUS)
5167 reg1 = XEXP (XEXP (b, 0), 0);
5168 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5170 else
5171 reg1 = XEXP (b, 0);
5173 /* Don't accept any offset that will require multiple
5174 instructions to handle, since this would cause the
5175 arith_adjacentmem pattern to output an overlong sequence. */
5176 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5177 return 0;
5179 /* Don't allow an eliminable register: register elimination can make
5180 the offset too large. */
5181 if (arm_eliminable_register (reg0))
5182 return 0;
5184 val_diff = val1 - val0;
5186 if (arm_ld_sched)
5188 /* If the target has load delay slots, then there's no benefit
5189 to using an ldm instruction unless the offset is zero and
5190 we are optimizing for size. */
5191 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5192 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5193 && (val_diff == 4 || val_diff == -4));
5196 return ((REGNO (reg0) == REGNO (reg1))
5197 && (val_diff == 4 || val_diff == -4));
5200 return 0;
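/* [Editorial sketch: loads at [r3, #8] and [r3, #12] -- same base,
   offsets 4 apart, neither volatile nor frame-based -- are accepted,
   letting the arith_adjacentmem pattern emit a single ldm; [r3, #8] with
   [r4, #12], or any volatile pair, is rejected.  On arm_ld_sched cores
   the pair must additionally start at offset 0 or 4 and be a size
   optimization before the ldm is considered worthwhile.]  */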
5203 int
5204 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5205 HOST_WIDE_INT *load_offset)
5207 int unsorted_regs[4];
5208 HOST_WIDE_INT unsorted_offsets[4];
5209 int order[4];
5210 int base_reg = -1;
5211 int i;
5213 /* Can only handle 2, 3, or 4 insns at present,
5214 though could be easily extended if required. */
5215 if (nops < 2 || nops > 4)
5216 abort ();
5218 /* Loop over the operands and check that the memory references are
5219 suitable (i.e. immediate offsets from the same base register). At
5220 the same time, extract the target register, and the memory
5221 offsets. */
5222 for (i = 0; i < nops; i++)
5224 rtx reg;
5225 rtx offset;
5227 /* Convert a subreg of a mem into the mem itself. */
5228 if (GET_CODE (operands[nops + i]) == SUBREG)
5229 operands[nops + i] = alter_subreg (operands + (nops + i));
5231 if (GET_CODE (operands[nops + i]) != MEM)
5232 abort ();
5234 /* Don't reorder volatile memory references; it doesn't seem worth
5235 looking for the case where the order is ok anyway. */
5236 if (MEM_VOLATILE_P (operands[nops + i]))
5237 return 0;
5239 offset = const0_rtx;
5241 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5242 || (GET_CODE (reg) == SUBREG
5243 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5244 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5245 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5246 == REG)
5247 || (GET_CODE (reg) == SUBREG
5248 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5249 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5250 == CONST_INT)))
5252 if (i == 0)
5254 base_reg = REGNO (reg);
5255 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5256 ? REGNO (operands[i])
5257 : REGNO (SUBREG_REG (operands[i])));
5258 order[0] = 0;
5260 else
5262 if (base_reg != (int) REGNO (reg))
5263 /* Not addressed from the same base register. */
5264 return 0;
5266 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5267 ? REGNO (operands[i])
5268 : REGNO (SUBREG_REG (operands[i])));
5269 if (unsorted_regs[i] < unsorted_regs[order[0]])
5270 order[0] = i;
5273 /* If it isn't an integer register, or if it overwrites the
5274 base register but isn't the last insn in the list, then
5275 we can't do this. */
5276 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5277 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5278 return 0;
5280 unsorted_offsets[i] = INTVAL (offset);
5282 else
5283 /* Not a suitable memory address. */
5284 return 0;
5287 /* All the useful information has now been extracted from the
5288 operands into unsorted_regs and unsorted_offsets; additionally,
5289 order[0] has been set to the lowest numbered register in the
5290 list. Sort the registers into order, and check that the memory
5291 offsets are ascending and adjacent. */
5293 for (i = 1; i < nops; i++)
5295 int j;
5297 order[i] = order[i - 1];
5298 for (j = 0; j < nops; j++)
5299 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5300 && (order[i] == order[i - 1]
5301 || unsorted_regs[j] < unsorted_regs[order[i]]))
5302 order[i] = j;
5304 /* Have we found a suitable register? If not, one must be used more
5305 than once. */
5306 if (order[i] == order[i - 1])
5307 return 0;
5309 /* Is the memory address adjacent and ascending? */
5310 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5311 return 0;
5314 if (base)
5316 *base = base_reg;
5318 for (i = 0; i < nops; i++)
5319 regs[i] = unsorted_regs[order[i]];
5321 *load_offset = unsorted_offsets[order[0]];
5324 if (unsorted_offsets[order[0]] == 0)
5325 return 1; /* ldmia */
5327 if (unsorted_offsets[order[0]] == 4)
5328 return 2; /* ldmib */
5330 if (unsorted_offsets[order[nops - 1]] == 0)
5331 return 3; /* ldmda */
5333 if (unsorted_offsets[order[nops - 1]] == -4)
5334 return 4; /* ldmdb */
5336 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5337 if the offset isn't small enough. The reason 2 ldrs are faster
5338 is because these ARMs are able to do more than one cache access
5339 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5340 whilst the ARM8 has a double bandwidth cache. This means that
5341 these cores can do both an instruction fetch and a data fetch in
5342 a single cycle, so the trick of calculating the address into a
5343 scratch register (one of the result regs) and then doing a load
5344 multiple actually becomes slower (and no smaller in code size).
5345 That is the transformation
5347 ldr rd1, [rbase + offset]
5348 ldr rd2, [rbase + offset + 4]
5350 to
5352 add rd1, rbase, offset
5353 ldmia rd1, {rd1, rd2}
5355 produces worse code -- '3 cycles + any stalls on rd2' instead of
5356 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5357 access per cycle, the first sequence could never complete in less
5358 than 6 cycles, whereas the ldm sequence would only take 5 and
5359 would make better use of sequential accesses if not hitting the
5360 cache.
5362 We cheat here and test 'arm_ld_sched' which we currently know to
5363 only be true for the ARM8, ARM9 and StrongARM. If this ever
5364 changes, then the test below needs to be reworked. */
5365 if (nops == 2 && arm_ld_sched)
5366 return 0;
5368 /* Can't do it without setting up the offset; only do this if it takes
5369 no more than one insn. */
5370 return (const_ok_for_arm (unsorted_offsets[order[0]])
5371 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
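/* [Editorial map of the return codes above, for two loads from a common
   base rb into r4/r5:

	offsets { 0,  4}	-> 1	ldmia	rb, {r4, r5}
	offsets { 4,  8}	-> 2	ldmib	rb, {r4, r5}
	offsets {-4,  0}	-> 3	ldmda	rb, {r4, r5}
	offsets {-8, -4}	-> 4	ldmdb	rb, {r4, r5}

   Other offsets give 5 (bias the base with one add/sub first) when the
   bias fits a single instruction, otherwise 0.]  */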
5374 const char *
5375 emit_ldm_seq (rtx *operands, int nops)
5377 int regs[4];
5378 int base_reg;
5379 HOST_WIDE_INT offset;
5380 char buf[100];
5381 int i;
5383 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5385 case 1:
5386 strcpy (buf, "ldm%?ia\t");
5387 break;
5389 case 2:
5390 strcpy (buf, "ldm%?ib\t");
5391 break;
5393 case 3:
5394 strcpy (buf, "ldm%?da\t");
5395 break;
5397 case 4:
5398 strcpy (buf, "ldm%?db\t");
5399 break;
5401 case 5:
5402 if (offset >= 0)
5403 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5404 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5405 (long) offset);
5406 else
5407 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5408 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5409 (long) -offset);
5410 output_asm_insn (buf, operands);
5411 base_reg = regs[0];
5412 strcpy (buf, "ldm%?ia\t");
5413 break;
5415 default:
5416 abort ();
5419 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5420 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5422 for (i = 1; i < nops; i++)
5423 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5424 reg_names[regs[i]]);
5426 strcat (buf, "}\t%@ phole ldm");
5428 output_asm_insn (buf, operands);
5429 return "";
5432 int
5433 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5434 HOST_WIDE_INT * load_offset)
5436 int unsorted_regs[4];
5437 HOST_WIDE_INT unsorted_offsets[4];
5438 int order[4];
5439 int base_reg = -1;
5440 int i;
5442 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5443 extended if required. */
5444 if (nops < 2 || nops > 4)
5445 abort ();
5447 /* Loop over the operands and check that the memory references are
5448 suitable (i.e. immediate offsets from the same base register). At
5449 the same time, extract the target register, and the memory
5450 offsets. */
5451 for (i = 0; i < nops; i++)
5453 rtx reg;
5454 rtx offset;
5456 /* Convert a subreg of a mem into the mem itself. */
5457 if (GET_CODE (operands[nops + i]) == SUBREG)
5458 operands[nops + i] = alter_subreg (operands + (nops + i));
5460 if (GET_CODE (operands[nops + i]) != MEM)
5461 abort ();
5463 /* Don't reorder volatile memory references; it doesn't seem worth
5464 looking for the case where the order is ok anyway. */
5465 if (MEM_VOLATILE_P (operands[nops + i]))
5466 return 0;
5468 offset = const0_rtx;
5470 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5471 || (GET_CODE (reg) == SUBREG
5472 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5473 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5474 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5475 == REG)
5476 || (GET_CODE (reg) == SUBREG
5477 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5478 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5479 == CONST_INT)))
5481 if (i == 0)
5483 base_reg = REGNO (reg);
5484 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5485 ? REGNO (operands[i])
5486 : REGNO (SUBREG_REG (operands[i])));
5487 order[0] = 0;
5489 else
5491 if (base_reg != (int) REGNO (reg))
5492 /* Not addressed from the same base register. */
5493 return 0;
5495 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5496 ? REGNO (operands[i])
5497 : REGNO (SUBREG_REG (operands[i])));
5498 if (unsorted_regs[i] < unsorted_regs[order[0]])
5499 order[0] = i;
5502 /* If it isn't an integer register, then we can't do this. */
5503 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5504 return 0;
5506 unsorted_offsets[i] = INTVAL (offset);
5508 else
5509 /* Not a suitable memory address. */
5510 return 0;
5513 /* All the useful information has now been extracted from the
5514 operands into unsorted_regs and unsorted_offsets; additionally,
5515 order[0] has been set to the lowest numbered register in the
5516 list. Sort the registers into order, and check that the memory
5517 offsets are ascending and adjacent. */
5519 for (i = 1; i < nops; i++)
5521 int j;
5523 order[i] = order[i - 1];
5524 for (j = 0; j < nops; j++)
5525 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5526 && (order[i] == order[i - 1]
5527 || unsorted_regs[j] < unsorted_regs[order[i]]))
5528 order[i] = j;
5530 /* Have we found a suitable register? If not, one must be used more
5531 than once. */
5532 if (order[i] == order[i - 1])
5533 return 0;
5535 /* Is the memory address adjacent and ascending? */
5536 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5537 return 0;
5540 if (base)
5542 *base = base_reg;
5544 for (i = 0; i < nops; i++)
5545 regs[i] = unsorted_regs[order[i]];
5547 *load_offset = unsorted_offsets[order[0]];
5550 if (unsorted_offsets[order[0]] == 0)
5551 return 1; /* stmia */
5553 if (unsorted_offsets[order[0]] == 4)
5554 return 2; /* stmib */
5556 if (unsorted_offsets[order[nops - 1]] == 0)
5557 return 3; /* stmda */
5559 if (unsorted_offsets[order[nops - 1]] == -4)
5560 return 4; /* stmdb */
5562 return 0;
5565 const char *
5566 emit_stm_seq (rtx *operands, int nops)
5568 int regs[4];
5569 int base_reg;
5570 HOST_WIDE_INT offset;
5571 char buf[100];
5572 int i;
5574 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5576 case 1:
5577 strcpy (buf, "stm%?ia\t");
5578 break;
5580 case 2:
5581 strcpy (buf, "stm%?ib\t");
5582 break;
5584 case 3:
5585 strcpy (buf, "stm%?da\t");
5586 break;
5588 case 4:
5589 strcpy (buf, "stm%?db\t");
5590 break;
5592 default:
5593 abort ();
5596 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5597 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5599 for (i = 1; i < nops; i++)
5600 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5601 reg_names[regs[i]]);
5603 strcat (buf, "}\t%@ phole stm");
5605 output_asm_insn (buf, operands);
5606 return "";
5610 /* Routines for use in generating RTL. */
5612 rtx
5613 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5614 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5616 HOST_WIDE_INT offset = *offsetp;
5617 int i = 0, j;
5618 rtx result;
5619 int sign = up ? 1 : -1;
5620 rtx mem, addr;
5622 /* XScale has load-store double instructions, but they have stricter
5623 alignment requirements than load-store multiple, so we cannot
5624 use them.
5626 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5627 the pipeline until completion.
5629 NREGS CYCLES
5631 1 3
5632 2 4
5633 3 5
5634 4 6
5635 An ldr instruction takes 1-3 cycles, but does not block the
5636 pipeline.
5638 NREGS CYCLES
5639 1 1-3
5640 2 2-6
5641 3 3-9
5642 4 4-12
5644 Best case ldr will always win. However, the more ldr instructions
5645 we issue, the less likely we are to be able to schedule them well.
5646 Using ldr instructions also increases code size.
5648 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5649 for counts of 3 or 4 regs. */
5650 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5652 rtx seq;
5654 start_sequence ();
5656 for (i = 0; i < count; i++)
5658 addr = plus_constant (from, i * 4 * sign);
5659 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5660 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5661 offset += 4 * sign;
5664 if (write_back)
5666 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5667 *offsetp = offset;
5670 seq = get_insns ();
5671 end_sequence ();
5673 return seq;
5676 result = gen_rtx_PARALLEL (VOIDmode,
5677 rtvec_alloc (count + (write_back ? 1 : 0)));
5678 if (write_back)
5680 XVECEXP (result, 0, 0)
5681 = gen_rtx_SET (GET_MODE (from), from,
5682 plus_constant (from, count * 4 * sign));
5683 i = 1;
5684 count++;
5687 for (j = 0; i < count; i++, j++)
5689 addr = plus_constant (from, j * 4 * sign);
5690 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5691 XVECEXP (result, 0, i)
5692 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5693 offset += 4 * sign;
5696 if (write_back)
5697 *offsetp = offset;
5699 return result;
5702 rtx
5703 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5704 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5706 HOST_WIDE_INT offset = *offsetp;
5707 int i = 0, j;
5708 rtx result;
5709 int sign = up ? 1 : -1;
5710 rtx mem, addr;
5712 /* See arm_gen_load_multiple for discussion of
5713 the pros/cons of ldm/stm usage for XScale. */
5714 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5716 rtx seq;
5718 start_sequence ();
5720 for (i = 0; i < count; i++)
5722 addr = plus_constant (to, i * 4 * sign);
5723 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5724 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5725 offset += 4 * sign;
5728 if (write_back)
5730 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5731 *offsetp = offset;
5734 seq = get_insns ();
5735 end_sequence ();
5737 return seq;
5740 result = gen_rtx_PARALLEL (VOIDmode,
5741 rtvec_alloc (count + (write_back ? 1 : 0)));
5742 if (write_back)
5744 XVECEXP (result, 0, 0)
5745 = gen_rtx_SET (GET_MODE (to), to,
5746 plus_constant (to, count * 4 * sign));
5747 i = 1;
5748 count++;
5751 for (j = 0; i < count; i++, j++)
5753 addr = plus_constant (to, j * 4 * sign);
5754 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5755 XVECEXP (result, 0, i)
5756 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5757 offset += 4 * sign;
5760 if (write_back)
5761 *offsetp = offset;
5763 return result;
5766 int
5767 arm_gen_movmemqi (rtx *operands)
5769 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5770 HOST_WIDE_INT srcoffset, dstoffset;
5771 int i;
5772 rtx src, dst, srcbase, dstbase;
5773 rtx part_bytes_reg = NULL;
5774 rtx mem;
5776 if (GET_CODE (operands[2]) != CONST_INT
5777 || GET_CODE (operands[3]) != CONST_INT
5778 || INTVAL (operands[2]) > 64
5779 || INTVAL (operands[3]) & 3)
5780 return 0;
5782 dstbase = operands[0];
5783 srcbase = operands[1];
5785 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5786 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5788 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5789 out_words_to_go = INTVAL (operands[2]) / 4;
5790 last_bytes = INTVAL (operands[2]) & 3;
5791 dstoffset = srcoffset = 0;
5793 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5794 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5796 for (i = 0; in_words_to_go >= 2; i+=4)
5798 if (in_words_to_go > 4)
5799 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5800 srcbase, &srcoffset));
5801 else
5802 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5803 FALSE, srcbase, &srcoffset));
5805 if (out_words_to_go)
5807 if (out_words_to_go > 4)
5808 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5809 dstbase, &dstoffset));
5810 else if (out_words_to_go != 1)
5811 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5812 dst, TRUE,
5813 (last_bytes == 0
5814 ? FALSE : TRUE),
5815 dstbase, &dstoffset));
5816 else
5818 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5819 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5820 if (last_bytes != 0)
5822 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5823 dstoffset += 4;
5828 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5829 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5832 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5833 if (out_words_to_go)
5835 rtx sreg;
5837 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5838 sreg = copy_to_reg (mem);
5840 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5841 emit_move_insn (mem, sreg);
5842 in_words_to_go--;
5844 if (in_words_to_go) /* Sanity check */
5845 abort ();
5848 if (in_words_to_go)
5850 if (in_words_to_go < 0)
5851 abort ();
5853 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5854 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5857 if (last_bytes && part_bytes_reg == NULL)
5858 abort ();
5860 if (BYTES_BIG_ENDIAN && last_bytes)
5862 rtx tmp = gen_reg_rtx (SImode);
5864 /* The bytes we want are in the top end of the word. */
5865 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5866 GEN_INT (8 * (4 - last_bytes))));
5867 part_bytes_reg = tmp;
5869 while (last_bytes)
5871 mem = adjust_automodify_address (dstbase, QImode,
5872 plus_constant (dst, last_bytes - 1),
5873 dstoffset + last_bytes - 1);
5874 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5876 if (--last_bytes)
5878 tmp = gen_reg_rtx (SImode);
5879 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5880 part_bytes_reg = tmp;
5885 else
5887 if (last_bytes > 1)
5889 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5890 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5891 last_bytes -= 2;
5892 if (last_bytes)
5894 rtx tmp = gen_reg_rtx (SImode);
5895 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5896 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5897 part_bytes_reg = tmp;
5898 dstoffset += 2;
5902 if (last_bytes)
5904 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5905 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5909 return 1;
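/* [Editorial sketch: a 10-byte copy (operands[2] == 10) comes out
   roughly as

	ldmia	src, {r0, r1, r2}	@ three words in (8 + 2 bytes)
	stmia	dst!, {r0, r1}		@ two whole words out
	strh	r2, [dst]		@ trailing halfword (little-endian)

   with part_bytes_reg == r2 carrying the tail of the final word.]  */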
5912 /* Generate a memory reference for a half word, such that it will be loaded
5913 into the top 16 bits of the word. We can assume that the address is
5914 known to be alignable and of the form reg, or plus (reg, const). */
5916 rtx
5917 arm_gen_rotated_half_load (rtx memref)
5919 HOST_WIDE_INT offset = 0;
5920 rtx base = XEXP (memref, 0);
5922 if (GET_CODE (base) == PLUS)
5924 offset = INTVAL (XEXP (base, 1));
5925 base = XEXP (base, 0);
5928 /* If we aren't allowed to generate unaligned addresses, then fail. */
5929 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5930 return NULL;
5932 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5934 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5935 return base;
5937 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
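/* [Editorial note: on a little-endian target the alignment check above
   only accepts (offset & 2) == 2.  E.g. for [rb, #6] the word at [rb, #4]
   is loaded, and bytes 6-7 already occupy its top 16 bits, so the
   reference is returned unrotated.]  */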
5940 /* Select a dominance comparison mode if possible for a test of the general
5941 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5942 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5943 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5944 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5945 In all cases OP will be either EQ or NE, but we don't need to know which
5946 here. If we are unable to support a dominance comparison we return
5947 CC mode. This will then fail to match for the RTL expressions that
5948 generate this call. */
5949 enum machine_mode
5950 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5952 enum rtx_code cond1, cond2;
5953 int swapped = 0;
5955 /* Currently we will probably get the wrong result if the individual
5956 comparisons are not simple. This also ensures that it is safe to
5957 reverse a comparison if necessary. */
5958 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5959 != CCmode)
5960 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5961 != CCmode))
5962 return CCmode;
5964 /* The if_then_else variant of this tests the second condition if the
5965 first passes, but is true if the first fails. Reverse the first
5966 condition to get a true "inclusive-or" expression. */
5967 if (cond_or == DOM_CC_NX_OR_Y)
5968 cond1 = reverse_condition (cond1);
5970 /* If the comparisons are not equal, and one doesn't dominate the other,
5971 then we can't do this. */
5972 if (cond1 != cond2
5973 && !comparison_dominates_p (cond1, cond2)
5974 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5975 return CCmode;
5977 if (swapped)
5979 enum rtx_code temp = cond1;
5980 cond1 = cond2;
5981 cond2 = temp;
5984 switch (cond1)
5986 case EQ:
5987 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5988 return CC_DEQmode;
5990 switch (cond2)
5992 case LE: return CC_DLEmode;
5993 case LEU: return CC_DLEUmode;
5994 case GE: return CC_DGEmode;
5995 case GEU: return CC_DGEUmode;
5996 default: break;
5999 break;
6001 case LT:
6002 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6003 return CC_DLTmode;
6004 if (cond2 == LE)
6005 return CC_DLEmode;
6006 if (cond2 == NE)
6007 return CC_DNEmode;
6008 break;
6010 case GT:
6011 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6012 return CC_DGTmode;
6013 if (cond2 == GE)
6014 return CC_DGEmode;
6015 if (cond2 == NE)
6016 return CC_DNEmode;
6017 break;
6019 case LTU:
6020 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6021 return CC_DLTUmode;
6022 if (cond2 == LEU)
6023 return CC_DLEUmode;
6024 if (cond2 == NE)
6025 return CC_DNEmode;
6026 break;
6028 case GTU:
6029 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6030 return CC_DGTUmode;
6031 if (cond2 == GEU)
6032 return CC_DGEUmode;
6033 if (cond2 == NE)
6034 return CC_DNEmode;
6035 break;
6037 /* The remaining cases only occur when both comparisons are the
6038 same. */
6039 case NE:
6040 return CC_DNEmode;
6042 case LE:
6043 return CC_DLEmode;
6045 case GE:
6046 return CC_DGEmode;
6048 case LEU:
6049 return CC_DLEUmode;
6051 case GEU:
6052 return CC_DGEUmode;
6054 default:
6055 break;
6058 abort ();
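/* For example, a test of the form (ior (eq r0 r1) (ge r2 r3))
   reaches the EQ case above with cond2 == GE: EQ dominates GE
   (x == y implies x >= y), so CC_DGEmode is selected and a single
   dominated comparison can test both conditions at once.  */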
6061 enum machine_mode
6062 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6064 /* All floating point compares return CCFP if it is an equality
6065 comparison, and CCFPE otherwise. */
6066 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6068 switch (op)
6070 case EQ:
6071 case NE:
6072 case UNORDERED:
6073 case ORDERED:
6074 case UNLT:
6075 case UNLE:
6076 case UNGT:
6077 case UNGE:
6078 case UNEQ:
6079 case LTGT:
6080 return CCFPmode;
6082 case LT:
6083 case LE:
6084 case GT:
6085 case GE:
6086 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6087 return CCFPmode;
6088 return CCFPEmode;
6090 default:
6091 abort ();
6095 /* A compare with a shifted operand. Because of canonicalization, the
6096 comparison will have to be swapped when we emit the assembler. */
6097 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6098 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6099 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6100 || GET_CODE (x) == ROTATERT))
6101 return CC_SWPmode;
6103 /* This is a special case that is used by combine to allow a
6104 comparison of a shifted byte load to be split into a zero-extend
6105 followed by a comparison of the shifted integer (only valid for
6106 equalities and unsigned inequalities). */
6107 if (GET_MODE (x) == SImode
6108 && GET_CODE (x) == ASHIFT
6109 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6110 && GET_CODE (XEXP (x, 0)) == SUBREG
6111 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6112 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6113 && (op == EQ || op == NE
6114 || op == GEU || op == GTU || op == LTU || op == LEU)
6115 && GET_CODE (y) == CONST_INT)
6116 return CC_Zmode;
6118 /* A construct for a conditional compare: if the false arm contains
6119 0, then both conditions must be true; otherwise either condition
6120 must be true. Not all conditions are possible, so CCmode is
6121 returned if it can't be done. */
6122 if (GET_CODE (x) == IF_THEN_ELSE
6123 && (XEXP (x, 2) == const0_rtx
6124 || XEXP (x, 2) == const1_rtx)
6125 && COMPARISON_P (XEXP (x, 0))
6126 && COMPARISON_P (XEXP (x, 1)))
6127 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6128 INTVAL (XEXP (x, 2)));
6130 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6131 if (GET_CODE (x) == AND
6132 && COMPARISON_P (XEXP (x, 0))
6133 && COMPARISON_P (XEXP (x, 1)))
6134 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6135 DOM_CC_X_AND_Y);
6137 if (GET_CODE (x) == IOR
6138 && COMPARISON_P (XEXP (x, 0))
6139 && COMPARISON_P (XEXP (x, 1)))
6140 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6141 DOM_CC_X_OR_Y);
6143 /* An operation (on Thumb) where we want to test for a single bit.
6144 This is done by shifting that bit up into the top bit of a
6145 scratch register; we can then branch on the sign bit. */
6146 if (TARGET_THUMB
6147 && GET_MODE (x) == SImode
6148 && (op == EQ || op == NE)
6149 && (GET_CODE (x) == ZERO_EXTRACT))
6150 return CC_Nmode;
6152 /* For an operation that sets the condition codes as a side-effect, the
6153 V flag is not set correctly, so we can only use comparisons where
6154 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6155 instead.) */
6156 if (GET_MODE (x) == SImode
6157 && y == const0_rtx
6158 && (op == EQ || op == NE || op == LT || op == GE)
6159 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6160 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6161 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6162 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6163 || GET_CODE (x) == LSHIFTRT
6164 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6165 || GET_CODE (x) == ROTATERT
6166 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6167 return CC_NOOVmode;
6169 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6170 return CC_Zmode;
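/* An unsigned comparison of an addition against one of its operands
   tests the carry flag: (ltu (plus x y) y) holds exactly when x + y
   wrapped around.  */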
6172 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6173 && GET_CODE (x) == PLUS
6174 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6175 return CC_Cmode;
6177 return CCmode;
6180 /* X and Y are two things to compare using CODE. Emit the compare insn and
6181 return the rtx for the condition code register in the proper mode. */
6184 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6186 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6187 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6189 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6190 gen_rtx_COMPARE (mode, x, y)));
6192 return cc_reg;
6195 /* Generate a sequence of insns that will generate the correct return
6196 address mask depending on the physical architecture that the program
6197 is running on. */
6199 arm_gen_return_addr_mask (void)
6201 rtx reg = gen_reg_rtx (Pmode);
6203 emit_insn (gen_return_addr_mask (reg));
6204 return reg;
6207 void
6208 arm_reload_in_hi (rtx *operands)
6210 rtx ref = operands[1];
6211 rtx base, scratch;
6212 HOST_WIDE_INT offset = 0;
6214 if (GET_CODE (ref) == SUBREG)
6216 offset = SUBREG_BYTE (ref);
6217 ref = SUBREG_REG (ref);
6220 if (GET_CODE (ref) == REG)
6222 /* We have a pseudo which has been spilt onto the stack; there
6223 are two cases here: the first where there is a simple
6224 stack-slot replacement and a second where the stack-slot is
6225 out of range, or is used as a subreg. */
6226 if (reg_equiv_mem[REGNO (ref)])
6228 ref = reg_equiv_mem[REGNO (ref)];
6229 base = find_replacement (&XEXP (ref, 0));
6231 else
6232 /* The slot is out of range, or was dressed up in a SUBREG. */
6233 base = reg_equiv_address[REGNO (ref)];
6235 else
6236 base = find_replacement (&XEXP (ref, 0));
6238 /* Handle the case where the address is too complex to be offset by 1. */
6239 if (GET_CODE (base) == MINUS
6240 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6242 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6244 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6245 base = base_plus;
6247 else if (GET_CODE (base) == PLUS)
6249 /* The addend must be CONST_INT, or we would have dealt with it above. */
6250 HOST_WIDE_INT hi, lo;
6252 offset += INTVAL (XEXP (base, 1));
6253 base = XEXP (base, 0);
6255 /* Rework the address into a legal sequence of insns. */
6256 /* Valid range for lo is -4095 -> 4095 */
6257 lo = (offset >= 0
6258 ? (offset & 0xfff)
6259 : -((-offset) & 0xfff));
6261 /* Corner case: if lo is the max offset then we would be out of range
6262 once we have added the additional 1 below, so bump the msb into the
6263 pre-loading insn(s). */
6264 if (lo == 4095)
6265 lo &= 0x7ff;
6267 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6268 ^ (HOST_WIDE_INT) 0x80000000)
6269 - (HOST_WIDE_INT) 0x80000000);
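/* For example, offset 8191 first gives lo = 0xfff; the corner
   case above reduces that to 0x7ff (2047), so hi = 6144 is folded
   into the base register and the residual offset of 2047 stays in
   range even after the extra 1 added for the second byte.  */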
6271 if (hi + lo != offset)
6272 abort ();
6274 if (hi != 0)
6276 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6278 /* Get the base address; addsi3 knows how to handle constants
6279 that require more than one insn. */
6280 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6281 base = base_plus;
6282 offset = lo;
6286 /* Operands[2] may overlap operands[0] (though it won't overlap
6287 operands[1]); that's why we asked for a DImode reg -- so we can
6288 use the half that does not overlap. */
6289 if (REGNO (operands[2]) == REGNO (operands[0]))
6290 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6291 else
6292 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6294 emit_insn (gen_zero_extendqisi2 (scratch,
6295 gen_rtx_MEM (QImode,
6296 plus_constant (base,
6297 offset))));
6298 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6299 gen_rtx_MEM (QImode,
6300 plus_constant (base,
6301 offset + 1))));
6302 if (!BYTES_BIG_ENDIAN)
6303 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6304 gen_rtx_IOR (SImode,
6305 gen_rtx_ASHIFT
6306 (SImode,
6307 gen_rtx_SUBREG (SImode, operands[0], 0),
6308 GEN_INT (8)),
6309 scratch)));
6310 else
6311 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6312 gen_rtx_IOR (SImode,
6313 gen_rtx_ASHIFT (SImode, scratch,
6314 GEN_INT (8)),
6315 gen_rtx_SUBREG (SImode, operands[0],
6316 0))));
6319 /* Handle storing a half-word to memory during reload by synthesizing it as two
6320 byte stores. Take care not to clobber the input values until after we
6321 have moved them somewhere safe. This code assumes that if the DImode
6322 scratch in operands[2] overlaps either the input value or output address
6323 in some way, then that value must die in this insn (we absolutely need
6324 two scratch registers for some corner cases). */
6325 void
6326 arm_reload_out_hi (rtx *operands)
6328 rtx ref = operands[0];
6329 rtx outval = operands[1];
6330 rtx base, scratch;
6331 HOST_WIDE_INT offset = 0;
6333 if (GET_CODE (ref) == SUBREG)
6335 offset = SUBREG_BYTE (ref);
6336 ref = SUBREG_REG (ref);
6339 if (GET_CODE (ref) == REG)
6341 /* We have a pseudo which has been spilt onto the stack; there
6342 are two cases here: the first where there is a simple
6343 stack-slot replacement and a second where the stack-slot is
6344 out of range, or is used as a subreg. */
6345 if (reg_equiv_mem[REGNO (ref)])
6347 ref = reg_equiv_mem[REGNO (ref)];
6348 base = find_replacement (&XEXP (ref, 0));
6350 else
6351 /* The slot is out of range, or was dressed up in a SUBREG. */
6352 base = reg_equiv_address[REGNO (ref)];
6354 else
6355 base = find_replacement (&XEXP (ref, 0));
6357 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6359 /* Handle the case where the address is too complex to be offset by 1. */
6360 if (GET_CODE (base) == MINUS
6361 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6363 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6365 /* Be careful not to destroy OUTVAL. */
6366 if (reg_overlap_mentioned_p (base_plus, outval))
6368 /* Updating base_plus might destroy outval; see if we can
6369 swap the scratch and base_plus. */
6370 if (!reg_overlap_mentioned_p (scratch, outval))
6372 rtx tmp = scratch;
6373 scratch = base_plus;
6374 base_plus = tmp;
6376 else
6378 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6380 /* Be conservative and copy OUTVAL into the scratch now;
6381 this should only be necessary if outval is a subreg
6382 of something larger than a word. */
6383 /* XXX Might this clobber base? I can't see how it can,
6384 since scratch is known to overlap with OUTVAL, and
6385 must be wider than a word. */
6386 emit_insn (gen_movhi (scratch_hi, outval));
6387 outval = scratch_hi;
6391 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6392 base = base_plus;
6394 else if (GET_CODE (base) == PLUS)
6396 /* The addend must be CONST_INT, or we would have dealt with it above. */
6397 HOST_WIDE_INT hi, lo;
6399 offset += INTVAL (XEXP (base, 1));
6400 base = XEXP (base, 0);
6402 /* Rework the address into a legal sequence of insns. */
6403 /* Valid range for lo is -4095 -> 4095 */
6404 lo = (offset >= 0
6405 ? (offset & 0xfff)
6406 : -((-offset) & 0xfff));
6408 /* Corner case: if lo is the max offset then we would be out of range
6409 once we have added the additional 1 below, so bump the msb into the
6410 pre-loading insn(s). */
6411 if (lo == 4095)
6412 lo &= 0x7ff;
6414 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6415 ^ (HOST_WIDE_INT) 0x80000000)
6416 - (HOST_WIDE_INT) 0x80000000);
6418 if (hi + lo != offset)
6419 abort ();
6421 if (hi != 0)
6423 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6425 /* Be careful not to destroy OUTVAL. */
6426 if (reg_overlap_mentioned_p (base_plus, outval))
6428 /* Updating base_plus might destroy outval; see if we
6429 can swap the scratch and base_plus. */
6430 if (!reg_overlap_mentioned_p (scratch, outval))
6432 rtx tmp = scratch;
6433 scratch = base_plus;
6434 base_plus = tmp;
6436 else
6438 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6440 /* Be conservative and copy outval into scratch now;
6441 this should only be necessary if outval is a
6442 subreg of something larger than a word. */
6443 /* XXX Might this clobber base? I can't see how it
6444 can, since scratch is known to overlap with
6445 outval. */
6446 emit_insn (gen_movhi (scratch_hi, outval));
6447 outval = scratch_hi;
6451 /* Get the base address; addsi3 knows how to handle constants
6452 that require more than one insn. */
6453 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6454 base = base_plus;
6455 offset = lo;
6459 if (BYTES_BIG_ENDIAN)
6461 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6462 plus_constant (base, offset + 1)),
6463 gen_lowpart (QImode, outval)));
6464 emit_insn (gen_lshrsi3 (scratch,
6465 gen_rtx_SUBREG (SImode, outval, 0),
6466 GEN_INT (8)));
6467 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6468 gen_lowpart (QImode, scratch)));
6470 else
6472 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6473 gen_lowpart (QImode, outval)));
6474 emit_insn (gen_lshrsi3 (scratch,
6475 gen_rtx_SUBREG (SImode, outval, 0),
6476 GEN_INT (8)));
6477 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6478 plus_constant (base, offset + 1)),
6479 gen_lowpart (QImode, scratch)));
6483 /* Print a symbolic form of X to the debug file, F. */
6484 static void
6485 arm_print_value (FILE *f, rtx x)
6487 switch (GET_CODE (x))
6489 case CONST_INT:
6490 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6491 return;
6493 case CONST_DOUBLE:
6494 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6495 return;
6497 case CONST_VECTOR:
6499 int i;
6501 fprintf (f, "<");
6502 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6504 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6505 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6506 fputc (',', f);
6508 fprintf (f, ">");
6510 return;
6512 case CONST_STRING:
6513 fprintf (f, "\"%s\"", XSTR (x, 0));
6514 return;
6516 case SYMBOL_REF:
6517 fprintf (f, "`%s'", XSTR (x, 0));
6518 return;
6520 case LABEL_REF:
6521 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6522 return;
6524 case CONST:
6525 arm_print_value (f, XEXP (x, 0));
6526 return;
6528 case PLUS:
6529 arm_print_value (f, XEXP (x, 0));
6530 fprintf (f, "+");
6531 arm_print_value (f, XEXP (x, 1));
6532 return;
6534 case PC:
6535 fprintf (f, "pc");
6536 return;
6538 default:
6539 fprintf (f, "????");
6540 return;
6544 /* Routines for manipulation of the constant pool. */
6546 /* ARM instructions cannot load a large constant directly into a
6547 register; such constants have to come from a pc relative load. The constant
6548 must therefore be placed in the addressable range of the pc
6549 relative load. Depending on the precise pc relative load
6550 instruction the range is somewhere between 256 bytes and 4k. This
6551 means that we often have to dump a constant inside a function, and
6552 generate code to branch around it.
6554 It is important to minimize this, since the branches will slow
6555 things down and make the code larger.
6557 Normally we can hide the table after an existing unconditional
6558 branch so that there is no interruption of the flow, but in the
6559 worst case the code looks like this:
6561 ldr rn, L1
6563 b L2
6564 align
6565 L1: .long value
6569 ldr rn, L3
6571 b L4
6572 align
6573 L3: .long value
6577 We fix this by performing a scan after scheduling, which notices
6578 which instructions need to have their operands fetched from the
6579 constant table and builds the table.
6581 The algorithm starts by building a table of all the constants that
6582 need fixing up and all the natural barriers in the function (places
6583 where a constant table can be dropped without breaking the flow).
6584 For each fixup we note how far the pc-relative replacement will be
6585 able to reach and the offset of the instruction into the function.
6587 Having built the table we then group the fixes together to form
6588 tables that are as large as possible (subject to addressing
6589 constraints) and emit each table of constants after the last
6590 barrier that is within range of all the instructions in the group.
6591 If a group does not contain a barrier, then we forcibly create one
6592 by inserting a jump instruction into the flow. Once the table has
6593 been inserted, the insns are then modified to reference the
6594 relevant entry in the pool.
6596 Possible enhancements to the algorithm (not implemented) are:
6598 1) For some processors and object formats, there may be benefit in
6599 aligning the pools to the start of cache lines; this alignment
6600 would need to be taken into account when calculating addressability
6601 of a pool. */
6603 /* These typedefs are located at the start of this file, so that
6604 they can be used in the prototypes there. This comment is to
6605 remind readers of that fact so that the following structures
6606 can be understood more easily.
6608 typedef struct minipool_node Mnode;
6609 typedef struct minipool_fixup Mfix; */
6611 struct minipool_node
6613 /* Doubly linked chain of entries. */
6614 Mnode * next;
6615 Mnode * prev;
6616 /* The maximum offset into the code at which this entry can be placed. While
6617 pushing fixes for forward references, all entries are sorted in order
6618 of increasing max_address. */
6619 HOST_WIDE_INT max_address;
6620 /* Similarly for an entry inserted for a backwards ref. */
6621 HOST_WIDE_INT min_address;
6622 /* The number of fixes referencing this entry. This can become zero
6623 if we "unpush" an entry. In this case we ignore the entry when we
6624 come to emit the code. */
6625 int refcount;
6626 /* The offset from the start of the minipool. */
6627 HOST_WIDE_INT offset;
6628 /* The value in the table. */
6629 rtx value;
6630 /* The mode of value. */
6631 enum machine_mode mode;
6632 /* The size of the value. With iWMMXt enabled
6633 sizes > 4 also imply an alignment of 8 bytes. */
6634 int fix_size;
6637 struct minipool_fixup
6639 Mfix * next;
6640 rtx insn;
6641 HOST_WIDE_INT address;
6642 rtx * loc;
6643 enum machine_mode mode;
6644 int fix_size;
6645 rtx value;
6646 Mnode * minipool;
6647 HOST_WIDE_INT forwards;
6648 HOST_WIDE_INT backwards;
6651 /* Fixes less than a word need padding out to a word boundary. */
6652 #define MINIPOOL_FIX_SIZE(mode) \
6653 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
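/* For example, an HImode fix (2 bytes) is padded to 4, while a
   DImode fix keeps its natural size of 8.  */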
6655 static Mnode * minipool_vector_head;
6656 static Mnode * minipool_vector_tail;
6657 static rtx minipool_vector_label;
6659 /* The linked list of all minipool fixes required for this function. */
6660 Mfix * minipool_fix_head;
6661 Mfix * minipool_fix_tail;
6662 /* The fix entry for the current minipool, once it has been placed. */
6663 Mfix * minipool_barrier;
6665 /* Determines if INSN is the start of a jump table. Returns the end
6666 of the TABLE or NULL_RTX. */
6667 static rtx
6668 is_jump_table (rtx insn)
6670 rtx table;
6672 if (GET_CODE (insn) == JUMP_INSN
6673 && JUMP_LABEL (insn) != NULL
6674 && ((table = next_real_insn (JUMP_LABEL (insn)))
6675 == next_real_insn (insn))
6676 && table != NULL
6677 && GET_CODE (table) == JUMP_INSN
6678 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6679 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6680 return table;
6682 return NULL_RTX;
6685 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6686 #define JUMP_TABLES_IN_TEXT_SECTION 0
6687 #endif
6689 static HOST_WIDE_INT
6690 get_jump_table_size (rtx insn)
6692 /* ADDR_VECs only take room if read-only data goes into the text
6693 section. */
6694 if (JUMP_TABLES_IN_TEXT_SECTION
6695 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6696 || 1
6697 #endif
6700 rtx body = PATTERN (insn);
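/* The vector of labels is operand 0 of an ADDR_VEC, but operand 1
   of an ADDR_DIFF_VEC (whose operand 0 is the base label).  */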
6701 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6703 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6706 return 0;
6709 /* Move a minipool fix MP from its current location to before MAX_MP.
6710 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6711 constraints may need updating. */
6712 static Mnode *
6713 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6714 HOST_WIDE_INT max_address)
6716 /* This should never be true and the code below assumes these are
6717 different. */
6718 if (mp == max_mp)
6719 abort ();
6721 if (max_mp == NULL)
6723 if (max_address < mp->max_address)
6724 mp->max_address = max_address;
6726 else
6728 if (max_address > max_mp->max_address - mp->fix_size)
6729 mp->max_address = max_mp->max_address - mp->fix_size;
6730 else
6731 mp->max_address = max_address;
6733 /* Unlink MP from its current position. Since max_mp is non-null,
6734 mp->prev must be non-null. */
6735 mp->prev->next = mp->next;
6736 if (mp->next != NULL)
6737 mp->next->prev = mp->prev;
6738 else
6739 minipool_vector_tail = mp->prev;
6741 /* Re-insert it before MAX_MP. */
6742 mp->next = max_mp;
6743 mp->prev = max_mp->prev;
6744 max_mp->prev = mp;
6746 if (mp->prev != NULL)
6747 mp->prev->next = mp;
6748 else
6749 minipool_vector_head = mp;
6752 /* Save the new entry. */
6753 max_mp = mp;
6755 /* Scan over the preceding entries and adjust their addresses as
6756 required. */
6757 while (mp->prev != NULL
6758 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6760 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6761 mp = mp->prev;
6764 return max_mp;
6767 /* Add a constant to the minipool for a forward reference. Returns the
6768 node added or NULL if the constant will not fit in this pool. */
6769 static Mnode *
6770 add_minipool_forward_ref (Mfix *fix)
6772 /* If set, max_mp is the first pool_entry that has a lower
6773 constraint than the one we are trying to add. */
6774 Mnode * max_mp = NULL;
6775 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6776 Mnode * mp;
6778 /* If this fix's address is greater than the address of the first
6779 entry, then we can't put the fix in this pool. We subtract the
6780 size of the current fix to ensure that if the table is fully
6781 packed we still have enough room to insert this value by shuffling
6782 the other fixes forwards. */
6783 if (minipool_vector_head &&
6784 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6785 return NULL;
6787 /* Scan the pool to see if a constant with the same value has
6788 already been added. While we are doing this, also note the
6789 location where we must insert the constant if it doesn't already
6790 exist. */
6791 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6793 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6794 && fix->mode == mp->mode
6795 && (GET_CODE (fix->value) != CODE_LABEL
6796 || (CODE_LABEL_NUMBER (fix->value)
6797 == CODE_LABEL_NUMBER (mp->value)))
6798 && rtx_equal_p (fix->value, mp->value))
6800 /* More than one fix references this entry. */
6801 mp->refcount++;
6802 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6805 /* Note the insertion point if necessary. */
6806 if (max_mp == NULL
6807 && mp->max_address > max_address)
6808 max_mp = mp;
6810 /* If we are inserting an 8-byte aligned quantity and
6811 we have not already found an insertion point, then
6812 make sure that all such 8-byte aligned quantities are
6813 placed at the start of the pool. */
6814 if (ARM_DOUBLEWORD_ALIGN
6815 && max_mp == NULL
6816 && fix->fix_size == 8
6817 && mp->fix_size != 8)
6819 max_mp = mp;
6820 max_address = mp->max_address;
6824 /* The value is not currently in the minipool, so we need to create
6825 a new entry for it. If MAX_MP is NULL, the entry will be put on
6826 the end of the list since the placement is less constrained than
6827 any existing entry. Otherwise, we insert the new fix before
6828 MAX_MP and, if necessary, adjust the constraints on the other
6829 entries. */
6830 mp = xmalloc (sizeof (* mp));
6831 mp->fix_size = fix->fix_size;
6832 mp->mode = fix->mode;
6833 mp->value = fix->value;
6834 mp->refcount = 1;
6835 /* Not yet required for a backwards ref. */
6836 mp->min_address = -65536;
6838 if (max_mp == NULL)
6840 mp->max_address = max_address;
6841 mp->next = NULL;
6842 mp->prev = minipool_vector_tail;
6844 if (mp->prev == NULL)
6846 minipool_vector_head = mp;
6847 minipool_vector_label = gen_label_rtx ();
6849 else
6850 mp->prev->next = mp;
6852 minipool_vector_tail = mp;
6854 else
6856 if (max_address > max_mp->max_address - mp->fix_size)
6857 mp->max_address = max_mp->max_address - mp->fix_size;
6858 else
6859 mp->max_address = max_address;
6861 mp->next = max_mp;
6862 mp->prev = max_mp->prev;
6863 max_mp->prev = mp;
6864 if (mp->prev != NULL)
6865 mp->prev->next = mp;
6866 else
6867 minipool_vector_head = mp;
6870 /* Save the new entry. */
6871 max_mp = mp;
6873 /* Scan over the preceding entries and adjust their addresses as
6874 required. */
6875 while (mp->prev != NULL
6876 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6878 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6879 mp = mp->prev;
6882 return max_mp;
6885 static Mnode *
6886 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6887 HOST_WIDE_INT min_address)
6889 HOST_WIDE_INT offset;
6891 /* This should never be true, and the code below assumes these are
6892 different. */
6893 if (mp == min_mp)
6894 abort ();
6896 if (min_mp == NULL)
6898 if (min_address > mp->min_address)
6899 mp->min_address = min_address;
6901 else
6903 /* We will adjust this below if it is too loose. */
6904 mp->min_address = min_address;
6906 /* Unlink MP from its current position. Since min_mp is non-null,
6907 mp->next must be non-null. */
6908 mp->next->prev = mp->prev;
6909 if (mp->prev != NULL)
6910 mp->prev->next = mp->next;
6911 else
6912 minipool_vector_head = mp->next;
6914 /* Reinsert it after MIN_MP. */
6915 mp->prev = min_mp;
6916 mp->next = min_mp->next;
6917 min_mp->next = mp;
6918 if (mp->next != NULL)
6919 mp->next->prev = mp;
6920 else
6921 minipool_vector_tail = mp;
6924 min_mp = mp;
6926 offset = 0;
6927 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6929 mp->offset = offset;
6930 if (mp->refcount > 0)
6931 offset += mp->fix_size;
6933 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6934 mp->next->min_address = mp->min_address + mp->fix_size;
6937 return min_mp;
6940 /* Add a constant to the minipool for a backward reference. Returns the
6941 node added or NULL if the constant will not fit in this pool.
6943 Note that the code for insertion for a backwards reference can be
6944 somewhat confusing because the calculated offsets for each fix do
6945 not take into account the size of the pool (which is still under
6946 construction). */
6947 static Mnode *
6948 add_minipool_backward_ref (Mfix *fix)
6950 /* If set, min_mp is the last pool_entry that has a lower constraint
6951 than the one we are trying to add. */
6952 Mnode *min_mp = NULL;
6953 /* This can be negative, since it is only a constraint. */
6954 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6955 Mnode *mp;
6957 /* If we can't reach the current pool from this insn, or if we can't
6958 insert this entry at the end of the pool without pushing other
6959 fixes out of range, then we don't try. This ensures that we
6960 can't fail later on. */
6961 if (min_address >= minipool_barrier->address
6962 || (minipool_vector_tail->min_address + fix->fix_size
6963 >= minipool_barrier->address))
6964 return NULL;
6966 /* Scan the pool to see if a constant with the same value has
6967 already been added. While we are doing this, also note the
6968 location where we must insert the constant if it doesn't already
6969 exist. */
6970 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6972 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6973 && fix->mode == mp->mode
6974 && (GET_CODE (fix->value) != CODE_LABEL
6975 || (CODE_LABEL_NUMBER (fix->value)
6976 == CODE_LABEL_NUMBER (mp->value)))
6977 && rtx_equal_p (fix->value, mp->value)
6978 /* Check that there is enough slack to move this entry to the
6979 end of the table (this is conservative). */
6980 && (mp->max_address
6981 > (minipool_barrier->address
6982 + minipool_vector_tail->offset
6983 + minipool_vector_tail->fix_size)))
6985 mp->refcount++;
6986 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6989 if (min_mp != NULL)
6990 mp->min_address += fix->fix_size;
6991 else
6993 /* Note the insertion point if necessary. */
6994 if (mp->min_address < min_address)
6996 /* For now, we do not allow the insertion of nodes requiring
6997 8-byte alignment anywhere but at the start of the pool. */
6998 if (ARM_DOUBLEWORD_ALIGN
6999 && fix->fix_size == 8 && mp->fix_size != 8)
7000 return NULL;
7001 else
7002 min_mp = mp;
7004 else if (mp->max_address
7005 < minipool_barrier->address + mp->offset + fix->fix_size)
7007 /* Inserting before this entry would push the fix beyond
7008 its maximum address (which can happen if we have
7009 re-located a forwards fix); force the new fix to come
7010 after it. */
7011 min_mp = mp;
7012 min_address = mp->min_address + fix->fix_size;
7014 /* If we are inserting an 8-byte aligned quantity and
7015 we have not already found an insertion point, then
7016 make sure that all such 8-byte aligned quantities are
7017 placed at the start of the pool. */
7018 else if (ARM_DOUBLEWORD_ALIGN
7019 && min_mp == NULL
7020 && fix->fix_size == 8
7021 && mp->fix_size < 8)
7023 min_mp = mp;
7024 min_address = mp->min_address + fix->fix_size;
7029 /* We need to create a new entry. */
7030 mp = xmalloc (sizeof (* mp));
7031 mp->fix_size = fix->fix_size;
7032 mp->mode = fix->mode;
7033 mp->value = fix->value;
7034 mp->refcount = 1;
7035 mp->max_address = minipool_barrier->address + 65536;
7037 mp->min_address = min_address;
7039 if (min_mp == NULL)
7041 mp->prev = NULL;
7042 mp->next = minipool_vector_head;
7044 if (mp->next == NULL)
7046 minipool_vector_tail = mp;
7047 minipool_vector_label = gen_label_rtx ();
7049 else
7050 mp->next->prev = mp;
7052 minipool_vector_head = mp;
7054 else
7056 mp->next = min_mp->next;
7057 mp->prev = min_mp;
7058 min_mp->next = mp;
7060 if (mp->next != NULL)
7061 mp->next->prev = mp;
7062 else
7063 minipool_vector_tail = mp;
7066 /* Save the new entry. */
7067 min_mp = mp;
7069 if (mp->prev)
7070 mp = mp->prev;
7071 else
7072 mp->offset = 0;
7074 /* Scan over the following entries and adjust their offsets. */
7075 while (mp->next != NULL)
7077 if (mp->next->min_address < mp->min_address + mp->fix_size)
7078 mp->next->min_address = mp->min_address + mp->fix_size;
7080 if (mp->refcount)
7081 mp->next->offset = mp->offset + mp->fix_size;
7082 else
7083 mp->next->offset = mp->offset;
7085 mp = mp->next;
7088 return min_mp;
7091 static void
7092 assign_minipool_offsets (Mfix *barrier)
7094 HOST_WIDE_INT offset = 0;
7095 Mnode *mp;
7097 minipool_barrier = barrier;
7099 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7101 mp->offset = offset;
7103 if (mp->refcount > 0)
7104 offset += mp->fix_size;
7108 /* Output the literal table. */
7109 static void
7110 dump_minipool (rtx scan)
7112 Mnode * mp;
7113 Mnode * nmp;
7114 int align64 = 0;
7116 if (ARM_DOUBLEWORD_ALIGN)
7117 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7118 if (mp->refcount > 0 && mp->fix_size == 8)
7120 align64 = 1;
7121 break;
7124 if (dump_file)
7125 fprintf (dump_file,
7126 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7127 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7129 scan = emit_label_after (gen_label_rtx (), scan);
7130 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7131 scan = emit_label_after (minipool_vector_label, scan);
7133 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7135 if (mp->refcount > 0)
7137 if (dump_file)
7139 fprintf (dump_file,
7140 ";; Offset %u, min %ld, max %ld ",
7141 (unsigned) mp->offset, (unsigned long) mp->min_address,
7142 (unsigned long) mp->max_address);
7143 arm_print_value (dump_file, mp->value);
7144 fputc ('\n', dump_file);
7147 switch (mp->fix_size)
7149 #ifdef HAVE_consttable_1
7150 case 1:
7151 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7152 break;
7154 #endif
7155 #ifdef HAVE_consttable_2
7156 case 2:
7157 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7158 break;
7160 #endif
7161 #ifdef HAVE_consttable_4
7162 case 4:
7163 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7164 break;
7166 #endif
7167 #ifdef HAVE_consttable_8
7168 case 8:
7169 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7170 break;
7172 #endif
7173 default:
7174 abort ();
7175 break;
7179 nmp = mp->next;
7180 free (mp);
7183 minipool_vector_head = minipool_vector_tail = NULL;
7184 scan = emit_insn_after (gen_consttable_end (), scan);
7185 scan = emit_barrier_after (scan);
7188 /* Return the cost of forcibly inserting a barrier after INSN. */
7189 static int
7190 arm_barrier_cost (rtx insn)
7192 /* Basing the location of the pool on the loop depth is preferable,
7193 but at the moment, the basic block information seems to be
7194 corrupt by this stage of the compilation. */
7195 int base_cost = 50;
7196 rtx next = next_nonnote_insn (insn);
7198 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7199 base_cost -= 20;
7201 switch (GET_CODE (insn))
7203 case CODE_LABEL:
7204 /* It will always be better to place the table before the label, rather
7205 than after it. */
7206 return 50;
7208 case INSN:
7209 case CALL_INSN:
7210 return base_cost;
7212 case JUMP_INSN:
7213 return base_cost - 10;
7215 default:
7216 return base_cost + 10;
7220 /* Find the best place in the insn stream in the range
7221 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7222 Create the barrier by inserting a jump and add a new fix entry for
7223 it. */
7224 static Mfix *
7225 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7227 HOST_WIDE_INT count = 0;
7228 rtx barrier;
7229 rtx from = fix->insn;
7230 rtx selected = from;
7231 int selected_cost;
7232 HOST_WIDE_INT selected_address;
7233 Mfix * new_fix;
7234 HOST_WIDE_INT max_count = max_address - fix->address;
7235 rtx label = gen_label_rtx ();
7237 selected_cost = arm_barrier_cost (from);
7238 selected_address = fix->address;
7240 while (from && count < max_count)
7242 rtx tmp;
7243 int new_cost;
7245 /* This code shouldn't have been called if there was a natural barrier
7246 within range. */
7247 if (GET_CODE (from) == BARRIER)
7248 abort ();
7250 /* Count the length of this insn. */
7251 count += get_attr_length (from);
7253 /* If there is a jump table, add its length. */
7254 tmp = is_jump_table (from);
7255 if (tmp != NULL)
7257 count += get_jump_table_size (tmp);
7259 /* Jump tables aren't in a basic block, so base the cost on
7260 the dispatch insn. If we select this location, we will
7261 still put the pool after the table. */
7262 new_cost = arm_barrier_cost (from);
7264 if (count < max_count && new_cost <= selected_cost)
7266 selected = tmp;
7267 selected_cost = new_cost;
7268 selected_address = fix->address + count;
7271 /* Continue after the dispatch table. */
7272 from = NEXT_INSN (tmp);
7273 continue;
7276 new_cost = arm_barrier_cost (from);
7278 if (count < max_count && new_cost <= selected_cost)
7280 selected = from;
7281 selected_cost = new_cost;
7282 selected_address = fix->address + count;
7285 from = NEXT_INSN (from);
7288 /* Create a new JUMP_INSN that branches around a barrier. */
7289 from = emit_jump_insn_after (gen_jump (label), selected);
7290 JUMP_LABEL (from) = label;
7291 barrier = emit_barrier_after (from);
7292 emit_label_after (label, barrier);
7294 /* Create a minipool barrier entry for the new barrier. */
7295 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7296 new_fix->insn = barrier;
7297 new_fix->address = selected_address;
7298 new_fix->next = fix->next;
7299 fix->next = new_fix;
7301 return new_fix;
7304 /* Record that there is a natural barrier in the insn stream at
7305 ADDRESS. */
7306 static void
7307 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7309 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7311 fix->insn = insn;
7312 fix->address = address;
7314 fix->next = NULL;
7315 if (minipool_fix_head != NULL)
7316 minipool_fix_tail->next = fix;
7317 else
7318 minipool_fix_head = fix;
7320 minipool_fix_tail = fix;
7323 /* Record INSN, which will need fixing up to load a value from the
7324 minipool. ADDRESS is the offset of the insn since the start of the
7325 function; LOC is a pointer to the part of the insn which requires
7326 fixing; VALUE is the constant that must be loaded, which is of type
7327 MODE. */
7328 static void
7329 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7330 enum machine_mode mode, rtx value)
7332 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7334 #ifdef AOF_ASSEMBLER
7335 /* PIC symbol references need to be converted into offsets into the
7336 based area. */
7337 /* XXX This shouldn't be done here. */
7338 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7339 value = aof_pic_entry (value);
7340 #endif /* AOF_ASSEMBLER */
7342 fix->insn = insn;
7343 fix->address = address;
7344 fix->loc = loc;
7345 fix->mode = mode;
7346 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7347 fix->value = value;
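/* The pool_range and neg_pool_range insn attributes in arm.md give
   how far forwards and backwards this insn's pc-relative load can
   reach.  */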
7348 fix->forwards = get_attr_pool_range (insn);
7349 fix->backwards = get_attr_neg_pool_range (insn);
7350 fix->minipool = NULL;
7352 /* If an insn doesn't have a range defined for it, then it isn't
7353 expecting to be reworked by this code. Better to abort now than
7354 to generate duff assembly code. */
7355 if (fix->forwards == 0 && fix->backwards == 0)
7356 abort ();
7358 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7359 So there might be an empty word before the start of the pool.
7360 Hence we reduce the forward range by 4 to allow for this
7361 possibility. */
7362 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7363 fix->forwards -= 4;
7365 if (dump_file)
7367 fprintf (dump_file,
7368 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7369 GET_MODE_NAME (mode),
7370 INSN_UID (insn), (unsigned long) address,
7371 -1 * (long)fix->backwards, (long)fix->forwards);
7372 arm_print_value (dump_file, fix->value);
7373 fprintf (dump_file, "\n");
7376 /* Add it to the chain of fixes. */
7377 fix->next = NULL;
7379 if (minipool_fix_head != NULL)
7380 minipool_fix_tail->next = fix;
7381 else
7382 minipool_fix_head = fix;
7384 minipool_fix_tail = fix;
7387 /* Return the cost of synthesizing the const_double VAL inline.
7388 Returns the number of insns needed, or 99 if we don't know how to
7389 do it. */
7391 arm_const_double_inline_cost (rtx val)
7393 long parts[2];
7395 if (GET_MODE (val) == DFmode)
7397 REAL_VALUE_TYPE r;
7398 if (!TARGET_SOFT_FLOAT)
7399 return 99;
7400 REAL_VALUE_FROM_CONST_DOUBLE (r, val);
7401 REAL_VALUE_TO_TARGET_DOUBLE (r, parts);
7403 else if (GET_MODE (val) != VOIDmode)
7404 return 99;
7405 else
7407 parts[0] = CONST_DOUBLE_LOW (val);
7408 parts[1] = CONST_DOUBLE_HIGH (val);
7411 return (arm_gen_constant (SET, SImode, NULL_RTX, parts[0],
7412 NULL_RTX, NULL_RTX, 0, 0)
7413 + arm_gen_constant (SET, SImode, NULL_RTX, parts[1],
7414 NULL_RTX, NULL_RTX, 0, 0));
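/* For instance, a VOIDmode CONST_DOUBLE whose halves are both a
   valid immediate such as 0xff costs 1 + 1 = 2 insns, below the
   minipool threshold applied below.  */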
7417 /* Determine if a CONST_DOUBLE should be pushed to the minipool. */
7418 static bool
7419 const_double_needs_minipool (rtx val)
7421 /* Thumb only knows how to load a CONST_DOUBLE from memory at the moment. */
7422 if (TARGET_THUMB)
7423 return true;
7425 /* Don't push anything to the minipool if a CONST_DOUBLE can be built with
7426 a few ALU insns directly. On balance, the optimum is likely to be around
7427 3 insns, except when there are no load delay slots, where it should be 4.
7428 When optimizing for size, a limit of 3 allows saving at least one word
7429 except for cases where a single minipool entry could be shared more than
7430 2 times, which is rather unlikely to outweigh the overall savings. */
7431 return (arm_const_double_inline_cost (val)
7432 > ((optimize_size || arm_ld_sched) ? 3 : 4));
7435 /* Scan INSN and note any of its operands that need fixing.
7436 If DO_PUSHES is false we do not actually push any of the fixups
7437 needed. The function returns TRUE if any fixups were needed/pushed.
7438 This is used by arm_memory_load_p() which needs to know about loads
7439 of constants that will be converted into minipool loads. */
7440 static bool
7441 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7443 bool result = false;
7444 int opno;
7446 extract_insn (insn);
7448 if (!constrain_operands (1))
7449 fatal_insn_not_found (insn);
7451 if (recog_data.n_alternatives == 0)
7452 return false;
7454 /* Fill in recog_op_alt with information about the constraints of this insn. */
7455 preprocess_constraints ();
7457 for (opno = 0; opno < recog_data.n_operands; opno++)
7459 /* Things we need to fix can only occur in inputs. */
7460 if (recog_data.operand_type[opno] != OP_IN)
7461 continue;
7463 /* If this alternative is a memory reference, then any mention
7464 of constants in this alternative is really to fool reload
7465 into allowing us to accept one there. We need to fix them up
7466 now so that we output the right code. */
7467 if (recog_op_alt[opno][which_alternative].memory_ok)
7469 rtx op = recog_data.operand[opno];
7471 if (CONSTANT_P (op)
7472 && (GET_CODE (op) != CONST_DOUBLE
7473 || const_double_needs_minipool (op)))
7475 if (do_pushes)
7476 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7477 recog_data.operand_mode[opno], op);
7478 result = true;
7480 else if (GET_CODE (op) == MEM
7481 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7482 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7484 if (do_pushes)
7486 rtx cop = avoid_constant_pool_reference (op);
7488 /* Casting the address of something to a mode narrower
7489 than a word can cause avoid_constant_pool_reference()
7490 to return the pool reference itself. That's no good to
7491 us here. Let's just hope that we can use the
7492 constant pool value directly. */
7493 if (op == cop)
7494 cop = get_pool_constant (XEXP (op, 0));
7496 push_minipool_fix (insn, address,
7497 recog_data.operand_loc[opno],
7498 recog_data.operand_mode[opno], cop);
7501 result = true;
7506 return result;
7509 /* GCC puts the pool in the wrong place for ARM, since we can only
7510 load addresses a limited distance around the pc. We do some
7511 special munging to move the constant pool values to the correct
7512 point in the code. */
7513 static void
7514 arm_reorg (void)
7516 rtx insn;
7517 HOST_WIDE_INT address = 0;
7518 Mfix * fix;
7520 minipool_fix_head = minipool_fix_tail = NULL;
7522 /* The first insn must always be a note, or the code below won't
7523 scan it properly. */
7524 insn = get_insns ();
7525 if (GET_CODE (insn) != NOTE)
7526 abort ();
7528 /* Scan all the insns and record the operands that will need fixing. */
7529 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7531 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7532 && (arm_cirrus_insn_p (insn)
7533 || GET_CODE (insn) == JUMP_INSN
7534 || arm_memory_load_p (insn)))
7535 cirrus_reorg (insn);
7537 if (GET_CODE (insn) == BARRIER)
7538 push_minipool_barrier (insn, address);
7539 else if (INSN_P (insn))
7541 rtx table;
7543 note_invalid_constants (insn, address, true);
7544 address += get_attr_length (insn);
7546 /* If the insn is a vector jump, add the size of the table
7547 and skip the table. */
7548 if ((table = is_jump_table (insn)) != NULL)
7550 address += get_jump_table_size (table);
7551 insn = table;
7556 fix = minipool_fix_head;
7558 /* Now scan the fixups and perform the required changes. */
7559 while (fix)
7561 Mfix * ftmp;
7562 Mfix * fdel;
7563 Mfix * last_added_fix;
7564 Mfix * last_barrier = NULL;
7565 Mfix * this_fix;
7567 /* Skip any further barriers before the next fix. */
7568 while (fix && GET_CODE (fix->insn) == BARRIER)
7569 fix = fix->next;
7571 /* No more fixes. */
7572 if (fix == NULL)
7573 break;
7575 last_added_fix = NULL;
7577 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7579 if (GET_CODE (ftmp->insn) == BARRIER)
7581 if (ftmp->address >= minipool_vector_head->max_address)
7582 break;
7584 last_barrier = ftmp;
7586 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7587 break;
7589 last_added_fix = ftmp; /* Keep track of the last fix added. */
7592 /* If we found a barrier, drop back to that; any fixes that we
7593 could have reached but come after the barrier will now go in
7594 the next mini-pool. */
7595 if (last_barrier != NULL)
7597 /* Reduce the refcount for those fixes that won't go into this
7598 pool after all. */
7599 for (fdel = last_barrier->next;
7600 fdel && fdel != ftmp;
7601 fdel = fdel->next)
7603 fdel->minipool->refcount--;
7604 fdel->minipool = NULL;
7607 ftmp = last_barrier;
7609 else
7611 /* ftmp is the first fix that we can't fit into this pool, and
7612 there are no natural barriers that we could use. Insert a
7613 new barrier in the code somewhere between the previous
7614 fix and this one, and arrange to jump around it. */
7615 HOST_WIDE_INT max_address;
7617 /* The last item on the list of fixes must be a barrier, so
7618 we can never run off the end of the list of fixes without
7619 last_barrier being set. */
7620 if (ftmp == NULL)
7621 abort ();
7623 max_address = minipool_vector_head->max_address;
7624 /* Check that there isn't another fix that is in range that
7625 we couldn't fit into this pool because the pool was
7626 already too large: we need to put the pool before such an
7627 instruction. */
7628 if (ftmp->address < max_address)
7629 max_address = ftmp->address;
7631 last_barrier = create_fix_barrier (last_added_fix, max_address);
7634 assign_minipool_offsets (last_barrier);
7636 while (ftmp)
7638 if (GET_CODE (ftmp->insn) != BARRIER
7639 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7640 == NULL))
7641 break;
7643 ftmp = ftmp->next;
7646 /* Scan over the fixes we have identified for this pool, fixing them
7647 up and adding the constants to the pool itself. */
7648 for (this_fix = fix; this_fix && ftmp != this_fix;
7649 this_fix = this_fix->next)
7650 if (GET_CODE (this_fix->insn) != BARRIER)
7652 rtx addr
7653 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7654 minipool_vector_label),
7655 this_fix->minipool->offset);
7656 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7659 dump_minipool (last_barrier->insn);
7660 fix = ftmp;
7663 /* From now on we must synthesize any constants that we can't handle
7664 directly. This can happen if the RTL gets split during final
7665 instruction generation. */
7666 after_arm_reorg = 1;
7668 /* Free the minipool memory. */
7669 obstack_free (&minipool_obstack, minipool_startobj);
7672 /* Routines to output assembly language. */
7674 /* If the rtx is a valid FP immediate constant, return the string of its value.
7675 In this way we can ensure that valid double constants are generated even
7676 when cross compiling. */
7677 const char *
7678 fp_immediate_constant (rtx x)
7680 REAL_VALUE_TYPE r;
7681 int i;
7683 if (!fp_consts_inited)
7684 init_fp_table ();
7686 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7687 for (i = 0; i < 8; i++)
7688 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7689 return strings_fp[i];
7691 abort ();
7694 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7695 static const char *
7696 fp_const_from_val (REAL_VALUE_TYPE *r)
7698 int i;
7700 if (!fp_consts_inited)
7701 init_fp_table ();
7703 for (i = 0; i < 8; i++)
7704 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7705 return strings_fp[i];
7707 abort ();
7710 /* Output the operands of a LDM/STM instruction to STREAM.
7711 MASK is the ARM register set mask of which only bits 0-15 are important.
7712 REG is the base register, either the frame pointer or the stack pointer;
7713 INSTR is the possibly suffixed load or store instruction. */
7715 static void
7716 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7717 unsigned long mask)
7719 unsigned i;
7720 bool not_first = FALSE;
7722 fputc ('\t', stream);
7723 asm_fprintf (stream, instr, reg);
7724 fputs (", {", stream);
7726 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7727 if (mask & (1 << i))
7729 if (not_first)
7730 fprintf (stream, ", ");
7732 asm_fprintf (stream, "%r", i);
7733 not_first = TRUE;
7736 fprintf (stream, "}\n");
7740 /* Output a FLDMX instruction to STREAM.
7741 BASE is the register containing the address.
7742 REG and COUNT specify the register range.
7743 Extra registers may be added to avoid hardware bugs. */
7745 static void
7746 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7748 int i;
7750 /* Work around the ARM10 VFPr1 bug: a transfer of exactly two register pairs can corrupt data, so transfer an extra pair. */
7751 if (count == 2 && !arm_arch6)
7753 if (reg == 15)
7754 reg--;
7755 count++;
7758 fputc ('\t', stream);
7759 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7761 for (i = reg; i < reg + count; i++)
7763 if (i > reg)
7764 fputs (", ", stream);
7765 asm_fprintf (stream, "d%d", i);
7767 fputs ("}\n", stream);
7772 /* Output the assembly for a store multiple. */
7774 const char *
7775 vfp_output_fstmx (rtx * operands)
7777 char pattern[100];
7778 int p;
7779 int base;
7780 int i;
7782 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7783 p = strlen (pattern);
7785 if (GET_CODE (operands[1]) != REG)
7786 abort ();
7788 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7789 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7791 p += sprintf (&pattern[p], ", d%d", base + i);
7793 strcpy (&pattern[p], "}");
7795 output_asm_insn (pattern, operands);
7796 return "";
7800 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
7801 number of bytes pushed. */
7803 static int
7804 vfp_emit_fstmx (int base_reg, int count)
7806 rtx par;
7807 rtx dwarf;
7808 rtx tmp, reg;
7809 int i;
7811 /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
7812 register pairs are stored by a store multiple insn. We avoid this
7813 by pushing an extra pair. */
7814 if (count == 2 && !arm_arch6)
7816 if (base_reg == LAST_VFP_REGNUM - 3)
7817 base_reg -= 2;
7818 count++;
7821 /* ??? The frame layout is implementation defined. We describe
7822 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7823 We really need some way of representing the whole block so that the
7824 unwinder can figure it out at runtime. */
7825 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7826 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7828 reg = gen_rtx_REG (DFmode, base_reg);
7829 base_reg += 2;
7831 XVECEXP (par, 0, 0)
7832 = gen_rtx_SET (VOIDmode,
7833 gen_rtx_MEM (BLKmode,
7834 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7835 gen_rtx_UNSPEC (BLKmode,
7836 gen_rtvec (1, reg),
7837 UNSPEC_PUSH_MULT));
7839 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7840 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7841 GEN_INT (-(count * 8 + 4))));
7842 RTX_FRAME_RELATED_P (tmp) = 1;
7843 XVECEXP (dwarf, 0, 0) = tmp;
7845 tmp = gen_rtx_SET (VOIDmode,
7846 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7847 reg);
7848 RTX_FRAME_RELATED_P (tmp) = 1;
7849 XVECEXP (dwarf, 0, 1) = tmp;
7851 for (i = 1; i < count; i++)
7853 reg = gen_rtx_REG (DFmode, base_reg);
7854 base_reg += 2;
7855 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7857 tmp = gen_rtx_SET (VOIDmode,
7858 gen_rtx_MEM (DFmode,
7859 gen_rtx_PLUS (SImode,
7860 stack_pointer_rtx,
7861 GEN_INT (i * 8))),
7862 reg);
7863 RTX_FRAME_RELATED_P (tmp) = 1;
7864 XVECEXP (dwarf, 0, i + 1) = tmp;
7867 par = emit_insn (par);
7868 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7869 REG_NOTES (par));
7870 RTX_FRAME_RELATED_P (par) = 1;
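/* 8 bytes per register pair, plus the pad/format word pushed by
   FSTMX (matching the count * 8 + 4 adjustment recorded in the
   dwarf expression above).  */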
7872 return count * 8 + 4;
7876 /* Output a 'call' insn. */
7877 const char *
7878 output_call (rtx *operands)
7880 if (arm_arch5)
7881 abort (); /* Patterns should call blx <reg> directly. */
7883 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7884 if (REGNO (operands[0]) == LR_REGNUM)
7886 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7887 output_asm_insn ("mov%?\t%0, %|lr", operands);
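/* Reading the pc yields the address of the current insn plus 8,
   so this mov leaves lr pointing just past the branch emitted
   below -- the return address.  */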
7890 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7892 if (TARGET_INTERWORK || arm_arch4t)
7893 output_asm_insn ("bx%?\t%0", operands);
7894 else
7895 output_asm_insn ("mov%?\t%|pc, %0", operands);
7897 return "";
7900 /* Output a 'call' insn that is a reference in memory. */
7901 const char *
7902 output_call_mem (rtx *operands)
7904 if (TARGET_INTERWORK && !arm_arch5)
7906 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7907 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7908 output_asm_insn ("bx%?\t%|ip", operands);
7910 else if (regno_use_in (LR_REGNUM, operands[0]))
7912 /* LR is used in the memory address. We load the address in the
7913 first instruction. It's safe to use IP as the target of the
7914 load since the call will kill it anyway. */
7915 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7916 if (arm_arch5)
7917 output_asm_insn ("blx%?\t%|ip", operands);
7918 else
7920 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7921 if (arm_arch4t)
7922 output_asm_insn ("bx%?\t%|ip", operands);
7923 else
7924 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7927 else
7929 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7930 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7933 return "";
7937 /* Output a move from arm registers to an fpa register.
7938 OPERANDS[0] is an fpa register.
7939 OPERANDS[1] is the first of the three arm registers holding the value. */
7940 const char *
7941 output_mov_long_double_fpa_from_arm (rtx *operands)
7943 int arm_reg0 = REGNO (operands[1]);
7944 rtx ops[3];
7946 if (arm_reg0 == IP_REGNUM)
7947 abort ();
7949 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7950 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7951 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7953 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7954 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7956 return "";
7959 /* Output a move from an fpa register to arm registers.
7960 OPERANDS[0] is the first of the three arm registers receiving the value.
7961 OPERANDS[1] is an fpa register. */
7962 const char *
7963 output_mov_long_double_arm_from_fpa (rtx *operands)
7965 int arm_reg0 = REGNO (operands[0]);
7966 rtx ops[3];
7968 if (arm_reg0 == IP_REGNUM)
7969 abort ();
7971 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7972 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7973 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7975 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7976 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7977 return "";
7980 /* Output a move from arm registers to arm registers of a long double.
7981 OPERANDS[0] is the destination.
7982 OPERANDS[1] is the source. */
7983 const char *
7984 output_mov_long_double_arm_from_arm (rtx *operands)
7986 /* We have to be careful here because the two might overlap. */
7987 int dest_start = REGNO (operands[0]);
7988 int src_start = REGNO (operands[1]);
7989 rtx ops[2];
7990 int i;
7992 if (dest_start < src_start)
7994 for (i = 0; i < 3; i++)
7996 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7997 ops[1] = gen_rtx_REG (SImode, src_start + i);
7998 output_asm_insn ("mov%?\t%0, %1", ops);
8001 else
8003 for (i = 2; i >= 0; i--)
8005 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8006 ops[1] = gen_rtx_REG (SImode, src_start + i);
8007 output_asm_insn ("mov%?\t%0, %1", ops);
8011 return "";
8015 /* Output a move from arm registers to an fpa register.
8016 OPERANDS[0] is an fpa register.
8017 OPERANDS[1] is the first register of an arm register pair. */
8018 const char *
8019 output_mov_double_fpa_from_arm (rtx *operands)
8021 int arm_reg0 = REGNO (operands[1]);
8022 rtx ops[2];
8024 if (arm_reg0 == IP_REGNUM)
8025 abort ();
8027 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8028 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8029 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8030 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8031 return "";
8034 /* Output a move from an fpa register to arm registers.
8035 OPERANDS[0] is the first register of an arm register pair.
8036 OPERANDS[1] is an fpa register. */
8037 const char *
8038 output_mov_double_arm_from_fpa (rtx *operands)
8040 int arm_reg0 = REGNO (operands[0]);
8041 rtx ops[2];
8043 if (arm_reg0 == IP_REGNUM)
8044 abort ();
8046 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8047 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8048 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8049 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8050 return "";
8053 /* Output a move between double words.
8054 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8055 or MEM<-REG and all MEMs must be offsettable addresses. */
8056 const char *
8057 output_move_double (rtx *operands)
8059 enum rtx_code code0 = GET_CODE (operands[0]);
8060 enum rtx_code code1 = GET_CODE (operands[1]);
8061 rtx otherops[3];
8063 if (code0 == REG)
8065 int reg0 = REGNO (operands[0]);
8067 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8069 if (code1 == REG)
8071 int reg1 = REGNO (operands[1]);
8072 if (reg1 == IP_REGNUM)
8073 abort ();
8075 /* Ensure the second source is not overwritten. */
8076 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8077 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8078 else
8079 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8081 else if (code1 == CONST_VECTOR)
8083 HOST_WIDE_INT hint = 0;
8085 switch (GET_MODE (operands[1]))
8087 case V2SImode:
8088 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8089 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8090 break;
8092 case V4HImode:
8093 if (BYTES_BIG_ENDIAN)
8095 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8096 hint <<= 16;
8097 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8099 else
8101 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8102 hint <<= 16;
8103 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8106 otherops[1] = GEN_INT (hint);
8107 hint = 0;
8109 if (BYTES_BIG_ENDIAN)
8111 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8112 hint <<= 16;
8113 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8115 else
8117 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8118 hint <<= 16;
8119 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8122 operands[1] = GEN_INT (hint);
8123 break;
8125 case V8QImode:
8126 if (BYTES_BIG_ENDIAN)
8128 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8129 hint <<= 8;
8130 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8131 hint <<= 8;
8132 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8133 hint <<= 8;
8134 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8136 else
8138 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8139 hint <<= 8;
8140 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8141 hint <<= 8;
8142 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8143 hint <<= 8;
8144 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8147 otherops[1] = GEN_INT (hint);
8148 hint = 0;
8150 if (BYTES_BIG_ENDIAN)
8152 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8153 hint <<= 8;
8154 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8155 hint <<= 8;
8156 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8157 hint <<= 8;
8158 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8160 else
8162 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8163 hint <<= 8;
8164 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8165 hint <<= 8;
8166 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8167 hint <<= 8;
8168 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8171 operands[1] = GEN_INT (hint);
8172 break;
8174 default:
8175 abort ();
8177 output_mov_immediate (operands);
8178 output_mov_immediate (otherops);
8180 else if (code1 == CONST_DOUBLE)
8182 if (GET_MODE (operands[1]) == DFmode)
8184 REAL_VALUE_TYPE r;
8185 long l[2];
8187 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8188 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8189 otherops[1] = GEN_INT (l[1]);
8190 operands[1] = GEN_INT (l[0]);
8192 else if (GET_MODE (operands[1]) != VOIDmode)
8193 abort ();
8194 else if (WORDS_BIG_ENDIAN)
8196 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8197 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8199 else
8201 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8202 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8205 output_mov_immediate (operands);
8206 output_mov_immediate (otherops);
8208 else if (code1 == CONST_INT)
8210 #if HOST_BITS_PER_WIDE_INT > 32
8211 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8212 what the upper word is. */
8213 if (WORDS_BIG_ENDIAN)
8215 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8216 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8218 else
8220 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8221 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8223 #else
8224 /* Sign extend the intval into the high-order word. */
8225 if (WORDS_BIG_ENDIAN)
8227 otherops[1] = operands[1];
8228 operands[1] = (INTVAL (operands[1]) < 0
8229 ? constm1_rtx : const0_rtx);
8231 else
8232 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8233 #endif
8234 output_mov_immediate (otherops);
8235 output_mov_immediate (operands);
8237 else if (code1 == MEM)
8239 switch (GET_CODE (XEXP (operands[1], 0)))
8241 case REG:
8242 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8243 break;
8245 case PRE_INC:
8246 if (!TARGET_LDRD)
8247 abort (); /* Should never happen now. */
8248 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8249 break;
8251 case PRE_DEC:
8252 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8253 break;
8255 case POST_INC:
8256 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8257 break;
8259 case POST_DEC:
8260 if (!TARGET_LDRD)
8261 abort (); /* Should never happen now. */
8262 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8263 break;
8265 case PRE_MODIFY:
8266 case POST_MODIFY:
8267 otherops[0] = operands[0];
8268 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8269 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8271 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8273 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8275 /* Registers overlap so split out the increment. */
8276 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8277 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8279 else
8280 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8282 else
8284 /* We only allow constant increments, so this is safe. */
8285 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8287 break;
8289 case LABEL_REF:
8290 case CONST:
8291 output_asm_insn ("adr%?\t%0, %1", operands);
8292 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8293 break;
8295 default:
8296 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8297 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8299 otherops[0] = operands[0];
8300 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8301 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8303 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8305 if (GET_CODE (otherops[2]) == CONST_INT)
8307 switch ((int) INTVAL (otherops[2]))
8309 case -8:
8310 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8311 return "";
8312 case -4:
8313 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8314 return "";
8315 case 4:
8316 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8317 return "";
8320 if (TARGET_LDRD
8321 && (GET_CODE (otherops[2]) == REG
8322 || (GET_CODE (otherops[2]) == CONST_INT
8323 && INTVAL (otherops[2]) > -256
8324 && INTVAL (otherops[2]) < 256)))
8326 if (reg_overlap_mentioned_p (otherops[0],
8327 otherops[2]))
8329 /* Swap base and index registers over to
8330 avoid a conflict. */
8331 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8332 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8335 /* If both registers conflict, it will usually
8336 have been fixed by a splitter. */
8337 if (reg_overlap_mentioned_p (otherops[0],
8338 otherops[2]))
8340 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8341 output_asm_insn ("ldr%?d\t%0, [%1]",
8342 otherops);
8343 return "";
8345 else
8347 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8348 otherops);
8349 return "";
8352 if (GET_CODE (otherops[2]) == CONST_INT)
8354 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8355 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8356 else
8357 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8359 else
8360 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8362 else
8363 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8365 return "ldm%?ia\t%0, %M0";
8367 else
8369 otherops[1] = adjust_address (operands[1], SImode, 4);
8370 /* Take care of overlapping base/data reg. */
8371 if (reg_mentioned_p (operands[0], operands[1]))
8373 output_asm_insn ("ldr%?\t%0, %1", otherops);
8374 output_asm_insn ("ldr%?\t%0, %1", operands);
8376 else
8378 output_asm_insn ("ldr%?\t%0, %1", operands);
8379 output_asm_insn ("ldr%?\t%0, %1", otherops);
8384 else
8385 abort (); /* Constraints should prevent this. */
8387 else if (code0 == MEM && code1 == REG)
8389 if (REGNO (operands[1]) == IP_REGNUM)
8390 abort ();
8392 switch (GET_CODE (XEXP (operands[0], 0)))
8394 case REG:
8395 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8396 break;
8398 case PRE_INC:
8399 if (!TARGET_LDRD)
8400 abort (); /* Should never happen now. */
8401 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8402 break;
8404 case PRE_DEC:
8405 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8406 break;
8408 case POST_INC:
8409 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8410 break;
8412 case POST_DEC:
8413 if (!TARGET_LDRD)
8414 abort (); /* Should never happen now. */
8415 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8416 break;
8418 case PRE_MODIFY:
8419 case POST_MODIFY:
8420 otherops[0] = operands[1];
8421 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8422 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8424 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8425 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8426 else
8427 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8428 break;
8430 case PLUS:
8431 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8432 if (GET_CODE (otherops[2]) == CONST_INT)
8434 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8436 case -8:
8437 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8438 return "";
8440 case -4:
8441 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8442 return "";
8444 case 4:
8445 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8446 return "";
8449 if (TARGET_LDRD
8450 && (GET_CODE (otherops[2]) == REG
8451 || (GET_CODE (otherops[2]) == CONST_INT
8452 && INTVAL (otherops[2]) > -256
8453 && INTVAL (otherops[2]) < 256)))
8455 otherops[0] = operands[1];
8456 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8457 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8458 return "";
8460 /* Fall through */
8462 default:
8463 otherops[0] = adjust_address (operands[0], SImode, 4);
8464 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8465 output_asm_insn ("str%?\t%1, %0", operands);
8466 output_asm_insn ("str%?\t%1, %0", otherops);
8469 else
8470 /* Constraints should prevent this. */
8471 abort ();
8473 return "";
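/* Summary example (ours): for a REG<-MEM load from (plus base const)
   the code above prefers, in order: a single ldmdb/ldmda/ldmib when
   the offset is -8, -4 or 4; an ldrd on TARGET_LDRD when the offset
   is a register or a constant in the range -255..255; and otherwise
   an add/sub to materialize the address, followed by
   "ldmia %0, %M0".  */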
8477 /* Output an arbitrary MOV reg, #n.
8478 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8479 const char *
8480 output_mov_immediate (rtx *operands)
8482 HOST_WIDE_INT n = INTVAL (operands[1]);
8484 /* Try to use one MOV. */
8485 if (const_ok_for_arm (n))
8486 output_asm_insn ("mov%?\t%0, %1", operands);
8488 /* Try to use one MVN. */
8489 else if (const_ok_for_arm (~n))
8491 operands[1] = GEN_INT (~n);
8492 output_asm_insn ("mvn%?\t%0, %1", operands);
8494 else
8496 int n_ones = 0;
8497 int i;
8499 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8500 for (i = 0; i < 32; i++)
8501 if (n & 1 << i)
8502 n_ones++;
8504 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8505 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8506 else
8507 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8510 return "";
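/* Standalone sketch (ours, the helper name is hypothetical): the
   MVN-versus-MOV choice above reduces to a population count -- more
   than 16 set bits means the complement needs fewer 8-bit chunks.  */
static int
example_prefer_mvn_bic (unsigned long n)
{
  int i, n_ones = 0;

  for (i = 0; i < 32; i++)
    if (n & (1UL << i))
      n_ones++;

  return n_ones > 16;	/* Nonzero: emit MVN, then trim with BICs.  */
}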
8513 /* Output an ADD r, s, #n where n may be too big for one instruction.
8514 If adding zero and the source and destination registers are the same, output nothing. */
8515 const char *
8516 output_add_immediate (rtx *operands)
8518 HOST_WIDE_INT n = INTVAL (operands[2]);
8520 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8522 if (n < 0)
8523 output_multi_immediate (operands,
8524 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8525 -n);
8526 else
8527 output_multi_immediate (operands,
8528 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8532 return "";
8535 /* Output a multiple immediate operation.
8536 OPERANDS is the vector of operands referred to in the output patterns.
8537 INSTR1 is the output pattern to use for the first constant.
8538 INSTR2 is the output pattern to use for subsequent constants.
8539 IMMED_OP is the index of the constant slot in OPERANDS.
8540 N is the constant value. */
8541 static const char *
8542 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8543 int immed_op, HOST_WIDE_INT n)
8545 #if HOST_BITS_PER_WIDE_INT > 32
8546 n &= 0xffffffff;
8547 #endif
8549 if (n == 0)
8551 /* Quick and easy output. */
8552 operands[immed_op] = const0_rtx;
8553 output_asm_insn (instr1, operands);
8555 else
8557 int i;
8558 const char * instr = instr1;
8560 /* Note that n is never zero here (which would give no output). */
8561 for (i = 0; i < 32; i += 2)
8563 if (n & (3 << i))
8565 operands[immed_op] = GEN_INT (n & (255 << i));
8566 output_asm_insn (instr, operands);
8567 instr = instr2;
8568 i += 6;
8573 return "";
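/* Standalone sketch (ours): how the loop above scans even bit
   positions and consumes one 8-bit chunk per instruction.  For
   example 0x12345678 splits into 0x278 + 0x5400 + 0x2340000
   + 0x10000000, i.e. one MOV plus three ORRs.  */
static int
example_count_immediate_chunks (unsigned long n)
{
  int i, chunks = 0;

  n &= 0xffffffffUL;
  for (i = 0; i < 32; i += 2)
    if (n & (3UL << i))
      {
	chunks++;	/* One insn covers bits i .. i + 7.  */
	i += 6;		/* With the loop's i += 2, skip the chunk.  */
      }
  return chunks;
}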
8576 /* Return the appropriate ARM instruction for the operation code.
8577 The returned result should not be overwritten. OP is the rtx of the
8578 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8579 was shifted. */
8580 const char *
8581 arithmetic_instr (rtx op, int shift_first_arg)
8583 switch (GET_CODE (op))
8585 case PLUS:
8586 return "add";
8588 case MINUS:
8589 return shift_first_arg ? "rsb" : "sub";
8591 case IOR:
8592 return "orr";
8594 case XOR:
8595 return "eor";
8597 case AND:
8598 return "and";
8600 default:
8601 abort ();
8605 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8606 for the operation code. The returned result should not be overwritten.
8607 OP is the rtx code of the shift.
8608 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8609 constant amount if the shift is by a constant. */
8610 static const char *
8611 shift_op (rtx op, HOST_WIDE_INT *amountp)
8613 const char * mnem;
8614 enum rtx_code code = GET_CODE (op);
8616 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8617 *amountp = -1;
8618 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8619 *amountp = INTVAL (XEXP (op, 1));
8620 else
8621 abort ();
8623 switch (code)
8625 case ASHIFT:
8626 mnem = "asl";
8627 break;
8629 case ASHIFTRT:
8630 mnem = "asr";
8631 break;
8633 case LSHIFTRT:
8634 mnem = "lsr";
8635 break;
8637 case ROTATE:
8638 if (*amountp == -1)
8639 abort ();
8640 *amountp = 32 - *amountp;
8642 /* Fall through. */
8644 case ROTATERT:
8645 mnem = "ror";
8646 break;
8648 case MULT:
8649 /* We never have to worry about the amount being other than a
8650 power of 2, since this case can never be reloaded from a reg. */
8651 if (*amountp != -1)
8652 *amountp = int_log2 (*amountp);
8653 else
8654 abort ();
8655 return "asl";
8657 default:
8658 abort ();
8661 if (*amountp != -1)
8663 /* This is not 100% correct, but follows from the desire to merge
8664 multiplication by a power of 2 with the recognizer for a
8665 shift. >=32 is not a valid shift for "asl", so we must try and
8666 output a shift that produces the correct arithmetical result.
8667 Using lsr #32 is identical except for the fact that the carry bit
8668 is not set correctly if we set the flags; but we never use the
8669 carry bit from such an operation, so we can ignore that. */
8670 if (code == ROTATERT)
8671 /* Rotate is just modulo 32. */
8672 *amountp &= 31;
8673 else if (*amountp != (*amountp & 31))
8675 if (code == ASHIFT)
8676 mnem = "lsr";
8677 *amountp = 32;
8680 /* Shifts of 0 are no-ops. */
8681 if (*amountp == 0)
8682 return NULL;
8685 return mnem;
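/* Examples (ours): (ashiftrt x 3) yields "asr" with *AMOUNTP == 3;
   (rotate x 8) is rewritten as its right-rotating twin, "ror" with
   *AMOUNTP == 24; and (mult x 8), the power-of-two form merged with
   the shift recognizer, comes back as "asl" with *AMOUNTP == 3.  */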
8688 /* Obtain the shift count corresponding to POWER, a power of two. */
8690 static HOST_WIDE_INT
8691 int_log2 (HOST_WIDE_INT power)
8693 HOST_WIDE_INT shift = 0;
8695 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8697 if (shift > 31)
8698 abort ();
8699 shift++;
8702 return shift;
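/* Examples (ours): the loop above just locates the single set bit,
   so int_log2 (1) == 0, int_log2 (8) == 3 and
   int_log2 (0x80000000) == 31.  A value with no bit set in the low
   32 bits trips the abort; callers guarantee POWER really is a
   power of two.  */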
8705 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8706 because /bin/as is horribly restrictive. The judgement about
8707 whether or not each character is 'printable' (and can be output as
8708 is) or not (and must be printed with an octal escape) must be made
8709 with reference to the *host* character set -- the situation is
8710 similar to that discussed in the comments above pp_c_char in
8711 c-pretty-print.c. */
8713 #define MAX_ASCII_LEN 51
8715 void
8716 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8718 int i;
8719 int len_so_far = 0;
8721 fputs ("\t.ascii\t\"", stream);
8723 for (i = 0; i < len; i++)
8725 int c = p[i];
8727 if (len_so_far >= MAX_ASCII_LEN)
8729 fputs ("\"\n\t.ascii\t\"", stream);
8730 len_so_far = 0;
8733 if (ISPRINT (c))
8735 if (c == '\\' || c == '\"')
8737 putc ('\\', stream);
8738 len_so_far++;
8740 putc (c, stream);
8741 len_so_far++;
8743 else
8745 fprintf (stream, "\\%03o", c);
8746 len_so_far += 4;
8750 fputs ("\"\n", stream);
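/* Example (ours): for the four bytes  a " b \n  the routine above
   emits
	.ascii "a\"b\012"
   and it starts a fresh .ascii directive whenever a line reaches
   MAX_ASCII_LEN characters.  */
static void
example_emit_ascii (FILE *stream)
{
  static const unsigned char s[] = "a\"b\n";

  output_ascii_pseudo_op (stream, s, (int) (sizeof s - 1));
}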
8753 /* Compute the register save mask for registers 0 through 12
8754 inclusive. This code is used by arm_compute_save_reg_mask. */
8756 static unsigned long
8757 arm_compute_save_reg0_reg12_mask (void)
8759 unsigned long func_type = arm_current_func_type ();
8760 unsigned long save_reg_mask = 0;
8761 unsigned int reg;
8763 if (IS_INTERRUPT (func_type))
8765 unsigned int max_reg;
8766 /* Interrupt functions must not corrupt any registers,
8767 even call clobbered ones. If this is a leaf function
8768 we can just examine the registers used by the RTL, but
8769 otherwise we have to assume that whatever function is
8770 called might clobber anything, and so we have to save
8771 all the call-clobbered registers as well. */
8772 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8773 /* FIQ handlers have registers r8 - r12 banked, so
8774 we only need to check r0 - r7. Normal ISRs only
8775 bank r14 and r15, so we must check up to r12.
8776 r13 is the stack pointer which is always preserved,
8777 so we do not need to consider it here. */
8778 max_reg = 7;
8779 else
8780 max_reg = 12;
8782 for (reg = 0; reg <= max_reg; reg++)
8783 if (regs_ever_live[reg]
8784 || (! current_function_is_leaf && call_used_regs [reg]))
8785 save_reg_mask |= (1 << reg);
8787 /* Also save the pic base register if necessary. */
8788 if (flag_pic
8789 && !TARGET_SINGLE_PIC_BASE
8790 && current_function_uses_pic_offset_table)
8791 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8793 else
8795 /* In the normal case we only need to save those registers
8796 which are call saved and which are used by this function. */
8797 for (reg = 0; reg <= 10; reg++)
8798 if (regs_ever_live[reg] && ! call_used_regs [reg])
8799 save_reg_mask |= (1 << reg);
8801 /* Handle the frame pointer as a special case. */
8802 if (! TARGET_APCS_FRAME
8803 && ! frame_pointer_needed
8804 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8805 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8806 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8808 /* If we aren't loading the PIC register,
8809 don't stack it even though it may be live. */
8810 if (flag_pic
8811 && !TARGET_SINGLE_PIC_BASE
8812 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8813 || current_function_uses_pic_offset_table))
8814 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8817 /* Save registers so the exception handler can modify them. */
8818 if (current_function_calls_eh_return)
8820 unsigned int i;
8822 for (i = 0; ; i++)
8824 reg = EH_RETURN_DATA_REGNO (i);
8825 if (reg == INVALID_REGNUM)
8826 break;
8827 save_reg_mask |= 1 << reg;
8831 return save_reg_mask;
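/* Example (ours): a normal (non-interrupt, non-PIC) function whose
   RTL uses the call-saved registers r4 and r6 gets
   (1 << 4) | (1 << 6) == 0x50 from the loop above, while an ISR must
   additionally save any call-clobbered register it touches, since
   its "caller" expects every register to be preserved.  */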
8834 /* Compute a bit mask of which registers need to be
8835 saved on the stack for the current function. */
8837 static unsigned long
8838 arm_compute_save_reg_mask (void)
8840 unsigned int save_reg_mask = 0;
8841 unsigned long func_type = arm_current_func_type ();
8843 if (IS_NAKED (func_type))
8844 /* This should never really happen. */
8845 return 0;
8847 /* If we are creating a stack frame, then we must save the frame pointer,
8848 IP (which will hold the old stack pointer), LR and the PC. */
8849 if (frame_pointer_needed)
8850 save_reg_mask |=
8851 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8852 | (1 << IP_REGNUM)
8853 | (1 << LR_REGNUM)
8854 | (1 << PC_REGNUM);
8856 /* Volatile functions do not return, so there
8857 is no need to save any other registers. */
8858 if (IS_VOLATILE (func_type))
8859 return save_reg_mask;
8861 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8863 /* Decide if we need to save the link register.
8864 Interrupt routines have their own banked link register,
8865 so they never need to save it.
8866 Otherwise if we do not use the link register we do not need to save
8867 it. If we are pushing other registers onto the stack however, we
8868 can save an instruction in the epilogue by pushing the link register
8869 now and then popping it back into the PC. This incurs extra memory
8870 accesses though, so we only do it when optimizing for size, and only
8871 if we know that we will not need a fancy return sequence. */
8872 if (regs_ever_live [LR_REGNUM]
8873 || (save_reg_mask
8874 && optimize_size
8875 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8876 && !current_function_calls_eh_return))
8877 save_reg_mask |= 1 << LR_REGNUM;
8879 if (cfun->machine->lr_save_eliminated)
8880 save_reg_mask &= ~ (1 << LR_REGNUM);
8882 if (TARGET_REALLY_IWMMXT
8883 && ((bit_count (save_reg_mask)
8884 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8886 unsigned int reg;
8888 /* The total number of registers that are going to be pushed
8889 onto the stack is odd. We need to ensure that the stack
8890 is 64-bit aligned before we start to save iWMMXt registers,
8891 and also before we start to create locals. (A local variable
8892 might be a double or long long which we will load/store using
8893 an iWMMXt instruction). Therefore we need to push another
8894 ARM register, so that the stack will be 64-bit aligned. We
8895 try to avoid using the arg registers (r0 - r3) as they might be
8896 used to pass values in a tail call. */
8897 for (reg = 4; reg <= 12; reg++)
8898 if ((save_reg_mask & (1 << reg)) == 0)
8899 break;
8901 if (reg <= 12)
8902 save_reg_mask |= (1 << reg);
8903 else
8905 cfun->machine->sibcall_blocked = 1;
8906 save_reg_mask |= (1 << 3);
8910 return save_reg_mask;
8914 /* Compute a bit mask of which registers need to be
8915 saved on the stack for the current function. */
8916 static unsigned long
8917 thumb_compute_save_reg_mask (void)
8919 unsigned long mask;
8920 unsigned reg;
8922 mask = 0;
8923 for (reg = 0; reg < 12; reg ++)
8924 if (regs_ever_live[reg] && !call_used_regs[reg])
8925 mask |= 1 << reg;
8927 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8928 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8930 if (TARGET_SINGLE_PIC_BASE)
8931 mask &= ~(1 << arm_pic_register);
8933 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8934 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8935 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8937 /* LR will also be pushed if any lo regs are pushed. */
8938 if (mask & 0xff || thumb_force_lr_save ())
8939 mask |= (1 << LR_REGNUM);
8941 /* Make sure we have a low work register if we need one.
8942 We will need one if we are going to push a high register,
8943 but we are not currently intending to push a low register. */
8944 if ((mask & 0xff) == 0
8945 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8947 /* Use thumb_find_work_register to choose which register
8948 we will use. If the register is live then we will
8949 have to push it. Use LAST_LO_REGNUM as our fallback
8950 choice for the register to select. */
8951 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8953 if (! call_used_regs[reg])
8954 mask |= 1 << reg;
8957 return mask;
8961 /* Return the number of bytes required to save VFP registers. */
8962 static int
8963 arm_get_vfp_saved_size (void)
8965 unsigned int regno;
8966 int count;
8967 int saved;
8969 saved = 0;
8970 /* Space for saved VFP registers. */
8971 if (TARGET_HARD_FLOAT && TARGET_VFP)
8973 count = 0;
8974 for (regno = FIRST_VFP_REGNUM;
8975 regno < LAST_VFP_REGNUM;
8976 regno += 2)
8978 if ((!regs_ever_live[regno] || call_used_regs[regno])
8979 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8981 if (count > 0)
8983 /* Workaround ARM10 VFPr1 bug. */
8984 if (count == 2 && !arm_arch6)
8985 count++;
8986 saved += count * 8 + 4;
8988 count = 0;
8990 else
8991 count++;
8993 if (count > 0)
8995 if (count == 2 && !arm_arch6)
8996 count++;
8997 saved += count * 8 + 4;
9000 return saved;
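/* Worked example (ours): a contiguous block of COUNT double
   registers saved with fstmx costs COUNT * 8 + 4 bytes, the extra
   word being the fstmx format word; d8-d11 in one block is
   4 * 8 + 4 = 36 bytes.  Before ARMv6 a block of exactly two is
   padded to three (the ARM10 VFPr1 workaround above), so it costs
   3 * 8 + 4 = 28 bytes rather than 20.  */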
9004 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9005 everything bar the final return instruction. */
9006 const char *
9007 output_return_instruction (rtx operand, int really_return, int reverse)
9009 char conditional[10];
9010 char instr[100];
9011 unsigned reg;
9012 unsigned long live_regs_mask;
9013 unsigned long func_type;
9014 arm_stack_offsets *offsets;
9016 func_type = arm_current_func_type ();
9018 if (IS_NAKED (func_type))
9019 return "";
9021 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9023 /* If this function was declared non-returning, and we have
9024 found a tail call, then we have to trust that the called
9025 function won't return. */
9026 if (really_return)
9028 rtx ops[2];
9030 /* Otherwise, trap an attempted return by aborting. */
9031 ops[0] = operand;
9032 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9033 : "abort");
9034 assemble_external_libcall (ops[1]);
9035 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9038 return "";
9041 if (current_function_calls_alloca && !really_return)
9042 abort ();
9044 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9046 return_used_this_function = 1;
9048 live_regs_mask = arm_compute_save_reg_mask ();
9050 if (live_regs_mask)
9052 const char * return_reg;
9054 /* If we do not have any special requirements for function exit
9055 (e.g. interworking, or ISR) then we can load the return address
9056 directly into the PC. Otherwise we must load it into LR. */
9057 if (really_return
9058 && ! TARGET_INTERWORK)
9059 return_reg = reg_names[PC_REGNUM];
9060 else
9061 return_reg = reg_names[LR_REGNUM];
9063 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9065 /* There are three possible reasons for the IP register
9066 being saved. 1) a stack frame was created, in which case
9067 IP contains the old stack pointer, or 2) an ISR routine
9068 corrupted it, or 3) it was saved to align the stack on
9069 iWMMXt. In case 1, restore IP into SP, otherwise just
9070 restore IP. */
9071 if (frame_pointer_needed)
9073 live_regs_mask &= ~ (1 << IP_REGNUM);
9074 live_regs_mask |= (1 << SP_REGNUM);
9076 else
9078 if (! IS_INTERRUPT (func_type)
9079 && ! TARGET_REALLY_IWMMXT)
9080 abort ();
9084 /* On some ARM architectures it is faster to use LDR rather than
9085 LDM to load a single register. On other architectures, the
9086 cost is the same. In 26 bit mode, or for exception handlers,
9087 we have to use LDM to load the PC so that the CPSR is also
9088 restored. */
9089 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9090 if (live_regs_mask == (1U << reg))
9091 break;
9093 if (reg <= LAST_ARM_REGNUM
9094 && (reg != LR_REGNUM
9095 || ! really_return
9096 || ! IS_INTERRUPT (func_type)))
9098 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9099 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9101 else
9103 char *p;
9104 int first = 1;
9106 /* Generate the load multiple instruction to restore the
9107 registers. Note we can get here, even if
9108 frame_pointer_needed is true, but only if sp already
9109 points to the base of the saved core registers. */
9110 if (live_regs_mask & (1 << SP_REGNUM))
9112 unsigned HOST_WIDE_INT stack_adjust;
9114 offsets = arm_get_frame_offsets ();
9115 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9116 if (stack_adjust != 0 && stack_adjust != 4)
9117 abort ();
9119 if (stack_adjust && arm_arch5)
9120 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9121 else
9123 /* If we can't use ldmib (SA110 bug),
9124 then try to pop r3 instead. */
9125 if (stack_adjust)
9126 live_regs_mask |= 1 << 3;
9127 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9130 else
9131 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9133 p = instr + strlen (instr);
9135 for (reg = 0; reg <= SP_REGNUM; reg++)
9136 if (live_regs_mask & (1 << reg))
9138 int l = strlen (reg_names[reg]);
9140 if (first)
9141 first = 0;
9142 else
9144 memcpy (p, ", ", 2);
9145 p += 2;
9148 memcpy (p, "%|", 2);
9149 memcpy (p + 2, reg_names[reg], l);
9150 p += l + 2;
9153 if (live_regs_mask & (1 << LR_REGNUM))
9155 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9156 /* If returning from an interrupt, restore the CPSR. */
9157 if (IS_INTERRUPT (func_type))
9158 strcat (p, "^");
9160 else
9161 strcpy (p, "}");
9164 output_asm_insn (instr, & operand);
9166 /* See if we need to generate an extra instruction to
9167 perform the actual function return. */
9168 if (really_return
9169 && func_type != ARM_FT_INTERWORKED
9170 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9172 /* The return has already been handled
9173 by loading the LR into the PC. */
9174 really_return = 0;
9178 if (really_return)
9180 switch ((int) ARM_FUNC_TYPE (func_type))
9182 case ARM_FT_ISR:
9183 case ARM_FT_FIQ:
9184 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9185 break;
9187 case ARM_FT_INTERWORKED:
9188 sprintf (instr, "bx%s\t%%|lr", conditional);
9189 break;
9191 case ARM_FT_EXCEPTION:
9192 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9193 break;
9195 default:
9196 /* Use bx if it's available. */
9197 if (arm_arch5 || arm_arch4t)
9198 sprintf (instr, "bx%s\t%%|lr", conditional);
9199 else
9200 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9201 break;
9204 output_asm_insn (instr, & operand);
9207 return "";
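/* Example (ours): a normal function that pushed {r4, r5, lr} and has
   no interworking or interrupt requirements returns with the single
   instruction
       ldmfd   sp!, {r4, r5, pc}
   whereas with interworking the registers are popped with LR in
   place of PC and a separate "bx lr" performs the return.  */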
9210 /* Write the function name into the code section, directly preceding
9211 the function prologue.
9213 Code will be output similar to this:
9215 .ascii "arm_poke_function_name", 0
9216 .align
9218 .word 0xff000000 + (t1 - t0)
9219 arm_poke_function_name
9220 mov ip, sp
9221 stmfd sp!, {fp, ip, lr, pc}
9222 sub fp, ip, #4
9224 When performing a stack backtrace, code can inspect the value
9225 of 'pc' stored at 'fp' + 0. If the trace function then looks
9226 at location pc - 12 and the top 8 bits are set, then we know
9227 that there is a function name embedded immediately preceding this
9228 location, and that its length is ((pc[-3]) & ~0xff000000).
9230 We assume that pc is declared as a pointer to an unsigned long.
9232 It is of no benefit to output the function name if we are assembling
9233 a leaf function. These function types will not contain a stack
9234 backtrace structure, therefore it is not possible to determine the
9235 function name. */
9236 void
9237 arm_poke_function_name (FILE *stream, const char *name)
9239 unsigned long alignlength;
9240 unsigned long length;
9241 rtx x;
9243 length = strlen (name) + 1;
9244 alignlength = ROUND_UP_WORD (length);
9246 ASM_OUTPUT_ASCII (stream, name, length);
9247 ASM_OUTPUT_ALIGN (stream, 2);
9248 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9249 assemble_aligned_integer (UNITS_PER_WORD, x);
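/* Standalone sketch (ours, the helper name is hypothetical): how a
   backtrace routine can recover the poked name, with PC declared as
   a pointer to unsigned long as assumed above.  */
static const char *
example_find_poked_name (unsigned long *pc)
{
  unsigned long marker = pc[-3];

  if ((marker & 0xff000000) != 0xff000000)
    return 0;		/* No name record precedes this function.  */

  /* The low 24 bits hold the word-aligned length of the name,
     which sits immediately before the marker word.  */
  return (const char *) pc - 12 - (marker & 0x00ffffff);
}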
9252 /* Place some comments into the assembler stream
9253 describing the current function. */
9254 static void
9255 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9257 unsigned long func_type;
9259 if (!TARGET_ARM)
9261 thumb_output_function_prologue (f, frame_size);
9262 return;
9265 /* Sanity check. */
9266 if (arm_ccfsm_state || arm_target_insn)
9267 abort ();
9269 func_type = arm_current_func_type ();
9271 switch ((int) ARM_FUNC_TYPE (func_type))
9273 default:
9274 case ARM_FT_NORMAL:
9275 break;
9276 case ARM_FT_INTERWORKED:
9277 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9278 break;
9279 case ARM_FT_ISR:
9280 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9281 break;
9282 case ARM_FT_FIQ:
9283 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9284 break;
9285 case ARM_FT_EXCEPTION:
9286 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9287 break;
9290 if (IS_NAKED (func_type))
9291 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9293 if (IS_VOLATILE (func_type))
9294 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9296 if (IS_NESTED (func_type))
9297 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9299 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9300 current_function_args_size,
9301 current_function_pretend_args_size, frame_size);
9303 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9304 frame_pointer_needed,
9305 cfun->machine->uses_anonymous_args);
9307 if (cfun->machine->lr_save_eliminated)
9308 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9310 if (current_function_calls_eh_return)
9311 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9313 #ifdef AOF_ASSEMBLER
9314 if (flag_pic)
9315 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9316 #endif
9318 return_used_this_function = 0;
9321 const char *
9322 arm_output_epilogue (rtx sibling)
9324 int reg;
9325 unsigned long saved_regs_mask;
9326 unsigned long func_type;
9327 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9328 frame that is $fp + 4 for a non-variadic function. */
9329 int floats_offset = 0;
9330 rtx operands[3];
9331 FILE * f = asm_out_file;
9332 unsigned int lrm_count = 0;
9333 int really_return = (sibling == NULL);
9334 int start_reg;
9335 arm_stack_offsets *offsets;
9337 /* If we have already generated the return instruction
9338 then it is futile to generate anything else. */
9339 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9340 return "";
9342 func_type = arm_current_func_type ();
9344 if (IS_NAKED (func_type))
9345 /* Naked functions don't have epilogues. */
9346 return "";
9348 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9350 rtx op;
9352 /* A volatile function should never return. Call abort. */
9353 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9354 assemble_external_libcall (op);
9355 output_asm_insn ("bl\t%a0", &op);
9357 return "";
9360 if (current_function_calls_eh_return
9361 && ! really_return)
9362 /* If we are throwing an exception, then we really must
9363 be doing a return, so we can't tail-call. */
9364 abort ();
9366 offsets = arm_get_frame_offsets ();
9367 saved_regs_mask = arm_compute_save_reg_mask ();
9369 if (TARGET_IWMMXT)
9370 lrm_count = bit_count (saved_regs_mask);
9372 floats_offset = offsets->saved_args;
9373 /* Compute how far away the floats will be. */
9374 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9375 if (saved_regs_mask & (1 << reg))
9376 floats_offset += 4;
9378 if (frame_pointer_needed)
9380 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9381 int vfp_offset = offsets->frame;
9383 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9385 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9386 if (regs_ever_live[reg] && !call_used_regs[reg])
9388 floats_offset += 12;
9389 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9390 reg, FP_REGNUM, floats_offset - vfp_offset);
9393 else
9395 start_reg = LAST_FPA_REGNUM;
9397 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9399 if (regs_ever_live[reg] && !call_used_regs[reg])
9401 floats_offset += 12;
9403 /* We can't unstack more than four registers at once. */
9404 if (start_reg - reg == 3)
9406 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9407 reg, FP_REGNUM, floats_offset - vfp_offset);
9408 start_reg = reg - 1;
9411 else
9413 if (reg != start_reg)
9414 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9415 reg + 1, start_reg - reg,
9416 FP_REGNUM, floats_offset - vfp_offset);
9417 start_reg = reg - 1;
9421 /* Just in case the last register checked also needs unstacking. */
9422 if (reg != start_reg)
9423 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9424 reg + 1, start_reg - reg,
9425 FP_REGNUM, floats_offset - vfp_offset);
9428 if (TARGET_HARD_FLOAT && TARGET_VFP)
9430 int saved_size;
9432 /* The fldmx insn does not have base+offset addressing modes,
9433 so we use IP to hold the address. */
9434 saved_size = arm_get_vfp_saved_size ();
9436 if (saved_size > 0)
9438 floats_offset += saved_size;
9439 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9440 FP_REGNUM, floats_offset - vfp_offset);
9442 start_reg = FIRST_VFP_REGNUM;
9443 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9445 if ((!regs_ever_live[reg] || call_used_regs[reg])
9446 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9448 if (start_reg != reg)
9449 arm_output_fldmx (f, IP_REGNUM,
9450 (start_reg - FIRST_VFP_REGNUM) / 2,
9451 (reg - start_reg) / 2);
9452 start_reg = reg + 2;
9455 if (start_reg != reg)
9456 arm_output_fldmx (f, IP_REGNUM,
9457 (start_reg - FIRST_VFP_REGNUM) / 2,
9458 (reg - start_reg) / 2);
9461 if (TARGET_IWMMXT)
9463 /* The frame pointer is guaranteed to be non-double-word aligned.
9464 This is because it is set to (old_stack_pointer - 4) and the
9465 old_stack_pointer was double word aligned. Thus the offset to
9466 the iWMMXt registers to be loaded must also be non-double-word
9467 sized, so that the resultant address *is* double-word aligned.
9468 We can ignore floats_offset since that was already included in
9469 the live_regs_mask. */
9470 lrm_count += (lrm_count % 2 ? 2 : 1);
9472 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9473 if (regs_ever_live[reg] && !call_used_regs[reg])
9475 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9476 reg, FP_REGNUM, lrm_count * 4);
9477 lrm_count += 2;
9481 /* saved_regs_mask should contain the IP, which at the time of stack
9482 frame generation actually contains the old stack pointer. So a
9483 quick way to unwind the stack is just pop the IP register directly
9484 into the stack pointer. */
9485 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9486 abort ();
9487 saved_regs_mask &= ~ (1 << IP_REGNUM);
9488 saved_regs_mask |= (1 << SP_REGNUM);
9490 /* There are two registers left in saved_regs_mask - LR and PC. We
9491 only need to restore the LR register (the return address), but to
9492 save time we can load it directly into the PC, unless we need a
9493 special function exit sequence, or we are not really returning. */
9494 if (really_return
9495 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9496 && !current_function_calls_eh_return)
9497 /* Delete the LR from the register mask, so that the LR on
9498 the stack is loaded into the PC in the register mask. */
9499 saved_regs_mask &= ~ (1 << LR_REGNUM);
9500 else
9501 saved_regs_mask &= ~ (1 << PC_REGNUM);
9503 /* We must use SP as the base register, because SP is one of the
9504 registers being restored. If an interrupt or page fault
9505 happens in the ldm instruction, the SP might or might not
9506 have been restored. That would be bad, as then SP will no
9507 longer indicate the safe area of stack, and we can get stack
9508 corruption. Using SP as the base register means that it will
9509 be reset correctly to the original value, should an interrupt
9510 occur. If the stack pointer already points at the right
9511 place, then omit the subtraction. */
9512 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9513 || current_function_calls_alloca)
9514 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9515 4 * bit_count (saved_regs_mask));
9516 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9518 if (IS_INTERRUPT (func_type))
9519 /* Interrupt handlers will have pushed the
9520 IP onto the stack, so restore it now. */
9521 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9523 else
9525 /* Restore stack pointer if necessary. */
9526 if (offsets->outgoing_args != offsets->saved_regs)
9528 operands[0] = operands[1] = stack_pointer_rtx;
9529 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9530 output_add_immediate (operands);
9533 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9535 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9536 if (regs_ever_live[reg] && !call_used_regs[reg])
9537 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9538 reg, SP_REGNUM);
9540 else
9542 start_reg = FIRST_FPA_REGNUM;
9544 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9546 if (regs_ever_live[reg] && !call_used_regs[reg])
9548 if (reg - start_reg == 3)
9550 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9551 start_reg, SP_REGNUM);
9552 start_reg = reg + 1;
9555 else
9557 if (reg != start_reg)
9558 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9559 start_reg, reg - start_reg,
9560 SP_REGNUM);
9562 start_reg = reg + 1;
9566 /* Just in case the last register checked also needs unstacking. */
9567 if (reg != start_reg)
9568 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9569 start_reg, reg - start_reg, SP_REGNUM);
9572 if (TARGET_HARD_FLOAT && TARGET_VFP)
9574 start_reg = FIRST_VFP_REGNUM;
9575 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9577 if ((!regs_ever_live[reg] || call_used_regs[reg])
9578 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9580 if (start_reg != reg)
9581 arm_output_fldmx (f, SP_REGNUM,
9582 (start_reg - FIRST_VFP_REGNUM) / 2,
9583 (reg - start_reg) / 2);
9584 start_reg = reg + 2;
9587 if (start_reg != reg)
9588 arm_output_fldmx (f, SP_REGNUM,
9589 (start_reg - FIRST_VFP_REGNUM) / 2,
9590 (reg - start_reg) / 2);
9592 if (TARGET_IWMMXT)
9593 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9594 if (regs_ever_live[reg] && !call_used_regs[reg])
9595 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9597 /* If we can, restore the LR into the PC. */
9598 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9599 && really_return
9600 && current_function_pretend_args_size == 0
9601 && saved_regs_mask & (1 << LR_REGNUM)
9602 && !current_function_calls_eh_return)
9604 saved_regs_mask &= ~ (1 << LR_REGNUM);
9605 saved_regs_mask |= (1 << PC_REGNUM);
9608 /* Load the registers off the stack. If we only have one register
9609 to load use the LDR instruction - it is faster. */
9610 if (saved_regs_mask == (1 << LR_REGNUM))
9612 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9614 else if (saved_regs_mask)
9616 if (saved_regs_mask & (1 << SP_REGNUM))
9617 /* Note - write back to the stack register is not enabled
9618 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9619 in the list of registers and if we add writeback the
9620 instruction becomes UNPREDICTABLE. */
9621 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9622 else
9623 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9626 if (current_function_pretend_args_size)
9628 /* Unwind the pre-pushed regs. */
9629 operands[0] = operands[1] = stack_pointer_rtx;
9630 operands[2] = GEN_INT (current_function_pretend_args_size);
9631 output_add_immediate (operands);
9635 /* We may have already restored PC directly from the stack. */
9636 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9637 return "";
9639 /* Stack adjustment for exception handler. */
9640 if (current_function_calls_eh_return)
9641 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9642 ARM_EH_STACKADJ_REGNUM);
9644 /* Generate the return instruction. */
9645 switch ((int) ARM_FUNC_TYPE (func_type))
9647 case ARM_FT_ISR:
9648 case ARM_FT_FIQ:
9649 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9650 break;
9652 case ARM_FT_EXCEPTION:
9653 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9654 break;
9656 case ARM_FT_INTERWORKED:
9657 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9658 break;
9660 default:
9661 if (arm_arch5 || arm_arch4t)
9662 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9663 else
9664 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9665 break;
9668 return "";
9671 static void
9672 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9673 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9675 arm_stack_offsets *offsets;
9677 if (TARGET_THUMB)
9679 int regno;
9681 /* Emit any call-via-reg trampolines that are needed for v4t support
9682 of call_reg and call_value_reg type insns. */
9683 for (regno = 0; regno < SP_REGNUM; regno++)
9685 rtx label = cfun->machine->call_via[regno];
9687 if (label != NULL)
9689 function_section (current_function_decl);
9690 targetm.asm_out.internal_label (asm_out_file, "L",
9691 CODE_LABEL_NUMBER (label));
9692 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9696 /* ??? Probably not safe to set this here, since it assumes that a
9697 function will be emitted as assembly immediately after we generate
9698 RTL for it. This does not happen for inline functions. */
9699 return_used_this_function = 0;
9701 else
9703 /* We need to take into account any stack-frame rounding. */
9704 offsets = arm_get_frame_offsets ();
9706 if (use_return_insn (FALSE, NULL)
9707 && return_used_this_function
9708 && offsets->saved_regs != offsets->outgoing_args
9709 && !frame_pointer_needed)
9710 abort ();
9712 /* Reset the ARM-specific per-function variables. */
9713 after_arm_reorg = 0;
9717 /* Generate and emit an insn that we will recognize as a push_multi.
9718 Unfortunately, since this insn does not reflect very well the actual
9719 semantics of the operation, we need to annotate the insn for the benefit
9720 of DWARF2 frame unwind information. */
9721 static rtx
9722 emit_multi_reg_push (unsigned long mask)
9724 int num_regs = 0;
9725 int num_dwarf_regs;
9726 int i, j;
9727 rtx par;
9728 rtx dwarf;
9729 int dwarf_par_index;
9730 rtx tmp, reg;
9732 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9733 if (mask & (1 << i))
9734 num_regs++;
9736 if (num_regs == 0 || num_regs > 16)
9737 abort ();
9739 /* We don't record the PC in the dwarf frame information. */
9740 num_dwarf_regs = num_regs;
9741 if (mask & (1 << PC_REGNUM))
9742 num_dwarf_regs--;
9744 /* For the body of the insn we are going to generate an UNSPEC in
9745 parallel with several USEs. This allows the insn to be recognized
9746 by the push_multi pattern in the arm.md file. The insn looks
9747 something like this:
9749 (parallel [
9750 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9751 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9752 (use (reg:SI 11 fp))
9753 (use (reg:SI 12 ip))
9754 (use (reg:SI 14 lr))
9755 (use (reg:SI 15 pc))
9758 For the frame note however, we try to be more explicit and actually
9759 show each register being stored into the stack frame, plus a (single)
9760 decrement of the stack pointer. We do it this way in order to be
9761 friendly to the stack unwinding code, which only wants to see a single
9762 stack decrement per instruction. The RTL we generate for the note looks
9763 something like this:
9765 (sequence [
9766 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9767 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9768 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9769 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9770 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9773 This sequence is used both by the code to support stack unwinding for
9774 exceptions handlers and the code to generate dwarf2 frame debugging. */
9776 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9777 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9778 dwarf_par_index = 1;
9780 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9782 if (mask & (1 << i))
9784 reg = gen_rtx_REG (SImode, i);
9786 XVECEXP (par, 0, 0)
9787 = gen_rtx_SET (VOIDmode,
9788 gen_rtx_MEM (BLKmode,
9789 gen_rtx_PRE_DEC (BLKmode,
9790 stack_pointer_rtx)),
9791 gen_rtx_UNSPEC (BLKmode,
9792 gen_rtvec (1, reg),
9793 UNSPEC_PUSH_MULT));
9795 if (i != PC_REGNUM)
9797 tmp = gen_rtx_SET (VOIDmode,
9798 gen_rtx_MEM (SImode, stack_pointer_rtx),
9799 reg);
9800 RTX_FRAME_RELATED_P (tmp) = 1;
9801 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9802 dwarf_par_index++;
9805 break;
9809 for (j = 1, i++; j < num_regs; i++)
9811 if (mask & (1 << i))
9813 reg = gen_rtx_REG (SImode, i);
9815 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9817 if (i != PC_REGNUM)
9819 tmp = gen_rtx_SET (VOIDmode,
9820 gen_rtx_MEM (SImode,
9821 plus_constant (stack_pointer_rtx,
9822 4 * j)),
9823 reg);
9824 RTX_FRAME_RELATED_P (tmp) = 1;
9825 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9828 j++;
9832 par = emit_insn (par);
9834 tmp = gen_rtx_SET (SImode,
9835 stack_pointer_rtx,
9836 gen_rtx_PLUS (SImode,
9837 stack_pointer_rtx,
9838 GEN_INT (-4 * num_regs)));
9839 RTX_FRAME_RELATED_P (tmp) = 1;
9840 XVECEXP (dwarf, 0, 0) = tmp;
9842 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9843 REG_NOTES (par));
9844 return par;
9847 static rtx
9848 emit_sfm (int base_reg, int count)
9850 rtx par;
9851 rtx dwarf;
9852 rtx tmp, reg;
9853 int i;
9855 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9856 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9858 reg = gen_rtx_REG (XFmode, base_reg++);
9860 XVECEXP (par, 0, 0)
9861 = gen_rtx_SET (VOIDmode,
9862 gen_rtx_MEM (BLKmode,
9863 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9864 gen_rtx_UNSPEC (BLKmode,
9865 gen_rtvec (1, reg),
9866 UNSPEC_PUSH_MULT));
9867 tmp = gen_rtx_SET (VOIDmode,
9868 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9869 RTX_FRAME_RELATED_P (tmp) = 1;
9870 XVECEXP (dwarf, 0, 1) = tmp;
9872 for (i = 1; i < count; i++)
9874 reg = gen_rtx_REG (XFmode, base_reg++);
9875 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9877 tmp = gen_rtx_SET (VOIDmode,
9878 gen_rtx_MEM (XFmode,
9879 plus_constant (stack_pointer_rtx,
9880 i * 12)),
9881 reg);
9882 RTX_FRAME_RELATED_P (tmp) = 1;
9883 XVECEXP (dwarf, 0, i + 1) = tmp;
9886 tmp = gen_rtx_SET (VOIDmode,
9887 stack_pointer_rtx,
9888 gen_rtx_PLUS (SImode,
9889 stack_pointer_rtx,
9890 GEN_INT (-12 * count)));
9891 RTX_FRAME_RELATED_P (tmp) = 1;
9892 XVECEXP (dwarf, 0, 0) = tmp;
9894 par = emit_insn (par);
9895 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9896 REG_NOTES (par));
9897 return par;
9901 /* Return true if the current function needs to save/restore LR. */
9903 static bool
9904 thumb_force_lr_save (void)
9906 return !cfun->machine->lr_save_eliminated
9907 && (!leaf_function_p ()
9908 || thumb_far_jump_used_p ()
9909 || regs_ever_live [LR_REGNUM]);
9913 /* Compute the distance from register FROM to register TO.
9914 These can be the arg pointer (26), the soft frame pointer (25),
9915 the stack pointer (13) or the hard frame pointer (11).
9916 In thumb mode r7 is used as the soft frame pointer, if needed.
9917 Typical stack layout looks like this:
9919 old stack pointer -> | |
9920 ----
9921 | | \
9922 | | saved arguments for
9923 | | vararg functions
9924 | | /
9926 hard FP & arg pointer -> | | \
9927 | | stack
9928 | | frame
9929 | | /
9931 | | \
9932 | | call saved
9933 | | registers
9934 soft frame pointer -> | | /
9936 | | \
9937 | | local
9938 | | variables
9939 | | /
9941 | | \
9942 | | outgoing
9943 | | arguments
9944 current stack pointer -> | | /
9947 For a given function some or all of these stack components
9948 may not be needed, giving rise to the possibility of
9949 eliminating some of the registers.
9951 The values returned by this function must reflect the behavior
9952 of arm_expand_prologue() and arm_compute_save_reg_mask().
9954 The sign of the number returned reflects the direction of stack
9955 growth, so the values are positive for all eliminations except
9956 from the soft frame pointer to the hard frame pointer.
9958 SFP may point just inside the local variables block to ensure correct
9959 alignment. */
9962 /* Calculate stack offsets. These are used to calculate register elimination
9963 offsets and in prologue/epilogue code. */
9965 static arm_stack_offsets *
9966 arm_get_frame_offsets (void)
9968 struct arm_stack_offsets *offsets;
9969 unsigned long func_type;
9970 int leaf;
9971 int saved;
9972 HOST_WIDE_INT frame_size;
9974 offsets = &cfun->machine->stack_offsets;
9976 /* We need to know if we are a leaf function. Unfortunately, it
9977 is possible to be called after start_sequence has been called,
9978 which causes get_insns to return the insns for the sequence,
9979 not the function, which will cause leaf_function_p to return
9980 the incorrect result.
9982 However, we only need to know about leaf functions once reload has completed, and the
9983 frame size cannot be changed after that time, so we can safely
9984 use the cached value. */
9986 if (reload_completed)
9987 return offsets;
9989 /* Initially this is the size of the local variables. It will be translated
9990 into an offset once we have determined the size of preceding data. */
9991 frame_size = ROUND_UP_WORD (get_frame_size ());
9993 leaf = leaf_function_p ();
9995 /* Space for variadic functions. */
9996 offsets->saved_args = current_function_pretend_args_size;
9998 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10000 if (TARGET_ARM)
10002 unsigned int regno;
10004 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10006 /* We know that SP will be doubleword aligned on entry, and we must
10007 preserve that condition at any subroutine call. We also require the
10008 soft frame pointer to be doubleword aligned. */
10010 if (TARGET_REALLY_IWMMXT)
10012 /* Check for the call-saved iWMMXt registers. */
10013 for (regno = FIRST_IWMMXT_REGNUM;
10014 regno <= LAST_IWMMXT_REGNUM;
10015 regno++)
10016 if (regs_ever_live [regno] && ! call_used_regs [regno])
10017 saved += 8;
10020 func_type = arm_current_func_type ();
10021 if (! IS_VOLATILE (func_type))
10023 /* Space for saved FPA registers. */
10024 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10025 if (regs_ever_live[regno] && ! call_used_regs[regno])
10026 saved += 12;
10028 /* Space for saved VFP registers. */
10029 if (TARGET_HARD_FLOAT && TARGET_VFP)
10030 saved += arm_get_vfp_saved_size ();
10033 else /* TARGET_THUMB */
10035 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10036 if (TARGET_BACKTRACE)
10037 saved += 16;
10040 /* Saved registers include the stack frame. */
10041 offsets->saved_regs = offsets->saved_args + saved;
10042 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10043 /* A leaf function does not need any stack alignment if it has nothing
10044 on the stack. */
10045 if (leaf && frame_size == 0)
10047 offsets->outgoing_args = offsets->soft_frame;
10048 return offsets;
10051 /* Ensure SFP has the correct alignment. */
10052 if (ARM_DOUBLEWORD_ALIGN
10053 && (offsets->soft_frame & 7))
10054 offsets->soft_frame += 4;
10056 offsets->outgoing_args = offsets->soft_frame + frame_size
10057 + current_function_outgoing_args_size;
10059 if (ARM_DOUBLEWORD_ALIGN)
10061 /* Ensure SP remains doubleword aligned. */
10062 if (offsets->outgoing_args & 7)
10063 offsets->outgoing_args += 4;
10064 if (offsets->outgoing_args & 7)
10065 abort ();
10068 return offsets;
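/* Worked example (ours, assuming doubleword alignment is in force
   and no interworking slot is needed): an ARM function with 8 bytes
   of locals that saves {r4, r5, lr} and pushes no pretend args gets
   saved_args = 0, frame = 0, saved_regs = 12, soft_frame = 16
   (12 rounded up to a doubleword boundary) and, with no outgoing
   arguments, outgoing_args = 24, keeping SP 8-byte aligned.  */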
10072 /* Calculate the relative offsets for the different stack pointers. Positive
10073 offsets are in the direction of stack growth. */
10075 HOST_WIDE_INT
10076 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10078 arm_stack_offsets *offsets;
10080 offsets = arm_get_frame_offsets ();
10082 /* OK, now we have enough information to compute the distances.
10083 There must be an entry in these switch tables for each pair
10084 of registers in ELIMINABLE_REGS, even if some of the entries
10085 seem to be redundant or useless. */
10086 switch (from)
10088 case ARG_POINTER_REGNUM:
10089 switch (to)
10091 case THUMB_HARD_FRAME_POINTER_REGNUM:
10092 return 0;
10094 case FRAME_POINTER_REGNUM:
10095 /* This is the reverse of the soft frame pointer
10096 to hard frame pointer elimination below. */
10097 return offsets->soft_frame - offsets->saved_args;
10099 case ARM_HARD_FRAME_POINTER_REGNUM:
10100 /* If there is no stack frame then the hard
10101 frame pointer and the arg pointer coincide. */
10102 if (offsets->frame == offsets->saved_regs)
10103 return 0;
10104 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10105 return (frame_pointer_needed
10106 && cfun->static_chain_decl != NULL
10107 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10109 case STACK_POINTER_REGNUM:
10110 /* If nothing has been pushed on the stack at all
10111 then this will return -4. This *is* correct! */
10112 return offsets->outgoing_args - (offsets->saved_args + 4);
10114 default:
10115 abort ();
10117 break;
10119 case FRAME_POINTER_REGNUM:
10120 switch (to)
10122 case THUMB_HARD_FRAME_POINTER_REGNUM:
10123 return 0;
10125 case ARM_HARD_FRAME_POINTER_REGNUM:
10126 /* The hard frame pointer points to the top entry in the
10127 stack frame. The soft frame pointer points to the bottom
10128 entry in the stack frame. If there is no stack frame at all,
10129 then they are identical. */
10131 return offsets->frame - offsets->soft_frame;
10133 case STACK_POINTER_REGNUM:
10134 return offsets->outgoing_args - offsets->soft_frame;
10136 default:
10137 abort ();
10139 break;
10141 default:
10142 /* You cannot eliminate from the stack pointer.
10143 In theory you could eliminate from the hard frame
10144 pointer to the stack pointer, but this will never
10145 happen, since if a stack frame is not needed the
10146 hard frame pointer will never be used. */
10147 abort ();
10152 /* Generate the prologue instructions for entry into an ARM function. */
10153 void
10154 arm_expand_prologue (void)
10156 int reg;
10157 rtx amount;
10158 rtx insn;
10159 rtx ip_rtx;
10160 unsigned long live_regs_mask;
10161 unsigned long func_type;
10162 int fp_offset = 0;
10163 int saved_pretend_args = 0;
10164 int saved_regs = 0;
10165 unsigned HOST_WIDE_INT args_to_push;
10166 arm_stack_offsets *offsets;
10168 func_type = arm_current_func_type ();
10170 /* Naked functions don't have prologues. */
10171 if (IS_NAKED (func_type))
10172 return;
10174 /* Make a copy of current_function_pretend_args_size, as we may need to modify it locally. */
10175 args_to_push = current_function_pretend_args_size;
10177 /* Compute which registers we will have to save onto the stack. */
10178 live_regs_mask = arm_compute_save_reg_mask ();
10180 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10182 if (frame_pointer_needed)
10184 if (IS_INTERRUPT (func_type))
10186 /* Interrupt functions must not corrupt any registers.
10187 Creating a frame pointer, however, corrupts the IP
10188 register, so we must push it first. */
10189 insn = emit_multi_reg_push (1 << IP_REGNUM);
10191 /* Do not set RTX_FRAME_RELATED_P on this insn.
10192 The dwarf stack unwinding code only wants to see one
10193 stack decrement per function, and this is not it. If
10194 this instruction is labeled as being part of the frame
10195 creation sequence then dwarf2out_frame_debug_expr will
10196 abort when it encounters the assignment of IP to FP
10197 later on, since the use of SP here establishes SP as
10198 the CFA register and not IP.
10200 Anyway, this instruction is not really part of the stack
10201 frame creation, although it is part of the prologue. */
10203 else if (IS_NESTED (func_type))
10205 /* The static chain register is the same as the IP register,
10206 which is used as a scratch register during stack frame creation.
10207 To get around this we need to find somewhere to store IP
10208 whilst the frame is being created. We try the following
10209 places in order:
10211 1. The last argument register.
10212 2. A slot on the stack above the frame. (This only
10213 works if the function is not a varargs function).
10214 3. Register r3, after pushing the argument registers
10215 onto the stack.
10217 Note - we only need to tell the dwarf2 backend about the SP
10218 adjustment in the second variant; the static chain register
10219 doesn't need to be unwound, as it doesn't contain a value
10220 inherited from the caller. */
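/* Illustrative instruction sequences for the three cases above
   (approximate, assuming the usual register names):
     1:  mov   r3, ip                 @ r3 is known to be free
     2:  str   ip, [sp, #-4]!         @ slot on the stack above the frame
     3:  stmfd sp!, {r0-r3}           @ push the argument registers,
         mov   r3, ip                 @ then r3 is free to hold IP  */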
10222 if (regs_ever_live[3] == 0)
10224 insn = gen_rtx_REG (SImode, 3);
10225 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10226 insn = emit_insn (insn);
10228 else if (args_to_push == 0)
10230 rtx dwarf;
10231 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10232 insn = gen_rtx_MEM (SImode, insn);
10233 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10234 insn = emit_insn (insn);
10236 fp_offset = 4;
10238 /* Just tell the dwarf backend that we adjusted SP. */
10239 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10240 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10241 GEN_INT (-fp_offset)));
10242 RTX_FRAME_RELATED_P (insn) = 1;
10243 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10244 dwarf, REG_NOTES (insn));
10246 else
10248 /* Store the args on the stack. */
10249 if (cfun->machine->uses_anonymous_args)
10250 insn = emit_multi_reg_push
10251 ((0xf0 >> (args_to_push / 4)) & 0xf);
10252 else
10253 insn = emit_insn
10254 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10255 GEN_INT (- args_to_push)));
10257 RTX_FRAME_RELATED_P (insn) = 1;
10259 saved_pretend_args = 1;
10260 fp_offset = args_to_push;
10261 args_to_push = 0;
10263 /* Now reuse r3 to preserve IP. */
10264 insn = gen_rtx_REG (SImode, 3);
10265 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10266 (void) emit_insn (insn);
10270 if (fp_offset)
10272 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10273 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10275 else
10276 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10278 insn = emit_insn (insn);
10279 RTX_FRAME_RELATED_P (insn) = 1;
10282 if (args_to_push)
10284 /* Push the argument registers, or reserve space for them. */
10285 if (cfun->machine->uses_anonymous_args)
10286 insn = emit_multi_reg_push
10287 ((0xf0 >> (args_to_push / 4)) & 0xf);
10288 else
10289 insn = emit_insn
10290 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10291 GEN_INT (- args_to_push)));
10292 RTX_FRAME_RELATED_P (insn) = 1;
10295 /* If this is an interrupt service routine, and the link register
10296 is going to be pushed, and we are not creating a stack frame
10297 (which would involve an extra push of IP and a pop in the epilogue),
10298 then subtracting four from LR now will mean that the function return
10299 can be done with a single instruction. */
10300 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10301 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10302 && ! frame_pointer_needed)
10303 emit_insn (gen_rtx_SET (SImode,
10304 gen_rtx_REG (SImode, LR_REGNUM),
10305 gen_rtx_PLUS (SImode,
10306 gen_rtx_REG (SImode, LR_REGNUM),
10307 GEN_INT (-4))));
10309 if (live_regs_mask)
10311 insn = emit_multi_reg_push (live_regs_mask);
10312 saved_regs += bit_count (live_regs_mask) * 4;
10313 RTX_FRAME_RELATED_P (insn) = 1;
10316 if (TARGET_IWMMXT)
10317 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10318 if (regs_ever_live[reg] && ! call_used_regs [reg])
10320 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10321 insn = gen_rtx_MEM (V2SImode, insn);
10322 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10323 gen_rtx_REG (V2SImode, reg)));
10324 RTX_FRAME_RELATED_P (insn) = 1;
10325 saved_regs += 8;
10328 if (! IS_VOLATILE (func_type))
10330 int start_reg;
10332 /* Save any floating point call-saved registers used by this
10333 function. */
10334 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10336 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10337 if (regs_ever_live[reg] && !call_used_regs[reg])
10339 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10340 insn = gen_rtx_MEM (XFmode, insn);
10341 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10342 gen_rtx_REG (XFmode, reg)));
10343 RTX_FRAME_RELATED_P (insn) = 1;
10344 saved_regs += 12;
10347 else
10349 start_reg = LAST_FPA_REGNUM;
10351 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10353 if (regs_ever_live[reg] && !call_used_regs[reg])
10355 if (start_reg - reg == 3)
10357 insn = emit_sfm (reg, 4);
10358 RTX_FRAME_RELATED_P (insn) = 1;
10359 saved_regs += 48;
10360 start_reg = reg - 1;
10363 else
10365 if (start_reg != reg)
10367 insn = emit_sfm (reg + 1, start_reg - reg);
10368 RTX_FRAME_RELATED_P (insn) = 1;
10369 saved_regs += (start_reg - reg) * 12;
10371 start_reg = reg - 1;
10375 if (start_reg != reg)
10377 insn = emit_sfm (reg + 1, start_reg - reg);
10378 saved_regs += (start_reg - reg) * 12;
10379 RTX_FRAME_RELATED_P (insn) = 1;
10382 if (TARGET_HARD_FLOAT && TARGET_VFP)
10384 start_reg = FIRST_VFP_REGNUM;
10386 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10388 if ((!regs_ever_live[reg] || call_used_regs[reg])
10389 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10391 if (start_reg != reg)
10392 saved_regs += vfp_emit_fstmx (start_reg,
10393 (reg - start_reg) / 2);
10394 start_reg = reg + 2;
10397 if (start_reg != reg)
10398 saved_regs += vfp_emit_fstmx (start_reg,
10399 (reg - start_reg) / 2);
10403 if (frame_pointer_needed)
10405 /* Create the new frame pointer. */
10406 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10407 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10408 RTX_FRAME_RELATED_P (insn) = 1;
10410 if (IS_NESTED (func_type))
10412 /* Recover the static chain register. */
10413 if (regs_ever_live [3] == 0
10414 || saved_pretend_args)
10415 insn = gen_rtx_REG (SImode, 3);
10416 else /* if (current_function_pretend_args_size == 0) */
10418 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10419 GEN_INT (4));
10420 insn = gen_rtx_MEM (SImode, insn);
10423 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10424 /* Add a USE to stop propagate_one_insn() from barfing. */
10425 emit_insn (gen_prologue_use (ip_rtx));
10429 offsets = arm_get_frame_offsets ();
10430 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10432 /* This add can produce multiple insns for a large constant, so we
10433 need to get tricky. */
10434 rtx last = get_last_insn ();
10436 amount = GEN_INT (offsets->saved_args + saved_regs
10437 - offsets->outgoing_args);
10439 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10440 amount));
10441 do
10443 last = last ? NEXT_INSN (last) : get_insns ();
10444 RTX_FRAME_RELATED_P (last) = 1;
10446 while (last != insn);
10448 /* If the frame pointer is needed, emit a special barrier that
10449 will prevent the scheduler from moving stores to the frame
10450 before the stack adjustment. */
10451 if (frame_pointer_needed)
10452 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10453 hard_frame_pointer_rtx));
10457 if (flag_pic)
10458 arm_load_pic_register (INVALID_REGNUM);
10460 /* If we are profiling, make sure no instructions are scheduled before
10461 the call to mcount. Similarly if the user has requested no
10462 scheduling in the prologue. */
10463 if (current_function_profile || TARGET_NO_SCHED_PRO)
10464 emit_insn (gen_blockage ());
10466 /* If the link register is being kept alive, with the return address in it,
10467 then make sure that it does not get reused by the ce2 pass. */
10468 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10470 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10471 cfun->machine->lr_save_eliminated = 1;
10475 /* If CODE is 'd', then X is a condition operand and the instruction
10476 should only be executed if the condition is true.
10477 If CODE is 'D', then X is a condition operand and the instruction
10478 should only be executed if the condition is false: however, if the mode
10479 of the comparison is CCFPEmode, then always execute the instruction -- we
10480 do this because in these circumstances !GE does not necessarily imply LT;
10481 in these cases the instruction pattern will take care to make sure that
10482 an instruction containing %d will follow, thereby undoing the effects of
10483 doing this instruction unconditionally.
10484 If CODE is 'N' then X is a floating point operand that must be negated
10485 before output.
10486 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10487 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
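/* For example (illustrative only): with operand 0 a CONST_INT of 5,
   '%B0' prints -6 (the sign-extended bitwise inverse), and with
   operand 0 a DImode value in r4, '%M0' prints '{r4-r5}'.  */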
10488 void
10489 arm_print_operand (FILE *stream, rtx x, int code)
10491 switch (code)
10493 case '@':
10494 fputs (ASM_COMMENT_START, stream);
10495 return;
10497 case '_':
10498 fputs (user_label_prefix, stream);
10499 return;
10501 case '|':
10502 fputs (REGISTER_PREFIX, stream);
10503 return;
10505 case '?':
10506 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10508 if (TARGET_THUMB)
10510 output_operand_lossage ("predicated Thumb instruction");
10511 break;
10513 if (current_insn_predicate != NULL)
10515 output_operand_lossage
10516 ("predicated instruction in conditional sequence");
10517 break;
10520 fputs (arm_condition_codes[arm_current_cc], stream);
10522 else if (current_insn_predicate)
10524 enum arm_cond_code code;
10526 if (TARGET_THUMB)
10528 output_operand_lossage ("predicated Thumb instruction");
10529 break;
10532 code = get_arm_condition_code (current_insn_predicate);
10533 fputs (arm_condition_codes[code], stream);
10535 return;
10537 case 'N':
10539 REAL_VALUE_TYPE r;
10540 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10541 r = REAL_VALUE_NEGATE (r);
10542 fprintf (stream, "%s", fp_const_from_val (&r));
10544 return;
10546 case 'B':
10547 if (GET_CODE (x) == CONST_INT)
10549 HOST_WIDE_INT val;
10550 val = ARM_SIGN_EXTEND (~INTVAL (x));
10551 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10553 else
10555 putc ('~', stream);
10556 output_addr_const (stream, x);
10558 return;
10560 case 'i':
10561 fprintf (stream, "%s", arithmetic_instr (x, 1));
10562 return;
10564 /* Truncate Cirrus shift counts. */
10565 case 's':
10566 if (GET_CODE (x) == CONST_INT)
10568 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10569 return;
10571 arm_print_operand (stream, x, 0);
10572 return;
10574 case 'I':
10575 fprintf (stream, "%s", arithmetic_instr (x, 0));
10576 return;
10578 case 'S':
10580 HOST_WIDE_INT val;
10581 const char * shift = shift_op (x, &val);
10583 if (shift)
10585 fprintf (stream, ", %s ", shift_op (x, &val));
10586 if (val == -1)
10587 arm_print_operand (stream, XEXP (x, 1), 0);
10588 else
10589 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10592 return;
10594 /* An explanation of the 'Q', 'R' and 'H' register operands:
10596 In a pair of registers containing a DI or DF value the 'Q'
10597 operand returns the register number of the register containing
10598 the least significant part of the value. The 'R' operand returns
10599 the register number of the register containing the most
10600 significant part of the value.
10602 The 'H' operand returns the higher of the two register numbers.
10603 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10604 same as the 'Q' operand, since the most significant part of the
10605 value is held in the lower-numbered register. The reverse is true
10606 on systems where WORDS_BIG_ENDIAN is false.
10608 The purpose of these operands is to distinguish between cases
10609 where the endian-ness of the values is important (for example
10610 when they are added together), and cases where the endian-ness
10611 is irrelevant, but the order of register operations is important.
10612 For example when loading a value from memory into a register
10613 pair, the endian-ness does not matter. Provided that the value
10614 from the lower memory address is put into the lower numbered
10615 register, and the value from the higher address is put into the
10616 higher numbered register, the load will work regardless of whether
10617 the value being loaded is big-wordian or little-wordian. The
10618 order of the two register loads can matter, however, if the address
10619 of the memory location is actually held in one of the registers
10620 being overwritten by the load. */
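/* For instance, with a DImode value in the pair {r0, r1} on a
   little-endian run, '%Q' prints r0 (least significant word), while
   '%R' and '%H' both print r1; on a big-endian run '%Q' and '%H' both
   print r1 and '%R' prints r0.  */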
10621 case 'Q':
10622 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10624 output_operand_lossage ("invalid operand for code '%c'", code);
10625 return;
10628 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10629 return;
10631 case 'R':
10632 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10634 output_operand_lossage ("invalid operand for code '%c'", code);
10635 return;
10638 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10639 return;
10641 case 'H':
10642 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10644 output_operand_lossage ("invalid operand for code '%c'", code);
10645 return;
10648 asm_fprintf (stream, "%r", REGNO (x) + 1);
10649 return;
10651 case 'm':
10652 asm_fprintf (stream, "%r",
10653 GET_CODE (XEXP (x, 0)) == REG
10654 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10655 return;
10657 case 'M':
10658 asm_fprintf (stream, "{%r-%r}",
10659 REGNO (x),
10660 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10661 return;
10663 case 'd':
10664 /* CONST_TRUE_RTX means always -- that's the default. */
10665 if (x == const_true_rtx)
10666 return;
10668 if (!COMPARISON_P (x))
10670 output_operand_lossage ("invalid operand for code '%c'", code);
10671 return;
10674 fputs (arm_condition_codes[get_arm_condition_code (x)],
10675 stream);
10676 return;
10678 case 'D':
10679 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10680 want to do that. */
10681 if (x == const_true_rtx)
10683 output_operand_lossage ("instruction never executed");
10684 return;
10686 if (!COMPARISON_P (x))
10688 output_operand_lossage ("invalid operand for code '%c'", code);
10689 return;
10692 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10693 (get_arm_condition_code (x))],
10694 stream);
10695 return;
10697 /* Cirrus registers can be accessed in a variety of ways:
10698 single floating point (f)
10699 double floating point (d)
10700 32-bit integer (fx)
10701 64-bit integer (dx). */
10702 case 'W': /* Cirrus register in F mode. */
10703 case 'X': /* Cirrus register in D mode. */
10704 case 'Y': /* Cirrus register in FX mode. */
10705 case 'Z': /* Cirrus register in DX mode. */
10706 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10707 abort ();
10709 fprintf (stream, "mv%s%s",
10710 code == 'W' ? "f"
10711 : code == 'X' ? "d"
10712 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10714 return;
10716 /* Print a Cirrus register, choosing the access size from the register's mode. */
10717 case 'V':
10719 int mode = GET_MODE (x);
10721 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10723 output_operand_lossage ("invalid operand for code '%c'", code);
10724 return;
10727 fprintf (stream, "mv%s%s",
10728 mode == DFmode ? "d"
10729 : mode == SImode ? "fx"
10730 : mode == DImode ? "dx"
10731 : "f", reg_names[REGNO (x)] + 2);
10733 return;
10736 case 'U':
10737 if (GET_CODE (x) != REG
10738 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10739 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10740 /* Bad value for wCG register number. */
10742 output_operand_lossage ("invalid operand for code '%c'", code);
10743 return;
10746 else
10747 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10748 return;
10750 /* Print an iWMMXt control register name. */
10751 case 'w':
10752 if (GET_CODE (x) != CONST_INT
10753 || INTVAL (x) < 0
10754 || INTVAL (x) >= 16)
10755 /* Bad value for wC register number. */
10757 output_operand_lossage ("invalid operand for code '%c'", code);
10758 return;
10761 else
10763 static const char * wc_reg_names [16] =
10765 "wCID", "wCon", "wCSSF", "wCASF",
10766 "wC4", "wC5", "wC6", "wC7",
10767 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10768 "wC12", "wC13", "wC14", "wC15"
10771 fputs (wc_reg_names [INTVAL (x)], stream);
10773 return;
10775 /* Print a VFP double precision register name. */
10776 case 'P':
10778 int mode = GET_MODE (x);
10779 int num;
10781 if (mode != DImode && mode != DFmode)
10783 output_operand_lossage ("invalid operand for code '%c'", code);
10784 return;
10787 if (GET_CODE (x) != REG
10788 || !IS_VFP_REGNUM (REGNO (x)))
10790 output_operand_lossage ("invalid operand for code '%c'", code);
10791 return;
10794 num = REGNO (x) - FIRST_VFP_REGNUM;
10795 if (num & 1)
10797 output_operand_lossage ("invalid operand for code '%c'", code);
10798 return;
10801 fprintf (stream, "d%d", num >> 1);
10803 return;
10805 default:
10806 if (x == 0)
10808 output_operand_lossage ("missing operand");
10809 return;
10812 if (GET_CODE (x) == REG)
10813 asm_fprintf (stream, "%r", REGNO (x));
10814 else if (GET_CODE (x) == MEM)
10816 output_memory_reference_mode = GET_MODE (x);
10817 output_address (XEXP (x, 0));
10819 else if (GET_CODE (x) == CONST_DOUBLE)
10820 fprintf (stream, "#%s", fp_immediate_constant (x));
10821 else if (GET_CODE (x) == NEG)
10822 abort (); /* This should never happen now. */
10823 else
10825 fputc ('#', stream);
10826 output_addr_const (stream, x);
10831 #ifndef AOF_ASSEMBLER
10832 /* Target hook for assembling integer objects. The ARM version needs to
10833 handle word-sized values specially. */
10834 static bool
10835 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10837 if (size == UNITS_PER_WORD && aligned_p)
10839 fputs ("\t.word\t", asm_out_file);
10840 output_addr_const (asm_out_file, x);
10842 /* Mark symbols as position independent. We only do this in the
10843 .text segment, not in the .data segment. */
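/* E.g. a constant-pool or local symbol (and any label) is emitted
   as ".word sym(GOTOFF)", while any other symbol is emitted as
   ".word sym(GOT)".  */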
10844 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10845 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10847 if (GET_CODE (x) == SYMBOL_REF
10848 && (CONSTANT_POOL_ADDRESS_P (x)
10849 || SYMBOL_REF_LOCAL_P (x)))
10850 fputs ("(GOTOFF)", asm_out_file);
10851 else if (GET_CODE (x) == LABEL_REF)
10852 fputs ("(GOTOFF)", asm_out_file);
10853 else
10854 fputs ("(GOT)", asm_out_file);
10856 fputc ('\n', asm_out_file);
10857 return true;
10860 if (arm_vector_mode_supported_p (GET_MODE (x)))
10862 int i, units;
10864 if (GET_CODE (x) != CONST_VECTOR)
10865 abort ();
10867 units = CONST_VECTOR_NUNITS (x);
10869 switch (GET_MODE (x))
10871 case V2SImode: size = 4; break;
10872 case V4HImode: size = 2; break;
10873 case V8QImode: size = 1; break;
10874 default:
10875 abort ();
10878 for (i = 0; i < units; i++)
10880 rtx elt;
10882 elt = CONST_VECTOR_ELT (x, i);
10883 assemble_integer
10884 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10887 return true;
10890 return default_assemble_integer (x, size, aligned_p);
10892 #endif
10894 /* A finite state machine takes care of noticing whether or not instructions
10895 can be conditionally executed, and thus decrease execution time and code
10896 size by deleting branch instructions. The fsm is controlled by
10897 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10899 /* The state of the fsm controlling condition codes is:
10900 0: normal, do nothing special
10901 1: make ASM_OUTPUT_OPCODE not output this instruction
10902 2: make ASM_OUTPUT_OPCODE not output this instruction
10903 3: make instructions conditional
10904 4: make instructions conditional
10906 State transitions (state->state by whom under condition):
10907 0 -> 1 final_prescan_insn if the `target' is a label
10908 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10909 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10910 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10911 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10912 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10913 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10914 (the target insn is arm_target_insn).
10916 If the jump clobbers the conditions then we use states 2 and 4.
10918 A similar thing can be done with conditional return insns.
10920 XXX In case the `target' is an unconditional branch, this conditionalising
10921 of the instructions always reduces code size, but not always execution
10922 time. But then, I want to reduce the code size to somewhere near what
10923 /bin/cc produces. */
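/* A hypothetical illustration of the 0 -> 1 -> 3 path (ARM syntax):

       cmp   r0, #0
       beq   .L1
       add   r1, r1, #1
     .L1:

   becomes

       cmp   r0, #0
       addne r1, r1, #1

   The branch is not output, and the skipped insn is executed under the
   inverse of the branch condition.  */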
10925 /* Returns the index of the ARM condition code string in
10926 `arm_condition_codes'. COMPARISON should be an rtx like
10927 `(eq (...) (...))'. */
10928 static enum arm_cond_code
10929 get_arm_condition_code (rtx comparison)
10931 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10932 int code;
10933 enum rtx_code comp_code = GET_CODE (comparison);
10935 if (GET_MODE_CLASS (mode) != MODE_CC)
10936 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10937 XEXP (comparison, 1));
10939 switch (mode)
10941 case CC_DNEmode: code = ARM_NE; goto dominance;
10942 case CC_DEQmode: code = ARM_EQ; goto dominance;
10943 case CC_DGEmode: code = ARM_GE; goto dominance;
10944 case CC_DGTmode: code = ARM_GT; goto dominance;
10945 case CC_DLEmode: code = ARM_LE; goto dominance;
10946 case CC_DLTmode: code = ARM_LT; goto dominance;
10947 case CC_DGEUmode: code = ARM_CS; goto dominance;
10948 case CC_DGTUmode: code = ARM_HI; goto dominance;
10949 case CC_DLEUmode: code = ARM_LS; goto dominance;
10950 case CC_DLTUmode: code = ARM_CC;
10952 dominance:
10953 if (comp_code != EQ && comp_code != NE)
10954 abort ();
10956 if (comp_code == EQ)
10957 return ARM_INVERSE_CONDITION_CODE (code);
10958 return code;
10960 case CC_NOOVmode:
10961 switch (comp_code)
10963 case NE: return ARM_NE;
10964 case EQ: return ARM_EQ;
10965 case GE: return ARM_PL;
10966 case LT: return ARM_MI;
10967 default: abort ();
10970 case CC_Zmode:
10971 switch (comp_code)
10973 case NE: return ARM_NE;
10974 case EQ: return ARM_EQ;
10975 default: abort ();
10978 case CC_Nmode:
10979 switch (comp_code)
10981 case NE: return ARM_MI;
10982 case EQ: return ARM_PL;
10983 default: abort ();
10986 case CCFPEmode:
10987 case CCFPmode:
10988 /* These encodings assume that AC=1 in the FPA system control
10989 byte. This allows us to handle all cases except UNEQ and
10990 LTGT. */
10991 switch (comp_code)
10993 case GE: return ARM_GE;
10994 case GT: return ARM_GT;
10995 case LE: return ARM_LS;
10996 case LT: return ARM_MI;
10997 case NE: return ARM_NE;
10998 case EQ: return ARM_EQ;
10999 case ORDERED: return ARM_VC;
11000 case UNORDERED: return ARM_VS;
11001 case UNLT: return ARM_LT;
11002 case UNLE: return ARM_LE;
11003 case UNGT: return ARM_HI;
11004 case UNGE: return ARM_PL;
11005 /* UNEQ and LTGT do not have a representation. */
11006 case UNEQ: /* Fall through. */
11007 case LTGT: /* Fall through. */
11008 default: abort ();
11011 case CC_SWPmode:
11012 switch (comp_code)
11014 case NE: return ARM_NE;
11015 case EQ: return ARM_EQ;
11016 case GE: return ARM_LE;
11017 case GT: return ARM_LT;
11018 case LE: return ARM_GE;
11019 case LT: return ARM_GT;
11020 case GEU: return ARM_LS;
11021 case GTU: return ARM_CC;
11022 case LEU: return ARM_CS;
11023 case LTU: return ARM_HI;
11024 default: abort ();
11027 case CC_Cmode:
11028 switch (comp_code)
11030 case LTU: return ARM_CS;
11031 case GEU: return ARM_CC;
11032 default: abort ();
11035 case CCmode:
11036 switch (comp_code)
11038 case NE: return ARM_NE;
11039 case EQ: return ARM_EQ;
11040 case GE: return ARM_GE;
11041 case GT: return ARM_GT;
11042 case LE: return ARM_LE;
11043 case LT: return ARM_LT;
11044 case GEU: return ARM_CS;
11045 case GTU: return ARM_HI;
11046 case LEU: return ARM_LS;
11047 case LTU: return ARM_CC;
11048 default: abort ();
11051 default: abort ();
11054 abort ();
11057 void
11058 arm_final_prescan_insn (rtx insn)
11060 /* BODY will hold the body of INSN. */
11061 rtx body = PATTERN (insn);
11063 /* This will be 1 if trying to repeat the trick, and things need to be
11064 reversed if it appears to fail. */
11065 int reverse = 0;
11067 /* JUMP_CLOBBERS being one implies that the condition codes are
11068 clobbered if a branch is taken, even if the rtl suggests otherwise. It
11069 also means that we have to grub around within the jump expression to
11070 find out what the conditions are when the jump isn't taken. */
11071 int jump_clobbers = 0;
11073 /* If we start with a return insn, we only succeed if we find another one. */
11074 int seeking_return = 0;
11076 /* START_INSN will hold the insn from where we start looking. This is the
11077 first insn after the following code_label if REVERSE is true. */
11078 rtx start_insn = insn;
11080 /* If in state 4, check if the target branch is reached, in order to
11081 change back to state 0. */
11082 if (arm_ccfsm_state == 4)
11084 if (insn == arm_target_insn)
11086 arm_target_insn = NULL;
11087 arm_ccfsm_state = 0;
11089 return;
11092 /* If in state 3, it is possible to repeat the trick: this insn must be
11093 an unconditional branch to a label, immediately following this branch
11094 must be the previous target label (which is used only once), and the
11095 label this branch jumps to must not be too far away. */
11096 if (arm_ccfsm_state == 3)
11098 if (simplejump_p (insn))
11100 start_insn = next_nonnote_insn (start_insn);
11101 if (GET_CODE (start_insn) == BARRIER)
11103 /* XXX Isn't this always a barrier? */
11104 start_insn = next_nonnote_insn (start_insn);
11106 if (GET_CODE (start_insn) == CODE_LABEL
11107 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11108 && LABEL_NUSES (start_insn) == 1)
11109 reverse = TRUE;
11110 else
11111 return;
11113 else if (GET_CODE (body) == RETURN)
11115 start_insn = next_nonnote_insn (start_insn);
11116 if (GET_CODE (start_insn) == BARRIER)
11117 start_insn = next_nonnote_insn (start_insn);
11118 if (GET_CODE (start_insn) == CODE_LABEL
11119 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11120 && LABEL_NUSES (start_insn) == 1)
11122 reverse = TRUE;
11123 seeking_return = 1;
11125 else
11126 return;
11128 else
11129 return;
11132 if (arm_ccfsm_state != 0 && !reverse)
11133 abort ();
11134 if (GET_CODE (insn) != JUMP_INSN)
11135 return;
11137 /* This jump might be paralleled with a clobber of the condition codes;
11138 the jump should always come first. */
11139 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11140 body = XVECEXP (body, 0, 0);
11142 if (reverse
11143 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11144 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11146 int insns_skipped;
11147 int fail = FALSE, succeed = FALSE;
11148 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11149 int then_not_else = TRUE;
11150 rtx this_insn = start_insn, label = 0;
11152 /* If the jump cannot be done with one instruction, we cannot
11153 conditionally execute the instruction in the inverse case. */
11154 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11156 jump_clobbers = 1;
11157 return;
11160 /* Register the insn jumped to. */
11161 if (reverse)
11163 if (!seeking_return)
11164 label = XEXP (SET_SRC (body), 0);
11166 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11167 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11168 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11170 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11171 then_not_else = FALSE;
11173 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11174 seeking_return = 1;
11175 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11177 seeking_return = 1;
11178 then_not_else = FALSE;
11180 else
11181 abort ();
11183 /* See how many insns this branch skips, and what kind of insns. If all
11184 insns are okay, and the label or unconditional branch to the same
11185 label is not too far away, succeed. */
11186 for (insns_skipped = 0;
11187 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11189 rtx scanbody;
11191 this_insn = next_nonnote_insn (this_insn);
11192 if (!this_insn)
11193 break;
11195 switch (GET_CODE (this_insn))
11197 case CODE_LABEL:
11198 /* Succeed if it is the target label, otherwise fail since
11199 control falls in from somewhere else. */
11200 if (this_insn == label)
11202 if (jump_clobbers)
11204 arm_ccfsm_state = 2;
11205 this_insn = next_nonnote_insn (this_insn);
11207 else
11208 arm_ccfsm_state = 1;
11209 succeed = TRUE;
11211 else
11212 fail = TRUE;
11213 break;
11215 case BARRIER:
11216 /* Succeed if the following insn is the target label.
11217 Otherwise fail.
11218 If return insns are used then the last insn in a function
11219 will be a barrier. */
11220 this_insn = next_nonnote_insn (this_insn);
11221 if (this_insn && this_insn == label)
11223 if (jump_clobbers)
11225 arm_ccfsm_state = 2;
11226 this_insn = next_nonnote_insn (this_insn);
11228 else
11229 arm_ccfsm_state = 1;
11230 succeed = TRUE;
11232 else
11233 fail = TRUE;
11234 break;
11236 case CALL_INSN:
11237 /* The AAPCS says that conditional calls should not be
11238 used since they make interworking inefficient (the
11239 linker can't transform BL<cond> into BLX). That's
11240 only a problem if the machine has BLX. */
11241 if (arm_arch5)
11243 fail = TRUE;
11244 break;
11247 /* Succeed if the following insn is the target label, or
11248 if the following two insns are a barrier and the
11249 target label. */
11250 this_insn = next_nonnote_insn (this_insn);
11251 if (this_insn && GET_CODE (this_insn) == BARRIER)
11252 this_insn = next_nonnote_insn (this_insn);
11254 if (this_insn && this_insn == label
11255 && insns_skipped < max_insns_skipped)
11257 if (jump_clobbers)
11259 arm_ccfsm_state = 2;
11260 this_insn = next_nonnote_insn (this_insn);
11262 else
11263 arm_ccfsm_state = 1;
11264 succeed = TRUE;
11266 else
11267 fail = TRUE;
11268 break;
11270 case JUMP_INSN:
11271 /* If this is an unconditional branch to the same label, succeed.
11272 If it is to another label, do nothing. If it is conditional,
11273 fail. */
11274 /* XXX Probably, the tests for SET and the PC are
11275 unnecessary. */
11277 scanbody = PATTERN (this_insn);
11278 if (GET_CODE (scanbody) == SET
11279 && GET_CODE (SET_DEST (scanbody)) == PC)
11281 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11282 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11284 arm_ccfsm_state = 2;
11285 succeed = TRUE;
11287 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11288 fail = TRUE;
11290 /* Fail if a conditional return is undesirable (e.g. on a
11291 StrongARM), but still allow this if optimizing for size. */
11292 else if (GET_CODE (scanbody) == RETURN
11293 && !use_return_insn (TRUE, NULL)
11294 && !optimize_size)
11295 fail = TRUE;
11296 else if (GET_CODE (scanbody) == RETURN
11297 && seeking_return)
11299 arm_ccfsm_state = 2;
11300 succeed = TRUE;
11302 else if (GET_CODE (scanbody) == PARALLEL)
11304 switch (get_attr_conds (this_insn))
11306 case CONDS_NOCOND:
11307 break;
11308 default:
11309 fail = TRUE;
11310 break;
11313 else
11314 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11316 break;
11318 case INSN:
11319 /* Instructions using or affecting the condition codes make it
11320 fail. */
11321 scanbody = PATTERN (this_insn);
11322 if (!(GET_CODE (scanbody) == SET
11323 || GET_CODE (scanbody) == PARALLEL)
11324 || get_attr_conds (this_insn) != CONDS_NOCOND)
11325 fail = TRUE;
11327 /* A conditional Cirrus instruction must be followed by
11328 a non-Cirrus instruction. However, since we
11329 conditionalize instructions in this function, and since by
11330 the time we get here we cannot add instructions
11331 (nops), because shorten_branches() has already been
11332 called, we disable conditionalizing Cirrus
11333 instructions to be safe. */
11334 if (GET_CODE (scanbody) != USE
11335 && GET_CODE (scanbody) != CLOBBER
11336 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11337 fail = TRUE;
11338 break;
11340 default:
11341 break;
11344 if (succeed)
11346 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11347 arm_target_label = CODE_LABEL_NUMBER (label);
11348 else if (seeking_return || arm_ccfsm_state == 2)
11350 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11352 this_insn = next_nonnote_insn (this_insn);
11353 if (this_insn && (GET_CODE (this_insn) == BARRIER
11354 || GET_CODE (this_insn) == CODE_LABEL))
11355 abort ();
11357 if (!this_insn)
11359 /* Oh dear! We ran off the end... give up. */
11360 recog (PATTERN (insn), insn, NULL);
11361 arm_ccfsm_state = 0;
11362 arm_target_insn = NULL;
11363 return;
11365 arm_target_insn = this_insn;
11367 else
11368 abort ();
11369 if (jump_clobbers)
11371 if (reverse)
11372 abort ();
11373 arm_current_cc =
11374 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11375 0), 0), 1));
11376 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11377 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11378 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11379 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11381 else
11383 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11384 what it was. */
11385 if (!reverse)
11386 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11387 0));
11390 if (reverse || then_not_else)
11391 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11394 /* Restore recog_data (getting the attributes of other insns can
11395 destroy this array, but final.c assumes that it remains intact
11396 across this call; since the insn has been recognized already we
11397 call recog directly). */
11398 recog (PATTERN (insn), insn, NULL);
11402 /* Returns true if REGNO is a valid register
11403 for holding a quantity of type MODE. */
11404 int
11405 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11407 if (GET_MODE_CLASS (mode) == MODE_CC)
11408 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11410 if (TARGET_THUMB)
11411 /* For the Thumb we only allow values bigger than SImode in
11412 registers 0 - 6, so that there is always a second low
11413 register available to hold the upper part of the value.
11414 We probably ought to ensure that the register is the
11415 start of an even numbered register pair. */
11416 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11418 if (IS_CIRRUS_REGNUM (regno))
11419 /* We have outlawed SI values in Cirrus registers because they
11420 reside in the lower 32 bits, but SF values reside in the
11421 upper 32 bits. This causes gcc all sorts of grief. We can't
11422 even split the registers into pairs because Cirrus SI values
11423 get sign-extended to 64 bits -- aldyh. */
11424 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11426 if (IS_VFP_REGNUM (regno))
11428 if (mode == SFmode || mode == SImode)
11429 return TRUE;
11431 /* DFmode values are only valid in even register pairs. */
11432 if (mode == DFmode)
11433 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11434 return FALSE;
11437 if (IS_IWMMXT_GR_REGNUM (regno))
11438 return mode == SImode;
11440 if (IS_IWMMXT_REGNUM (regno))
11441 return VALID_IWMMXT_REG_MODE (mode);
11443 /* We allow any value to be stored in the general registers.
11444 Restrict doubleword quantities to even register pairs so that we can
11445 use ldrd. */
11446 if (regno <= LAST_ARM_REGNUM)
11447 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11449 if ( regno == FRAME_POINTER_REGNUM
11450 || regno == ARG_POINTER_REGNUM)
11451 /* We only allow integers in the fake hard registers. */
11452 return GET_MODE_CLASS (mode) == MODE_INT;
11454 /* The only registers left are the FPA registers
11455 which we only allow to hold FP values. */
11456 return GET_MODE_CLASS (mode) == MODE_FLOAT
11457 && regno >= FIRST_FPA_REGNUM
11458 && regno <= LAST_FPA_REGNUM;
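/* Some illustrative consequences of the rules above: DFmode is
   accepted in a VFP register pair starting at an even offset from
   FIRST_VFP_REGNUM but rejected at an odd one, and with TARGET_LDRD a
   DImode value is rejected in an odd-numbered core register such as
   r1, so that ldrd/strd can always be used.  */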
11461 int
11462 arm_regno_class (int regno)
11464 if (TARGET_THUMB)
11466 if (regno == STACK_POINTER_REGNUM)
11467 return STACK_REG;
11468 if (regno == CC_REGNUM)
11469 return CC_REG;
11470 if (regno < 8)
11471 return LO_REGS;
11472 return HI_REGS;
11475 if ( regno <= LAST_ARM_REGNUM
11476 || regno == FRAME_POINTER_REGNUM
11477 || regno == ARG_POINTER_REGNUM)
11478 return GENERAL_REGS;
11480 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11481 return NO_REGS;
11483 if (IS_CIRRUS_REGNUM (regno))
11484 return CIRRUS_REGS;
11486 if (IS_VFP_REGNUM (regno))
11487 return VFP_REGS;
11489 if (IS_IWMMXT_REGNUM (regno))
11490 return IWMMXT_REGS;
11492 if (IS_IWMMXT_GR_REGNUM (regno))
11493 return IWMMXT_GR_REGS;
11495 return FPA_REGS;
11498 /* Handle a special case when computing the offset
11499 of an argument from the frame pointer. */
11500 int
11501 arm_debugger_arg_offset (int value, rtx addr)
11503 rtx insn;
11505 /* We are only interested if dbxout_parms() failed to compute the offset. */
11506 if (value != 0)
11507 return 0;
11509 /* We can only cope with the case where the address is held in a register. */
11510 if (GET_CODE (addr) != REG)
11511 return 0;
11513 /* If we are using the frame pointer to point at the argument, then
11514 an offset of 0 is correct. */
11515 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11516 return 0;
11518 /* If we are using the stack pointer to point at the
11519 argument, then an offset of 0 is correct. */
11520 if ((TARGET_THUMB || !frame_pointer_needed)
11521 && REGNO (addr) == SP_REGNUM)
11522 return 0;
11524 /* Oh dear. The argument is pointed to by a register rather
11525 than being held in a register, or being stored at a known
11526 offset from the frame pointer. Since GDB only understands
11527 those two kinds of argument we must translate the address
11528 held in the register into an offset from the frame pointer.
11529 We do this by searching through the insns for the function
11530 looking to see where this register gets its value. If the
11531 register is initialized from the frame pointer plus an offset
11532 then we are in luck and we can continue, otherwise we give up.
11534 This code is exercised by producing debugging information
11535 for a function with arguments like this:
11537 double func (double a, double b, int c, double d) {return d;}
11539 Without this code the stab for parameter 'd' will be set to
11540 an offset of 0 from the frame pointer, rather than 8. */
11542 /* The if() statement says:
11544 If the insn is a normal instruction
11545 and if the insn is setting the value in a register
11546 and if the register being set is the register holding the address of the argument
11547 and if the address is computed by an addition
11548 that involves adding to a register
11549 which is the frame pointer
11550 a constant integer
11552 then... */
11554 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11556 if ( GET_CODE (insn) == INSN
11557 && GET_CODE (PATTERN (insn)) == SET
11558 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11559 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11560 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11561 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11562 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11565 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11567 break;
11571 if (value == 0)
11573 debug_rtx (addr);
11574 warning ("unable to compute real location of stacked parameter");
11575 value = 8; /* XXX magic hack */
11578 return value;
11581 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11582 do \
11584 if ((MASK) & insn_flags) \
11585 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11586 BUILT_IN_MD, NULL, NULL_TREE); \
11588 while (0)
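/* A typical use (taken from further below):

     def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void,
                   ARM_BUILTIN_WZERO);

   registers the builtin only when the selected CPU's insn_flags
   include FL_IWMMXT.  */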
11590 struct builtin_description
11592 const unsigned int mask;
11593 const enum insn_code icode;
11594 const char * const name;
11595 const enum arm_builtins code;
11596 const enum rtx_code comparison;
11597 const unsigned int flag;
11600 static const struct builtin_description bdesc_2arg[] =
11602 #define IWMMXT_BUILTIN(code, string, builtin) \
11603 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11604 ARM_BUILTIN_##builtin, 0, 0 },
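/* For example, the first entry below expands to:
     { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
       ARM_BUILTIN_WADDB, 0, 0 },  */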
11606 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11607 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11608 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11609 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11610 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11611 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11612 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11613 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11614 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11615 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11616 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11617 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11618 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11619 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11620 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11621 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11622 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11623 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11624 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11625 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11626 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11627 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11628 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11629 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11630 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11631 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11632 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11633 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11634 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11635 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11636 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11637 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11638 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11639 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11640 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11641 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11642 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11643 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11644 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11645 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11646 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11647 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11648 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11649 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11650 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11651 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11652 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11653 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11654 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11655 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11656 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11657 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11658 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11659 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11660 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11661 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11662 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11663 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11665 #define IWMMXT_BUILTIN2(code, builtin) \
11666 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11668 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11669 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11670 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11671 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11672 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11673 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11674 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11675 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11676 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11677 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11678 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11679 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11680 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11681 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11682 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11683 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11684 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11685 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11686 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11687 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11688 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11689 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11690 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11691 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11692 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11693 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11694 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11695 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11696 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11697 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11698 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11699 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11702 static const struct builtin_description bdesc_1arg[] =
11704 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11705 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11706 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11707 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11708 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11709 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11710 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11711 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11712 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11713 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11714 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11715 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11716 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11717 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11718 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11719 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11720 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11721 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11724 /* Set up all the iWMMXt builtins. This is
11725 not called if TARGET_IWMMXT is zero. */
11727 static void
11728 arm_init_iwmmxt_builtins (void)
11730 const struct builtin_description * d;
11731 size_t i;
11732 tree endlink = void_list_node;
11734 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11735 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11736 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
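/* Each of these is a 64-bit vector type: V8QI is eight 8-bit
   elements, V4HI four 16-bit elements and V2SI two 32-bit elements.
   The *_ftype_* nodes below are function types built from them; e.g.
   v8qi_ftype_v8qi_v8qi_int describes "V8QI f (V8QI, V8QI, int)".  */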
11738 tree int_ftype_int
11739 = build_function_type (integer_type_node,
11740 tree_cons (NULL_TREE, integer_type_node, endlink));
11741 tree v8qi_ftype_v8qi_v8qi_int
11742 = build_function_type (V8QI_type_node,
11743 tree_cons (NULL_TREE, V8QI_type_node,
11744 tree_cons (NULL_TREE, V8QI_type_node,
11745 tree_cons (NULL_TREE,
11746 integer_type_node,
11747 endlink))));
11748 tree v4hi_ftype_v4hi_int
11749 = build_function_type (V4HI_type_node,
11750 tree_cons (NULL_TREE, V4HI_type_node,
11751 tree_cons (NULL_TREE, integer_type_node,
11752 endlink)));
11753 tree v2si_ftype_v2si_int
11754 = build_function_type (V2SI_type_node,
11755 tree_cons (NULL_TREE, V2SI_type_node,
11756 tree_cons (NULL_TREE, integer_type_node,
11757 endlink)));
11758 tree v2si_ftype_di_di
11759 = build_function_type (V2SI_type_node,
11760 tree_cons (NULL_TREE, long_long_integer_type_node,
11761 tree_cons (NULL_TREE, long_long_integer_type_node,
11762 endlink)));
11763 tree di_ftype_di_int
11764 = build_function_type (long_long_integer_type_node,
11765 tree_cons (NULL_TREE, long_long_integer_type_node,
11766 tree_cons (NULL_TREE, integer_type_node,
11767 endlink)));
11768 tree di_ftype_di_int_int
11769 = build_function_type (long_long_integer_type_node,
11770 tree_cons (NULL_TREE, long_long_integer_type_node,
11771 tree_cons (NULL_TREE, integer_type_node,
11772 tree_cons (NULL_TREE,
11773 integer_type_node,
11774 endlink))));
11775 tree int_ftype_v8qi
11776 = build_function_type (integer_type_node,
11777 tree_cons (NULL_TREE, V8QI_type_node,
11778 endlink));
11779 tree int_ftype_v4hi
11780 = build_function_type (integer_type_node,
11781 tree_cons (NULL_TREE, V4HI_type_node,
11782 endlink));
11783 tree int_ftype_v2si
11784 = build_function_type (integer_type_node,
11785 tree_cons (NULL_TREE, V2SI_type_node,
11786 endlink));
11787 tree int_ftype_v8qi_int
11788 = build_function_type (integer_type_node,
11789 tree_cons (NULL_TREE, V8QI_type_node,
11790 tree_cons (NULL_TREE, integer_type_node,
11791 endlink)));
11792 tree int_ftype_v4hi_int
11793 = build_function_type (integer_type_node,
11794 tree_cons (NULL_TREE, V4HI_type_node,
11795 tree_cons (NULL_TREE, integer_type_node,
11796 endlink)));
11797 tree int_ftype_v2si_int
11798 = build_function_type (integer_type_node,
11799 tree_cons (NULL_TREE, V2SI_type_node,
11800 tree_cons (NULL_TREE, integer_type_node,
11801 endlink)));
11802 tree v8qi_ftype_v8qi_int_int
11803 = build_function_type (V8QI_type_node,
11804 tree_cons (NULL_TREE, V8QI_type_node,
11805 tree_cons (NULL_TREE, integer_type_node,
11806 tree_cons (NULL_TREE,
11807 integer_type_node,
11808 endlink))));
11809 tree v4hi_ftype_v4hi_int_int
11810 = build_function_type (V4HI_type_node,
11811 tree_cons (NULL_TREE, V4HI_type_node,
11812 tree_cons (NULL_TREE, integer_type_node,
11813 tree_cons (NULL_TREE,
11814 integer_type_node,
11815 endlink))));
11816 tree v2si_ftype_v2si_int_int
11817 = build_function_type (V2SI_type_node,
11818 tree_cons (NULL_TREE, V2SI_type_node,
11819 tree_cons (NULL_TREE, integer_type_node,
11820 tree_cons (NULL_TREE,
11821 integer_type_node,
11822 endlink))));
11823 /* Miscellaneous. */
11824 tree v8qi_ftype_v4hi_v4hi
11825 = build_function_type (V8QI_type_node,
11826 tree_cons (NULL_TREE, V4HI_type_node,
11827 tree_cons (NULL_TREE, V4HI_type_node,
11828 endlink)));
11829 tree v4hi_ftype_v2si_v2si
11830 = build_function_type (V4HI_type_node,
11831 tree_cons (NULL_TREE, V2SI_type_node,
11832 tree_cons (NULL_TREE, V2SI_type_node,
11833 endlink)));
11834 tree v2si_ftype_v4hi_v4hi
11835 = build_function_type (V2SI_type_node,
11836 tree_cons (NULL_TREE, V4HI_type_node,
11837 tree_cons (NULL_TREE, V4HI_type_node,
11838 endlink)));
11839 tree v2si_ftype_v8qi_v8qi
11840 = build_function_type (V2SI_type_node,
11841 tree_cons (NULL_TREE, V8QI_type_node,
11842 tree_cons (NULL_TREE, V8QI_type_node,
11843 endlink)));
11844 tree v4hi_ftype_v4hi_di
11845 = build_function_type (V4HI_type_node,
11846 tree_cons (NULL_TREE, V4HI_type_node,
11847 tree_cons (NULL_TREE,
11848 long_long_integer_type_node,
11849 endlink)));
11850 tree v2si_ftype_v2si_di
11851 = build_function_type (V2SI_type_node,
11852 tree_cons (NULL_TREE, V2SI_type_node,
11853 tree_cons (NULL_TREE,
11854 long_long_integer_type_node,
11855 endlink)));
11856 tree void_ftype_int_int
11857 = build_function_type (void_type_node,
11858 tree_cons (NULL_TREE, integer_type_node,
11859 tree_cons (NULL_TREE, integer_type_node,
11860 endlink)));
11861 tree di_ftype_void
11862 = build_function_type (long_long_unsigned_type_node, endlink);
11863 tree di_ftype_v8qi
11864 = build_function_type (long_long_integer_type_node,
11865 tree_cons (NULL_TREE, V8QI_type_node,
11866 endlink));
11867 tree di_ftype_v4hi
11868 = build_function_type (long_long_integer_type_node,
11869 tree_cons (NULL_TREE, V4HI_type_node,
11870 endlink));
11871 tree di_ftype_v2si
11872 = build_function_type (long_long_integer_type_node,
11873 tree_cons (NULL_TREE, V2SI_type_node,
11874 endlink));
11875 tree v2si_ftype_v4hi
11876 = build_function_type (V2SI_type_node,
11877 tree_cons (NULL_TREE, V4HI_type_node,
11878 endlink));
11879 tree v4hi_ftype_v8qi
11880 = build_function_type (V4HI_type_node,
11881 tree_cons (NULL_TREE, V8QI_type_node,
11882 endlink));
11884 tree di_ftype_di_v4hi_v4hi
11885 = build_function_type (long_long_unsigned_type_node,
11886 tree_cons (NULL_TREE,
11887 long_long_unsigned_type_node,
11888 tree_cons (NULL_TREE, V4HI_type_node,
11889 tree_cons (NULL_TREE,
11890 V4HI_type_node,
11891 endlink))));
11893 tree di_ftype_v4hi_v4hi
11894 = build_function_type (long_long_unsigned_type_node,
11895 tree_cons (NULL_TREE, V4HI_type_node,
11896 tree_cons (NULL_TREE, V4HI_type_node,
11897 endlink)));
11899 /* Normal vector binops. */
11900 tree v8qi_ftype_v8qi_v8qi
11901 = build_function_type (V8QI_type_node,
11902 tree_cons (NULL_TREE, V8QI_type_node,
11903 tree_cons (NULL_TREE, V8QI_type_node,
11904 endlink)));
11905 tree v4hi_ftype_v4hi_v4hi
11906 = build_function_type (V4HI_type_node,
11907 tree_cons (NULL_TREE, V4HI_type_node,
11908 tree_cons (NULL_TREE, V4HI_type_node,
11909 endlink)));
11910 tree v2si_ftype_v2si_v2si
11911 = build_function_type (V2SI_type_node,
11912 tree_cons (NULL_TREE, V2SI_type_node,
11913 tree_cons (NULL_TREE, V2SI_type_node,
11914 endlink)));
11915 tree di_ftype_di_di
11916 = build_function_type (long_long_unsigned_type_node,
11917 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11918 tree_cons (NULL_TREE,
11919 long_long_unsigned_type_node,
11920 endlink)));
11922 /* Add all builtins that are more or less simple operations on two
11923 operands. */
11924 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11926 /* Use one of the operands; the target can have a different mode for
11927 mask-generating compares. */
11928 enum machine_mode mode;
11929 tree type;
11931 if (d->name == 0)
11932 continue;
11934 mode = insn_data[d->icode].operand[1].mode;
11936 switch (mode)
11938 case V8QImode:
11939 type = v8qi_ftype_v8qi_v8qi;
11940 break;
11941 case V4HImode:
11942 type = v4hi_ftype_v4hi_v4hi;
11943 break;
11944 case V2SImode:
11945 type = v2si_ftype_v2si_v2si;
11946 break;
11947 case DImode:
11948 type = di_ftype_di_di;
11949 break;
11951 default:
11952 abort ();
11955 def_mbuiltin (d->mask, d->name, type, d->code);
11958 /* Add the remaining MMX insns with somewhat more complicated types. */
11959 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11960 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11961 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11963 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11965 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11966 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11967 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11974 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11981 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11991 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11995 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11999 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12002 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12005 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12006 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12009 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12010 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12013 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12016 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12019 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12020 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12023 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12024 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12025 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12026 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12027 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12028 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12029 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12030 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12031 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12032 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12033 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12034 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12036 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12037 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12038 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12039 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12041 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12042 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12043 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12044 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12045 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12046 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12047 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
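/* A hypothetical user-level sketch (for illustration only; this exact
   source is not part of the compiler): once registered above, the
   builtins can be called directly from C when the target supports
   iWMMXt, e.g. using the di_ftype_di_v4hi_v4hi builtin:

     typedef short v4hi __attribute__ ((vector_size (8)));

     long long
     dot_accumulate (long long acc, v4hi a, v4hi b)
     {
       return __builtin_arm_wmacs (acc, a, b);
     }

   The FL_IWMMXT mask passed to def_mbuiltin ensures each builtin is
   only made visible when the target flags include iWMMXt support.  */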
12050 static void
12051 arm_init_builtins (void)
12053 if (TARGET_REALLY_IWMMXT)
12054 arm_init_iwmmxt_builtins ();
12057 /* Errors in the source file can cause expand_expr to return const0_rtx
12058 where we expect a vector. To avoid crashing, use one of the vector
12059 clear instructions. */
12061 static rtx
12062 safe_vector_operand (rtx x, enum machine_mode mode)
12064 if (x != const0_rtx)
12065 return x;
12066 x = gen_reg_rtx (mode);
12068 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12069 : gen_rtx_SUBREG (DImode, x, 0)));
12070 return x;
12073 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12075 static rtx
12076 arm_expand_binop_builtin (enum insn_code icode,
12077 tree arglist, rtx target)
12079 rtx pat;
12080 tree arg0 = TREE_VALUE (arglist);
12081 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12082 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12083 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12084 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12085 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12086 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12088 if (VECTOR_MODE_P (mode0))
12089 op0 = safe_vector_operand (op0, mode0);
12090 if (VECTOR_MODE_P (mode1))
12091 op1 = safe_vector_operand (op1, mode1);
12093 if (! target
12094 || GET_MODE (target) != tmode
12095 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12096 target = gen_reg_rtx (tmode);
12098 /* In case the insn wants input operands in modes different from
12099 the result, abort. */
12100 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12101 abort ();
12103 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12104 op0 = copy_to_mode_reg (mode0, op0);
12105 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12106 op1 = copy_to_mode_reg (mode1, op1);
12108 pat = GEN_FCN (icode) (target, op0, op1);
12109 if (! pat)
12110 return 0;
12111 emit_insn (pat);
12112 return target;
12115 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12117 static rtx
12118 arm_expand_unop_builtin (enum insn_code icode,
12119 tree arglist, rtx target, int do_load)
12121 rtx pat;
12122 tree arg0 = TREE_VALUE (arglist);
12123 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12124 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12125 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12127 if (! target
12128 || GET_MODE (target) != tmode
12129 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12130 target = gen_reg_rtx (tmode);
12131 if (do_load)
12132 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12133 else
12135 if (VECTOR_MODE_P (mode0))
12136 op0 = safe_vector_operand (op0, mode0);
12138 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12139 op0 = copy_to_mode_reg (mode0, op0);
12142 pat = GEN_FCN (icode) (target, op0);
12143 if (! pat)
12144 return 0;
12145 emit_insn (pat);
12146 return target;
12149 /* Expand an expression EXP that calls a built-in function,
12150 with result going to TARGET if that's convenient
12151 (and in mode MODE if that's convenient).
12152 SUBTARGET may be used as the target for computing one of EXP's operands.
12153 IGNORE is nonzero if the value is to be ignored. */
12155 static rtx
12156 arm_expand_builtin (tree exp,
12157 rtx target,
12158 rtx subtarget ATTRIBUTE_UNUSED,
12159 enum machine_mode mode ATTRIBUTE_UNUSED,
12160 int ignore ATTRIBUTE_UNUSED)
12162 const struct builtin_description * d;
12163 enum insn_code icode;
12164 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12165 tree arglist = TREE_OPERAND (exp, 1);
12166 tree arg0;
12167 tree arg1;
12168 tree arg2;
12169 rtx op0;
12170 rtx op1;
12171 rtx op2;
12172 rtx pat;
12173 int fcode = DECL_FUNCTION_CODE (fndecl);
12174 size_t i;
12175 enum machine_mode tmode;
12176 enum machine_mode mode0;
12177 enum machine_mode mode1;
12178 enum machine_mode mode2;
12180 switch (fcode)
12182 case ARM_BUILTIN_TEXTRMSB:
12183 case ARM_BUILTIN_TEXTRMUB:
12184 case ARM_BUILTIN_TEXTRMSH:
12185 case ARM_BUILTIN_TEXTRMUH:
12186 case ARM_BUILTIN_TEXTRMSW:
12187 case ARM_BUILTIN_TEXTRMUW:
12188 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12189 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12190 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12191 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12192 : CODE_FOR_iwmmxt_textrmw);
12194 arg0 = TREE_VALUE (arglist);
12195 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12196 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12197 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12198 tmode = insn_data[icode].operand[0].mode;
12199 mode0 = insn_data[icode].operand[1].mode;
12200 mode1 = insn_data[icode].operand[2].mode;
12202 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12203 op0 = copy_to_mode_reg (mode0, op0);
12204 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12206 /* @@@ better error message */
12207 error ("selector must be an immediate");
12208 return gen_reg_rtx (tmode);
12210 if (target == 0
12211 || GET_MODE (target) != tmode
12212 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12213 target = gen_reg_rtx (tmode);
12214 pat = GEN_FCN (icode) (target, op0, op1);
12215 if (! pat)
12216 return 0;
12217 emit_insn (pat);
12218 return target;
12220 case ARM_BUILTIN_TINSRB:
12221 case ARM_BUILTIN_TINSRH:
12222 case ARM_BUILTIN_TINSRW:
12223 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12224 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12225 : CODE_FOR_iwmmxt_tinsrw);
12226 arg0 = TREE_VALUE (arglist);
12227 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12228 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12229 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12230 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12231 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12232 tmode = insn_data[icode].operand[0].mode;
12233 mode0 = insn_data[icode].operand[1].mode;
12234 mode1 = insn_data[icode].operand[2].mode;
12235 mode2 = insn_data[icode].operand[3].mode;
12237 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12238 op0 = copy_to_mode_reg (mode0, op0);
12239 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12240 op1 = copy_to_mode_reg (mode1, op1);
12241 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12243 /* @@@ better error message */
12244 error ("selector must be an immediate");
12245 return const0_rtx;
12247 if (target == 0
12248 || GET_MODE (target) != tmode
12249 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12250 target = gen_reg_rtx (tmode);
12251 pat = GEN_FCN (icode) (target, op0, op1, op2);
12252 if (! pat)
12253 return 0;
12254 emit_insn (pat);
12255 return target;
12257 case ARM_BUILTIN_SETWCX:
12258 arg0 = TREE_VALUE (arglist);
12259 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12260 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12261 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12262 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12263 return 0;
12265 case ARM_BUILTIN_GETWCX:
12266 arg0 = TREE_VALUE (arglist);
12267 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12268 target = gen_reg_rtx (SImode);
12269 emit_insn (gen_iwmmxt_tmrc (target, op0));
12270 return target;
12272 case ARM_BUILTIN_WSHUFH:
12273 icode = CODE_FOR_iwmmxt_wshufh;
12274 arg0 = TREE_VALUE (arglist);
12275 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12276 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12277 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12278 tmode = insn_data[icode].operand[0].mode;
12279 mode1 = insn_data[icode].operand[1].mode;
12280 mode2 = insn_data[icode].operand[2].mode;
12282 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12283 op0 = copy_to_mode_reg (mode1, op0);
12284 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12286 /* @@@ better error message */
12287 error ("mask must be an immediate");
12288 return const0_rtx;
12290 if (target == 0
12291 || GET_MODE (target) != tmode
12292 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12293 target = gen_reg_rtx (tmode);
12294 pat = GEN_FCN (icode) (target, op0, op1);
12295 if (! pat)
12296 return 0;
12297 emit_insn (pat);
12298 return target;
12300 case ARM_BUILTIN_WSADB:
12301 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12302 case ARM_BUILTIN_WSADH:
12303 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12304 case ARM_BUILTIN_WSADBZ:
12305 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12306 case ARM_BUILTIN_WSADHZ:
12307 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12309 /* Several three-argument builtins. */
12310 case ARM_BUILTIN_WMACS:
12311 case ARM_BUILTIN_WMACU:
12312 case ARM_BUILTIN_WALIGN:
12313 case ARM_BUILTIN_TMIA:
12314 case ARM_BUILTIN_TMIAPH:
12315 case ARM_BUILTIN_TMIATT:
12316 case ARM_BUILTIN_TMIATB:
12317 case ARM_BUILTIN_TMIABT:
12318 case ARM_BUILTIN_TMIABB:
12319 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12320 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12321 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12322 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12323 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12324 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12325 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12326 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12327 : CODE_FOR_iwmmxt_walign);
12328 arg0 = TREE_VALUE (arglist);
12329 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12330 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12331 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12332 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12333 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12334 tmode = insn_data[icode].operand[0].mode;
12335 mode0 = insn_data[icode].operand[1].mode;
12336 mode1 = insn_data[icode].operand[2].mode;
12337 mode2 = insn_data[icode].operand[3].mode;
12339 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12340 op0 = copy_to_mode_reg (mode0, op0);
12341 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12342 op1 = copy_to_mode_reg (mode1, op1);
12343 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12344 op2 = copy_to_mode_reg (mode2, op2);
12345 if (target == 0
12346 || GET_MODE (target) != tmode
12347 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12348 target = gen_reg_rtx (tmode);
12349 pat = GEN_FCN (icode) (target, op0, op1, op2);
12350 if (! pat)
12351 return 0;
12352 emit_insn (pat);
12353 return target;
12355 case ARM_BUILTIN_WZERO:
12356 target = gen_reg_rtx (DImode);
12357 emit_insn (gen_iwmmxt_clrdi (target));
12358 return target;
12360 default:
12361 break;
12364 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12365 if (d->code == (const enum arm_builtins) fcode)
12366 return arm_expand_binop_builtin (d->icode, arglist, target);
12368 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12369 if (d->code == (const enum arm_builtins) fcode)
12370 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12372 /* @@@ Should really do something sensible here. */
12373 return NULL_RTX;
12376 /* Recursively search through all of the blocks in a function
12377 checking to see if any of the variables created in that
12378 function match the RTX called 'orig'. If they do then
12379 replace them with the RTX called 'new'. */
12380 static void
12381 replace_symbols_in_block (tree block, rtx orig, rtx new)
12383 for (; block; block = BLOCK_CHAIN (block))
12385 tree sym;
12387 if (!TREE_USED (block))
12388 continue;
12390 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12392 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12393 || DECL_IGNORED_P (sym)
12394 || TREE_CODE (sym) != VAR_DECL
12395 || DECL_EXTERNAL (sym)
12396 || !rtx_equal_p (DECL_RTL (sym), orig)
12398 continue;
12400 SET_DECL_RTL (sym, new);
12403 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12407 /* Return the number (counting from 0) of
12408 the least significant set bit in MASK. */
12410 inline static int
12411 number_of_first_bit_set (unsigned mask)
12413 int bit;
12415 for (bit = 0;
12416 (mask & (1 << bit)) == 0;
12417 ++bit)
12418 continue;
12420 return bit;
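/* For example, number_of_first_bit_set (0x14) is 2, since bit 2 is the
   lowest bit set in binary 10100.  MASK must be nonzero, or the loop
   above would not terminate.  */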
12423 /* Emit code to push or pop registers to or from the stack. F is the
12424 assembly file. MASK is the registers to push or pop. PUSH is
12425 nonzero if we should push, and zero if we should pop. For debugging
12426 output, if pushing, adjust CFA_OFFSET by the amount of space added
12427 to the stack. REAL_REGS should have the same number of bits set as
12428 MASK, and will be used instead (in the same order) to describe which
12429 registers were saved - this is used to mark the save slots when we
12430 push high registers after moving them to low registers. */
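/* As an illustrative example: thumb_pushpop (f, 0x4013, 1, &offset,
   0x4013) -- bits 0, 1, 4 and 14 set -- emits

     push {r0, r1, r4, lr}

   and, when debug frame output is enabled, advances *offset by 16
   bytes (four pushed words).  */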
12431 static void
12432 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12433 unsigned long real_regs)
12435 int regno;
12436 int lo_mask = mask & 0xFF;
12437 int pushed_words = 0;
12439 if (mask == 0)
12440 abort ();
12442 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12444 /* Special case.  Do not generate a POP PC statement here; do it in
12445 thumb_exit ().  */
12446 thumb_exit (f, -1);
12447 return;
12450 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12452 /* Look at the low registers first. */
12453 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12455 if (lo_mask & 1)
12457 asm_fprintf (f, "%r", regno);
12459 if ((lo_mask & ~1) != 0)
12460 fprintf (f, ", ");
12462 pushed_words++;
12466 if (push && (mask & (1 << LR_REGNUM)))
12468 /* Catch pushing the LR. */
12469 if (mask & 0xFF)
12470 fprintf (f, ", ");
12472 asm_fprintf (f, "%r", LR_REGNUM);
12474 pushed_words++;
12476 else if (!push && (mask & (1 << PC_REGNUM)))
12478 /* Catch popping the PC. */
12479 if (TARGET_INTERWORK || TARGET_BACKTRACE
12480 || current_function_calls_eh_return)
12482 /* The PC is never popped directly; instead
12483 it is popped into r3 and then BX is used. */
12484 fprintf (f, "}\n");
12486 thumb_exit (f, -1);
12488 return;
12490 else
12492 if (mask & 0xFF)
12493 fprintf (f, ", ");
12495 asm_fprintf (f, "%r", PC_REGNUM);
12499 fprintf (f, "}\n");
12501 if (push && pushed_words && dwarf2out_do_frame ())
12503 char *l = dwarf2out_cfi_label ();
12504 int pushed_mask = real_regs;
12506 *cfa_offset += pushed_words * 4;
12507 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12509 pushed_words = 0;
12510 pushed_mask = real_regs;
12511 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12513 if (pushed_mask & 1)
12514 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
12519 /* Generate code to return from a thumb function.
12520 If 'reg_containing_return_addr' is -1, then the return address is
12521 actually on the stack, at the stack pointer. */
12522 static void
12523 thumb_exit (FILE *f, int reg_containing_return_addr)
12525 unsigned regs_available_for_popping;
12526 unsigned regs_to_pop;
12527 int pops_needed;
12528 unsigned available;
12529 unsigned required;
12530 int mode;
12531 int size;
12532 int restore_a4 = FALSE;
12534 /* Compute the registers we need to pop. */
12535 regs_to_pop = 0;
12536 pops_needed = 0;
12538 if (reg_containing_return_addr == -1)
12540 regs_to_pop |= 1 << LR_REGNUM;
12541 ++pops_needed;
12544 if (TARGET_BACKTRACE)
12546 /* Restore the (ARM) frame pointer and stack pointer. */
12547 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12548 pops_needed += 2;
12551 /* If there is nothing to pop then just emit the BX instruction and
12552 return. */
12553 if (pops_needed == 0)
12555 if (current_function_calls_eh_return)
12556 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12558 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12559 return;
12561 /* Otherwise, if we are not supporting interworking, we have not created
12562 a backtrace structure, and the function was not entered in ARM mode,
12563 then just pop the return address straight into the PC. */
12564 else if (!TARGET_INTERWORK
12565 && !TARGET_BACKTRACE
12566 && !is_called_in_ARM_mode (current_function_decl)
12567 && !current_function_calls_eh_return)
12569 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12570 return;
12573 /* Find out how many of the (return) argument registers we can corrupt. */
12574 regs_available_for_popping = 0;
12576 /* If returning via __builtin_eh_return, the bottom three registers
12577 all contain information needed for the return. */
12578 if (current_function_calls_eh_return)
12579 size = 12;
12580 else
12582 /* We can deduce the registers used from the function's
12583 return value. This is more reliable than examining
12584 regs_ever_live[] because that will be set if the register is
12585 ever used in the function, not just if the register is used
12586 to hold a return value. */
12588 if (current_function_return_rtx != 0)
12589 mode = GET_MODE (current_function_return_rtx);
12590 else
12591 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12593 size = GET_MODE_SIZE (mode);
12595 if (size == 0)
12597 /* In a void function we can use any argument register.
12598 In a function that returns a structure on the stack
12599 we can use the second and third argument registers. */
12600 if (mode == VOIDmode)
12601 regs_available_for_popping =
12602 (1 << ARG_REGISTER (1))
12603 | (1 << ARG_REGISTER (2))
12604 | (1 << ARG_REGISTER (3));
12605 else
12606 regs_available_for_popping =
12607 (1 << ARG_REGISTER (2))
12608 | (1 << ARG_REGISTER (3));
12610 else if (size <= 4)
12611 regs_available_for_popping =
12612 (1 << ARG_REGISTER (2))
12613 | (1 << ARG_REGISTER (3));
12614 else if (size <= 8)
12615 regs_available_for_popping =
12616 (1 << ARG_REGISTER (3));
12619 /* Match registers to be popped with registers into which we pop them. */
12620 for (available = regs_available_for_popping,
12621 required = regs_to_pop;
12622 required != 0 && available != 0;
12623 available &= ~(available & - available),
12624 required &= ~(required & - required))
12625 -- pops_needed;
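/* The expression (x & - x) used above isolates the lowest set bit of
   x, so each iteration of the loop retires one popped register and one
   popping register; e.g. available = 0x6 is reduced first to 0x4 and
   then to 0.  */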
12627 /* If we have any popping registers left over, remove them. */
12628 if (available > 0)
12629 regs_available_for_popping &= ~available;
12631 /* Otherwise if we need another popping register we can use
12632 the fourth argument register. */
12633 else if (pops_needed)
12635 /* If we have not found any free argument registers and
12636 reg a4 contains the return address, we must move it. */
12637 if (regs_available_for_popping == 0
12638 && reg_containing_return_addr == LAST_ARG_REGNUM)
12640 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12641 reg_containing_return_addr = LR_REGNUM;
12643 else if (size > 12)
12645 /* Register a4 is being used to hold part of the return value,
12646 but we have dire need of a free, low register. */
12647 restore_a4 = TRUE;
12649 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12652 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12654 /* The fourth argument register is available. */
12655 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12657 --pops_needed;
12661 /* Pop as many registers as we can. */
12662 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12663 regs_available_for_popping);
12665 /* Process the registers we popped. */
12666 if (reg_containing_return_addr == -1)
12668 /* The return address was popped into the lowest numbered register. */
12669 regs_to_pop &= ~(1 << LR_REGNUM);
12671 reg_containing_return_addr =
12672 number_of_first_bit_set (regs_available_for_popping);
12674 /* Remove this register from the mask of available registers, so that
12675 the return address will not be corrupted by further pops. */
12676 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12679 /* If we popped other registers then handle them here. */
12680 if (regs_available_for_popping)
12682 int frame_pointer;
12684 /* Work out which register currently contains the frame pointer. */
12685 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12687 /* Move it into the correct place. */
12688 asm_fprintf (f, "\tmov\t%r, %r\n",
12689 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12691 /* (Temporarily) remove it from the mask of popped registers. */
12692 regs_available_for_popping &= ~(1 << frame_pointer);
12693 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12695 if (regs_available_for_popping)
12697 int stack_pointer;
12699 /* We popped the stack pointer as well;
12700 find the register that contains it. */
12701 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12703 /* Move it into the stack register. */
12704 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12706 /* At this point we have popped all necessary registers, so
12707 do not worry about restoring regs_available_for_popping
12708 to its correct value:
12710 assert (pops_needed == 0)
12711 assert (regs_available_for_popping == (1 << frame_pointer))
12712 assert (regs_to_pop == (1 << STACK_POINTER)) */
12714 else
12716 /* Since we have just moved the popped value into the frame
12717 pointer, the popping register is available for reuse, and
12718 we know that we still have the stack pointer left to pop. */
12719 regs_available_for_popping |= (1 << frame_pointer);
12723 /* If we still have registers left on the stack, but we no longer have
12724 any registers into which we can pop them, then we must move the return
12725 address into the link register and make available the register that
12726 contained it. */
12727 if (regs_available_for_popping == 0 && pops_needed > 0)
12729 regs_available_for_popping |= 1 << reg_containing_return_addr;
12731 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12732 reg_containing_return_addr);
12734 reg_containing_return_addr = LR_REGNUM;
12737 /* If we have registers left on the stack then pop some more.
12738 We know that at most we will want to pop FP and SP. */
12739 if (pops_needed > 0)
12741 int popped_into;
12742 int move_to;
12744 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12745 regs_available_for_popping);
12747 /* We have popped either FP or SP.
12748 Move whichever one it is into the correct register. */
12749 popped_into = number_of_first_bit_set (regs_available_for_popping);
12750 move_to = number_of_first_bit_set (regs_to_pop);
12752 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12754 regs_to_pop &= ~(1 << move_to);
12756 --pops_needed;
12759 /* If we still have not popped everything then we must have only
12760 had one register available to us and we are now popping the SP. */
12761 if (pops_needed > 0)
12763 int popped_into;
12765 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12766 regs_available_for_popping);
12768 popped_into = number_of_first_bit_set (regs_available_for_popping);
12770 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12772 /* assert (regs_to_pop == (1 << STACK_POINTER))
12773 assert (pops_needed == 1) */
12777 /* If necessary restore the a4 register. */
12778 if (restore_a4)
12780 if (reg_containing_return_addr != LR_REGNUM)
12782 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12783 reg_containing_return_addr = LR_REGNUM;
12786 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12789 if (current_function_calls_eh_return)
12790 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12792 /* Return to caller. */
12793 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12797 void
12798 thumb_final_prescan_insn (rtx insn)
12800 if (flag_print_asm_name)
12801 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12802 INSN_ADDRESSES (INSN_UID (insn)));
12805 int
12806 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12808 unsigned HOST_WIDE_INT mask = 0xff;
12809 int i;
12811 if (val == 0) /* XXX */
12812 return 0;
12814 for (i = 0; i < 25; i++)
12815 if ((val & (mask << i)) == val)
12816 return 1;
12818 return 0;
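/* For example, 0x000ff000 (0xff << 12) and 0xff000000 (0xff << 24) are
   shiftable constants, while 0x00000101 is not, since its set bits do
   not fit within a single contiguous 8-bit window.  */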
12821 /* Returns nonzero if the current function contains,
12822 or might contain a far jump. */
12823 static int
12824 thumb_far_jump_used_p (void)
12826 rtx insn;
12828 /* This test is only important for leaf functions. */
12829 /* assert (!leaf_function_p ()); */
12831 /* If we have already decided that far jumps may be used,
12832 do not bother checking again, and always return true even if
12833 it turns out that they are not being used. Once we have made
12834 the decision that far jumps are present (and that hence the link
12835 register will be pushed onto the stack) we cannot go back on it. */
12836 if (cfun->machine->far_jump_used)
12837 return 1;
12839 /* If this function is not being called from the prologue/epilogue
12840 generation code then it must be being called from the
12841 INITIAL_ELIMINATION_OFFSET macro. */
12842 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12844 /* In this case we know that we are being asked about the elimination
12845 of the arg pointer register. If that register is not being used,
12846 then there are no arguments on the stack, and we do not have to
12847 worry that a far jump might force the prologue to push the link
12848 register, changing the stack offsets. In this case we can just
12849 return false, since the presence of far jumps in the function will
12850 not affect stack offsets.
12852 If the arg pointer is live (or if it was live, but has now been
12853 eliminated and so set to dead) then we do have to test to see if
12854 the function might contain a far jump. This test can lead to some
12855 false negatives, since before reload is completed, the length of
12856 branch instructions is not known, so gcc defaults to returning their
12857 longest length, which in turn sets the far jump attribute to true.
12859 A false negative will not result in bad code being generated, but it
12860 will result in a needless push and pop of the link register. We
12861 hope that this does not occur too often.
12863 If we need doubleword stack alignment this could affect the other
12864 elimination offsets so we can't risk getting it wrong. */
12865 if (regs_ever_live [ARG_POINTER_REGNUM])
12866 cfun->machine->arg_pointer_live = 1;
12867 else if (!cfun->machine->arg_pointer_live)
12868 return 0;
12871 /* Check to see if the function contains a branch
12872 insn with the far jump attribute set. */
12873 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12875 if (GET_CODE (insn) == JUMP_INSN
12876 /* Ignore tablejump patterns. */
12877 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12878 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12879 && get_attr_far_jump (insn) == FAR_JUMP_YES
12882 /* Record the fact that we have decided that
12883 the function does use far jumps. */
12884 cfun->machine->far_jump_used = 1;
12885 return 1;
12889 return 0;
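/* For context (ISA figures, stated here as a reminder): a Thumb
   unconditional branch reaches only about +/-2KB, so a jump beyond that
   range must be synthesized via BL, which clobbers the link register --
   hence the care taken above to record when LR must be saved.  */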
12892 /* Return nonzero if FUNC must be entered in ARM mode. */
12893 int
12894 is_called_in_ARM_mode (tree func)
12896 if (TREE_CODE (func) != FUNCTION_DECL)
12897 abort ();
12899 /* Ignore the problem about functions whose address is taken. */
12900 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12901 return TRUE;
12903 #ifdef ARM_PE
12904 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12905 #else
12906 return FALSE;
12907 #endif
12910 /* The bits which aren't usefully expanded as rtl. */
12911 const char *
12912 thumb_unexpanded_epilogue (void)
12914 int regno;
12915 unsigned long live_regs_mask = 0;
12916 int high_regs_pushed = 0;
12917 int had_to_push_lr;
12918 int size;
12919 int mode;
12921 if (return_used_this_function)
12922 return "";
12924 if (IS_NAKED (arm_current_func_type ()))
12925 return "";
12927 live_regs_mask = thumb_compute_save_reg_mask ();
12928 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12930 /* We can deduce the registers used from the function's return value.
12931 This is more reliable than examining regs_ever_live[] because that
12932 will be set if the register is ever used in the function, not just if
12933 the register is used to hold a return value. */
12935 if (current_function_return_rtx != 0)
12936 mode = GET_MODE (current_function_return_rtx);
12937 else
12938 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12940 size = GET_MODE_SIZE (mode);
12942 /* The prologue may have pushed some high registers to use as
12943 work registers, e.g. the testsuite file:
12944 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12945 compiles to produce:
12946 push {r4, r5, r6, r7, lr}
12947 mov r7, r9
12948 mov r6, r8
12949 push {r6, r7}
12950 as part of the prologue. We have to undo that pushing here. */
12952 if (high_regs_pushed)
12954 unsigned long mask = live_regs_mask & 0xff;
12955 int next_hi_reg;
12957 /* The available low registers depend on the size of the value we are
12958 returning. */
12959 if (size <= 12)
12960 mask |= 1 << 3;
12961 if (size <= 8)
12962 mask |= 1 << 2;
12964 if (mask == 0)
12965 /* Oh dear! We have no low registers into which we can pop
12966 high registers! */
12967 internal_error
12968 ("no low registers available for popping high registers");
12970 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12971 if (live_regs_mask & (1 << next_hi_reg))
12972 break;
12974 while (high_regs_pushed)
12976 /* Find lo register(s) into which the high register(s) can
12977 be popped. */
12978 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12980 if (mask & (1 << regno))
12981 high_regs_pushed--;
12982 if (high_regs_pushed == 0)
12983 break;
12986 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12988 /* Pop the values into the low register(s). */
12989 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12991 /* Move the value(s) into the high registers. */
12992 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12994 if (mask & (1 << regno))
12996 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12997 regno);
12999 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13000 if (live_regs_mask & (1 << next_hi_reg))
13001 break;
13005 live_regs_mask &= ~0x0f00;
13008 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13009 live_regs_mask &= 0xff;
13011 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13013 /* Pop the return address into the PC. */
13014 if (had_to_push_lr)
13015 live_regs_mask |= 1 << PC_REGNUM;
13017 /* Either no argument registers were pushed or a backtrace
13018 structure was created which includes an adjusted stack
13019 pointer, so just pop everything. */
13020 if (live_regs_mask)
13021 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13022 live_regs_mask);
13024 /* We have either just popped the return address into the
13025 PC, or it was kept in LR for the entire function. */
13026 if (!had_to_push_lr)
13027 thumb_exit (asm_out_file, LR_REGNUM);
13029 else
13031 /* Pop everything but the return address. */
13032 if (live_regs_mask)
13033 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13034 live_regs_mask);
13036 if (had_to_push_lr)
13038 if (size > 12)
13040 /* We have no free low regs, so save one. */
13041 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13042 LAST_ARG_REGNUM);
13045 /* Get the return address into a temporary register. */
13046 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13047 1 << LAST_ARG_REGNUM);
13049 if (size > 12)
13051 /* Move the return address to lr. */
13052 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13053 LAST_ARG_REGNUM);
13054 /* Restore the low register. */
13055 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13056 IP_REGNUM);
13057 regno = LR_REGNUM;
13059 else
13060 regno = LAST_ARG_REGNUM;
13062 else
13063 regno = LR_REGNUM;
13065 /* Remove the argument registers that were pushed onto the stack. */
13066 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13067 SP_REGNUM, SP_REGNUM,
13068 current_function_pretend_args_size);
13070 thumb_exit (asm_out_file, regno);
13073 return "";
13076 /* Functions to save and restore machine-specific function data. */
13077 static struct machine_function *
13078 arm_init_machine_status (void)
13080 struct machine_function *machine;
13081 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13083 #if ARM_FT_UNKNOWN != 0
13084 machine->func_type = ARM_FT_UNKNOWN;
13085 #endif
13086 return machine;
13089 /* Return an RTX indicating where the return address to the
13090 calling function can be found. */
13091 rtx
13092 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13094 if (count != 0)
13095 return NULL_RTX;
13097 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13100 /* Do anything needed before RTL is emitted for each function. */
13101 void
13102 arm_init_expanders (void)
13104 /* Arrange to initialize and mark the machine per-function status. */
13105 init_machine_status = arm_init_machine_status;
13107 /* This is to stop the combine pass optimizing away the alignment
13108 adjustment of va_arg. */
13109 /* ??? It is claimed that this should not be necessary. */
13110 if (cfun)
13111 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13115 /* Like arm_compute_initial_elimination_offset. Simpler because
13116 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13118 HOST_WIDE_INT
13119 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13121 arm_stack_offsets *offsets;
13123 offsets = arm_get_frame_offsets ();
13125 switch (from)
13127 case ARG_POINTER_REGNUM:
13128 switch (to)
13130 case STACK_POINTER_REGNUM:
13131 return offsets->outgoing_args - offsets->saved_args;
13133 case FRAME_POINTER_REGNUM:
13134 return offsets->soft_frame - offsets->saved_args;
13136 case THUMB_HARD_FRAME_POINTER_REGNUM:
13137 case ARM_HARD_FRAME_POINTER_REGNUM:
13138 return offsets->saved_regs - offsets->saved_args;
13140 default:
13141 abort ();
13143 break;
13145 case FRAME_POINTER_REGNUM:
13146 switch (to)
13148 case STACK_POINTER_REGNUM:
13149 return offsets->outgoing_args - offsets->soft_frame;
13151 case THUMB_HARD_FRAME_POINTER_REGNUM:
13152 case ARM_HARD_FRAME_POINTER_REGNUM:
13153 return offsets->saved_regs - offsets->soft_frame;
13155 default:
13156 abort ();
13158 break;
13160 default:
13161 abort ();
13166 /* Generate the rest of a function's prologue. */
13167 void
13168 thumb_expand_prologue (void)
13170 rtx insn, dwarf;
13172 HOST_WIDE_INT amount;
13173 arm_stack_offsets *offsets;
13174 unsigned long func_type;
13175 int regno;
13176 unsigned long live_regs_mask;
13178 func_type = arm_current_func_type ();
13180 /* Naked functions don't have prologues. */
13181 if (IS_NAKED (func_type))
13182 return;
13184 if (IS_INTERRUPT (func_type))
13186 error ("interrupt Service Routines cannot be coded in Thumb mode");
13187 return;
13190 live_regs_mask = thumb_compute_save_reg_mask ();
13191 /* Load the pic register before setting the frame pointer,
13192 so we can use r7 as a temporary work register. */
13193 if (flag_pic)
13194 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13196 offsets = arm_get_frame_offsets ();
13198 if (frame_pointer_needed)
13200 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13201 stack_pointer_rtx));
13202 RTX_FRAME_RELATED_P (insn) = 1;
13204 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13205 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13206 stack_pointer_rtx);
13208 amount = offsets->outgoing_args - offsets->saved_regs;
13209 if (amount)
13211 if (amount < 512)
13213 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13214 GEN_INT (- amount)));
13215 RTX_FRAME_RELATED_P (insn) = 1;
13217 else
13219 rtx reg;
13221 /* The stack decrement is too big for an immediate value in a single
13222 insn. In theory we could issue multiple subtracts, but after
13223 three of them it becomes more space efficient to place the full
13224 value in the constant pool and load into a register. (Also the
13225 ARM debugger really likes to see only one stack decrement per
13226 function). So instead we look for a scratch register into which
13227 we can load the decrement, and then we subtract this from the
13228 stack pointer. Unfortunately on the thumb the only available
13229 scratch registers are the argument registers, and we cannot use
13230 these as they may hold arguments to the function. Instead we
13231 attempt to locate a call preserved register which is used by this
13232 function. If we can find one, then we know that it will have
13233 been pushed at the start of the prologue and so we can corrupt
13234 it now. */
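/* Schematically, the large-frame path below produces something like
   (an illustrative sketch, not literal compiler output):

     ldr  r4, .Lpool     @ .Lpool holds -amount
     add  sp, sp, r4

   where r4 stands for whichever call-saved low register the following
   loop finds to be already saved by the prologue.  */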
13235 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13236 if (live_regs_mask & (1 << regno)
13237 && !(frame_pointer_needed
13238 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13239 break;
13241 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13243 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13245 /* Choose an arbitrary, non-argument low register. */
13246 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13248 /* Save it by copying it into a high, scratch register. */
13249 emit_insn (gen_movsi (spare, reg));
13250 /* Add a USE to stop propagate_one_insn() from barfing. */
13251 emit_insn (gen_prologue_use (spare));
13253 /* Decrement the stack. */
13254 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13255 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13256 stack_pointer_rtx, reg));
13257 RTX_FRAME_RELATED_P (insn) = 1;
13258 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13259 plus_constant (stack_pointer_rtx,
13260 -amount));
13261 RTX_FRAME_RELATED_P (dwarf) = 1;
13262 REG_NOTES (insn)
13263 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13264 REG_NOTES (insn));
13266 /* Restore the low register's original value. */
13267 emit_insn (gen_movsi (reg, spare));
13269 /* Emit a USE of the restored scratch register, so that flow
13270 analysis will not consider the restore redundant. The
13271 register won't be used again in this function and isn't
13272 restored by the epilogue. */
13273 emit_insn (gen_prologue_use (reg));
13275 else
13277 reg = gen_rtx_REG (SImode, regno);
13279 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13281 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13282 stack_pointer_rtx, reg));
13283 RTX_FRAME_RELATED_P (insn) = 1;
13284 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13285 plus_constant (stack_pointer_rtx,
13286 -amount));
13287 RTX_FRAME_RELATED_P (dwarf) = 1;
13288 REG_NOTES (insn)
13289 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13290 REG_NOTES (insn));
13293 /* If the frame pointer is needed, emit a special barrier that
13294 will prevent the scheduler from moving stores to the frame
13295 before the stack adjustment. */
13296 if (frame_pointer_needed)
13297 emit_insn (gen_stack_tie (stack_pointer_rtx,
13298 hard_frame_pointer_rtx));
13301 if (current_function_profile || TARGET_NO_SCHED_PRO)
13302 emit_insn (gen_blockage ());
13304 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13305 if (live_regs_mask & 0xff)
13306 cfun->machine->lr_save_eliminated = 0;
13308 /* If the link register is being kept alive, with the return address in it,
13309 then make sure that it does not get reused by the ce2 pass. */
13310 if (cfun->machine->lr_save_eliminated)
13311 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13315 void
13316 thumb_expand_epilogue (void)
13318 HOST_WIDE_INT amount;
13319 arm_stack_offsets *offsets;
13320 int regno;
13322 /* Naked functions don't have epilogues. */
13323 if (IS_NAKED (arm_current_func_type ()))
13324 return;
13326 offsets = arm_get_frame_offsets ();
13327 amount = offsets->outgoing_args - offsets->saved_regs;
13329 if (frame_pointer_needed)
13330 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13331 else if (amount)
13333 if (amount < 512)
13334 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13335 GEN_INT (amount)));
13336 else
13338 /* r3 is always free in the epilogue. */
13339 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13341 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13342 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13346 /* Emit a USE (stack_pointer_rtx), so that
13347 the stack adjustment will not be deleted. */
13348 emit_insn (gen_prologue_use (stack_pointer_rtx));
13350 if (current_function_profile || TARGET_NO_SCHED_PRO)
13351 emit_insn (gen_blockage ());
13353 /* Emit a clobber for each register that will be restored in the epilogue,
13354 so that flow2 will get register lifetimes correct. */
13355 for (regno = 0; regno < 13; regno++)
13356 if (regs_ever_live[regno] && !call_used_regs[regno])
13357 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13359 if (! regs_ever_live[LR_REGNUM])
13360 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13363 static void
13364 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13366 unsigned long live_regs_mask = 0;
13367 unsigned long l_mask;
13368 unsigned high_regs_pushed = 0;
13369 int cfa_offset = 0;
13370 int regno;
13372 if (IS_NAKED (arm_current_func_type ()))
13373 return;
13375 if (is_called_in_ARM_mode (current_function_decl))
13377 const char * name;
13379 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13380 abort ();
13381 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13382 abort ();
13383 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13385 /* Generate code sequence to switch us into Thumb mode. */
13386 /* The .code 32 directive has already been emitted by
13387 ASM_DECLARE_FUNCTION_NAME. */
13388 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13389 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13391 /* Generate a label, so that the debugger will notice the
13392 change in instruction sets. This label is also used by
13393 the assembler to bypass the ARM code when this function
13394 is called from a Thumb encoded function elsewhere in the
13395 same file. Hence the definition of STUB_NAME here must
13396 agree with the definition in gas/config/tc-arm.c. */
13398 #define STUB_NAME ".real_start_of"
13400 fprintf (f, "\t.code\t16\n");
13401 #ifdef ARM_PE
13402 if (arm_dllexport_name_p (name))
13403 name = arm_strip_name_encoding (name);
13404 #endif
13405 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13406 fprintf (f, "\t.thumb_func\n");
13407 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13410 if (current_function_pretend_args_size)
13412 if (cfun->machine->uses_anonymous_args)
13414 int num_pushes;
13416 fprintf (f, "\tpush\t{");
13418 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13420 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13421 regno <= LAST_ARG_REGNUM;
13422 regno++)
13423 asm_fprintf (f, "%r%s", regno,
13424 regno == LAST_ARG_REGNUM ? "" : ", ");
13426 fprintf (f, "}\n");
13428 else
13429 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13430 SP_REGNUM, SP_REGNUM,
13431 current_function_pretend_args_size);
13433 /* We don't need to record the stores for unwinding (would it
13434 help the debugger any if we did?), but record the change in
13435 the stack pointer. */
13436 if (dwarf2out_do_frame ())
13438 char *l = dwarf2out_cfi_label ();
13440 cfa_offset = cfa_offset + current_function_pretend_args_size;
13441 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13445 /* Get the registers we are going to push. */
13446 live_regs_mask = thumb_compute_save_reg_mask ();
13447 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13448 l_mask = live_regs_mask & 0x40ff;
13449 /* Then count how many other high registers will need to be pushed. */
13450 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13452 if (TARGET_BACKTRACE)
13454 unsigned offset;
13455 unsigned work_register;
13457 /* We have been asked to create a stack backtrace structure.
13458 The code looks like this:
13460 0 .align 2
13461 0 func:
13462 0 sub SP, #16 Reserve space for 4 registers.
13463 2 push {R7} Push low registers.
13464 4 add R7, SP, #20 Get the stack pointer before the push.
13465 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13466 8 mov R7, PC Get hold of the start of this code plus 12.
13467 10 str R7, [SP, #16] Store it.
13468 12 mov R7, FP Get hold of the current frame pointer.
13469 14 str R7, [SP, #4] Store it.
13470 16 mov R7, LR Get hold of the current return address.
13471 18 str R7, [SP, #12] Store it.
13472 20 add R7, SP, #16 Point at the start of the backtrace structure.
13473 22 mov FP, R7 Put this value into the frame pointer. */
13475 work_register = thumb_find_work_register (live_regs_mask);
13477 asm_fprintf
13478 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13479 SP_REGNUM, SP_REGNUM);
13481 if (dwarf2out_do_frame ())
13483 char *l = dwarf2out_cfi_label ();
13485 cfa_offset = cfa_offset + 16;
13486 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13489 if (l_mask)
13491 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13492 offset = bit_count (l_mask);
13494 else
13495 offset = 0;
13497 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13498 offset + 16 + current_function_pretend_args_size);
13500 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13501 offset + 4);
13503 /* Make sure that the instruction fetching the PC is in the right place
13504 to calculate "start of backtrace creation code + 12". */
13505 if (l_mask)
13507 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13508 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13509 offset + 12);
13510 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13511 ARM_HARD_FRAME_POINTER_REGNUM);
13512 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13513 offset);
13515 else
13517 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13518 ARM_HARD_FRAME_POINTER_REGNUM);
13519 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13520 offset);
13521 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13522 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13523 offset + 12);
13526 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13527 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13528 offset + 8);
13529 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13530 offset + 12);
13531 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13532 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13534 /* Optimisation: If we are not pushing any low registers but we are going
13535 to push some high registers then delay our first push. This will just
13536 be a push of LR and we can combine it with the push of the first high
13537 register. */
13538 else if ((l_mask & 0xff) != 0
13539 || (high_regs_pushed == 0 && l_mask))
13540 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13542 if (high_regs_pushed)
13544 unsigned pushable_regs;
13545 unsigned next_hi_reg;
13547 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13548 if (live_regs_mask & (1 << next_hi_reg))
13549 break;
13551 pushable_regs = l_mask & 0xff;
13553 if (pushable_regs == 0)
13554 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13556 while (high_regs_pushed > 0)
13558 unsigned long real_regs_mask = 0;
13560 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13562 if (pushable_regs & (1 << regno))
13564 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13566 high_regs_pushed --;
13567 real_regs_mask |= (1 << next_hi_reg);
13569 if (high_regs_pushed)
13571 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13572 next_hi_reg --)
13573 if (live_regs_mask & (1 << next_hi_reg))
13574 break;
13576 else
13578 pushable_regs &= ~((1 << regno) - 1);
13579 break;
13584 /* If we had to find a work register and we have not yet
13585 saved the LR then add it to the list of regs to push. */
13586 if (l_mask == (1 << LR_REGNUM))
13588 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13589 1, &cfa_offset,
13590 real_regs_mask | (1 << LR_REGNUM));
13591 l_mask = 0;
13593 else
13594 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13599 /* Handle the case of a double word load into a low register from
13600 a computed memory address. The computed address may involve a
13601 register which is overwritten by the load. */
13602 const char *
13603 thumb_load_double_from_address (rtx *operands)
13605 rtx addr;
13606 rtx base;
13607 rtx offset;
13608 rtx arg1;
13609 rtx arg2;
13611 if (GET_CODE (operands[0]) != REG)
13612 abort ();
13614 if (GET_CODE (operands[1]) != MEM)
13615 abort ();
13617 /* Get the memory address. */
13618 addr = XEXP (operands[1], 0);
13620 /* Work out how the memory address is computed. */
13621 switch (GET_CODE (addr))
13623 case REG:
13624 operands[2] = gen_rtx_MEM (SImode,
13625 plus_constant (XEXP (operands[1], 0), 4));
13627 if (REGNO (operands[0]) == REGNO (addr))
13629 output_asm_insn ("ldr\t%H0, %2", operands);
13630 output_asm_insn ("ldr\t%0, %1", operands);
13632 else
13634 output_asm_insn ("ldr\t%0, %1", operands);
13635 output_asm_insn ("ldr\t%H0, %2", operands);
13637 break;
13639 case CONST:
13640 /* Compute <address> + 4 for the high order load. */
13641 operands[2] = gen_rtx_MEM (SImode,
13642 plus_constant (XEXP (operands[1], 0), 4));
13644 output_asm_insn ("ldr\t%0, %1", operands);
13645 output_asm_insn ("ldr\t%H0, %2", operands);
13646 break;
13648 case PLUS:
13649 arg1 = XEXP (addr, 0);
13650 arg2 = XEXP (addr, 1);
13652 if (CONSTANT_P (arg1))
13653 base = arg2, offset = arg1;
13654 else
13655 base = arg1, offset = arg2;
13657 if (GET_CODE (base) != REG)
13658 abort ();
13660 /* Catch the case of <address> = <reg> + <reg>. */

13661 if (GET_CODE (offset) == REG)
13663 int reg_offset = REGNO (offset);
13664 int reg_base = REGNO (base);
13665 int reg_dest = REGNO (operands[0]);
13667 /* Add the base and offset registers together into the
13668 higher destination register. */
13669 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13670 reg_dest + 1, reg_base, reg_offset);
13672 /* Load the lower destination register from the address in
13673 the higher destination register. */
13674 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13675 reg_dest, reg_dest + 1);
13677 /* Load the higher destination register from its own address
13678 plus 4. */
13679 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13680 reg_dest + 1, reg_dest + 1);
13682 else
13684 /* Compute <address> + 4 for the high order load. */
13685 operands[2] = gen_rtx_MEM (SImode,
13686 plus_constant (XEXP (operands[1], 0), 4));
13688 /* If the computed address is held in the low order register
13689 then load the high order register first, otherwise always
13690 load the low order register first. */
13691 if (REGNO (operands[0]) == REGNO (base))
13693 output_asm_insn ("ldr\t%H0, %2", operands);
13694 output_asm_insn ("ldr\t%0, %1", operands);
13696 else
13698 output_asm_insn ("ldr\t%0, %1", operands);
13699 output_asm_insn ("ldr\t%H0, %2", operands);
13702 break;
13704 case LABEL_REF:
13705 /* With no registers to worry about we can just load the value
13706 directly. */
13707 operands[2] = gen_rtx_MEM (SImode,
13708 plus_constant (XEXP (operands[1], 0), 4));
13710 output_asm_insn ("ldr\t%H0, %2", operands);
13711 output_asm_insn ("ldr\t%0, %1", operands);
13712 break;
13714 default:
13715 abort ();
13716 break;
13719 return "";
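/* A hedged example (operand registers assumed, not from the original
   sources): loading the pair r2/r3 from the address r4 + r5 takes the
   reg-plus-reg case above and emits

	add	r3, r4, r5
	ldr	r2, [r3, #0]
	ldr	r3, [r3, #4]

   so the computed address survives in the high destination register
   until both words have been loaded.  */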
13722 const char *
13723 thumb_output_move_mem_multiple (int n, rtx *operands)
13725 rtx tmp;
13727 switch (n)
13729 case 2:
13730 if (REGNO (operands[4]) > REGNO (operands[5]))
13732 tmp = operands[4];
13733 operands[4] = operands[5];
13734 operands[5] = tmp;
13736 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13737 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13738 break;
13740 case 3:
13741 if (REGNO (operands[4]) > REGNO (operands[5]))
13743 tmp = operands[4];
13744 operands[4] = operands[5];
13745 operands[5] = tmp;
13747 if (REGNO (operands[5]) > REGNO (operands[6]))
13749 tmp = operands[5];
13750 operands[5] = operands[6];
13751 operands[6] = tmp;
13753 if (REGNO (operands[4]) > REGNO (operands[5]))
13755 tmp = operands[4];
13756 operands[4] = operands[5];
13757 operands[5] = tmp;
13760 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13761 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13762 break;
13764 default:
13765 abort ();
13768 return "";
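/* Note (illustrative, not from the original sources): the pairwise
   swaps above form a three-element sorting network, needed because
   ldmia/stmia register lists must name registers in ascending order;
   a 12-byte move with scratch registers r3, r4, r5 thus becomes
   "ldmia r1!, {r3, r4, r5}" followed by "stmia r0!, {r3, r4, r5}".  */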
13771 /* Output a call-via instruction for thumb state. */
13772 const char *
13773 thumb_call_via_reg (rtx reg)
13775 int regno = REGNO (reg);
13776 rtx *labelp;
13778 gcc_assert (regno < SP_REGNUM);
13780 /* If we are in the normal text section we can use a single instance
13781 per compilation unit. If we are doing function sections, then we need
13782 an entry per section, since we can't rely on reachability. */
13783 if (in_text_section ())
13785 thumb_call_reg_needed = 1;
13787 if (thumb_call_via_label[regno] == NULL)
13788 thumb_call_via_label[regno] = gen_label_rtx ();
13789 labelp = thumb_call_via_label + regno;
13791 else
13793 if (cfun->machine->call_via[regno] == NULL)
13794 cfun->machine->call_via[regno] = gen_label_rtx ();
13795 labelp = cfun->machine->call_via + regno;
13798 output_asm_insn ("bl\t%a0", labelp);
13799 return "";
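/* Illustrative only: a call through r3 is emitted as "bl" to a
   per-register local label; the stub behind that label (written out
   later, e.g. by arm_file_end) consists of the single instruction
   "bx r3".  The exact label name is internal and hypothetical.  */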
13802 /* Routines for generating rtl. */
13803 void
13804 thumb_expand_movmemqi (rtx *operands)
13806 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13807 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13808 HOST_WIDE_INT len = INTVAL (operands[2]);
13809 HOST_WIDE_INT offset = 0;
13811 while (len >= 12)
13813 emit_insn (gen_movmem12b (out, in, out, in));
13814 len -= 12;
13817 if (len >= 8)
13819 emit_insn (gen_movmem8b (out, in, out, in));
13820 len -= 8;
13823 if (len >= 4)
13825 rtx reg = gen_reg_rtx (SImode);
13826 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13827 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13828 len -= 4;
13829 offset += 4;
13832 if (len >= 2)
13834 rtx reg = gen_reg_rtx (HImode);
13835 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13836 plus_constant (in, offset))));
13837 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13838 reg));
13839 len -= 2;
13840 offset += 2;
13843 if (len)
13845 rtx reg = gen_reg_rtx (QImode);
13846 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13847 plus_constant (in, offset))));
13848 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13849 reg));
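/* A worked example (illustrative): a 23-byte copy expands to a
   12-byte and an 8-byte block move (each of which post-increments
   IN and OUT), followed by an HImode load/store pair at offset 0
   and a QImode load/store pair at offset 2 for the last byte.  */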
13853 void
13854 thumb_reload_out_hi (rtx *operands)
13856 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13859 /* Handle reading a half-word from memory during reload. */
13860 void
13861 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13863 abort ();
13866 /* Return the length of a function name prefix
13867 that starts with the character 'c'. */
13868 static int
13869 arm_get_strip_length (int c)
13871 switch (c)
13873 ARM_NAME_ENCODING_LENGTHS
13874 default: return 0;
13878 /* Return a pointer to a function's name with any
13879 and all prefix encodings stripped from it. */
13880 const char *
13881 arm_strip_name_encoding (const char *name)
13883 int skip;
13885 while ((skip = arm_get_strip_length (* name)))
13886 name += skip;
13888 return name;
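/* Illustrative only: assuming '*' is one of the prefixes listed in
   ARM_NAME_ENCODING_LENGTHS, arm_strip_name_encoding ("*foo") would
   return "foo"; each loop iteration strips one recognized prefix.  */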
13891 /* If there is a '*' anywhere in the name's prefix, then
13892 emit the stripped name verbatim, otherwise prepend an
13893 underscore if leading underscores are being used. */
13894 void
13895 arm_asm_output_labelref (FILE *stream, const char *name)
13897 int skip;
13898 int verbatim = 0;
13900 while ((skip = arm_get_strip_length (* name)))
13902 verbatim |= (*name == '*');
13903 name += skip;
13906 if (verbatim)
13907 fputs (name, stream);
13908 else
13909 asm_fprintf (stream, "%U%s", name);
13912 static void
13913 arm_file_end (void)
13915 int regno;
13917 if (! thumb_call_reg_needed)
13918 return;
13920 text_section ();
13921 asm_fprintf (asm_out_file, "\t.code 16\n");
13922 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13924 for (regno = 0; regno < SP_REGNUM; regno++)
13926 rtx label = thumb_call_via_label[regno];
13928 if (label != 0)
13930 targetm.asm_out.internal_label (asm_out_file, "L",
13931 CODE_LABEL_NUMBER (label));
13932 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
13937 rtx aof_pic_label;
13939 #ifdef AOF_ASSEMBLER
13940 /* Special functions only needed when producing AOF syntax assembler. */
13942 struct pic_chain
13944 struct pic_chain * next;
13945 const char * symname;
13948 static struct pic_chain * aof_pic_chain = NULL;
13951 aof_pic_entry (rtx x)
13953 struct pic_chain ** chainp;
13954 int offset;
13956 if (aof_pic_label == NULL_RTX)
13958 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13961 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13962 offset += 4, chainp = &(*chainp)->next)
13963 if ((*chainp)->symname == XSTR (x, 0))
13964 return plus_constant (aof_pic_label, offset);
13966 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13967 (*chainp)->next = NULL;
13968 (*chainp)->symname = XSTR (x, 0);
13969 return plus_constant (aof_pic_label, offset);
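/* Illustrative only: each distinct symbol is assigned a 4-byte slot
   in the x$adcons pool, and a repeated request for the same symbol
   returns the same "x$adcons + offset" expression rather than a new
   slot.  */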
13972 void
13973 aof_dump_pic_table (FILE *f)
13975 struct pic_chain * chain;
13977 if (aof_pic_chain == NULL)
13978 return;
13980 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13981 PIC_OFFSET_TABLE_REGNUM,
13982 PIC_OFFSET_TABLE_REGNUM);
13983 fputs ("|x$adcons|\n", f);
13985 for (chain = aof_pic_chain; chain; chain = chain->next)
13987 fputs ("\tDCD\t", f);
13988 assemble_name (f, chain->symname);
13989 fputs ("\n", f);
13993 int arm_text_section_count = 1;
13995 char *
13996 aof_text_section (void)
13998 static char buf[100];
13999 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
14000 arm_text_section_count++);
14001 if (flag_pic)
14002 strcat (buf, ", PIC, REENTRANT");
14003 return buf;
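/* Illustrative only: the first call returns
   "\tAREA |C$$code1|, CODE, READONLY" (with ", PIC, REENTRANT"
   appended when compiling PIC); the counter keeps each text AREA
   name unique.  */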
14006 static int arm_data_section_count = 1;
14008 char *
14009 aof_data_section (void)
14011 static char buf[100];
14012 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
14013 return buf;
14016 /* The AOF assembler is religiously strict about declarations of
14017 imported and exported symbols, so that it is impossible to declare
14018 a function as imported near the beginning of the file, and then to
14019 export it later on. It is, however, possible to delay the decision
14020 until all the functions in the file have been compiled. To get
14021 around this, we maintain a list of the imports and exports, and
14022 delete from it any that are subsequently defined. At the end of
14023 compilation we spit the remainder of the list out before the END
14024 directive. */
14026 struct import
14028 struct import * next;
14029 const char * name;
14032 static struct import * imports_list = NULL;
14034 void
14035 aof_add_import (const char *name)
14037 struct import * new;
14039 for (new = imports_list; new; new = new->next)
14040 if (new->name == name)
14041 return;
14043 new = (struct import *) xmalloc (sizeof (struct import));
14044 new->next = imports_list;
14045 imports_list = new;
14046 new->name = name;
14049 void
14050 aof_delete_import (const char *name)
14052 struct import ** old;
14054 for (old = &imports_list; *old; old = & (*old)->next)
14056 if ((*old)->name == name)
14058 *old = (*old)->next;
14059 return;
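/* Sketch of the intended flow (not from the original sources): every
   referenced symbol is registered with aof_add_import; if the symbol
   is later defined in this file, aof_delete_import removes it again,
   so only genuinely external names survive to be printed as IMPORT
   directives by aof_dump_imports below.  */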
14064 int arm_main_function = 0;
14066 static void
14067 aof_dump_imports (FILE *f)
14069 /* The AOF assembler needs this to cause the startup code to be extracted
14070 from the library. Bringing in __main causes the whole thing to work
14071 automagically. */
14072 if (arm_main_function)
14074 text_section ();
14075 fputs ("\tIMPORT __main\n", f);
14076 fputs ("\tDCD __main\n", f);
14079 /* Now dump the remaining imports. */
14080 while (imports_list)
14082 fprintf (f, "\tIMPORT\t");
14083 assemble_name (f, imports_list->name);
14084 fputc ('\n', f);
14085 imports_list = imports_list->next;
14089 static void
14090 aof_globalize_label (FILE *stream, const char *name)
14092 default_globalize_label (stream, name);
14093 if (! strcmp (name, "main"))
14094 arm_main_function = 1;
14097 static void
14098 aof_file_start (void)
14100 fputs ("__r0\tRN\t0\n", asm_out_file);
14101 fputs ("__a1\tRN\t0\n", asm_out_file);
14102 fputs ("__a2\tRN\t1\n", asm_out_file);
14103 fputs ("__a3\tRN\t2\n", asm_out_file);
14104 fputs ("__a4\tRN\t3\n", asm_out_file);
14105 fputs ("__v1\tRN\t4\n", asm_out_file);
14106 fputs ("__v2\tRN\t5\n", asm_out_file);
14107 fputs ("__v3\tRN\t6\n", asm_out_file);
14108 fputs ("__v4\tRN\t7\n", asm_out_file);
14109 fputs ("__v5\tRN\t8\n", asm_out_file);
14110 fputs ("__v6\tRN\t9\n", asm_out_file);
14111 fputs ("__sl\tRN\t10\n", asm_out_file);
14112 fputs ("__fp\tRN\t11\n", asm_out_file);
14113 fputs ("__ip\tRN\t12\n", asm_out_file);
14114 fputs ("__sp\tRN\t13\n", asm_out_file);
14115 fputs ("__lr\tRN\t14\n", asm_out_file);
14116 fputs ("__pc\tRN\t15\n", asm_out_file);
14117 fputs ("__f0\tFN\t0\n", asm_out_file);
14118 fputs ("__f1\tFN\t1\n", asm_out_file);
14119 fputs ("__f2\tFN\t2\n", asm_out_file);
14120 fputs ("__f3\tFN\t3\n", asm_out_file);
14121 fputs ("__f4\tFN\t4\n", asm_out_file);
14122 fputs ("__f5\tFN\t5\n", asm_out_file);
14123 fputs ("__f6\tFN\t6\n", asm_out_file);
14124 fputs ("__f7\tFN\t7\n", asm_out_file);
14125 text_section ();
14128 static void
14129 aof_file_end (void)
14131 if (flag_pic)
14132 aof_dump_pic_table (asm_out_file);
14133 arm_file_end ();
14134 aof_dump_imports (asm_out_file);
14135 fputs ("\tEND\n", asm_out_file);
14137 #endif /* AOF_ASSEMBLER */
14139 #ifndef ARM_PE
14140 /* Symbols in the text segment can be accessed without indirecting via the
14141 constant pool; it may take an extra binary operation, but this is still
14142 faster than indirecting via memory. Don't do this when not optimizing,
14143 since we won't be calculating all of the offsets necessary to do this
14144 simplification. */
14146 static void
14147 arm_encode_section_info (tree decl, rtx rtl, int first)
14149 /* This doesn't work with AOF syntax, since the string table may be in
14150 a different AREA. */
14151 #ifndef AOF_ASSEMBLER
14152 if (optimize > 0 && TREE_CONSTANT (decl))
14153 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14154 #endif
14156 /* If we are referencing a function that is weak then encode a long call
14157 flag in the function name; otherwise, if the function is static
14158 or known to be defined in this file, then encode a short call flag.
14159 if (first && DECL_P (decl))
14161 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14162 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14163 else if (! TREE_PUBLIC (decl))
14164 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14167 #endif /* !ARM_PE */
14169 static void
14170 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14172 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14173 && !strcmp (prefix, "L"))
14175 arm_ccfsm_state = 0;
14176 arm_target_insn = NULL;
14178 default_internal_label (stream, prefix, labelno);
14181 /* Output code to add DELTA to the first argument, and then jump
14182 to FUNCTION. Used for C++ multiple inheritance. */
14183 static void
14184 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14185 HOST_WIDE_INT delta,
14186 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14187 tree function)
14189 static int thunk_label = 0;
14190 char label[256];
14191 int mi_delta = delta;
14192 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14193 int shift = 0;
14194 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14195 ? 1 : 0);
14196 if (mi_delta < 0)
14197 mi_delta = - mi_delta;
14198 if (TARGET_THUMB)
14200 int labelno = thunk_label++;
14201 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14202 fputs ("\tldr\tr12, ", file);
14203 assemble_name (file, label);
14204 fputc ('\n', file);
14206 while (mi_delta != 0)
14208 if ((mi_delta & (3 << shift)) == 0)
14209 shift += 2;
14210 else
14212 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14213 mi_op, this_regno, this_regno,
14214 mi_delta & (0xff << shift));
14215 mi_delta &= ~(0xff << shift);
14216 shift += 8;
14219 if (TARGET_THUMB)
14221 fprintf (file, "\tbx\tr12\n");
14222 ASM_OUTPUT_ALIGN (file, 2);
14223 assemble_name (file, label);
14224 fputs (":\n", file);
14225 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14227 else
14229 fputs ("\tb\t", file);
14230 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14231 if (NEED_PLT_RELOC)
14232 fputs ("(PLT)", file);
14233 fputc ('\n', file);
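/* A worked example (illustrative; assumes the this pointer is in r0):
   for DELTA = 0x1004 the loop above finds non-zero bits at shift 2
   and emits "add r0, r0, #4", then at shift 12 and emits
   "add r0, r0, #4096".  Each addend is an 8-bit value at an even bit
   position and is therefore a legal ARM immediate.  */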
14238 arm_emit_vector_const (FILE *file, rtx x)
14240 int i;
14241 const char * pattern;
14243 if (GET_CODE (x) != CONST_VECTOR)
14244 abort ();
14246 switch (GET_MODE (x))
14248 case V2SImode: pattern = "%08x"; break;
14249 case V4HImode: pattern = "%04x"; break;
14250 case V8QImode: pattern = "%02x"; break;
14251 default: abort ();
14254 fprintf (file, "0x");
14255 for (i = CONST_VECTOR_NUNITS (x); i--;)
14257 rtx element;
14259 element = CONST_VECTOR_ELT (x, i);
14260 fprintf (file, pattern, INTVAL (element));
14263 return 1;
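/* Illustrative only: a V4HImode constant with elements {1, 2, 3, 4}
   is printed as 0x0004000300020001 -- the loop walks from the
   highest-numbered element down, so element 0 lands in the least
   significant halfword of the printed value.  */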
14266 const char *
14267 arm_output_load_gr (rtx *operands)
14269 rtx reg;
14270 rtx offset;
14271 rtx wcgr;
14272 rtx sum;
14274 if (GET_CODE (operands [1]) != MEM
14275 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14276 || GET_CODE (reg = XEXP (sum, 0)) != REG
14277 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14278 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14279 return "wldrw%?\t%0, %1";
14281 /* Fix up an out-of-range load of a GR register. */
14282 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14283 wcgr = operands[0];
14284 operands[0] = reg;
14285 output_asm_insn ("ldr%?\t%0, %1", operands);
14287 operands[0] = wcgr;
14288 operands[1] = reg;
14289 output_asm_insn ("tmcr%?\t%0, %1", operands);
14290 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14292 return "";
14295 static rtx
14296 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14297 int incoming ATTRIBUTE_UNUSED)
14299 #if 0
14300 /* FIXME: The ARM backend has special code to handle structure
14301 returns, and will reserve its own hidden first argument. So
14302 if this macro is enabled a *second* hidden argument will be
14303 reserved, which will break binary compatibility with old
14304 toolchains and also thunk handling. One day this should be
14305 fixed. */
14306 return 0;
14307 #else
14308 /* Register in which the address for storing a structure value
14309 is passed to a function. */
14310 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14311 #endif
14314 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14316 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14317 named arg and all anonymous args onto the stack.
14318 XXX I know the prologue shouldn't be pushing registers, but it is faster
14319 that way. */
14321 static void
14322 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14323 enum machine_mode mode ATTRIBUTE_UNUSED,
14324 tree type ATTRIBUTE_UNUSED,
14325 int *pretend_size,
14326 int second_time ATTRIBUTE_UNUSED)
14328 cfun->machine->uses_anonymous_args = 1;
14329 if (cum->nregs < NUM_ARG_REGS)
14330 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14333 /* Return nonzero if the CONSUMER instruction (a store) does not need
14334 PRODUCER's value to calculate the address. */
14337 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14339 rtx value = PATTERN (producer);
14340 rtx addr = PATTERN (consumer);
14342 if (GET_CODE (value) == COND_EXEC)
14343 value = COND_EXEC_CODE (value);
14344 if (GET_CODE (value) == PARALLEL)
14345 value = XVECEXP (value, 0, 0);
14346 value = XEXP (value, 0);
14347 if (GET_CODE (addr) == COND_EXEC)
14348 addr = COND_EXEC_CODE (addr);
14349 if (GET_CODE (addr) == PARALLEL)
14350 addr = XVECEXP (addr, 0, 0);
14351 addr = XEXP (addr, 0);
14353 return !reg_overlap_mentioned_p (value, addr);
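/* Illustrative only: if PRODUCER is "r1 = r2 + r3" and CONSUMER is
   "str r4, [r1]", the address mentions r1 and the function returns 0;
   for "str r1, [r5]" only the stored value depends on the producer,
   so the function returns nonzero.  */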
14356 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14357 have an early register shift value or amount dependency on the
14358 result of PRODUCER. */
14361 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14363 rtx value = PATTERN (producer);
14364 rtx op = PATTERN (consumer);
14365 rtx early_op;
14367 if (GET_CODE (value) == COND_EXEC)
14368 value = COND_EXEC_CODE (value);
14369 if (GET_CODE (value) == PARALLEL)
14370 value = XVECEXP (value, 0, 0);
14371 value = XEXP (value, 0);
14372 if (GET_CODE (op) == COND_EXEC)
14373 op = COND_EXEC_CODE (op);
14374 if (GET_CODE (op) == PARALLEL)
14375 op = XVECEXP (op, 0, 0);
14376 op = XEXP (op, 1);
14378 early_op = XEXP (op, 0);
14379 /* This is either an actual independent shift, or a shift applied to
14380 the first operand of another operation. We want the whole shift
14381 operation. */
14382 if (GET_CODE (early_op) == REG)
14383 early_op = op;
14385 return !reg_overlap_mentioned_p (value, early_op);
14388 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14389 have an early register shift value dependency on the result of
14390 PRODUCER. */
14393 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14395 rtx value = PATTERN (producer);
14396 rtx op = PATTERN (consumer);
14397 rtx early_op;
14399 if (GET_CODE (value) == COND_EXEC)
14400 value = COND_EXEC_CODE (value);
14401 if (GET_CODE (value) == PARALLEL)
14402 value = XVECEXP (value, 0, 0);
14403 value = XEXP (value, 0);
14404 if (GET_CODE (op) == COND_EXEC)
14405 op = COND_EXEC_CODE (op);
14406 if (GET_CODE (op) == PARALLEL)
14407 op = XVECEXP (op, 0, 0);
14408 op = XEXP (op, 1);
14410 early_op = XEXP (op, 0);
14412 /* This is either an actual independent shift, or a shift applied to
14413 the first operand of another operation. We want the value being
14414 shifted, in either case. */
14415 if (GET_CODE (early_op) != REG)
14416 early_op = XEXP (early_op, 0);
14418 return !reg_overlap_mentioned_p (value, early_op);
14421 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14422 have an early register mult dependency on the result of
14423 PRODUCER. */
14426 arm_no_early_mul_dep (rtx producer, rtx consumer)
14428 rtx value = PATTERN (producer);
14429 rtx op = PATTERN (consumer);
14431 if (GET_CODE (value) == COND_EXEC)
14432 value = COND_EXEC_CODE (value);
14433 if (GET_CODE (value) == PARALLEL)
14434 value = XVECEXP (value, 0, 0);
14435 value = XEXP (value, 0);
14436 if (GET_CODE (op) == COND_EXEC)
14437 op = COND_EXEC_CODE (op);
14438 if (GET_CODE (op) == PARALLEL)
14439 op = XVECEXP (op, 0, 0);
14440 op = XEXP (op, 1);
14442 return (GET_CODE (op) == PLUS
14443 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14447 /* We can't rely on the caller doing the proper promotion when
14448 using APCS or ATPCS. */
14450 static bool
14451 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14453 return !TARGET_AAPCS_BASED;
14457 /* AAPCS based ABIs use short enums by default. */
14459 static bool
14460 arm_default_short_enums (void)
14462 return TARGET_AAPCS_BASED;
14466 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14468 static bool
14469 arm_align_anon_bitfield (void)
14471 return TARGET_AAPCS_BASED;
14475 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14477 static tree
14478 arm_cxx_guard_type (void)
14480 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14484 /* The EABI says to test the least significant bit of a guard variable. */
14486 static bool
14487 arm_cxx_guard_mask_bit (void)
14489 return TARGET_AAPCS_BASED;
14493 /* The EABI specifies that all array cookies are 8 bytes long. */
14495 static tree
14496 arm_get_cookie_size (tree type)
14498 tree size;
14500 if (!TARGET_AAPCS_BASED)
14501 return default_cxx_get_cookie_size (type);
14503 size = build_int_cst (sizetype, 8);
14504 return size;
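/* Illustrative note (an assumption about the EABI cookie layout, not
   stated in this file): the fixed 8-byte cookie for "new T[n]" leaves
   room to record the element size as well as the element count; see
   arm_cookie_has_size below.  */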
14508 /* The EABI says that array cookies should also contain the element size. */
14510 static bool
14511 arm_cookie_has_size (void)
14513 return TARGET_AAPCS_BASED;
14517 /* The EABI says constructors and destructors should return a pointer to
14518 the object constructed/destroyed. */
14520 static bool
14521 arm_cxx_cdtor_returns_this (void)
14523 return TARGET_AAPCS_BASED;
14526 /* The EABI says that an inline function may never be the key
14527 method. */
14529 static bool
14530 arm_cxx_key_method_may_be_inline (void)
14532 return !TARGET_AAPCS_BASED;
14535 /* The EABI says that the virtual table, etc., for a class must be
14536 exported if it has a key method. The EABI does not specify the
14537 behavior if there is no key method, but there is no harm in
14538 exporting the class data in that case too. */
14540 static bool
14541 arm_cxx_export_class_data (void)
14543 return TARGET_AAPCS_BASED;
14546 void
14547 arm_set_return_address (rtx source, rtx scratch)
14549 arm_stack_offsets *offsets;
14550 HOST_WIDE_INT delta;
14551 rtx addr;
14552 unsigned long saved_regs;
14554 saved_regs = arm_compute_save_reg_mask ();
14556 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14557 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14558 else
14560 if (frame_pointer_needed)
14561 addr = plus_constant (hard_frame_pointer_rtx, -4);
14562 else
14564 /* LR will be the first saved register. */
14565 offsets = arm_get_frame_offsets ();
14566 delta = offsets->outgoing_args - (offsets->frame + 4);
14569 if (delta >= 4096)
14571 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14572 GEN_INT (delta & ~4095)));
14573 addr = scratch;
14574 delta &= 4095;
14576 else
14577 addr = stack_pointer_rtx;
14579 addr = plus_constant (addr, delta);
14581 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
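/* Illustrative only: when the saved LR lies 4096 or more bytes above
   the stack pointer, the code above first adds the 4096-aligned part
   of the offset into SCRATCH and addresses the remainder from there,
   keeping the final displacement within range of a single ARM store.  */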
14586 void
14587 thumb_set_return_address (rtx source, rtx scratch)
14589 arm_stack_offsets *offsets;
14590 HOST_WIDE_INT delta;
14591 int reg;
14592 rtx addr;
14593 unsigned long mask;
14595 emit_insn (gen_rtx_USE (VOIDmode, source));
14597 mask = thumb_compute_save_reg_mask ();
14598 if (mask & (1 << LR_REGNUM))
14600 offsets = arm_get_frame_offsets ();
14602 /* Find the saved regs. */
14603 if (frame_pointer_needed)
14605 delta = offsets->soft_frame - offsets->saved_args;
14606 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14608 else
14610 delta = offsets->outgoing_args - offsets->saved_args;
14611 reg = SP_REGNUM;
14613 /* Allow for the stack frame. */
14614 if (TARGET_BACKTRACE)
14615 delta -= 16;
14616 /* The link register is always the first saved register. */
14617 delta -= 4;
14619 /* Construct the address. */
14620 addr = gen_rtx_REG (SImode, reg);
14621 if ((reg != SP_REGNUM && delta >= 128)
14622 || delta >= 1024)
14624 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14625 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14626 addr = scratch;
14628 else
14629 addr = plus_constant (addr, delta);
14631 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14633 else
14634 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14637 /* Implements target hook vector_mode_supported_p. */
14638 bool
14639 arm_vector_mode_supported_p (enum machine_mode mode)
14641 if ((mode == V2SImode)
14642 || (mode == V4HImode)
14643 || (mode == V8QImode))
14644 return true;
14646 return false;
14649 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14650 ARM insns and therefore guarantee that the shift count is modulo 256.
14651 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14652 guarantee no particular behavior for out-of-range counts. */
14654 static unsigned HOST_WIDE_INT
14655 arm_shift_truncation_mask (enum machine_mode mode)
14657 return mode == SImode ? 255 : 0;
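/* Illustrative only: because SImode shift counts are truncated modulo
   256, a variable shift by 257 behaves like a shift by 1, and the
   mask of 255 lets the middle end fold the count accordingly; the
   return of 0 for other modes promises no truncation at all.  */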
14661 /* Map internal gcc register numbers to DWARF2 register numbers. */
14663 unsigned int
14664 arm_dbx_register_number (unsigned int regno)
14666 if (regno < 16)
14667 return regno;
14669 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14670 compatibility. The EABI defines them as registers 96-103. */
14671 if (IS_FPA_REGNUM (regno))
14672 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14674 if (IS_VFP_REGNUM (regno))
14675 return 64 + regno - FIRST_VFP_REGNUM;
14677 if (IS_IWMMXT_GR_REGNUM (regno))
14678 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14680 if (IS_IWMMXT_REGNUM (regno))
14681 return 112 + regno - FIRST_IWMMXT_REGNUM;
14683 abort ();
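/* Illustrative mapping, derived from the code above: core register r5
   stays 5; the first VFP register maps to 64; the first FPA register
   maps to 96 under an AAPCS-based ABI (16 for legacy targets); the
   first iWMMXt GR and iWMMXt registers map to 104 and 112.  */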