* arm.c (arm_const_double_by_parts): New function.
[official-gcc.git] / gcc / config / arm / arm.c
bloba1abe96a6e5af603c7644062b1725f414cde3999
1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
75 static rtx emit_sfm (int, int);
76 #ifndef AOF_ASSEMBLER
77 static bool arm_assemble_integer (rtx, unsigned int, int);
78 #endif
79 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
80 static arm_cc get_arm_condition_code (rtx);
81 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
82 static rtx is_jump_table (rtx);
83 static const char *output_multi_immediate (rtx *, const char *, const char *,
84 int, HOST_WIDE_INT);
85 static const char *shift_op (rtx, HOST_WIDE_INT *);
86 static struct machine_function *arm_init_machine_status (void);
87 static void thumb_exit (FILE *, int);
88 static rtx is_jump_table (rtx);
89 static HOST_WIDE_INT get_jump_table_size (rtx);
90 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
91 static Mnode *add_minipool_forward_ref (Mfix *);
92 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
93 static Mnode *add_minipool_backward_ref (Mfix *);
94 static void assign_minipool_offsets (Mfix *);
95 static void arm_print_value (FILE *, rtx);
96 static void dump_minipool (rtx);
97 static int arm_barrier_cost (rtx);
98 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
99 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
100 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
101 rtx);
102 static void arm_reorg (void);
103 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
104 static int current_file_function_operand (rtx);
105 static unsigned long arm_compute_save_reg0_reg12_mask (void);
106 static unsigned long arm_compute_save_reg_mask (void);
107 static unsigned long arm_isr_value (tree);
108 static unsigned long arm_compute_func_type (void);
109 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
110 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
111 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
112 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
113 #endif
114 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
115 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
116 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
117 static int arm_comp_type_attributes (tree, tree);
118 static void arm_set_default_type_attributes (tree);
119 static int arm_adjust_cost (rtx, rtx, rtx, int);
120 static int count_insns_for_constant (HOST_WIDE_INT, int);
121 static int arm_get_strip_length (int);
122 static bool arm_function_ok_for_sibcall (tree, tree);
123 static void arm_internal_label (FILE *, const char *, unsigned long);
124 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
125 tree);
126 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
127 static bool arm_size_rtx_costs (rtx, int, int, int *);
128 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
129 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
130 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
131 static bool arm_9e_rtx_costs (rtx, int, int, int *);
132 static int arm_address_cost (rtx);
133 static bool arm_memory_load_p (rtx);
134 static bool arm_cirrus_insn_p (rtx);
135 static void cirrus_reorg (rtx);
136 static void arm_init_builtins (void);
137 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
138 static void arm_init_iwmmxt_builtins (void);
139 static rtx safe_vector_operand (rtx, enum machine_mode);
140 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
141 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
142 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
143 static void emit_constant_insn (rtx cond, rtx pattern);
144 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
145 tree, bool);
147 #ifndef ARM_PE
148 static void arm_encode_section_info (tree, rtx, int);
149 #endif
151 static void arm_file_end (void);
153 #ifdef AOF_ASSEMBLER
154 static void aof_globalize_label (FILE *, const char *);
155 static void aof_dump_imports (FILE *);
156 static void aof_dump_pic_table (FILE *);
157 static void aof_file_start (void);
158 static void aof_file_end (void);
159 #endif
160 static rtx arm_struct_value_rtx (tree, int);
161 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
162 tree, int *, int);
163 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
164 enum machine_mode, tree, bool);
165 static bool arm_promote_prototypes (tree);
166 static bool arm_default_short_enums (void);
167 static bool arm_align_anon_bitfield (void);
169 static tree arm_cxx_guard_type (void);
170 static bool arm_cxx_guard_mask_bit (void);
171 static tree arm_get_cookie_size (tree);
172 static bool arm_cookie_has_size (void);
173 static bool arm_cxx_cdtor_returns_this (void);
174 static bool arm_cxx_key_method_may_be_inline (void);
175 static bool arm_cxx_export_class_data (void);
176 static void arm_init_libfuncs (void);
177 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
179 /* Initialize the GCC target structure. */
180 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
181 #undef TARGET_MERGE_DECL_ATTRIBUTES
182 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
183 #endif
185 #undef TARGET_ATTRIBUTE_TABLE
186 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
188 #undef TARGET_ASM_FILE_END
189 #define TARGET_ASM_FILE_END arm_file_end
191 #ifdef AOF_ASSEMBLER
192 #undef TARGET_ASM_BYTE_OP
193 #define TARGET_ASM_BYTE_OP "\tDCB\t"
194 #undef TARGET_ASM_ALIGNED_HI_OP
195 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
196 #undef TARGET_ASM_ALIGNED_SI_OP
197 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
198 #undef TARGET_ASM_GLOBALIZE_LABEL
199 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
200 #undef TARGET_ASM_FILE_START
201 #define TARGET_ASM_FILE_START aof_file_start
202 #undef TARGET_ASM_FILE_END
203 #define TARGET_ASM_FILE_END aof_file_end
204 #else
205 #undef TARGET_ASM_ALIGNED_SI_OP
206 #define TARGET_ASM_ALIGNED_SI_OP NULL
207 #undef TARGET_ASM_INTEGER
208 #define TARGET_ASM_INTEGER arm_assemble_integer
209 #endif
211 #undef TARGET_ASM_FUNCTION_PROLOGUE
212 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
214 #undef TARGET_ASM_FUNCTION_EPILOGUE
215 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
217 #undef TARGET_COMP_TYPE_ATTRIBUTES
218 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
220 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
221 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
223 #undef TARGET_SCHED_ADJUST_COST
224 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
226 #undef TARGET_ENCODE_SECTION_INFO
227 #ifdef ARM_PE
228 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
229 #else
230 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
231 #endif
233 #undef TARGET_STRIP_NAME_ENCODING
234 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
236 #undef TARGET_ASM_INTERNAL_LABEL
237 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
239 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
240 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
242 #undef TARGET_ASM_OUTPUT_MI_THUNK
243 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
244 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
245 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
247 /* This will be overridden in arm_override_options. */
248 #undef TARGET_RTX_COSTS
249 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
250 #undef TARGET_ADDRESS_COST
251 #define TARGET_ADDRESS_COST arm_address_cost
253 #undef TARGET_SHIFT_TRUNCATION_MASK
254 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
255 #undef TARGET_VECTOR_MODE_SUPPORTED_P
256 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
258 #undef TARGET_MACHINE_DEPENDENT_REORG
259 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
261 #undef TARGET_INIT_BUILTINS
262 #define TARGET_INIT_BUILTINS arm_init_builtins
263 #undef TARGET_EXPAND_BUILTIN
264 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
266 #undef TARGET_INIT_LIBFUNCS
267 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
269 #undef TARGET_PROMOTE_FUNCTION_ARGS
270 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
271 #undef TARGET_PROMOTE_FUNCTION_RETURN
272 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
273 #undef TARGET_PROMOTE_PROTOTYPES
274 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
275 #undef TARGET_PASS_BY_REFERENCE
276 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
277 #undef TARGET_ARG_PARTIAL_BYTES
278 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
280 #undef TARGET_STRUCT_VALUE_RTX
281 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
283 #undef TARGET_SETUP_INCOMING_VARARGS
284 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
286 #undef TARGET_DEFAULT_SHORT_ENUMS
287 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
289 #undef TARGET_ALIGN_ANON_BITFIELD
290 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
292 #undef TARGET_CXX_GUARD_TYPE
293 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
295 #undef TARGET_CXX_GUARD_MASK_BIT
296 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
298 #undef TARGET_CXX_GET_COOKIE_SIZE
299 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
301 #undef TARGET_CXX_COOKIE_HAS_SIZE
302 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
304 #undef TARGET_CXX_CDTOR_RETURNS_THIS
305 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
307 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
308 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
310 #undef TARGET_CXX_EXPORT_CLASS_DATA
311 #define TARGET_CXX_EXPORT_CLASS_DATA arm_cxx_export_class_data
313 struct gcc_target targetm = TARGET_INITIALIZER;
315 /* Obstack for minipool constant handling. */
316 static struct obstack minipool_obstack;
317 static char * minipool_startobj;
319 /* The maximum number of insns skipped which
320 will be conditionalised if possible. */
321 static int max_insns_skipped = 5;
323 extern FILE * asm_out_file;
325 /* True if we are currently building a constant table. */
326 int making_const_table;
328 /* Define the information needed to generate branch insns. This is
329 stored from the compare operation. */
330 rtx arm_compare_op0, arm_compare_op1;
332 /* The processor for which instructions should be scheduled. */
333 enum processor_type arm_tune = arm_none;
335 /* Which floating point model to use. */
336 enum arm_fp_model arm_fp_model;
338 /* Which floating point hardware is available. */
339 enum fputype arm_fpu_arch;
341 /* Which floating point hardware to schedule for. */
342 enum fputype arm_fpu_tune;
344 /* Whether to use floating point hardware. */
345 enum float_abi_type arm_float_abi;
347 /* Which ABI to use. */
348 enum arm_abi_type arm_abi;
350 /* Set by the -mfpu=... option. */
351 const char * target_fpu_name = NULL;
353 /* Set by the -mfpe=... option. */
354 const char * target_fpe_name = NULL;
356 /* Set by the -mfloat-abi=... option. */
357 const char * target_float_abi_name = NULL;
359 /* Set by the legacy -mhard-float and -msoft-float options. */
360 const char * target_float_switch = NULL;
362 /* Set by the -mabi=... option. */
363 const char * target_abi_name = NULL;
365 /* Used to parse -mstructure_size_boundary command line option. */
366 const char * structure_size_string = NULL;
367 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
369 /* Used for Thumb call_via trampolines. */
370 rtx thumb_call_via_label[14];
371 static int thumb_call_reg_needed;
373 /* Bit values used to identify processor capabilities. */
374 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
375 #define FL_ARCH3M (1 << 1) /* Extended multiply */
376 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
377 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
378 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
379 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
380 #define FL_THUMB (1 << 6) /* Thumb aware */
381 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
382 #define FL_STRONG (1 << 8) /* StrongARM */
383 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
384 #define FL_XSCALE (1 << 10) /* XScale */
385 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
386 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
387 media instructions. */
388 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
390 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
392 #define FL_FOR_ARCH2 0
393 #define FL_FOR_ARCH3 FL_MODE32
394 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
395 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
396 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
397 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
398 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
399 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
400 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
401 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
402 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
403 #define FL_FOR_ARCH6J FL_FOR_ARCH6
404 #define FL_FOR_ARCH6K FL_FOR_ARCH6
405 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
406 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6
408 /* The bits in this mask specify which
409 instructions we are allowed to generate. */
410 static unsigned long insn_flags = 0;
412 /* The bits in this mask specify which instruction scheduling options should
413 be used. */
414 static unsigned long tune_flags = 0;
416 /* The following are used in the arm.md file as equivalents to bits
417 in the above two flag variables. */
419 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
420 int arm_arch3m = 0;
422 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
423 int arm_arch4 = 0;
425 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
426 int arm_arch4t = 0;
428 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
429 int arm_arch5 = 0;
431 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
432 int arm_arch5e = 0;
434 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
435 int arm_arch6 = 0;
437 /* Nonzero if this chip can benefit from load scheduling. */
438 int arm_ld_sched = 0;
440 /* Nonzero if this chip is a StrongARM. */
441 int arm_is_strong = 0;
443 /* Nonzero if this chip is a Cirrus variant. */
444 int arm_arch_cirrus = 0;
446 /* Nonzero if this chip supports Intel Wireless MMX technology. */
447 int arm_arch_iwmmxt = 0;
449 /* Nonzero if this chip is an XScale. */
450 int arm_arch_xscale = 0;
452 /* Nonzero if tuning for XScale */
453 int arm_tune_xscale = 0;
455 /* Nonzero if this chip is an ARM6 or an ARM7. */
456 int arm_is_6_or_7 = 0;
458 /* Nonzero if generating Thumb instructions. */
459 int thumb_code = 0;
461 /* Nonzero if we should define __THUMB_INTERWORK__ in the
462 preprocessor.
463 XXX This is a bit of a hack, it's intended to help work around
464 problems in GLD which doesn't understand that armv5t code is
465 interworking clean. */
466 int arm_cpp_interwork = 0;
468 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
469 must report the mode of the memory reference from PRINT_OPERAND to
470 PRINT_OPERAND_ADDRESS. */
471 enum machine_mode output_memory_reference_mode;
473 /* The register number to be used for the PIC offset register. */
474 const char * arm_pic_register_string = NULL;
475 int arm_pic_register = INVALID_REGNUM;
477 /* Set to 1 when a return insn is output, this means that the epilogue
478 is not needed. */
479 int return_used_this_function;
481 /* Set to 1 after arm_reorg has started. Reset to start at the start of
482 the next function. */
483 static int after_arm_reorg = 0;
485 /* The maximum number of insns to be used when loading a constant. */
486 static int arm_constant_limit = 3;
488 /* For an explanation of these variables, see final_prescan_insn below. */
489 int arm_ccfsm_state;
490 enum arm_cond_code arm_current_cc;
491 rtx arm_target_insn;
492 int arm_target_label;
494 /* The condition codes of the ARM, and the inverse function. */
495 static const char * const arm_condition_codes[] =
497 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
498 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
501 #define streq(string1, string2) (strcmp (string1, string2) == 0)
503 /* Initialization code. */
505 struct processors
507 const char *const name;
508 enum processor_type core;
509 const char *arch;
510 const unsigned long flags;
511 bool (* rtx_costs) (rtx, int, int, int *);
514 /* Not all of these give usefully different compilation alternatives,
515 but there is no simple way of generalizing them. */
516 static const struct processors all_cores[] =
518 /* ARM Cores */
519 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
520 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
521 #include "arm-cores.def"
522 #undef ARM_CORE
523 {NULL, arm_none, NULL, 0, NULL}
526 static const struct processors all_architectures[] =
528 /* ARM Architectures */
529 /* We don't specify rtx_costs here as it will be figured out
530 from the core. */
532 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
533 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
534 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
535 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
536 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
537 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
538 implementations that support it, so we will leave it out for now. */
539 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
540 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
541 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
542 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
543 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
544 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
545 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
546 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
547 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
548 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
549 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
550 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
551 {NULL, arm_none, NULL, 0 , NULL}
554 /* This is a magic structure. The 'string' field is magically filled in
555 with a pointer to the value specified by the user on the command line
556 assuming that the user has specified such a value. */
558 struct arm_cpu_select arm_select[] =
560 /* string name processors */
561 { NULL, "-mcpu=", all_cores },
562 { NULL, "-march=", all_architectures },
563 { NULL, "-mtune=", all_cores }
567 /* The name of the proprocessor macro to define for this architecture. */
569 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
571 struct fpu_desc
573 const char * name;
574 enum fputype fpu;
578 /* Available values for for -mfpu=. */
580 static const struct fpu_desc all_fpus[] =
582 {"fpa", FPUTYPE_FPA},
583 {"fpe2", FPUTYPE_FPA_EMU2},
584 {"fpe3", FPUTYPE_FPA_EMU2},
585 {"maverick", FPUTYPE_MAVERICK},
586 {"vfp", FPUTYPE_VFP}
590 /* Floating point models used by the different hardware.
591 See fputype in arm.h. */
593 static const enum fputype fp_model_for_fpu[] =
595 /* No FP hardware. */
596 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
597 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
598 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
599 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
600 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
601 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
605 struct float_abi
607 const char * name;
608 enum float_abi_type abi_type;
612 /* Available values for -mfloat-abi=. */
614 static const struct float_abi all_float_abis[] =
616 {"soft", ARM_FLOAT_ABI_SOFT},
617 {"softfp", ARM_FLOAT_ABI_SOFTFP},
618 {"hard", ARM_FLOAT_ABI_HARD}
622 struct abi_name
624 const char *name;
625 enum arm_abi_type abi_type;
629 /* Available values for -mabi=. */
631 static const struct abi_name arm_all_abis[] =
633 {"apcs-gnu", ARM_ABI_APCS},
634 {"atpcs", ARM_ABI_ATPCS},
635 {"aapcs", ARM_ABI_AAPCS},
636 {"iwmmxt", ARM_ABI_IWMMXT}
639 /* Return the number of bits set in VALUE. */
640 static unsigned
641 bit_count (unsigned long value)
643 unsigned long count = 0;
645 while (value)
647 count++;
648 value &= value - 1; /* Clear the least-significant set bit. */
651 return count;
654 /* Set up library functions unique to ARM. */
656 static void
657 arm_init_libfuncs (void)
659 /* There are no special library functions unless we are using the
660 ARM BPABI. */
661 if (!TARGET_BPABI)
662 return;
664 /* The functions below are described in Section 4 of the "Run-Time
665 ABI for the ARM architecture", Version 1.0. */
667 /* Double-precision floating-point arithmetic. Table 2. */
668 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
669 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
670 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
671 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
672 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
674 /* Double-precision comparisons. Table 3. */
675 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
676 set_optab_libfunc (ne_optab, DFmode, NULL);
677 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
678 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
679 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
680 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
681 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
683 /* Single-precision floating-point arithmetic. Table 4. */
684 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
685 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
686 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
687 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
688 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
690 /* Single-precision comparisons. Table 5. */
691 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
692 set_optab_libfunc (ne_optab, SFmode, NULL);
693 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
694 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
695 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
696 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
697 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
699 /* Floating-point to integer conversions. Table 6. */
700 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
701 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
702 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
703 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
704 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
705 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
706 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
707 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
709 /* Conversions between floating types. Table 7. */
710 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
711 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
713 /* Integer to floating-point conversions. Table 8. */
714 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
715 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
716 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
717 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
718 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
719 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
720 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
721 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
723 /* Long long. Table 9. */
724 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
725 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
726 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
727 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
728 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
729 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
730 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
731 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
733 /* Integer (32/32->32) division. \S 4.3.1. */
734 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
735 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
737 /* The divmod functions are designed so that they can be used for
738 plain division, even though they return both the quotient and the
739 remainder. The quotient is returned in the usual location (i.e.,
740 r0 for SImode, {r0, r1} for DImode), just as would be expected
741 for an ordinary division routine. Because the AAPCS calling
742 conventions specify that all of { r0, r1, r2, r3 } are
743 callee-saved registers, there is no need to tell the compiler
744 explicitly that those registers are clobbered by these
745 routines. */
746 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
747 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
748 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
749 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
752 /* Fix up any incompatible options that the user has specified.
753 This has now turned into a maze. */
754 void
755 arm_override_options (void)
757 unsigned i;
759 /* Set up the flags based on the cpu/architecture selected by the user. */
760 for (i = ARRAY_SIZE (arm_select); i--;)
762 struct arm_cpu_select * ptr = arm_select + i;
764 if (ptr->string != NULL && ptr->string[0] != '\0')
766 const struct processors * sel;
768 for (sel = ptr->processors; sel->name != NULL; sel++)
769 if (streq (ptr->string, sel->name))
771 /* Set the architecture define. */
772 if (i != 2)
773 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
775 /* Determine the processor core for which we should
776 tune code-generation. */
777 if (/* -mcpu= is a sensible default. */
778 i == 0
779 /* If -march= is used, and -mcpu= has not been used,
780 assume that we should tune for a representative
781 CPU from that architecture. */
782 || i == 1
783 /* -mtune= overrides -mcpu= and -march=. */
784 || i == 2)
785 arm_tune = (enum processor_type) (sel - ptr->processors);
787 if (i != 2)
789 /* If we have been given an architecture and a processor
790 make sure that they are compatible. We only generate
791 a warning though, and we prefer the CPU over the
792 architecture. */
793 if (insn_flags != 0 && (insn_flags ^ sel->flags))
794 warning ("switch -mcpu=%s conflicts with -march= switch",
795 ptr->string);
797 insn_flags = sel->flags;
800 break;
803 if (sel->name == NULL)
804 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
808 /* If the user did not specify a processor, choose one for them. */
809 if (insn_flags == 0)
811 const struct processors * sel;
812 unsigned int sought;
813 enum processor_type cpu;
815 cpu = TARGET_CPU_DEFAULT;
816 if (cpu == arm_none)
818 #ifdef SUBTARGET_CPU_DEFAULT
819 /* Use the subtarget default CPU if none was specified by
820 configure. */
821 cpu = SUBTARGET_CPU_DEFAULT;
822 #endif
823 /* Default to ARM6. */
824 if (cpu == arm_none)
825 cpu = arm6;
827 sel = &all_cores[cpu];
829 insn_flags = sel->flags;
831 /* Now check to see if the user has specified some command line
832 switch that require certain abilities from the cpu. */
833 sought = 0;
835 if (TARGET_INTERWORK || TARGET_THUMB)
837 sought |= (FL_THUMB | FL_MODE32);
839 /* There are no ARM processors that support both APCS-26 and
840 interworking. Therefore we force FL_MODE26 to be removed
841 from insn_flags here (if it was set), so that the search
842 below will always be able to find a compatible processor. */
843 insn_flags &= ~FL_MODE26;
846 if (sought != 0 && ((sought & insn_flags) != sought))
848 /* Try to locate a CPU type that supports all of the abilities
849 of the default CPU, plus the extra abilities requested by
850 the user. */
851 for (sel = all_cores; sel->name != NULL; sel++)
852 if ((sel->flags & sought) == (sought | insn_flags))
853 break;
855 if (sel->name == NULL)
857 unsigned current_bit_count = 0;
858 const struct processors * best_fit = NULL;
860 /* Ideally we would like to issue an error message here
861 saying that it was not possible to find a CPU compatible
862 with the default CPU, but which also supports the command
863 line options specified by the programmer, and so they
864 ought to use the -mcpu=<name> command line option to
865 override the default CPU type.
867 If we cannot find a cpu that has both the
868 characteristics of the default cpu and the given
869 command line options we scan the array again looking
870 for a best match. */
871 for (sel = all_cores; sel->name != NULL; sel++)
872 if ((sel->flags & sought) == sought)
874 unsigned count;
876 count = bit_count (sel->flags & insn_flags);
878 if (count >= current_bit_count)
880 best_fit = sel;
881 current_bit_count = count;
885 if (best_fit == NULL)
886 abort ();
887 else
888 sel = best_fit;
891 insn_flags = sel->flags;
893 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
894 if (arm_tune == arm_none)
895 arm_tune = (enum processor_type) (sel - all_cores);
898 /* The processor for which we should tune should now have been
899 chosen. */
900 if (arm_tune == arm_none)
901 abort ();
903 tune_flags = all_cores[(int)arm_tune].flags;
904 if (optimize_size)
905 targetm.rtx_costs = arm_size_rtx_costs;
906 else
907 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
909 /* Make sure that the processor choice does not conflict with any of the
910 other command line choices. */
911 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
913 warning ("target CPU does not support interworking" );
914 target_flags &= ~ARM_FLAG_INTERWORK;
917 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
919 warning ("target CPU does not support THUMB instructions");
920 target_flags &= ~ARM_FLAG_THUMB;
923 if (TARGET_APCS_FRAME && TARGET_THUMB)
925 /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
926 target_flags &= ~ARM_FLAG_APCS_FRAME;
929 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
930 from here where no function is being compiled currently. */
931 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
932 && TARGET_ARM)
933 warning ("enabling backtrace support is only meaningful when compiling for the Thumb");
935 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
936 warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");
938 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
939 warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");
941 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
943 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
944 target_flags |= ARM_FLAG_APCS_FRAME;
947 if (TARGET_POKE_FUNCTION_NAME)
948 target_flags |= ARM_FLAG_APCS_FRAME;
950 if (TARGET_APCS_REENT && flag_pic)
951 error ("-fpic and -mapcs-reent are incompatible");
953 if (TARGET_APCS_REENT)
954 warning ("APCS reentrant code not supported. Ignored");
956 /* If this target is normally configured to use APCS frames, warn if they
957 are turned off and debugging is turned on. */
958 if (TARGET_ARM
959 && write_symbols != NO_DEBUG
960 && !TARGET_APCS_FRAME
961 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
962 warning ("-g with -mno-apcs-frame may not give sensible debugging");
964 /* If stack checking is disabled, we can use r10 as the PIC register,
965 which keeps r9 available. */
966 if (flag_pic)
967 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
969 if (TARGET_APCS_FLOAT)
970 warning ("passing floating point arguments in fp regs not yet supported");
972 /* Initialize boolean versions of the flags, for use in the arm.md file. */
973 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
974 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
975 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
976 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
977 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
978 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
979 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
980 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
982 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
983 arm_is_strong = (tune_flags & FL_STRONG) != 0;
984 thumb_code = (TARGET_ARM == 0);
985 arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
986 && !(tune_flags & FL_ARCH4))) != 0;
987 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
988 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
990 /* V5 code we generate is completely interworking capable, so we turn off
991 TARGET_INTERWORK here to avoid many tests later on. */
993 /* XXX However, we must pass the right pre-processor defines to CPP
994 or GLD can get confused. This is a hack. */
995 if (TARGET_INTERWORK)
996 arm_cpp_interwork = 1;
998 if (arm_arch5)
999 target_flags &= ~ARM_FLAG_INTERWORK;
1001 if (target_abi_name)
1003 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1005 if (streq (arm_all_abis[i].name, target_abi_name))
1007 arm_abi = arm_all_abis[i].abi_type;
1008 break;
1011 if (i == ARRAY_SIZE (arm_all_abis))
1012 error ("invalid ABI option: -mabi=%s", target_abi_name);
1014 else
1015 arm_abi = ARM_DEFAULT_ABI;
1017 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1018 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1020 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1021 error ("iwmmxt abi requires an iwmmxt capable cpu");
1023 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1024 if (target_fpu_name == NULL && target_fpe_name != NULL)
1026 if (streq (target_fpe_name, "2"))
1027 target_fpu_name = "fpe2";
1028 else if (streq (target_fpe_name, "3"))
1029 target_fpu_name = "fpe3";
1030 else
1031 error ("invalid floating point emulation option: -mfpe=%s",
1032 target_fpe_name);
1034 if (target_fpu_name != NULL)
1036 /* The user specified a FPU. */
1037 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1039 if (streq (all_fpus[i].name, target_fpu_name))
1041 arm_fpu_arch = all_fpus[i].fpu;
1042 arm_fpu_tune = arm_fpu_arch;
1043 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1044 break;
1047 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1048 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1050 else
1052 #ifdef FPUTYPE_DEFAULT
1053 /* Use the default if it is specified for this platform. */
1054 arm_fpu_arch = FPUTYPE_DEFAULT;
1055 arm_fpu_tune = FPUTYPE_DEFAULT;
1056 #else
1057 /* Pick one based on CPU type. */
1058 /* ??? Some targets assume FPA is the default.
1059 if ((insn_flags & FL_VFP) != 0)
1060 arm_fpu_arch = FPUTYPE_VFP;
1061 else
1063 if (arm_arch_cirrus)
1064 arm_fpu_arch = FPUTYPE_MAVERICK;
1065 else
1066 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1067 #endif
1068 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1069 arm_fpu_tune = FPUTYPE_FPA;
1070 else
1071 arm_fpu_tune = arm_fpu_arch;
1072 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1073 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1074 abort ();
1077 if (target_float_abi_name != NULL)
1079 /* The user specified a FP ABI. */
1080 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1082 if (streq (all_float_abis[i].name, target_float_abi_name))
1084 arm_float_abi = all_float_abis[i].abi_type;
1085 break;
1088 if (i == ARRAY_SIZE (all_float_abis))
1089 error ("invalid floating point abi: -mfloat-abi=%s",
1090 target_float_abi_name);
1092 else if (target_float_switch)
1094 /* This is a bit of a hack to avoid needing target flags for these. */
1095 if (target_float_switch[0] == 'h')
1096 arm_float_abi = ARM_FLOAT_ABI_HARD;
1097 else
1098 arm_float_abi = ARM_FLOAT_ABI_SOFT;
1100 else
1101 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1103 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1104 sorry ("-mfloat-abi=hard and VFP");
1106 /* If soft-float is specified then don't use FPU. */
1107 if (TARGET_SOFT_FLOAT)
1108 arm_fpu_arch = FPUTYPE_NONE;
1110 /* For arm2/3 there is no need to do any scheduling if there is only
1111 a floating point emulator, or we are doing software floating-point. */
1112 if ((TARGET_SOFT_FLOAT
1113 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1114 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1115 && (tune_flags & FL_MODE32) == 0)
1116 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1118 /* Override the default structure alignment for AAPCS ABI. */
1119 if (arm_abi == ARM_ABI_AAPCS)
1120 arm_structure_size_boundary = 8;
1122 if (structure_size_string != NULL)
1124 int size = strtol (structure_size_string, NULL, 0);
1126 if (size == 8 || size == 32
1127 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1128 arm_structure_size_boundary = size;
1129 else
1130 warning ("structure size boundary can only be set to %s",
1131 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1134 if (arm_pic_register_string != NULL)
1136 int pic_register = decode_reg_name (arm_pic_register_string);
1138 if (!flag_pic)
1139 warning ("-mpic-register= is useless without -fpic");
1141 /* Prevent the user from choosing an obviously stupid PIC register. */
1142 else if (pic_register < 0 || call_used_regs[pic_register]
1143 || pic_register == HARD_FRAME_POINTER_REGNUM
1144 || pic_register == STACK_POINTER_REGNUM
1145 || pic_register >= PC_REGNUM)
1146 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1147 else
1148 arm_pic_register = pic_register;
1151 if (TARGET_THUMB && flag_schedule_insns)
1153 /* Don't warn since it's on by default in -O2. */
1154 flag_schedule_insns = 0;
1157 if (optimize_size)
1159 /* There's some dispute as to whether this should be 1 or 2. However,
1160 experiments seem to show that in pathological cases a setting of
1161 1 degrades less severely than a setting of 2. This could change if
1162 other parts of the compiler change their behavior. */
1163 arm_constant_limit = 1;
1165 /* If optimizing for size, bump the number of instructions that we
1166 are prepared to conditionally execute (even on a StrongARM). */
1167 max_insns_skipped = 6;
1169 else
1171 /* For processors with load scheduling, it never costs more than
1172 2 cycles to load a constant, and the load scheduler may well
1173 reduce that to 1. */
1174 if (arm_ld_sched)
1175 arm_constant_limit = 1;
1177 /* On XScale the longer latency of a load makes it more difficult
1178 to achieve a good schedule, so it's faster to synthesize
1179 constants that can be done in two insns. */
1180 if (arm_tune_xscale)
1181 arm_constant_limit = 2;
1183 /* StrongARM has early execution of branches, so a sequence
1184 that is worth skipping is shorter. */
1185 if (arm_is_strong)
1186 max_insns_skipped = 3;
1189 /* Register global variables with the garbage collector. */
1190 arm_add_gc_roots ();
1193 static void
1194 arm_add_gc_roots (void)
1196 gcc_obstack_init(&minipool_obstack);
1197 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1200 /* A table of known ARM exception types.
1201 For use with the interrupt function attribute. */
1203 typedef struct
1205 const char *const arg;
1206 const unsigned long return_value;
1208 isr_attribute_arg;
1210 static const isr_attribute_arg isr_attribute_args [] =
1212 { "IRQ", ARM_FT_ISR },
1213 { "irq", ARM_FT_ISR },
1214 { "FIQ", ARM_FT_FIQ },
1215 { "fiq", ARM_FT_FIQ },
1216 { "ABORT", ARM_FT_ISR },
1217 { "abort", ARM_FT_ISR },
1218 { "ABORT", ARM_FT_ISR },
1219 { "abort", ARM_FT_ISR },
1220 { "UNDEF", ARM_FT_EXCEPTION },
1221 { "undef", ARM_FT_EXCEPTION },
1222 { "SWI", ARM_FT_EXCEPTION },
1223 { "swi", ARM_FT_EXCEPTION },
1224 { NULL, ARM_FT_NORMAL }
1227 /* Returns the (interrupt) function type of the current
1228 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1230 static unsigned long
1231 arm_isr_value (tree argument)
1233 const isr_attribute_arg * ptr;
1234 const char * arg;
1236 /* No argument - default to IRQ. */
1237 if (argument == NULL_TREE)
1238 return ARM_FT_ISR;
1240 /* Get the value of the argument. */
1241 if (TREE_VALUE (argument) == NULL_TREE
1242 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1243 return ARM_FT_UNKNOWN;
1245 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1247 /* Check it against the list of known arguments. */
1248 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1249 if (streq (arg, ptr->arg))
1250 return ptr->return_value;
1252 /* An unrecognized interrupt type. */
1253 return ARM_FT_UNKNOWN;
1256 /* Computes the type of the current function. */
1258 static unsigned long
1259 arm_compute_func_type (void)
1261 unsigned long type = ARM_FT_UNKNOWN;
1262 tree a;
1263 tree attr;
1265 if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1266 abort ();
1268 /* Decide if the current function is volatile. Such functions
1269 never return, and many memory cycles can be saved by not storing
1270 register values that will never be needed again. This optimization
1271 was added to speed up context switching in a kernel application. */
1272 if (optimize > 0
1273 && TREE_NOTHROW (current_function_decl)
1274 && TREE_THIS_VOLATILE (current_function_decl))
1275 type |= ARM_FT_VOLATILE;
1277 if (cfun->static_chain_decl != NULL)
1278 type |= ARM_FT_NESTED;
1280 attr = DECL_ATTRIBUTES (current_function_decl);
1282 a = lookup_attribute ("naked", attr);
1283 if (a != NULL_TREE)
1284 type |= ARM_FT_NAKED;
1286 a = lookup_attribute ("isr", attr);
1287 if (a == NULL_TREE)
1288 a = lookup_attribute ("interrupt", attr);
1290 if (a == NULL_TREE)
1291 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1292 else
1293 type |= arm_isr_value (TREE_VALUE (a));
1295 return type;
1298 /* Returns the type of the current function. */
1300 unsigned long
1301 arm_current_func_type (void)
1303 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1304 cfun->machine->func_type = arm_compute_func_type ();
1306 return cfun->machine->func_type;
1309 /* Return 1 if it is possible to return using a single instruction.
1310 If SIBLING is non-null, this is a test for a return before a sibling
1311 call. SIBLING is the call insn, so we can examine its register usage. */
1314 use_return_insn (int iscond, rtx sibling)
1316 int regno;
1317 unsigned int func_type;
1318 unsigned long saved_int_regs;
1319 unsigned HOST_WIDE_INT stack_adjust;
1320 arm_stack_offsets *offsets;
1322 /* Never use a return instruction before reload has run. */
1323 if (!reload_completed)
1324 return 0;
1326 func_type = arm_current_func_type ();
1328 /* Naked functions and volatile functions need special
1329 consideration. */
1330 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1331 return 0;
1333 /* So do interrupt functions that use the frame pointer. */
1334 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1335 return 0;
1337 offsets = arm_get_frame_offsets ();
1338 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1340 /* As do variadic functions. */
1341 if (current_function_pretend_args_size
1342 || cfun->machine->uses_anonymous_args
1343 /* Or if the function calls __builtin_eh_return () */
1344 || current_function_calls_eh_return
1345 /* Or if the function calls alloca */
1346 || current_function_calls_alloca
1347 /* Or if there is a stack adjustment. However, if the stack pointer
1348 is saved on the stack, we can use a pre-incrementing stack load. */
1349 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1350 return 0;
1352 saved_int_regs = arm_compute_save_reg_mask ();
1354 /* Unfortunately, the insn
1356 ldmib sp, {..., sp, ...}
1358 triggers a bug on most SA-110 based devices, such that the stack
1359 pointer won't be correctly restored if the instruction takes a
1360 page fault. We work around this problem by popping r3 along with
1361 the other registers, since that is never slower than executing
1362 another instruction.
1364 We test for !arm_arch5 here, because code for any architecture
1365 less than this could potentially be run on one of the buggy
1366 chips. */
1367 if (stack_adjust == 4 && !arm_arch5)
1369 /* Validate that r3 is a call-clobbered register (always true in
1370 the default abi) ... */
1371 if (!call_used_regs[3])
1372 return 0;
1374 /* ... that it isn't being used for a return value (always true
1375 until we implement return-in-regs), or for a tail-call
1376 argument ... */
1377 if (sibling)
1379 if (GET_CODE (sibling) != CALL_INSN)
1380 abort ();
1382 if (find_regno_fusage (sibling, USE, 3))
1383 return 0;
1386 /* ... and that there are no call-saved registers in r0-r2
1387 (always true in the default ABI). */
1388 if (saved_int_regs & 0x7)
1389 return 0;
1392 /* Can't be done if interworking with Thumb, and any registers have been
1393 stacked. */
1394 if (TARGET_INTERWORK && saved_int_regs != 0)
1395 return 0;
1397 /* On StrongARM, conditional returns are expensive if they aren't
1398 taken and multiple registers have been stacked. */
1399 if (iscond && arm_is_strong)
1401 /* Conditional return when just the LR is stored is a simple
1402 conditional-load instruction, that's not expensive. */
1403 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1404 return 0;
1406 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1407 return 0;
1410 /* If there are saved registers but the LR isn't saved, then we need
1411 two instructions for the return. */
1412 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1413 return 0;
1415 /* Can't be done if any of the FPA regs are pushed,
1416 since this also requires an insn. */
1417 if (TARGET_HARD_FLOAT && TARGET_FPA)
1418 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1419 if (regs_ever_live[regno] && !call_used_regs[regno])
1420 return 0;
1422 /* Likewise VFP regs. */
1423 if (TARGET_HARD_FLOAT && TARGET_VFP)
1424 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1425 if (regs_ever_live[regno] && !call_used_regs[regno])
1426 return 0;
1428 if (TARGET_REALLY_IWMMXT)
1429 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1430 if (regs_ever_live[regno] && ! call_used_regs [regno])
1431 return 0;
1433 return 1;
1436 /* Return TRUE if int I is a valid immediate ARM constant. */
1439 const_ok_for_arm (HOST_WIDE_INT i)
1441 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
1443 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1444 be all zero, or all one. */
1445 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1446 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1447 != ((~(unsigned HOST_WIDE_INT) 0)
1448 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1449 return FALSE;
1451 /* Fast return for 0 and powers of 2 */
1452 if ((i & (i - 1)) == 0)
1453 return TRUE;
1457 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
1458 return TRUE;
1459 mask =
1460 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
1461 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
1463 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
1465 return FALSE;
1468 /* Return true if I is a valid constant for the operation CODE. */
1469 static int
1470 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1472 if (const_ok_for_arm (i))
1473 return 1;
1475 switch (code)
1477 case PLUS:
1478 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1480 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1481 case XOR:
1482 case IOR:
1483 return 0;
1485 case AND:
1486 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1488 default:
1489 abort ();
1493 /* Emit a sequence of insns to handle a large constant.
1494 CODE is the code of the operation required, it can be any of SET, PLUS,
1495 IOR, AND, XOR, MINUS;
1496 MODE is the mode in which the operation is being performed;
1497 VAL is the integer to operate on;
1498 SOURCE is the other operand (a register, or a null-pointer for SET);
1499 SUBTARGETS means it is safe to create scratch registers if that will
1500 either produce a simpler sequence, or we will want to cse the values.
1501 Return value is the number of insns emitted. */
1504 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1505 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1507 rtx cond;
1509 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1510 cond = COND_EXEC_TEST (PATTERN (insn));
1511 else
1512 cond = NULL_RTX;
1514 if (subtargets || code == SET
1515 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1516 && REGNO (target) != REGNO (source)))
1518 /* After arm_reorg has been called, we can't fix up expensive
1519 constants by pushing them into memory so we must synthesize
1520 them in-line, regardless of the cost. This is only likely to
1521 be more costly on chips that have load delay slots and we are
1522 compiling without running the scheduler (so no splitting
1523 occurred before the final instruction emission).
1525 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1527 if (!after_arm_reorg
1528 && !cond
1529 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1530 1, 0)
1531 > arm_constant_limit + (code != SET)))
1533 if (code == SET)
1535 /* Currently SET is the only monadic value for CODE, all
1536 the rest are diadic. */
1537 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1538 return 1;
1540 else
1542 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1544 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1545 /* For MINUS, the value is subtracted from, since we never
1546 have subtraction of a constant. */
1547 if (code == MINUS)
1548 emit_insn (gen_rtx_SET (VOIDmode, target,
1549 gen_rtx_MINUS (mode, temp, source)));
1550 else
1551 emit_insn (gen_rtx_SET (VOIDmode, target,
1552 gen_rtx_fmt_ee (code, mode, source, temp)));
1553 return 2;
1558 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1562 static int
1563 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1565 HOST_WIDE_INT temp1;
1566 int num_insns = 0;
1569 int end;
1571 if (i <= 0)
1572 i += 32;
1573 if (remainder & (3 << (i - 2)))
1575 end = i - 8;
1576 if (end < 0)
1577 end += 32;
1578 temp1 = remainder & ((0x0ff << end)
1579 | ((i < end) ? (0xff >> (32 - end)) : 0));
1580 remainder &= ~temp1;
1581 num_insns++;
1582 i -= 6;
1584 i -= 2;
1585 } while (remainder);
1586 return num_insns;
1589 /* Emit an instruction with the indicated PATTERN. If COND is
1590 non-NULL, conditionalize the execution of the instruction on COND
1591 being true. */
1593 static void
1594 emit_constant_insn (rtx cond, rtx pattern)
1596 if (cond)
1597 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1598 emit_insn (pattern);
1601 /* As above, but extra parameter GENERATE which, if clear, suppresses
1602 RTL generation. */
1604 static int
1605 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1606 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1607 int generate)
1609 int can_invert = 0;
1610 int can_negate = 0;
1611 int can_negate_initial = 0;
1612 int can_shift = 0;
1613 int i;
1614 int num_bits_set = 0;
1615 int set_sign_bit_copies = 0;
1616 int clear_sign_bit_copies = 0;
1617 int clear_zero_bit_copies = 0;
1618 int set_zero_bit_copies = 0;
1619 int insns = 0;
1620 unsigned HOST_WIDE_INT temp1, temp2;
1621 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1623 /* Find out which operations are safe for a given CODE. Also do a quick
1624 check for degenerate cases; these can occur when DImode operations
1625 are split. */
1626 switch (code)
1628 case SET:
1629 can_invert = 1;
1630 can_shift = 1;
1631 can_negate = 1;
1632 break;
1634 case PLUS:
1635 can_negate = 1;
1636 can_negate_initial = 1;
1637 break;
1639 case IOR:
1640 if (remainder == 0xffffffff)
1642 if (generate)
1643 emit_constant_insn (cond,
1644 gen_rtx_SET (VOIDmode, target,
1645 GEN_INT (ARM_SIGN_EXTEND (val))));
1646 return 1;
1648 if (remainder == 0)
1650 if (reload_completed && rtx_equal_p (target, source))
1651 return 0;
1652 if (generate)
1653 emit_constant_insn (cond,
1654 gen_rtx_SET (VOIDmode, target, source));
1655 return 1;
1657 break;
1659 case AND:
1660 if (remainder == 0)
1662 if (generate)
1663 emit_constant_insn (cond,
1664 gen_rtx_SET (VOIDmode, target, const0_rtx));
1665 return 1;
1667 if (remainder == 0xffffffff)
1669 if (reload_completed && rtx_equal_p (target, source))
1670 return 0;
1671 if (generate)
1672 emit_constant_insn (cond,
1673 gen_rtx_SET (VOIDmode, target, source));
1674 return 1;
1676 can_invert = 1;
1677 break;
1679 case XOR:
1680 if (remainder == 0)
1682 if (reload_completed && rtx_equal_p (target, source))
1683 return 0;
1684 if (generate)
1685 emit_constant_insn (cond,
1686 gen_rtx_SET (VOIDmode, target, source));
1687 return 1;
1689 if (remainder == 0xffffffff)
1691 if (generate)
1692 emit_constant_insn (cond,
1693 gen_rtx_SET (VOIDmode, target,
1694 gen_rtx_NOT (mode, source)));
1695 return 1;
1698 /* We don't know how to handle this yet below. */
1699 abort ();
1701 case MINUS:
1702 /* We treat MINUS as (val - source), since (source - val) is always
1703 passed as (source + (-val)). */
1704 if (remainder == 0)
1706 if (generate)
1707 emit_constant_insn (cond,
1708 gen_rtx_SET (VOIDmode, target,
1709 gen_rtx_NEG (mode, source)));
1710 return 1;
1712 if (const_ok_for_arm (val))
1714 if (generate)
1715 emit_constant_insn (cond,
1716 gen_rtx_SET (VOIDmode, target,
1717 gen_rtx_MINUS (mode, GEN_INT (val),
1718 source)));
1719 return 1;
1721 can_negate = 1;
1723 break;
1725 default:
1726 abort ();
1729 /* If we can do it in one insn get out quickly. */
1730 if (const_ok_for_arm (val)
1731 || (can_negate_initial && const_ok_for_arm (-val))
1732 || (can_invert && const_ok_for_arm (~val)))
1734 if (generate)
1735 emit_constant_insn (cond,
1736 gen_rtx_SET (VOIDmode, target,
1737 (source
1738 ? gen_rtx_fmt_ee (code, mode, source,
1739 GEN_INT (val))
1740 : GEN_INT (val))));
1741 return 1;
1744 /* Calculate a few attributes that may be useful for specific
1745 optimizations. */
1746 for (i = 31; i >= 0; i--)
1748 if ((remainder & (1 << i)) == 0)
1749 clear_sign_bit_copies++;
1750 else
1751 break;
1754 for (i = 31; i >= 0; i--)
1756 if ((remainder & (1 << i)) != 0)
1757 set_sign_bit_copies++;
1758 else
1759 break;
1762 for (i = 0; i <= 31; i++)
1764 if ((remainder & (1 << i)) == 0)
1765 clear_zero_bit_copies++;
1766 else
1767 break;
1770 for (i = 0; i <= 31; i++)
1772 if ((remainder & (1 << i)) != 0)
1773 set_zero_bit_copies++;
1774 else
1775 break;
1778 switch (code)
1780 case SET:
1781 /* See if we can do this by sign_extending a constant that is known
1782 to be negative. This is a good, way of doing it, since the shift
1783 may well merge into a subsequent insn. */
1784 if (set_sign_bit_copies > 1)
1786 if (const_ok_for_arm
1787 (temp1 = ARM_SIGN_EXTEND (remainder
1788 << (set_sign_bit_copies - 1))))
1790 if (generate)
1792 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1793 emit_constant_insn (cond,
1794 gen_rtx_SET (VOIDmode, new_src,
1795 GEN_INT (temp1)));
1796 emit_constant_insn (cond,
1797 gen_ashrsi3 (target, new_src,
1798 GEN_INT (set_sign_bit_copies - 1)));
1800 return 2;
1802 /* For an inverted constant, we will need to set the low bits,
1803 these will be shifted out of harm's way. */
1804 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1805 if (const_ok_for_arm (~temp1))
1807 if (generate)
1809 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1810 emit_constant_insn (cond,
1811 gen_rtx_SET (VOIDmode, new_src,
1812 GEN_INT (temp1)));
1813 emit_constant_insn (cond,
1814 gen_ashrsi3 (target, new_src,
1815 GEN_INT (set_sign_bit_copies - 1)));
1817 return 2;
1821 /* See if we can generate this by setting the bottom (or the top)
1822 16 bits, and then shifting these into the other half of the
1823 word. We only look for the simplest cases, to do more would cost
1824 too much. Be careful, however, not to generate this when the
1825 alternative would take fewer insns. */
1826 if (val & 0xffff0000)
1828 temp1 = remainder & 0xffff0000;
1829 temp2 = remainder & 0x0000ffff;
1831 /* Overlaps outside this range are best done using other methods. */
1832 for (i = 9; i < 24; i++)
1834 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1835 && !const_ok_for_arm (temp2))
1837 rtx new_src = (subtargets
1838 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1839 : target);
1840 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1841 source, subtargets, generate);
1842 source = new_src;
1843 if (generate)
1844 emit_constant_insn
1845 (cond,
1846 gen_rtx_SET
1847 (VOIDmode, target,
1848 gen_rtx_IOR (mode,
1849 gen_rtx_ASHIFT (mode, source,
1850 GEN_INT (i)),
1851 source)));
1852 return insns + 1;
1856 /* Don't duplicate cases already considered. */
1857 for (i = 17; i < 24; i++)
1859 if (((temp1 | (temp1 >> i)) == remainder)
1860 && !const_ok_for_arm (temp1))
1862 rtx new_src = (subtargets
1863 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1864 : target);
1865 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1866 source, subtargets, generate);
1867 source = new_src;
1868 if (generate)
1869 emit_constant_insn
1870 (cond,
1871 gen_rtx_SET (VOIDmode, target,
1872 gen_rtx_IOR
1873 (mode,
1874 gen_rtx_LSHIFTRT (mode, source,
1875 GEN_INT (i)),
1876 source)));
1877 return insns + 1;
1881 break;
1883 case IOR:
1884 case XOR:
1885 /* If we have IOR or XOR, and the constant can be loaded in a
1886 single instruction, and we can find a temporary to put it in,
1887 then this can be done in two instructions instead of 3-4. */
1888 if (subtargets
1889 /* TARGET can't be NULL if SUBTARGETS is 0 */
1890 || (reload_completed && !reg_mentioned_p (target, source)))
1892 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1894 if (generate)
1896 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1898 emit_constant_insn (cond,
1899 gen_rtx_SET (VOIDmode, sub,
1900 GEN_INT (val)));
1901 emit_constant_insn (cond,
1902 gen_rtx_SET (VOIDmode, target,
1903 gen_rtx_fmt_ee (code, mode,
1904 source, sub)));
1906 return 2;
1910 if (code == XOR)
1911 break;
1913 if (set_sign_bit_copies > 8
1914 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1916 if (generate)
1918 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1919 rtx shift = GEN_INT (set_sign_bit_copies);
1921 emit_constant_insn
1922 (cond,
1923 gen_rtx_SET (VOIDmode, sub,
1924 gen_rtx_NOT (mode,
1925 gen_rtx_ASHIFT (mode,
1926 source,
1927 shift))));
1928 emit_constant_insn
1929 (cond,
1930 gen_rtx_SET (VOIDmode, target,
1931 gen_rtx_NOT (mode,
1932 gen_rtx_LSHIFTRT (mode, sub,
1933 shift))));
1935 return 2;
1938 if (set_zero_bit_copies > 8
1939 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1941 if (generate)
1943 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1944 rtx shift = GEN_INT (set_zero_bit_copies);
1946 emit_constant_insn
1947 (cond,
1948 gen_rtx_SET (VOIDmode, sub,
1949 gen_rtx_NOT (mode,
1950 gen_rtx_LSHIFTRT (mode,
1951 source,
1952 shift))));
1953 emit_constant_insn
1954 (cond,
1955 gen_rtx_SET (VOIDmode, target,
1956 gen_rtx_NOT (mode,
1957 gen_rtx_ASHIFT (mode, sub,
1958 shift))));
1960 return 2;
1963 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1965 if (generate)
1967 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1968 emit_constant_insn (cond,
1969 gen_rtx_SET (VOIDmode, sub,
1970 gen_rtx_NOT (mode, source)));
1971 source = sub;
1972 if (subtargets)
1973 sub = gen_reg_rtx (mode);
1974 emit_constant_insn (cond,
1975 gen_rtx_SET (VOIDmode, sub,
1976 gen_rtx_AND (mode, source,
1977 GEN_INT (temp1))));
1978 emit_constant_insn (cond,
1979 gen_rtx_SET (VOIDmode, target,
1980 gen_rtx_NOT (mode, sub)));
1982 return 3;
1984 break;
1986 case AND:
1987 /* See if two shifts will do 2 or more insn's worth of work. */
1988 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1990 HOST_WIDE_INT shift_mask = ((0xffffffff
1991 << (32 - clear_sign_bit_copies))
1992 & 0xffffffff);
1994 if ((remainder | shift_mask) != 0xffffffff)
1996 if (generate)
1998 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1999 insns = arm_gen_constant (AND, mode, cond,
2000 remainder | shift_mask,
2001 new_src, source, subtargets, 1);
2002 source = new_src;
2004 else
2006 rtx targ = subtargets ? NULL_RTX : target;
2007 insns = arm_gen_constant (AND, mode, cond,
2008 remainder | shift_mask,
2009 targ, source, subtargets, 0);
2013 if (generate)
2015 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2016 rtx shift = GEN_INT (clear_sign_bit_copies);
2018 emit_insn (gen_ashlsi3 (new_src, source, shift));
2019 emit_insn (gen_lshrsi3 (target, new_src, shift));
2022 return insns + 2;
2025 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2027 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2029 if ((remainder | shift_mask) != 0xffffffff)
2031 if (generate)
2033 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2035 insns = arm_gen_constant (AND, mode, cond,
2036 remainder | shift_mask,
2037 new_src, source, subtargets, 1);
2038 source = new_src;
2040 else
2042 rtx targ = subtargets ? NULL_RTX : target;
2044 insns = arm_gen_constant (AND, mode, cond,
2045 remainder | shift_mask,
2046 targ, source, subtargets, 0);
2050 if (generate)
2052 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2053 rtx shift = GEN_INT (clear_zero_bit_copies);
2055 emit_insn (gen_lshrsi3 (new_src, source, shift));
2056 emit_insn (gen_ashlsi3 (target, new_src, shift));
2059 return insns + 2;
2062 break;
2064 default:
2065 break;
2068 for (i = 0; i < 32; i++)
2069 if (remainder & (1 << i))
2070 num_bits_set++;
2072 if (code == AND || (can_invert && num_bits_set > 16))
2073 remainder = (~remainder) & 0xffffffff;
2074 else if (code == PLUS && num_bits_set > 16)
2075 remainder = (-remainder) & 0xffffffff;
2076 else
2078 can_invert = 0;
2079 can_negate = 0;
2082 /* Now try and find a way of doing the job in either two or three
2083 instructions.
2084 We start by looking for the largest block of zeros that are aligned on
2085 a 2-bit boundary, we then fill up the temps, wrapping around to the
2086 top of the word when we drop off the bottom.
2087 In the worst case this code should produce no more than four insns. */
2089 int best_start = 0;
2090 int best_consecutive_zeros = 0;
2092 for (i = 0; i < 32; i += 2)
2094 int consecutive_zeros = 0;
2096 if (!(remainder & (3 << i)))
2098 while ((i < 32) && !(remainder & (3 << i)))
2100 consecutive_zeros += 2;
2101 i += 2;
2103 if (consecutive_zeros > best_consecutive_zeros)
2105 best_consecutive_zeros = consecutive_zeros;
2106 best_start = i - consecutive_zeros;
2108 i -= 2;
2112 /* So long as it won't require any more insns to do so, it's
2113 desirable to emit a small constant (in bits 0...9) in the last
2114 insn. This way there is more chance that it can be combined with
2115 a later addressing insn to form a pre-indexed load or store
2116 operation. Consider:
2118 *((volatile int *)0xe0000100) = 1;
2119 *((volatile int *)0xe0000110) = 2;
2121 We want this to wind up as:
2123 mov rA, #0xe0000000
2124 mov rB, #1
2125 str rB, [rA, #0x100]
2126 mov rB, #2
2127 str rB, [rA, #0x110]
2129 rather than having to synthesize both large constants from scratch.
2131 Therefore, we calculate how many insns would be required to emit
2132 the constant starting from `best_start', and also starting from
2133 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2134 yield a shorter sequence, we may as well use zero. */
2135 if (best_start != 0
2136 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2137 && (count_insns_for_constant (remainder, 0) <=
2138 count_insns_for_constant (remainder, best_start)))
2139 best_start = 0;
2141 /* Now start emitting the insns. */
2142 i = best_start;
2145 int end;
2147 if (i <= 0)
2148 i += 32;
2149 if (remainder & (3 << (i - 2)))
2151 end = i - 8;
2152 if (end < 0)
2153 end += 32;
2154 temp1 = remainder & ((0x0ff << end)
2155 | ((i < end) ? (0xff >> (32 - end)) : 0));
2156 remainder &= ~temp1;
2158 if (generate)
2160 rtx new_src, temp1_rtx;
2162 if (code == SET || code == MINUS)
2164 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2165 if (can_invert && code != MINUS)
2166 temp1 = ~temp1;
2168 else
2170 if (remainder && subtargets)
2171 new_src = gen_reg_rtx (mode);
2172 else
2173 new_src = target;
2174 if (can_invert)
2175 temp1 = ~temp1;
2176 else if (can_negate)
2177 temp1 = -temp1;
2180 temp1 = trunc_int_for_mode (temp1, mode);
2181 temp1_rtx = GEN_INT (temp1);
2183 if (code == SET)
2185 else if (code == MINUS)
2186 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2187 else
2188 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2190 emit_constant_insn (cond,
2191 gen_rtx_SET (VOIDmode, new_src,
2192 temp1_rtx));
2193 source = new_src;
2196 if (code == SET)
2198 can_invert = 0;
2199 code = PLUS;
2201 else if (code == MINUS)
2202 code = PLUS;
2204 insns++;
2205 i -= 6;
2207 i -= 2;
2209 while (remainder);
2212 return insns;
2215 /* Canonicalize a comparison so that we are more likely to recognize it.
2216 This can be done for a few constant compares, where we can make the
2217 immediate value easier to load. */
2219 enum rtx_code
2220 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2222 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2224 switch (code)
2226 case EQ:
2227 case NE:
2228 return code;
2230 case GT:
2231 case LE:
2232 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2233 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2235 *op1 = GEN_INT (i + 1);
2236 return code == GT ? GE : LT;
2238 break;
2240 case GE:
2241 case LT:
2242 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2243 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2245 *op1 = GEN_INT (i - 1);
2246 return code == GE ? GT : LE;
2248 break;
2250 case GTU:
2251 case LEU:
2252 if (i != ~((unsigned HOST_WIDE_INT) 0)
2253 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2255 *op1 = GEN_INT (i + 1);
2256 return code == GTU ? GEU : LTU;
2258 break;
2260 case GEU:
2261 case LTU:
2262 if (i != 0
2263 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2265 *op1 = GEN_INT (i - 1);
2266 return code == GEU ? GTU : LEU;
2268 break;
2270 default:
2271 abort ();
2274 return code;
2278 /* Define how to find the value returned by a function. */
2281 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2283 enum machine_mode mode;
2284 int unsignedp ATTRIBUTE_UNUSED;
2285 rtx r ATTRIBUTE_UNUSED;
2288 mode = TYPE_MODE (type);
2289 /* Promote integer types. */
2290 if (INTEGRAL_TYPE_P (type))
2291 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2292 return LIBCALL_VALUE(mode);
2295 /* Determine the amount of memory needed to store the possible return
2296 registers of an untyped call. */
2298 arm_apply_result_size (void)
2300 int size = 16;
2302 if (TARGET_ARM)
2304 if (TARGET_HARD_FLOAT_ABI)
2306 if (TARGET_FPA)
2307 size += 12;
2308 if (TARGET_MAVERICK)
2309 size += 8;
2311 if (TARGET_IWMMXT_ABI)
2312 size += 8;
2315 return size;
2318 /* Decide whether a type should be returned in memory (true)
2319 or in a register (false). This is called by the macro
2320 RETURN_IN_MEMORY. */
2322 arm_return_in_memory (tree type)
2324 HOST_WIDE_INT size;
2326 if (!AGGREGATE_TYPE_P (type) &&
2327 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2328 /* All simple types are returned in registers.
2329 For AAPCS, complex types are treated the same as aggregates. */
2330 return 0;
2332 size = int_size_in_bytes (type);
2334 if (arm_abi != ARM_ABI_APCS)
2336 /* ATPCS and later return aggregate types in memory only if they are
2337 larger than a word (or are variable size). */
2338 return (size < 0 || size > UNITS_PER_WORD);
2341 /* For the arm-wince targets we choose to be compatible with Microsoft's
2342 ARM and Thumb compilers, which always return aggregates in memory. */
2343 #ifndef ARM_WINCE
2344 /* All structures/unions bigger than one word are returned in memory.
2345 Also catch the case where int_size_in_bytes returns -1. In this case
2346 the aggregate is either huge or of variable size, and in either case
2347 we will want to return it via memory and not in a register. */
2348 if (size < 0 || size > UNITS_PER_WORD)
2349 return 1;
2351 if (TREE_CODE (type) == RECORD_TYPE)
2353 tree field;
2355 /* For a struct the APCS says that we only return in a register
2356 if the type is 'integer like' and every addressable element
2357 has an offset of zero. For practical purposes this means
2358 that the structure can have at most one non bit-field element
2359 and that this element must be the first one in the structure. */
2361 /* Find the first field, ignoring non FIELD_DECL things which will
2362 have been created by C++. */
2363 for (field = TYPE_FIELDS (type);
2364 field && TREE_CODE (field) != FIELD_DECL;
2365 field = TREE_CHAIN (field))
2366 continue;
2368 if (field == NULL)
2369 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2371 /* Check that the first field is valid for returning in a register. */
2373 /* ... Floats are not allowed */
2374 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2375 return 1;
2377 /* ... Aggregates that are not themselves valid for returning in
2378 a register are not allowed. */
2379 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2380 return 1;
2382 /* Now check the remaining fields, if any. Only bitfields are allowed,
2383 since they are not addressable. */
2384 for (field = TREE_CHAIN (field);
2385 field;
2386 field = TREE_CHAIN (field))
2388 if (TREE_CODE (field) != FIELD_DECL)
2389 continue;
2391 if (!DECL_BIT_FIELD_TYPE (field))
2392 return 1;
2395 return 0;
2398 if (TREE_CODE (type) == UNION_TYPE)
2400 tree field;
2402 /* Unions can be returned in registers if every element is
2403 integral, or can be returned in an integer register. */
2404 for (field = TYPE_FIELDS (type);
2405 field;
2406 field = TREE_CHAIN (field))
2408 if (TREE_CODE (field) != FIELD_DECL)
2409 continue;
2411 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2412 return 1;
2414 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2415 return 1;
2418 return 0;
2420 #endif /* not ARM_WINCE */
2422 /* Return all other types in memory. */
2423 return 1;
2426 /* Indicate whether or not words of a double are in big-endian order. */
2429 arm_float_words_big_endian (void)
2431 if (TARGET_MAVERICK)
2432 return 0;
2434 /* For FPA, float words are always big-endian. For VFP, floats words
2435 follow the memory system mode. */
2437 if (TARGET_FPA)
2439 return 1;
2442 if (TARGET_VFP)
2443 return (TARGET_BIG_END ? 1 : 0);
2445 return 1;
2448 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2449 for a call to a function whose data type is FNTYPE.
2450 For a library call, FNTYPE is NULL. */
2451 void
2452 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2453 rtx libname ATTRIBUTE_UNUSED,
2454 tree fndecl ATTRIBUTE_UNUSED)
2456 /* On the ARM, the offset starts at 0. */
2457 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2458 pcum->iwmmxt_nregs = 0;
2459 pcum->can_split = true;
2461 pcum->call_cookie = CALL_NORMAL;
2463 if (TARGET_LONG_CALLS)
2464 pcum->call_cookie = CALL_LONG;
2466 /* Check for long call/short call attributes. The attributes
2467 override any command line option. */
2468 if (fntype)
2470 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2471 pcum->call_cookie = CALL_SHORT;
2472 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2473 pcum->call_cookie = CALL_LONG;
2476 /* Varargs vectors are treated the same as long long.
2477 named_count avoids having to change the way arm handles 'named' */
2478 pcum->named_count = 0;
2479 pcum->nargs = 0;
2481 if (TARGET_REALLY_IWMMXT && fntype)
2483 tree fn_arg;
2485 for (fn_arg = TYPE_ARG_TYPES (fntype);
2486 fn_arg;
2487 fn_arg = TREE_CHAIN (fn_arg))
2488 pcum->named_count += 1;
2490 if (! pcum->named_count)
2491 pcum->named_count = INT_MAX;
2496 /* Return true if mode/type need doubleword alignment. */
2497 bool
2498 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2500 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2501 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2505 /* Determine where to put an argument to a function.
2506 Value is zero to push the argument on the stack,
2507 or a hard register in which to store the argument.
2509 MODE is the argument's machine mode.
2510 TYPE is the data type of the argument (as a tree).
2511 This is null for libcalls where that information may
2512 not be available.
2513 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2514 the preceding args and about the function being called.
2515 NAMED is nonzero if this argument is a named parameter
2516 (otherwise it is an extra parameter matching an ellipsis). */
2519 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2520 tree type, int named)
2522 int nregs;
2524 /* Varargs vectors are treated the same as long long.
2525 named_count avoids having to change the way arm handles 'named' */
2526 if (TARGET_IWMMXT_ABI
2527 && arm_vector_mode_supported_p (mode)
2528 && pcum->named_count > pcum->nargs + 1)
2530 if (pcum->iwmmxt_nregs <= 9)
2531 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2532 else
2534 pcum->can_split = false;
2535 return NULL_RTX;
2539 /* Put doubleword aligned quantities in even register pairs. */
2540 if (pcum->nregs & 1
2541 && ARM_DOUBLEWORD_ALIGN
2542 && arm_needs_doubleword_align (mode, type))
2543 pcum->nregs++;
2545 if (mode == VOIDmode)
2546 /* Compute operand 2 of the call insn. */
2547 return GEN_INT (pcum->call_cookie);
2549 /* Only allow splitting an arg between regs and memory if all preceding
2550 args were allocated to regs. For args passed by reference we only count
2551 the reference pointer. */
2552 if (pcum->can_split)
2553 nregs = 1;
2554 else
2555 nregs = ARM_NUM_REGS2 (mode, type);
2557 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2558 return NULL_RTX;
2560 return gen_rtx_REG (mode, pcum->nregs);
2563 static int
2564 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2565 tree type, bool named ATTRIBUTE_UNUSED)
2567 int nregs = pcum->nregs;
2569 if (arm_vector_mode_supported_p (mode))
2570 return 0;
2572 if (NUM_ARG_REGS > nregs
2573 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2574 && pcum->can_split)
2575 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2577 return 0;
2580 /* Variable sized types are passed by reference. This is a GCC
2581 extension to the ARM ABI. */
2583 static bool
2584 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2585 enum machine_mode mode ATTRIBUTE_UNUSED,
2586 tree type, bool named ATTRIBUTE_UNUSED)
2588 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2591 /* Encode the current state of the #pragma [no_]long_calls. */
2592 typedef enum
2594 OFF, /* No #pramgma [no_]long_calls is in effect. */
2595 LONG, /* #pragma long_calls is in effect. */
2596 SHORT /* #pragma no_long_calls is in effect. */
2597 } arm_pragma_enum;
2599 static arm_pragma_enum arm_pragma_long_calls = OFF;
2601 void
2602 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2604 arm_pragma_long_calls = LONG;
2607 void
2608 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2610 arm_pragma_long_calls = SHORT;
2613 void
2614 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2616 arm_pragma_long_calls = OFF;
2619 /* Table of machine attributes. */
2620 const struct attribute_spec arm_attribute_table[] =
2622 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2623 /* Function calls made to this symbol must be done indirectly, because
2624 it may lie outside of the 26 bit addressing range of a normal function
2625 call. */
2626 { "long_call", 0, 0, false, true, true, NULL },
2627 /* Whereas these functions are always known to reside within the 26 bit
2628 addressing range. */
2629 { "short_call", 0, 0, false, true, true, NULL },
2630 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2631 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2632 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2633 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2634 #ifdef ARM_PE
2635 /* ARM/PE has three new attributes:
2636 interfacearm - ?
2637 dllexport - for exporting a function/variable that will live in a dll
2638 dllimport - for importing a function/variable from a dll
2640 Microsoft allows multiple declspecs in one __declspec, separating
2641 them with spaces. We do NOT support this. Instead, use __declspec
2642 multiple times.
2644 { "dllimport", 0, 0, true, false, false, NULL },
2645 { "dllexport", 0, 0, true, false, false, NULL },
2646 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2647 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2648 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2649 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2650 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2651 #endif
2652 { NULL, 0, 0, false, false, false, NULL }
2655 /* Handle an attribute requiring a FUNCTION_DECL;
2656 arguments as in struct attribute_spec.handler. */
2657 static tree
2658 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2659 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2661 if (TREE_CODE (*node) != FUNCTION_DECL)
2663 warning ("%qs attribute only applies to functions",
2664 IDENTIFIER_POINTER (name));
2665 *no_add_attrs = true;
2668 return NULL_TREE;
2671 /* Handle an "interrupt" or "isr" attribute;
2672 arguments as in struct attribute_spec.handler. */
2673 static tree
2674 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2675 bool *no_add_attrs)
2677 if (DECL_P (*node))
2679 if (TREE_CODE (*node) != FUNCTION_DECL)
2681 warning ("%qs attribute only applies to functions",
2682 IDENTIFIER_POINTER (name));
2683 *no_add_attrs = true;
2685 /* FIXME: the argument if any is checked for type attributes;
2686 should it be checked for decl ones? */
2688 else
2690 if (TREE_CODE (*node) == FUNCTION_TYPE
2691 || TREE_CODE (*node) == METHOD_TYPE)
2693 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2695 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2696 *no_add_attrs = true;
2699 else if (TREE_CODE (*node) == POINTER_TYPE
2700 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2701 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2702 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2704 *node = build_variant_type_copy (*node);
2705 TREE_TYPE (*node) = build_type_attribute_variant
2706 (TREE_TYPE (*node),
2707 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2708 *no_add_attrs = true;
2710 else
2712 /* Possibly pass this attribute on from the type to a decl. */
2713 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2714 | (int) ATTR_FLAG_FUNCTION_NEXT
2715 | (int) ATTR_FLAG_ARRAY_NEXT))
2717 *no_add_attrs = true;
2718 return tree_cons (name, args, NULL_TREE);
2720 else
2722 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2727 return NULL_TREE;
2730 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2731 /* Handle the "notshared" attribute. This attribute is another way of
2732 requesting hidden visibility. ARM's compiler supports
2733 "__declspec(notshared)"; we support the same thing via an
2734 attribute. */
2736 static tree
2737 arm_handle_notshared_attribute (tree *node,
2738 tree name ATTRIBUTE_UNUSED,
2739 tree args ATTRIBUTE_UNUSED,
2740 int flags ATTRIBUTE_UNUSED,
2741 bool *no_add_attrs)
2743 tree decl = TYPE_NAME (*node);
2745 if (decl)
2747 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2748 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2749 *no_add_attrs = false;
2751 return NULL_TREE;
2753 #endif
2755 /* Return 0 if the attributes for two types are incompatible, 1 if they
2756 are compatible, and 2 if they are nearly compatible (which causes a
2757 warning to be generated). */
2758 static int
2759 arm_comp_type_attributes (tree type1, tree type2)
2761 int l1, l2, s1, s2;
2763 /* Check for mismatch of non-default calling convention. */
2764 if (TREE_CODE (type1) != FUNCTION_TYPE)
2765 return 1;
2767 /* Check for mismatched call attributes. */
2768 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2769 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2770 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2771 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2773 /* Only bother to check if an attribute is defined. */
2774 if (l1 | l2 | s1 | s2)
2776 /* If one type has an attribute, the other must have the same attribute. */
2777 if ((l1 != l2) || (s1 != s2))
2778 return 0;
2780 /* Disallow mixed attributes. */
2781 if ((l1 & s2) || (l2 & s1))
2782 return 0;
2785 /* Check for mismatched ISR attribute. */
2786 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2787 if (! l1)
2788 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2789 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2790 if (! l2)
2791 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2792 if (l1 != l2)
2793 return 0;
2795 return 1;
2798 /* Encode long_call or short_call attribute by prefixing
2799 symbol name in DECL with a special character FLAG. */
2800 void
2801 arm_encode_call_attribute (tree decl, int flag)
2803 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2804 int len = strlen (str);
2805 char * newstr;
2807 /* Do not allow weak functions to be treated as short call. */
2808 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2809 return;
2811 newstr = alloca (len + 2);
2812 newstr[0] = flag;
2813 strcpy (newstr + 1, str);
2815 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2816 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2819 /* Assigns default attributes to newly defined type. This is used to
2820 set short_call/long_call attributes for function types of
2821 functions defined inside corresponding #pragma scopes. */
2822 static void
2823 arm_set_default_type_attributes (tree type)
2825 /* Add __attribute__ ((long_call)) to all functions, when
2826 inside #pragma long_calls or __attribute__ ((short_call)),
2827 when inside #pragma no_long_calls. */
2828 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2830 tree type_attr_list, attr_name;
2831 type_attr_list = TYPE_ATTRIBUTES (type);
2833 if (arm_pragma_long_calls == LONG)
2834 attr_name = get_identifier ("long_call");
2835 else if (arm_pragma_long_calls == SHORT)
2836 attr_name = get_identifier ("short_call");
2837 else
2838 return;
2840 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2841 TYPE_ATTRIBUTES (type) = type_attr_list;
2845 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2846 defined within the current compilation unit. If this cannot be
2847 determined, then 0 is returned. */
2848 static int
2849 current_file_function_operand (rtx sym_ref)
2851 /* This is a bit of a fib. A function will have a short call flag
2852 applied to its name if it has the short call attribute, or it has
2853 already been defined within the current compilation unit. */
2854 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2855 return 1;
2857 /* The current function is always defined within the current compilation
2858 unit. If it s a weak definition however, then this may not be the real
2859 definition of the function, and so we have to say no. */
2860 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2861 && !DECL_WEAK (current_function_decl))
2862 return 1;
2864 /* We cannot make the determination - default to returning 0. */
2865 return 0;
2868 /* Return nonzero if a 32 bit "long_call" should be generated for
2869 this call. We generate a long_call if the function:
2871 a. has an __attribute__((long call))
2872 or b. is within the scope of a #pragma long_calls
2873 or c. the -mlong-calls command line switch has been specified
2874 . and either:
2875 1. -ffunction-sections is in effect
2876 or 2. the current function has __attribute__ ((section))
2877 or 3. the target function has __attribute__ ((section))
2879 However we do not generate a long call if the function:
2881 d. has an __attribute__ ((short_call))
2882 or e. is inside the scope of a #pragma no_long_calls
2883 or f. is defined within the current compilation unit.
2885 This function will be called by C fragments contained in the machine
2886 description file. SYM_REF and CALL_COOKIE correspond to the matched
2887 rtl operands. CALL_SYMBOL is used to distinguish between
2888 two different callers of the function. It is set to 1 in the
2889 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2890 and "call_value" patterns. This is because of the difference in the
2891 SYM_REFs passed by these patterns. */
2893 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2895 if (!call_symbol)
2897 if (GET_CODE (sym_ref) != MEM)
2898 return 0;
2900 sym_ref = XEXP (sym_ref, 0);
2903 if (GET_CODE (sym_ref) != SYMBOL_REF)
2904 return 0;
2906 if (call_cookie & CALL_SHORT)
2907 return 0;
2909 if (TARGET_LONG_CALLS)
2911 if (flag_function_sections
2912 || DECL_SECTION_NAME (current_function_decl))
2913 /* c.3 is handled by the definition of the
2914 ARM_DECLARE_FUNCTION_SIZE macro. */
2915 return 1;
2918 if (current_file_function_operand (sym_ref))
2919 return 0;
2921 return (call_cookie & CALL_LONG)
2922 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2923 || TARGET_LONG_CALLS;
2926 /* Return nonzero if it is ok to make a tail-call to DECL. */
2927 static bool
2928 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2930 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2932 if (cfun->machine->sibcall_blocked)
2933 return false;
2935 /* Never tailcall something for which we have no decl, or if we
2936 are in Thumb mode. */
2937 if (decl == NULL || TARGET_THUMB)
2938 return false;
2940 /* Get the calling method. */
2941 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2942 call_type = CALL_SHORT;
2943 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2944 call_type = CALL_LONG;
2946 /* Cannot tail-call to long calls, since these are out of range of
2947 a branch instruction. However, if not compiling PIC, we know
2948 we can reach the symbol if it is in this compilation unit. */
2949 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2950 return false;
2952 /* If we are interworking and the function is not declared static
2953 then we can't tail-call it unless we know that it exists in this
2954 compilation unit (since it might be a Thumb routine). */
2955 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2956 return false;
2958 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2959 if (IS_INTERRUPT (arm_current_func_type ()))
2960 return false;
2962 /* Everything else is ok. */
2963 return true;
2967 /* Addressing mode support functions. */
2969 /* Return nonzero if X is a legitimate immediate operand when compiling
2970 for PIC. */
2972 legitimate_pic_operand_p (rtx x)
2974 if (CONSTANT_P (x)
2975 && flag_pic
2976 && (GET_CODE (x) == SYMBOL_REF
2977 || (GET_CODE (x) == CONST
2978 && GET_CODE (XEXP (x, 0)) == PLUS
2979 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2980 return 0;
2982 return 1;
2986 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2988 if (GET_CODE (orig) == SYMBOL_REF
2989 || GET_CODE (orig) == LABEL_REF)
2991 #ifndef AOF_ASSEMBLER
2992 rtx pic_ref, address;
2993 #endif
2994 rtx insn;
2995 int subregs = 0;
2997 if (reg == 0)
2999 if (no_new_pseudos)
3000 abort ();
3001 else
3002 reg = gen_reg_rtx (Pmode);
3004 subregs = 1;
3007 #ifdef AOF_ASSEMBLER
3008 /* The AOF assembler can generate relocations for these directly, and
3009 understands that the PIC register has to be added into the offset. */
3010 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3011 #else
3012 if (subregs)
3013 address = gen_reg_rtx (Pmode);
3014 else
3015 address = reg;
3017 if (TARGET_ARM)
3018 emit_insn (gen_pic_load_addr_arm (address, orig));
3019 else
3020 emit_insn (gen_pic_load_addr_thumb (address, orig));
3022 if ((GET_CODE (orig) == LABEL_REF
3023 || (GET_CODE (orig) == SYMBOL_REF &&
3024 SYMBOL_REF_LOCAL_P (orig)))
3025 && NEED_GOT_RELOC)
3026 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3027 else
3029 pic_ref = gen_const_mem (Pmode,
3030 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3031 address));
3034 insn = emit_move_insn (reg, pic_ref);
3035 #endif
3036 current_function_uses_pic_offset_table = 1;
3037 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3038 by loop. */
3039 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3040 REG_NOTES (insn));
3041 return reg;
3043 else if (GET_CODE (orig) == CONST)
3045 rtx base, offset;
3047 if (GET_CODE (XEXP (orig, 0)) == PLUS
3048 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3049 return orig;
3051 if (reg == 0)
3053 if (no_new_pseudos)
3054 abort ();
3055 else
3056 reg = gen_reg_rtx (Pmode);
3059 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3061 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3062 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3063 base == reg ? 0 : reg);
3065 else
3066 abort ();
3068 if (GET_CODE (offset) == CONST_INT)
3070 /* The base register doesn't really matter, we only want to
3071 test the index for the appropriate mode. */
3072 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3074 if (!no_new_pseudos)
3075 offset = force_reg (Pmode, offset);
3076 else
3077 abort ();
3080 if (GET_CODE (offset) == CONST_INT)
3081 return plus_constant (base, INTVAL (offset));
3084 if (GET_MODE_SIZE (mode) > 4
3085 && (GET_MODE_CLASS (mode) == MODE_INT
3086 || TARGET_SOFT_FLOAT))
3088 emit_insn (gen_addsi3 (reg, base, offset));
3089 return reg;
3092 return gen_rtx_PLUS (Pmode, base, offset);
3095 return orig;
3099 /* Find a spare low register to use during the prolog of a function. */
3101 static int
3102 thumb_find_work_register (unsigned long pushed_regs_mask)
3104 int reg;
3106 /* Check the argument registers first as these are call-used. The
3107 register allocation order means that sometimes r3 might be used
3108 but earlier argument registers might not, so check them all. */
3109 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3110 if (!regs_ever_live[reg])
3111 return reg;
3113 /* Before going on to check the call-saved registers we can try a couple
3114 more ways of deducing that r3 is available. The first is when we are
3115 pushing anonymous arguments onto the stack and we have less than 4
3116 registers worth of fixed arguments(*). In this case r3 will be part of
3117 the variable argument list and so we can be sure that it will be
3118 pushed right at the start of the function. Hence it will be available
3119 for the rest of the prologue.
3120 (*): ie current_function_pretend_args_size is greater than 0. */
3121 if (cfun->machine->uses_anonymous_args
3122 && current_function_pretend_args_size > 0)
3123 return LAST_ARG_REGNUM;
3125 /* The other case is when we have fixed arguments but less than 4 registers
3126 worth. In this case r3 might be used in the body of the function, but
3127 it is not being used to convey an argument into the function. In theory
3128 we could just check current_function_args_size to see how many bytes are
3129 being passed in argument registers, but it seems that it is unreliable.
3130 Sometimes it will have the value 0 when in fact arguments are being
3131 passed. (See testcase execute/20021111-1.c for an example). So we also
3132 check the args_info.nregs field as well. The problem with this field is
3133 that it makes no allowances for arguments that are passed to the
3134 function but which are not used. Hence we could miss an opportunity
3135 when a function has an unused argument in r3. But it is better to be
3136 safe than to be sorry. */
3137 if (! cfun->machine->uses_anonymous_args
3138 && current_function_args_size >= 0
3139 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3140 && cfun->args_info.nregs < 4)
3141 return LAST_ARG_REGNUM;
3143 /* Otherwise look for a call-saved register that is going to be pushed. */
3144 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3145 if (pushed_regs_mask & (1 << reg))
3146 return reg;
3148 /* Something went wrong - thumb_compute_save_reg_mask()
3149 should have arranged for a suitable register to be pushed. */
3150 abort ();
3154 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3155 low register. */
3157 void
3158 arm_load_pic_register (unsigned int scratch)
3160 #ifndef AOF_ASSEMBLER
3161 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3162 rtx global_offset_table;
3164 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3165 return;
3167 if (!flag_pic)
3168 abort ();
3170 l1 = gen_label_rtx ();
3172 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3173 /* On the ARM the PC register contains 'dot + 8' at the time of the
3174 addition, on the Thumb it is 'dot + 4'. */
3175 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3176 if (GOT_PCREL)
3177 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3178 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3179 else
3180 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3182 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3184 if (TARGET_ARM)
3186 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3187 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3189 else
3191 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3193 /* We will have pushed the pic register, so should always be
3194 able to find a work register. */
3195 pic_tmp = gen_rtx_REG (SImode, scratch);
3196 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3197 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3199 else
3200 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3201 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3204 /* Need to emit this whether or not we obey regdecls,
3205 since setjmp/longjmp can cause life info to screw up. */
3206 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3207 #endif /* AOF_ASSEMBLER */
3211 /* Return nonzero if X is valid as an ARM state addressing register. */
3212 static int
3213 arm_address_register_rtx_p (rtx x, int strict_p)
3215 int regno;
3217 if (GET_CODE (x) != REG)
3218 return 0;
3220 regno = REGNO (x);
3222 if (strict_p)
3223 return ARM_REGNO_OK_FOR_BASE_P (regno);
3225 return (regno <= LAST_ARM_REGNUM
3226 || regno >= FIRST_PSEUDO_REGISTER
3227 || regno == FRAME_POINTER_REGNUM
3228 || regno == ARG_POINTER_REGNUM);
3231 /* Return nonzero if X is a valid ARM state address operand. */
3233 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3234 int strict_p)
3236 bool use_ldrd;
3237 enum rtx_code code = GET_CODE (x);
3239 if (arm_address_register_rtx_p (x, strict_p))
3240 return 1;
3242 use_ldrd = (TARGET_LDRD
3243 && (mode == DImode
3244 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3246 if (code == POST_INC || code == PRE_DEC
3247 || ((code == PRE_INC || code == POST_DEC)
3248 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3249 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3251 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3252 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3253 && GET_CODE (XEXP (x, 1)) == PLUS
3254 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3256 rtx addend = XEXP (XEXP (x, 1), 1);
3258 /* Don't allow ldrd post increment by register because it's hard
3259 to fixup invalid register choices. */
3260 if (use_ldrd
3261 && GET_CODE (x) == POST_MODIFY
3262 && GET_CODE (addend) == REG)
3263 return 0;
3265 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3266 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3269 /* After reload constants split into minipools will have addresses
3270 from a LABEL_REF. */
3271 else if (reload_completed
3272 && (code == LABEL_REF
3273 || (code == CONST
3274 && GET_CODE (XEXP (x, 0)) == PLUS
3275 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3276 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3277 return 1;
3279 else if (mode == TImode)
3280 return 0;
3282 else if (code == PLUS)
3284 rtx xop0 = XEXP (x, 0);
3285 rtx xop1 = XEXP (x, 1);
3287 return ((arm_address_register_rtx_p (xop0, strict_p)
3288 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3289 || (arm_address_register_rtx_p (xop1, strict_p)
3290 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3293 #if 0
3294 /* Reload currently can't handle MINUS, so disable this for now */
3295 else if (GET_CODE (x) == MINUS)
3297 rtx xop0 = XEXP (x, 0);
3298 rtx xop1 = XEXP (x, 1);
3300 return (arm_address_register_rtx_p (xop0, strict_p)
3301 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3303 #endif
3305 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3306 && code == SYMBOL_REF
3307 && CONSTANT_POOL_ADDRESS_P (x)
3308 && ! (flag_pic
3309 && symbol_mentioned_p (get_pool_constant (x))))
3310 return 1;
3312 return 0;
3315 /* Return nonzero if INDEX is valid for an address index operand in
3316 ARM state. */
3317 static int
3318 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3319 int strict_p)
3321 HOST_WIDE_INT range;
3322 enum rtx_code code = GET_CODE (index);
3324 /* Standard coprocessor addressing modes. */
3325 if (TARGET_HARD_FLOAT
3326 && (TARGET_FPA || TARGET_MAVERICK)
3327 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3328 || (TARGET_MAVERICK && mode == DImode)))
3329 return (code == CONST_INT && INTVAL (index) < 1024
3330 && INTVAL (index) > -1024
3331 && (INTVAL (index) & 3) == 0);
3333 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3334 return (code == CONST_INT
3335 && INTVAL (index) < 1024
3336 && INTVAL (index) > -1024
3337 && (INTVAL (index) & 3) == 0);
3339 if (arm_address_register_rtx_p (index, strict_p)
3340 && (GET_MODE_SIZE (mode) <= 4))
3341 return 1;
3343 if (mode == DImode || mode == DFmode)
3345 if (code == CONST_INT)
3347 HOST_WIDE_INT val = INTVAL (index);
3349 if (TARGET_LDRD)
3350 return val > -256 && val < 256;
3351 else
3352 return val > -4096 && val < 4092;
3355 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3358 if (GET_MODE_SIZE (mode) <= 4
3359 && ! (arm_arch4
3360 && (mode == HImode
3361 || (mode == QImode && outer == SIGN_EXTEND))))
3363 if (code == MULT)
3365 rtx xiop0 = XEXP (index, 0);
3366 rtx xiop1 = XEXP (index, 1);
3368 return ((arm_address_register_rtx_p (xiop0, strict_p)
3369 && power_of_two_operand (xiop1, SImode))
3370 || (arm_address_register_rtx_p (xiop1, strict_p)
3371 && power_of_two_operand (xiop0, SImode)));
3373 else if (code == LSHIFTRT || code == ASHIFTRT
3374 || code == ASHIFT || code == ROTATERT)
3376 rtx op = XEXP (index, 1);
3378 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3379 && GET_CODE (op) == CONST_INT
3380 && INTVAL (op) > 0
3381 && INTVAL (op) <= 31);
3385 /* For ARM v4 we may be doing a sign-extend operation during the
3386 load. */
3387 if (arm_arch4)
3389 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3390 range = 256;
3391 else
3392 range = 4096;
3394 else
3395 range = (mode == HImode) ? 4095 : 4096;
3397 return (code == CONST_INT
3398 && INTVAL (index) < range
3399 && INTVAL (index) > -range);
3402 /* Return nonzero if X is valid as a Thumb state base register. */
3403 static int
3404 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3406 int regno;
3408 if (GET_CODE (x) != REG)
3409 return 0;
3411 regno = REGNO (x);
3413 if (strict_p)
3414 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3416 return (regno <= LAST_LO_REGNUM
3417 || regno > LAST_VIRTUAL_REGISTER
3418 || regno == FRAME_POINTER_REGNUM
3419 || (GET_MODE_SIZE (mode) >= 4
3420 && (regno == STACK_POINTER_REGNUM
3421 || regno >= FIRST_PSEUDO_REGISTER
3422 || x == hard_frame_pointer_rtx
3423 || x == arg_pointer_rtx)));
3426 /* Return nonzero if x is a legitimate index register. This is the case
3427 for any base register that can access a QImode object. */
3428 inline static int
3429 thumb_index_register_rtx_p (rtx x, int strict_p)
3431 return thumb_base_register_rtx_p (x, QImode, strict_p);
3434 /* Return nonzero if x is a legitimate Thumb-state address.
3436 The AP may be eliminated to either the SP or the FP, so we use the
3437 least common denominator, e.g. SImode, and offsets from 0 to 64.
3439 ??? Verify whether the above is the right approach.
3441 ??? Also, the FP may be eliminated to the SP, so perhaps that
3442 needs special handling also.
3444 ??? Look at how the mips16 port solves this problem. It probably uses
3445 better ways to solve some of these problems.
3447 Although it is not incorrect, we don't accept QImode and HImode
3448 addresses based on the frame pointer or arg pointer until the
3449 reload pass starts. This is so that eliminating such addresses
3450 into stack based ones won't produce impossible code. */
3452 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3454 /* ??? Not clear if this is right. Experiment. */
3455 if (GET_MODE_SIZE (mode) < 4
3456 && !(reload_in_progress || reload_completed)
3457 && (reg_mentioned_p (frame_pointer_rtx, x)
3458 || reg_mentioned_p (arg_pointer_rtx, x)
3459 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3460 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3461 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3462 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3463 return 0;
3465 /* Accept any base register. SP only in SImode or larger. */
3466 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3467 return 1;
3469 /* This is PC relative data before arm_reorg runs. */
3470 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3471 && GET_CODE (x) == SYMBOL_REF
3472 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3473 return 1;
3475 /* This is PC relative data after arm_reorg runs. */
3476 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3477 && (GET_CODE (x) == LABEL_REF
3478 || (GET_CODE (x) == CONST
3479 && GET_CODE (XEXP (x, 0)) == PLUS
3480 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3481 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3482 return 1;
3484 /* Post-inc indexing only supported for SImode and larger. */
3485 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3486 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3487 return 1;
3489 else if (GET_CODE (x) == PLUS)
3491 /* REG+REG address can be any two index registers. */
3492 /* We disallow FRAME+REG addressing since we know that FRAME
3493 will be replaced with STACK, and SP relative addressing only
3494 permits SP+OFFSET. */
3495 if (GET_MODE_SIZE (mode) <= 4
3496 && XEXP (x, 0) != frame_pointer_rtx
3497 && XEXP (x, 1) != frame_pointer_rtx
3498 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3499 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3500 return 1;
3502 /* REG+const has 5-7 bit offset for non-SP registers. */
3503 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3504 || XEXP (x, 0) == arg_pointer_rtx)
3505 && GET_CODE (XEXP (x, 1)) == CONST_INT
3506 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3507 return 1;
3509 /* REG+const has 10 bit offset for SP, but only SImode and
3510 larger is supported. */
3511 /* ??? Should probably check for DI/DFmode overflow here
3512 just like GO_IF_LEGITIMATE_OFFSET does. */
3513 else if (GET_CODE (XEXP (x, 0)) == REG
3514 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3515 && GET_MODE_SIZE (mode) >= 4
3516 && GET_CODE (XEXP (x, 1)) == CONST_INT
3517 && INTVAL (XEXP (x, 1)) >= 0
3518 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3519 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3520 return 1;
3522 else if (GET_CODE (XEXP (x, 0)) == REG
3523 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3524 && GET_MODE_SIZE (mode) >= 4
3525 && GET_CODE (XEXP (x, 1)) == CONST_INT
3526 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3527 return 1;
3530 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3531 && GET_MODE_SIZE (mode) == 4
3532 && GET_CODE (x) == SYMBOL_REF
3533 && CONSTANT_POOL_ADDRESS_P (x)
3534 && !(flag_pic
3535 && symbol_mentioned_p (get_pool_constant (x))))
3536 return 1;
3538 return 0;
3541 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3542 instruction of mode MODE. */
3544 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3546 switch (GET_MODE_SIZE (mode))
3548 case 1:
3549 return val >= 0 && val < 32;
3551 case 2:
3552 return val >= 0 && val < 64 && (val & 1) == 0;
3554 default:
3555 return (val >= 0
3556 && (val + GET_MODE_SIZE (mode)) <= 128
3557 && (val & 3) == 0);
3561 /* Try machine-dependent ways of modifying an illegitimate address
3562 to be legitimate. If we find one, return the new, valid address. */
3564 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3566 if (GET_CODE (x) == PLUS)
3568 rtx xop0 = XEXP (x, 0);
3569 rtx xop1 = XEXP (x, 1);
3571 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3572 xop0 = force_reg (SImode, xop0);
3574 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3575 xop1 = force_reg (SImode, xop1);
3577 if (ARM_BASE_REGISTER_RTX_P (xop0)
3578 && GET_CODE (xop1) == CONST_INT)
3580 HOST_WIDE_INT n, low_n;
3581 rtx base_reg, val;
3582 n = INTVAL (xop1);
3584 /* VFP addressing modes actually allow greater offsets, but for
3585 now we just stick with the lowest common denominator. */
3586 if (mode == DImode
3587 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3589 low_n = n & 0x0f;
3590 n &= ~0x0f;
3591 if (low_n > 4)
3593 n += 16;
3594 low_n -= 16;
3597 else
3599 low_n = ((mode) == TImode ? 0
3600 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3601 n -= low_n;
3604 base_reg = gen_reg_rtx (SImode);
3605 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3606 GEN_INT (n)), NULL_RTX);
3607 emit_move_insn (base_reg, val);
3608 x = (low_n == 0 ? base_reg
3609 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3611 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3612 x = gen_rtx_PLUS (SImode, xop0, xop1);
3615 /* XXX We don't allow MINUS any more -- see comment in
3616 arm_legitimate_address_p (). */
3617 else if (GET_CODE (x) == MINUS)
3619 rtx xop0 = XEXP (x, 0);
3620 rtx xop1 = XEXP (x, 1);
3622 if (CONSTANT_P (xop0))
3623 xop0 = force_reg (SImode, xop0);
3625 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3626 xop1 = force_reg (SImode, xop1);
3628 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3629 x = gen_rtx_MINUS (SImode, xop0, xop1);
3632 if (flag_pic)
3634 /* We need to find and carefully transform any SYMBOL and LABEL
3635 references; so go back to the original address expression. */
3636 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3638 if (new_x != orig_x)
3639 x = new_x;
3642 return x;
3646 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3647 to be legitimate. If we find one, return the new, valid address. */
3649 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3651 if (GET_CODE (x) == PLUS
3652 && GET_CODE (XEXP (x, 1)) == CONST_INT
3653 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3654 || INTVAL (XEXP (x, 1)) < 0))
3656 rtx xop0 = XEXP (x, 0);
3657 rtx xop1 = XEXP (x, 1);
3658 HOST_WIDE_INT offset = INTVAL (xop1);
3660 /* Try and fold the offset into a biasing of the base register and
3661 then offsetting that. Don't do this when optimizing for space
3662 since it can cause too many CSEs. */
3663 if (optimize_size && offset >= 0
3664 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3666 HOST_WIDE_INT delta;
3668 if (offset >= 256)
3669 delta = offset - (256 - GET_MODE_SIZE (mode));
3670 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3671 delta = 31 * GET_MODE_SIZE (mode);
3672 else
3673 delta = offset & (~31 * GET_MODE_SIZE (mode));
3675 xop0 = force_operand (plus_constant (xop0, offset - delta),
3676 NULL_RTX);
3677 x = plus_constant (xop0, delta);
3679 else if (offset < 0 && offset > -256)
3680 /* Small negative offsets are best done with a subtract before the
3681 dereference, forcing these into a register normally takes two
3682 instructions. */
3683 x = force_operand (x, NULL_RTX);
3684 else
3686 /* For the remaining cases, force the constant into a register. */
3687 xop1 = force_reg (SImode, xop1);
3688 x = gen_rtx_PLUS (SImode, xop0, xop1);
3691 else if (GET_CODE (x) == PLUS
3692 && s_register_operand (XEXP (x, 1), SImode)
3693 && !s_register_operand (XEXP (x, 0), SImode))
3695 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3697 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3700 if (flag_pic)
3702 /* We need to find and carefully transform any SYMBOL and LABEL
3703 references; so go back to the original address expression. */
3704 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3706 if (new_x != orig_x)
3707 x = new_x;
3710 return x;
3715 #define REG_OR_SUBREG_REG(X) \
3716 (GET_CODE (X) == REG \
3717 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3719 #define REG_OR_SUBREG_RTX(X) \
3720 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3722 #ifndef COSTS_N_INSNS
3723 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3724 #endif
3725 static inline int
3726 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3728 enum machine_mode mode = GET_MODE (x);
3730 switch (code)
3732 case ASHIFT:
3733 case ASHIFTRT:
3734 case LSHIFTRT:
3735 case ROTATERT:
3736 case PLUS:
3737 case MINUS:
3738 case COMPARE:
3739 case NEG:
3740 case NOT:
3741 return COSTS_N_INSNS (1);
3743 case MULT:
3744 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3746 int cycles = 0;
3747 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3749 while (i)
3751 i >>= 2;
3752 cycles++;
3754 return COSTS_N_INSNS (2) + cycles;
3756 return COSTS_N_INSNS (1) + 16;
3758 case SET:
3759 return (COSTS_N_INSNS (1)
3760 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3761 + GET_CODE (SET_DEST (x)) == MEM));
3763 case CONST_INT:
3764 if (outer == SET)
3766 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3767 return 0;
3768 if (thumb_shiftable_const (INTVAL (x)))
3769 return COSTS_N_INSNS (2);
3770 return COSTS_N_INSNS (3);
3772 else if ((outer == PLUS || outer == COMPARE)
3773 && INTVAL (x) < 256 && INTVAL (x) > -256)
3774 return 0;
3775 else if (outer == AND
3776 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3777 return COSTS_N_INSNS (1);
3778 else if (outer == ASHIFT || outer == ASHIFTRT
3779 || outer == LSHIFTRT)
3780 return 0;
3781 return COSTS_N_INSNS (2);
3783 case CONST:
3784 case CONST_DOUBLE:
3785 case LABEL_REF:
3786 case SYMBOL_REF:
3787 return COSTS_N_INSNS (3);
3789 case UDIV:
3790 case UMOD:
3791 case DIV:
3792 case MOD:
3793 return 100;
3795 case TRUNCATE:
3796 return 99;
3798 case AND:
3799 case XOR:
3800 case IOR:
3801 /* XXX guess. */
3802 return 8;
3804 case MEM:
3805 /* XXX another guess. */
3806 /* Memory costs quite a lot for the first word, but subsequent words
3807 load at the equivalent of a single insn each. */
3808 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3809 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3810 ? 4 : 0));
3812 case IF_THEN_ELSE:
3813 /* XXX a guess. */
3814 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3815 return 14;
3816 return 2;
3818 case ZERO_EXTEND:
3819 /* XXX still guessing. */
3820 switch (GET_MODE (XEXP (x, 0)))
3822 case QImode:
3823 return (1 + (mode == DImode ? 4 : 0)
3824 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3826 case HImode:
3827 return (4 + (mode == DImode ? 4 : 0)
3828 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3830 case SImode:
3831 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3833 default:
3834 return 99;
3837 default:
3838 return 99;
3843 /* Worker routine for arm_rtx_costs. */
3844 static inline int
3845 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3847 enum machine_mode mode = GET_MODE (x);
3848 enum rtx_code subcode;
3849 int extra_cost;
3851 switch (code)
3853 case MEM:
3854 /* Memory costs quite a lot for the first word, but subsequent words
3855 load at the equivalent of a single insn each. */
3856 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3857 + (GET_CODE (x) == SYMBOL_REF
3858 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3860 case DIV:
3861 case MOD:
3862 case UDIV:
3863 case UMOD:
3864 return optimize_size ? COSTS_N_INSNS (2) : 100;
3866 case ROTATE:
3867 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3868 return 4;
3869 /* Fall through */
3870 case ROTATERT:
3871 if (mode != SImode)
3872 return 8;
3873 /* Fall through */
3874 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3875 if (mode == DImode)
3876 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3877 + ((GET_CODE (XEXP (x, 0)) == REG
3878 || (GET_CODE (XEXP (x, 0)) == SUBREG
3879 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3880 ? 0 : 8));
3881 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3882 || (GET_CODE (XEXP (x, 0)) == SUBREG
3883 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3884 ? 0 : 4)
3885 + ((GET_CODE (XEXP (x, 1)) == REG
3886 || (GET_CODE (XEXP (x, 1)) == SUBREG
3887 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3888 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3889 ? 0 : 4));
3891 case MINUS:
3892 if (mode == DImode)
3893 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3894 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3895 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3896 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3897 ? 0 : 8));
3899 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3900 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3901 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3902 && arm_const_double_rtx (XEXP (x, 1))))
3903 ? 0 : 8)
3904 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3905 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3906 && arm_const_double_rtx (XEXP (x, 0))))
3907 ? 0 : 8));
3909 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3910 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3911 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3912 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3913 || subcode == ASHIFTRT || subcode == LSHIFTRT
3914 || subcode == ROTATE || subcode == ROTATERT
3915 || (subcode == MULT
3916 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3917 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3918 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3919 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3920 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3921 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3922 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3923 return 1;
3924 /* Fall through */
3926 case PLUS:
3927 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3928 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3929 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3930 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3931 && arm_const_double_rtx (XEXP (x, 1))))
3932 ? 0 : 8));
3934 /* Fall through */
3935 case AND: case XOR: case IOR:
3936 extra_cost = 0;
3938 /* Normally the frame registers will be spilt into reg+const during
3939 reload, so it is a bad idea to combine them with other instructions,
3940 since then they might not be moved outside of loops. As a compromise
3941 we allow integration with ops that have a constant as their second
3942 operand. */
3943 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3944 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3945 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3946 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3947 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3948 extra_cost = 4;
3950 if (mode == DImode)
3951 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3952 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3953 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3954 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3955 ? 0 : 8));
3957 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3958 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3959 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3960 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3961 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3962 ? 0 : 4));
3964 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3965 return (1 + extra_cost
3966 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3967 || subcode == LSHIFTRT || subcode == ASHIFTRT
3968 || subcode == ROTATE || subcode == ROTATERT
3969 || (subcode == MULT
3970 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3971 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3972 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3973 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3974 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3975 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3976 ? 0 : 4));
3978 return 8;
3980 case MULT:
3981 /* This should have been handled by the CPU specific routines. */
3982 abort ();
3984 case TRUNCATE:
3985 if (arm_arch3m && mode == SImode
3986 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3987 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3988 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3989 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3990 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3991 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3992 return 8;
3993 return 99;
3995 case NEG:
3996 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3997 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3998 /* Fall through */
3999 case NOT:
4000 if (mode == DImode)
4001 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4003 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4005 case IF_THEN_ELSE:
4006 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4007 return 14;
4008 return 2;
4010 case COMPARE:
4011 return 1;
4013 case ABS:
4014 return 4 + (mode == DImode ? 4 : 0);
4016 case SIGN_EXTEND:
4017 if (GET_MODE (XEXP (x, 0)) == QImode)
4018 return (4 + (mode == DImode ? 4 : 0)
4019 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4020 /* Fall through */
4021 case ZERO_EXTEND:
4022 switch (GET_MODE (XEXP (x, 0)))
4024 case QImode:
4025 return (1 + (mode == DImode ? 4 : 0)
4026 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4028 case HImode:
4029 return (4 + (mode == DImode ? 4 : 0)
4030 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4032 case SImode:
4033 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4035 case V8QImode:
4036 case V4HImode:
4037 case V2SImode:
4038 case V4QImode:
4039 case V2HImode:
4040 return 1;
4042 default:
4043 break;
4045 abort ();
4047 case CONST_INT:
4048 if (const_ok_for_arm (INTVAL (x)))
4049 return outer == SET ? 2 : -1;
4050 else if (outer == AND
4051 && const_ok_for_arm (~INTVAL (x)))
4052 return -1;
4053 else if ((outer == COMPARE
4054 || outer == PLUS || outer == MINUS)
4055 && const_ok_for_arm (-INTVAL (x)))
4056 return -1;
4057 else
4058 return 5;
4060 case CONST:
4061 case LABEL_REF:
4062 case SYMBOL_REF:
4063 return 6;
4065 case CONST_DOUBLE:
4066 if (arm_const_double_rtx (x))
4067 return outer == SET ? 2 : -1;
4068 else if ((outer == COMPARE || outer == PLUS)
4069 && neg_const_double_rtx_ok_for_fpa (x))
4070 return -1;
4071 return 7;
4073 default:
4074 return 99;
4078 /* RTX costs when optimizing for size. */
4079 static bool
4080 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4082 enum machine_mode mode = GET_MODE (x);
4084 if (TARGET_THUMB)
4086 /* XXX TBD. For now, use the standard costs. */
4087 *total = thumb_rtx_costs (x, code, outer_code);
4088 return true;
4091 switch (code)
4093 case MEM:
4094 /* A memory access costs 1 insn if the mode is small, or the address is
4095 a single register, otherwise it costs one insn per word. */
4096 if (REG_P (XEXP (x, 0)))
4097 *total = COSTS_N_INSNS (1);
4098 else
4099 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4100 return true;
4102 case DIV:
4103 case MOD:
4104 case UDIV:
4105 case UMOD:
4106 /* Needs a libcall, so it costs about this. */
4107 *total = COSTS_N_INSNS (2);
4108 return false;
4110 case ROTATE:
4111 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4113 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4114 return true;
4116 /* Fall through */
4117 case ROTATERT:
4118 case ASHIFT:
4119 case LSHIFTRT:
4120 case ASHIFTRT:
4121 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4123 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4124 return true;
4126 else if (mode == SImode)
4128 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4129 /* Slightly disparage register shifts, but not by much. */
4130 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4131 *total += 1 + rtx_cost (XEXP (x, 1), code);
4132 return true;
4135 /* Needs a libcall. */
4136 *total = COSTS_N_INSNS (2);
4137 return false;
4139 case MINUS:
4140 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4142 *total = COSTS_N_INSNS (1);
4143 return false;
4146 if (mode == SImode)
4148 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4149 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4151 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4152 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4153 || subcode1 == ROTATE || subcode1 == ROTATERT
4154 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4155 || subcode1 == ASHIFTRT)
4157 /* It's just the cost of the two operands. */
4158 *total = 0;
4159 return false;
4162 *total = COSTS_N_INSNS (1);
4163 return false;
4166 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4167 return false;
4169 case PLUS:
4170 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4172 *total = COSTS_N_INSNS (1);
4173 return false;
4176 /* Fall through */
4177 case AND: case XOR: case IOR:
4178 if (mode == SImode)
4180 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4182 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4183 || subcode == LSHIFTRT || subcode == ASHIFTRT
4184 || (code == AND && subcode == NOT))
4186 /* It's just the cost of the two operands. */
4187 *total = 0;
4188 return false;
4192 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4193 return false;
4195 case MULT:
4196 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4197 return false;
4199 case NEG:
4200 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4201 *total = COSTS_N_INSNS (1);
4202 /* Fall through */
4203 case NOT:
4204 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4206 return false;
4208 case IF_THEN_ELSE:
4209 *total = 0;
4210 return false;
4212 case COMPARE:
4213 if (cc_register (XEXP (x, 0), VOIDmode))
4214 * total = 0;
4215 else
4216 *total = COSTS_N_INSNS (1);
4217 return false;
4219 case ABS:
4220 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4221 *total = COSTS_N_INSNS (1);
4222 else
4223 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4224 return false;
4226 case SIGN_EXTEND:
4227 *total = 0;
4228 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4230 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4231 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4233 if (mode == DImode)
4234 *total += COSTS_N_INSNS (1);
4235 return false;
4237 case ZERO_EXTEND:
4238 *total = 0;
4239 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4241 switch (GET_MODE (XEXP (x, 0)))
4243 case QImode:
4244 *total += COSTS_N_INSNS (1);
4245 break;
4247 case HImode:
4248 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4250 case SImode:
4251 break;
4253 default:
4254 *total += COSTS_N_INSNS (2);
4258 if (mode == DImode)
4259 *total += COSTS_N_INSNS (1);
4261 return false;
4263 case CONST_INT:
4264 if (const_ok_for_arm (INTVAL (x)))
4265 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4266 else if (const_ok_for_arm (~INTVAL (x)))
4267 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4268 else if (const_ok_for_arm (-INTVAL (x)))
4270 if (outer_code == COMPARE || outer_code == PLUS
4271 || outer_code == MINUS)
4272 *total = 0;
4273 else
4274 *total = COSTS_N_INSNS (1);
4276 else
4277 *total = COSTS_N_INSNS (2);
4278 return true;
4280 case CONST:
4281 case LABEL_REF:
4282 case SYMBOL_REF:
4283 *total = COSTS_N_INSNS (2);
4284 return true;
4286 case CONST_DOUBLE:
4287 *total = COSTS_N_INSNS (4);
4288 return true;
4290 default:
4291 if (mode != VOIDmode)
4292 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4293 else
4294 *total = COSTS_N_INSNS (4); /* How knows? */
4295 return false;
4299 /* RTX costs for cores with a slow MUL implementation. */
4301 static bool
4302 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4304 enum machine_mode mode = GET_MODE (x);
4306 if (TARGET_THUMB)
4308 *total = thumb_rtx_costs (x, code, outer_code);
4309 return true;
4312 switch (code)
4314 case MULT:
4315 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4316 || mode == DImode)
4318 *total = 30;
4319 return true;
4322 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4324 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4325 & (unsigned HOST_WIDE_INT) 0xffffffff);
4326 int cost, const_ok = const_ok_for_arm (i);
4327 int j, booth_unit_size;
4329 /* Tune as appropriate. */
4330 cost = const_ok ? 4 : 8;
4331 booth_unit_size = 2;
4332 for (j = 0; i && j < 32; j += booth_unit_size)
4334 i >>= booth_unit_size;
4335 cost += 2;
4338 *total = cost;
4339 return true;
4342 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4343 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4344 return true;
4346 default:
4347 *total = arm_rtx_costs_1 (x, code, outer_code);
4348 return true;
4353 /* RTX cost for cores with a fast multiply unit (M variants). */
4355 static bool
4356 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4358 enum machine_mode mode = GET_MODE (x);
4360 if (TARGET_THUMB)
4362 *total = thumb_rtx_costs (x, code, outer_code);
4363 return true;
4366 switch (code)
4368 case MULT:
4369 /* There is no point basing this on the tuning, since it is always the
4370 fast variant if it exists at all. */
4371 if (mode == DImode
4372 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4373 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4374 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4376 *total = 8;
4377 return true;
4381 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4382 || mode == DImode)
4384 *total = 30;
4385 return true;
4388 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4390 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4391 & (unsigned HOST_WIDE_INT) 0xffffffff);
4392 int cost, const_ok = const_ok_for_arm (i);
4393 int j, booth_unit_size;
4395 /* Tune as appropriate. */
4396 cost = const_ok ? 4 : 8;
4397 booth_unit_size = 8;
4398 for (j = 0; i && j < 32; j += booth_unit_size)
4400 i >>= booth_unit_size;
4401 cost += 2;
4404 *total = cost;
4405 return true;
4408 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4409 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4410 return true;
4412 default:
4413 *total = arm_rtx_costs_1 (x, code, outer_code);
4414 return true;
4419 /* RTX cost for XScale CPUs. */
4421 static bool
4422 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4424 enum machine_mode mode = GET_MODE (x);
4426 if (TARGET_THUMB)
4428 *total = thumb_rtx_costs (x, code, outer_code);
4429 return true;
4432 switch (code)
4434 case MULT:
4435 /* There is no point basing this on the tuning, since it is always the
4436 fast variant if it exists at all. */
4437 if (mode == DImode
4438 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4439 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4440 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4442 *total = 8;
4443 return true;
4447 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4448 || mode == DImode)
4450 *total = 30;
4451 return true;
4454 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4456 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4457 & (unsigned HOST_WIDE_INT) 0xffffffff);
4458 int cost, const_ok = const_ok_for_arm (i);
4459 unsigned HOST_WIDE_INT masked_const;
4461 /* The cost will be related to two insns.
4462 First a load of the constant (MOV or LDR), then a multiply. */
4463 cost = 2;
4464 if (! const_ok)
4465 cost += 1; /* LDR is probably more expensive because
4466 of longer result latency. */
4467 masked_const = i & 0xffff8000;
4468 if (masked_const != 0 && masked_const != 0xffff8000)
4470 masked_const = i & 0xf8000000;
4471 if (masked_const == 0 || masked_const == 0xf8000000)
4472 cost += 1;
4473 else
4474 cost += 2;
4476 *total = cost;
4477 return true;
4480 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4481 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4482 return true;
4484 case COMPARE:
4485 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4486 will stall until the multiplication is complete. */
4487 if (GET_CODE (XEXP (x, 0)) == MULT)
4488 *total = 4 + rtx_cost (XEXP (x, 0), code);
4489 else
4490 *total = arm_rtx_costs_1 (x, code, outer_code);
4491 return true;
4493 default:
4494 *total = arm_rtx_costs_1 (x, code, outer_code);
4495 return true;
4500 /* RTX costs for 9e (and later) cores. */
4502 static bool
4503 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4505 enum machine_mode mode = GET_MODE (x);
4506 int nonreg_cost;
4507 int cost;
4509 if (TARGET_THUMB)
4511 switch (code)
4513 case MULT:
4514 *total = COSTS_N_INSNS (3);
4515 return true;
4517 default:
4518 *total = thumb_rtx_costs (x, code, outer_code);
4519 return true;
4523 switch (code)
4525 case MULT:
4526 /* There is no point basing this on the tuning, since it is always the
4527 fast variant if it exists at all. */
4528 if (mode == DImode
4529 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4530 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4531 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4533 *total = 3;
4534 return true;
4538 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4540 *total = 30;
4541 return true;
4543 if (mode == DImode)
4545 cost = 7;
4546 nonreg_cost = 8;
4548 else
4550 cost = 2;
4551 nonreg_cost = 4;
4555 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4556 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4557 return true;
4559 default:
4560 *total = arm_rtx_costs_1 (x, code, outer_code);
4561 return true;
4564 /* All address computations that can be done are free, but rtx cost returns
4565 the same for practically all of them. So we weight the different types
4566 of address here in the order (most pref first):
4567 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4568 static inline int
4569 arm_arm_address_cost (rtx x)
4571 enum rtx_code c = GET_CODE (x);
4573 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4574 return 0;
4575 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4576 return 10;
4578 if (c == PLUS || c == MINUS)
4580 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4581 return 2;
4583 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4584 return 3;
4586 return 4;
4589 return 6;
4592 static inline int
4593 arm_thumb_address_cost (rtx x)
4595 enum rtx_code c = GET_CODE (x);
4597 if (c == REG)
4598 return 1;
4599 if (c == PLUS
4600 && GET_CODE (XEXP (x, 0)) == REG
4601 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4602 return 1;
4604 return 2;
4607 static int
4608 arm_address_cost (rtx x)
4610 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4613 static int
4614 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4616 rtx i_pat, d_pat;
4618 /* Some true dependencies can have a higher cost depending
4619 on precisely how certain input operands are used. */
4620 if (arm_tune_xscale
4621 && REG_NOTE_KIND (link) == 0
4622 && recog_memoized (insn) >= 0
4623 && recog_memoized (dep) >= 0)
4625 int shift_opnum = get_attr_shift (insn);
4626 enum attr_type attr_type = get_attr_type (dep);
4628 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4629 operand for INSN. If we have a shifted input operand and the
4630 instruction we depend on is another ALU instruction, then we may
4631 have to account for an additional stall. */
4632 if (shift_opnum != 0
4633 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4635 rtx shifted_operand;
4636 int opno;
4638 /* Get the shifted operand. */
4639 extract_insn (insn);
4640 shifted_operand = recog_data.operand[shift_opnum];
4642 /* Iterate over all the operands in DEP. If we write an operand
4643 that overlaps with SHIFTED_OPERAND, then we have increase the
4644 cost of this dependency. */
4645 extract_insn (dep);
4646 preprocess_constraints ();
4647 for (opno = 0; opno < recog_data.n_operands; opno++)
4649 /* We can ignore strict inputs. */
4650 if (recog_data.operand_type[opno] == OP_IN)
4651 continue;
4653 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4654 shifted_operand))
4655 return 2;
4660 /* XXX This is not strictly true for the FPA. */
4661 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4662 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4663 return 0;
4665 /* Call insns don't incur a stall, even if they follow a load. */
4666 if (REG_NOTE_KIND (link) == 0
4667 && GET_CODE (insn) == CALL_INSN)
4668 return 1;
4670 if ((i_pat = single_set (insn)) != NULL
4671 && GET_CODE (SET_SRC (i_pat)) == MEM
4672 && (d_pat = single_set (dep)) != NULL
4673 && GET_CODE (SET_DEST (d_pat)) == MEM)
4675 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4676 /* This is a load after a store, there is no conflict if the load reads
4677 from a cached area. Assume that loads from the stack, and from the
4678 constant pool are cached, and that others will miss. This is a
4679 hack. */
4681 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4682 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4683 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4684 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4685 return 1;
4688 return cost;
4691 static int fp_consts_inited = 0;
4693 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4694 static const char * const strings_fp[8] =
4696 "0", "1", "2", "3",
4697 "4", "5", "0.5", "10"
4700 static REAL_VALUE_TYPE values_fp[8];
4702 static void
4703 init_fp_table (void)
4705 int i;
4706 REAL_VALUE_TYPE r;
4708 if (TARGET_VFP)
4709 fp_consts_inited = 1;
4710 else
4711 fp_consts_inited = 8;
4713 for (i = 0; i < fp_consts_inited; i++)
4715 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4716 values_fp[i] = r;
4720 /* Return TRUE if rtx X is a valid immediate FP constant. */
4722 arm_const_double_rtx (rtx x)
4724 REAL_VALUE_TYPE r;
4725 int i;
4727 if (!fp_consts_inited)
4728 init_fp_table ();
4730 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4731 if (REAL_VALUE_MINUS_ZERO (r))
4732 return 0;
4734 for (i = 0; i < fp_consts_inited; i++)
4735 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4736 return 1;
4738 return 0;
4741 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4743 neg_const_double_rtx_ok_for_fpa (rtx x)
4745 REAL_VALUE_TYPE r;
4746 int i;
4748 if (!fp_consts_inited)
4749 init_fp_table ();
4751 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4752 r = REAL_VALUE_NEGATE (r);
4753 if (REAL_VALUE_MINUS_ZERO (r))
4754 return 0;
4756 for (i = 0; i < 8; i++)
4757 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4758 return 1;
4760 return 0;
4763 /* Predicates for `match_operand' and `match_operator'. */
4765 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4767 cirrus_memory_offset (rtx op)
4769 /* Reject eliminable registers. */
4770 if (! (reload_in_progress || reload_completed)
4771 && ( reg_mentioned_p (frame_pointer_rtx, op)
4772 || reg_mentioned_p (arg_pointer_rtx, op)
4773 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4774 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4775 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4776 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4777 return 0;
4779 if (GET_CODE (op) == MEM)
4781 rtx ind;
4783 ind = XEXP (op, 0);
4785 /* Match: (mem (reg)). */
4786 if (GET_CODE (ind) == REG)
4787 return 1;
4789 /* Match:
4790 (mem (plus (reg)
4791 (const))). */
4792 if (GET_CODE (ind) == PLUS
4793 && GET_CODE (XEXP (ind, 0)) == REG
4794 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4795 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4796 return 1;
4799 return 0;
4802 /* Return TRUE if OP is a valid VFP memory address pattern.
4803 WB if true if writeback address modes are allowed. */
4806 arm_coproc_mem_operand (rtx op, bool wb)
4808 rtx ind;
4810 /* Reject eliminable registers. */
4811 if (! (reload_in_progress || reload_completed)
4812 && ( reg_mentioned_p (frame_pointer_rtx, op)
4813 || reg_mentioned_p (arg_pointer_rtx, op)
4814 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4815 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4816 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4817 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4818 return FALSE;
4820 /* Constants are converted into offsets from labels. */
4821 if (GET_CODE (op) != MEM)
4822 return FALSE;
4824 ind = XEXP (op, 0);
4826 if (reload_completed
4827 && (GET_CODE (ind) == LABEL_REF
4828 || (GET_CODE (ind) == CONST
4829 && GET_CODE (XEXP (ind, 0)) == PLUS
4830 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4831 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4832 return TRUE;
4834 /* Match: (mem (reg)). */
4835 if (GET_CODE (ind) == REG)
4836 return arm_address_register_rtx_p (ind, 0);
4838 /* Autoincremment addressing modes. */
4839 if (wb
4840 && (GET_CODE (ind) == PRE_INC
4841 || GET_CODE (ind) == POST_INC
4842 || GET_CODE (ind) == PRE_DEC
4843 || GET_CODE (ind) == POST_DEC))
4844 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4846 if (wb
4847 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4848 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4849 && GET_CODE (XEXP (ind, 1)) == PLUS
4850 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4851 ind = XEXP (ind, 1);
4853 /* Match:
4854 (plus (reg)
4855 (const)). */
4856 if (GET_CODE (ind) == PLUS
4857 && GET_CODE (XEXP (ind, 0)) == REG
4858 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4859 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4860 && INTVAL (XEXP (ind, 1)) > -1024
4861 && INTVAL (XEXP (ind, 1)) < 1024
4862 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4863 return TRUE;
4865 return FALSE;
4868 /* Return true if X is a register that will be eliminated later on. */
4870 arm_eliminable_register (rtx x)
4872 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4873 || REGNO (x) == ARG_POINTER_REGNUM
4874 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4875 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4878 /* Return GENERAL_REGS if a scratch register required to reload x to/from
4879 VFP registers. Otherwise return NO_REGS. */
4881 enum reg_class
4882 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4884 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4885 return NO_REGS;
4887 return GENERAL_REGS;
4891 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4892 Use by the Cirrus Maverick code which has to workaround
4893 a hardware bug triggered by such instructions. */
4894 static bool
4895 arm_memory_load_p (rtx insn)
4897 rtx body, lhs, rhs;;
4899 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4900 return false;
4902 body = PATTERN (insn);
4904 if (GET_CODE (body) != SET)
4905 return false;
4907 lhs = XEXP (body, 0);
4908 rhs = XEXP (body, 1);
4910 lhs = REG_OR_SUBREG_RTX (lhs);
4912 /* If the destination is not a general purpose
4913 register we do not have to worry. */
4914 if (GET_CODE (lhs) != REG
4915 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4916 return false;
4918 /* As well as loads from memory we also have to react
4919 to loads of invalid constants which will be turned
4920 into loads from the minipool. */
4921 return (GET_CODE (rhs) == MEM
4922 || GET_CODE (rhs) == SYMBOL_REF
4923 || note_invalid_constants (insn, -1, false));
4926 /* Return TRUE if INSN is a Cirrus instruction. */
4927 static bool
4928 arm_cirrus_insn_p (rtx insn)
4930 enum attr_cirrus attr;
4932 /* get_attr aborts on USE and CLOBBER. */
4933 if (!insn
4934 || GET_CODE (insn) != INSN
4935 || GET_CODE (PATTERN (insn)) == USE
4936 || GET_CODE (PATTERN (insn)) == CLOBBER)
4937 return 0;
4939 attr = get_attr_cirrus (insn);
4941 return attr != CIRRUS_NOT;
4944 /* Cirrus reorg for invalid instruction combinations. */
4945 static void
4946 cirrus_reorg (rtx first)
4948 enum attr_cirrus attr;
4949 rtx body = PATTERN (first);
4950 rtx t;
4951 int nops;
4953 /* Any branch must be followed by 2 non Cirrus instructions. */
4954 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4956 nops = 0;
4957 t = next_nonnote_insn (first);
4959 if (arm_cirrus_insn_p (t))
4960 ++ nops;
4962 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4963 ++ nops;
4965 while (nops --)
4966 emit_insn_after (gen_nop (), first);
4968 return;
4971 /* (float (blah)) is in parallel with a clobber. */
4972 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4973 body = XVECEXP (body, 0, 0);
4975 if (GET_CODE (body) == SET)
4977 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4979 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4980 be followed by a non Cirrus insn. */
4981 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4983 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4984 emit_insn_after (gen_nop (), first);
4986 return;
4988 else if (arm_memory_load_p (first))
4990 unsigned int arm_regno;
4992 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4993 ldr/cfmv64hr combination where the Rd field is the same
4994 in both instructions must be split with a non Cirrus
4995 insn. Example:
4997 ldr r0, blah
4999 cfmvsr mvf0, r0. */
5001 /* Get Arm register number for ldr insn. */
5002 if (GET_CODE (lhs) == REG)
5003 arm_regno = REGNO (lhs);
5004 else if (GET_CODE (rhs) == REG)
5005 arm_regno = REGNO (rhs);
5006 else
5007 abort ();
5009 /* Next insn. */
5010 first = next_nonnote_insn (first);
5012 if (! arm_cirrus_insn_p (first))
5013 return;
5015 body = PATTERN (first);
5017 /* (float (blah)) is in parallel with a clobber. */
5018 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5019 body = XVECEXP (body, 0, 0);
5021 if (GET_CODE (body) == FLOAT)
5022 body = XEXP (body, 0);
5024 if (get_attr_cirrus (first) == CIRRUS_MOVE
5025 && GET_CODE (XEXP (body, 1)) == REG
5026 && arm_regno == REGNO (XEXP (body, 1)))
5027 emit_insn_after (gen_nop (), first);
5029 return;
5033 /* get_attr aborts on USE and CLOBBER. */
5034 if (!first
5035 || GET_CODE (first) != INSN
5036 || GET_CODE (PATTERN (first)) == USE
5037 || GET_CODE (PATTERN (first)) == CLOBBER)
5038 return;
5040 attr = get_attr_cirrus (first);
5042 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5043 must be followed by a non-coprocessor instruction. */
5044 if (attr == CIRRUS_COMPARE)
5046 nops = 0;
5048 t = next_nonnote_insn (first);
5050 if (arm_cirrus_insn_p (t))
5051 ++ nops;
5053 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5054 ++ nops;
5056 while (nops --)
5057 emit_insn_after (gen_nop (), first);
5059 return;
5063 /* Return TRUE if X references a SYMBOL_REF. */
5065 symbol_mentioned_p (rtx x)
5067 const char * fmt;
5068 int i;
5070 if (GET_CODE (x) == SYMBOL_REF)
5071 return 1;
5073 fmt = GET_RTX_FORMAT (GET_CODE (x));
5075 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5077 if (fmt[i] == 'E')
5079 int j;
5081 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5082 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5083 return 1;
5085 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5086 return 1;
5089 return 0;
5092 /* Return TRUE if X references a LABEL_REF. */
5094 label_mentioned_p (rtx x)
5096 const char * fmt;
5097 int i;
5099 if (GET_CODE (x) == LABEL_REF)
5100 return 1;
5102 fmt = GET_RTX_FORMAT (GET_CODE (x));
5103 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5105 if (fmt[i] == 'E')
5107 int j;
5109 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5110 if (label_mentioned_p (XVECEXP (x, i, j)))
5111 return 1;
5113 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5114 return 1;
5117 return 0;
5120 enum rtx_code
5121 minmax_code (rtx x)
5123 enum rtx_code code = GET_CODE (x);
5125 if (code == SMAX)
5126 return GE;
5127 else if (code == SMIN)
5128 return LE;
5129 else if (code == UMIN)
5130 return LEU;
5131 else if (code == UMAX)
5132 return GEU;
5134 abort ();
5137 /* Return 1 if memory locations are adjacent. */
5139 adjacent_mem_locations (rtx a, rtx b)
5141 /* We don't guarantee to preserve the order of these memory refs. */
5142 if (volatile_refs_p (a) || volatile_refs_p (b))
5143 return 0;
5145 if ((GET_CODE (XEXP (a, 0)) == REG
5146 || (GET_CODE (XEXP (a, 0)) == PLUS
5147 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5148 && (GET_CODE (XEXP (b, 0)) == REG
5149 || (GET_CODE (XEXP (b, 0)) == PLUS
5150 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5152 HOST_WIDE_INT val0 = 0, val1 = 0;
5153 rtx reg0, reg1;
5154 int val_diff;
5156 if (GET_CODE (XEXP (a, 0)) == PLUS)
5158 reg0 = XEXP (XEXP (a, 0), 0);
5159 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5161 else
5162 reg0 = XEXP (a, 0);
5164 if (GET_CODE (XEXP (b, 0)) == PLUS)
5166 reg1 = XEXP (XEXP (b, 0), 0);
5167 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5169 else
5170 reg1 = XEXP (b, 0);
5172 /* Don't accept any offset that will require multiple
5173 instructions to handle, since this would cause the
5174 arith_adjacentmem pattern to output an overlong sequence. */
5175 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
5176 return 0;
5178 /* Don't allow an eliminable register: register elimination can make
5179 the offset too large. */
5180 if (arm_eliminable_register (reg0))
5181 return 0;
5183 val_diff = val1 - val0;
5185 if (arm_ld_sched)
5187 /* If the target has load delay slots, then there's no benefit
5188 to using an ldm instruction unless the offset is zero and
5189 we are optimizing for size. */
5190 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5191 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5192 && (val_diff == 4 || val_diff == -4));
5195 return ((REGNO (reg0) == REGNO (reg1))
5196 && (val_diff == 4 || val_diff == -4));
5199 return 0;
5203 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5204 HOST_WIDE_INT *load_offset)
5206 int unsorted_regs[4];
5207 HOST_WIDE_INT unsorted_offsets[4];
5208 int order[4];
5209 int base_reg = -1;
5210 int i;
5212 /* Can only handle 2, 3, or 4 insns at present,
5213 though could be easily extended if required. */
5214 if (nops < 2 || nops > 4)
5215 abort ();
5217 /* Loop over the operands and check that the memory references are
5218 suitable (i.e. immediate offsets from the same base register). At
5219 the same time, extract the target register, and the memory
5220 offsets. */
5221 for (i = 0; i < nops; i++)
5223 rtx reg;
5224 rtx offset;
5226 /* Convert a subreg of a mem into the mem itself. */
5227 if (GET_CODE (operands[nops + i]) == SUBREG)
5228 operands[nops + i] = alter_subreg (operands + (nops + i));
5230 if (GET_CODE (operands[nops + i]) != MEM)
5231 abort ();
5233 /* Don't reorder volatile memory references; it doesn't seem worth
5234 looking for the case where the order is ok anyway. */
5235 if (MEM_VOLATILE_P (operands[nops + i]))
5236 return 0;
5238 offset = const0_rtx;
5240 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5241 || (GET_CODE (reg) == SUBREG
5242 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5243 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5244 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5245 == REG)
5246 || (GET_CODE (reg) == SUBREG
5247 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5248 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5249 == CONST_INT)))
5251 if (i == 0)
5253 base_reg = REGNO (reg);
5254 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5255 ? REGNO (operands[i])
5256 : REGNO (SUBREG_REG (operands[i])));
5257 order[0] = 0;
5259 else
5261 if (base_reg != (int) REGNO (reg))
5262 /* Not addressed from the same base register. */
5263 return 0;
5265 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5266 ? REGNO (operands[i])
5267 : REGNO (SUBREG_REG (operands[i])));
5268 if (unsorted_regs[i] < unsorted_regs[order[0]])
5269 order[0] = i;
5272 /* If it isn't an integer register, or if it overwrites the
5273 base register but isn't the last insn in the list, then
5274 we can't do this. */
5275 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5276 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5277 return 0;
5279 unsorted_offsets[i] = INTVAL (offset);
5281 else
5282 /* Not a suitable memory address. */
5283 return 0;
5286 /* All the useful information has now been extracted from the
5287 operands into unsorted_regs and unsorted_offsets; additionally,
5288 order[0] has been set to the lowest numbered register in the
5289 list. Sort the registers into order, and check that the memory
5290 offsets are ascending and adjacent. */
5292 for (i = 1; i < nops; i++)
5294 int j;
5296 order[i] = order[i - 1];
5297 for (j = 0; j < nops; j++)
5298 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5299 && (order[i] == order[i - 1]
5300 || unsorted_regs[j] < unsorted_regs[order[i]]))
5301 order[i] = j;
5303 /* Have we found a suitable register? if not, one must be used more
5304 than once. */
5305 if (order[i] == order[i - 1])
5306 return 0;
5308 /* Is the memory address adjacent and ascending? */
5309 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5310 return 0;
5313 if (base)
5315 *base = base_reg;
5317 for (i = 0; i < nops; i++)
5318 regs[i] = unsorted_regs[order[i]];
5320 *load_offset = unsorted_offsets[order[0]];
5323 if (unsorted_offsets[order[0]] == 0)
5324 return 1; /* ldmia */
5326 if (unsorted_offsets[order[0]] == 4)
5327 return 2; /* ldmib */
5329 if (unsorted_offsets[order[nops - 1]] == 0)
5330 return 3; /* ldmda */
5332 if (unsorted_offsets[order[nops - 1]] == -4)
5333 return 4; /* ldmdb */
5335 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5336 if the offset isn't small enough. The reason 2 ldrs are faster
5337 is because these ARMs are able to do more than one cache access
5338 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5339 whilst the ARM8 has a double bandwidth cache. This means that
5340 these cores can do both an instruction fetch and a data fetch in
5341 a single cycle, so the trick of calculating the address into a
5342 scratch register (one of the result regs) and then doing a load
5343 multiple actually becomes slower (and no smaller in code size).
5344 That is the transformation
5346 ldr rd1, [rbase + offset]
5347 ldr rd2, [rbase + offset + 4]
5351 add rd1, rbase, offset
5352 ldmia rd1, {rd1, rd2}
5354 produces worse code -- '3 cycles + any stalls on rd2' instead of
5355 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5356 access per cycle, the first sequence could never complete in less
5357 than 6 cycles, whereas the ldm sequence would only take 5 and
5358 would make better use of sequential accesses if not hitting the
5359 cache.
5361 We cheat here and test 'arm_ld_sched' which we currently know to
5362 only be true for the ARM8, ARM9 and StrongARM. If this ever
5363 changes, then the test below needs to be reworked. */
5364 if (nops == 2 && arm_ld_sched)
5365 return 0;
5367 /* Can't do it without setting up the offset, only do this if it takes
5368 no more than one insn. */
5369 return (const_ok_for_arm (unsorted_offsets[order[0]])
5370 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5373 const char *
5374 emit_ldm_seq (rtx *operands, int nops)
5376 int regs[4];
5377 int base_reg;
5378 HOST_WIDE_INT offset;
5379 char buf[100];
5380 int i;
5382 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5384 case 1:
5385 strcpy (buf, "ldm%?ia\t");
5386 break;
5388 case 2:
5389 strcpy (buf, "ldm%?ib\t");
5390 break;
5392 case 3:
5393 strcpy (buf, "ldm%?da\t");
5394 break;
5396 case 4:
5397 strcpy (buf, "ldm%?db\t");
5398 break;
5400 case 5:
5401 if (offset >= 0)
5402 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5403 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5404 (long) offset);
5405 else
5406 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5407 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5408 (long) -offset);
5409 output_asm_insn (buf, operands);
5410 base_reg = regs[0];
5411 strcpy (buf, "ldm%?ia\t");
5412 break;
5414 default:
5415 abort ();
5418 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5419 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5421 for (i = 1; i < nops; i++)
5422 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5423 reg_names[regs[i]]);
5425 strcat (buf, "}\t%@ phole ldm");
5427 output_asm_insn (buf, operands);
5428 return "";
5432 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5433 HOST_WIDE_INT * load_offset)
5435 int unsorted_regs[4];
5436 HOST_WIDE_INT unsorted_offsets[4];
5437 int order[4];
5438 int base_reg = -1;
5439 int i;
5441 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5442 extended if required. */
5443 if (nops < 2 || nops > 4)
5444 abort ();
5446 /* Loop over the operands and check that the memory references are
5447 suitable (i.e. immediate offsets from the same base register). At
5448 the same time, extract the target register, and the memory
5449 offsets. */
5450 for (i = 0; i < nops; i++)
5452 rtx reg;
5453 rtx offset;
5455 /* Convert a subreg of a mem into the mem itself. */
5456 if (GET_CODE (operands[nops + i]) == SUBREG)
5457 operands[nops + i] = alter_subreg (operands + (nops + i));
5459 if (GET_CODE (operands[nops + i]) != MEM)
5460 abort ();
5462 /* Don't reorder volatile memory references; it doesn't seem worth
5463 looking for the case where the order is ok anyway. */
5464 if (MEM_VOLATILE_P (operands[nops + i]))
5465 return 0;
5467 offset = const0_rtx;
5469 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5470 || (GET_CODE (reg) == SUBREG
5471 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5472 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5473 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5474 == REG)
5475 || (GET_CODE (reg) == SUBREG
5476 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5477 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5478 == CONST_INT)))
5480 if (i == 0)
5482 base_reg = REGNO (reg);
5483 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5484 ? REGNO (operands[i])
5485 : REGNO (SUBREG_REG (operands[i])));
5486 order[0] = 0;
5488 else
5490 if (base_reg != (int) REGNO (reg))
5491 /* Not addressed from the same base register. */
5492 return 0;
5494 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5495 ? REGNO (operands[i])
5496 : REGNO (SUBREG_REG (operands[i])));
5497 if (unsorted_regs[i] < unsorted_regs[order[0]])
5498 order[0] = i;
5501 /* If it isn't an integer register, then we can't do this. */
5502 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5503 return 0;
5505 unsorted_offsets[i] = INTVAL (offset);
5507 else
5508 /* Not a suitable memory address. */
5509 return 0;
5512 /* All the useful information has now been extracted from the
5513 operands into unsorted_regs and unsorted_offsets; additionally,
5514 order[0] has been set to the lowest numbered register in the
5515 list. Sort the registers into order, and check that the memory
5516 offsets are ascending and adjacent. */
5518 for (i = 1; i < nops; i++)
5520 int j;
5522 order[i] = order[i - 1];
5523 for (j = 0; j < nops; j++)
5524 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5525 && (order[i] == order[i - 1]
5526 || unsorted_regs[j] < unsorted_regs[order[i]]))
5527 order[i] = j;
5529 /* Have we found a suitable register? if not, one must be used more
5530 than once. */
5531 if (order[i] == order[i - 1])
5532 return 0;
5534 /* Is the memory address adjacent and ascending? */
5535 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5536 return 0;
5539 if (base)
5541 *base = base_reg;
5543 for (i = 0; i < nops; i++)
5544 regs[i] = unsorted_regs[order[i]];
5546 *load_offset = unsorted_offsets[order[0]];
5549 if (unsorted_offsets[order[0]] == 0)
5550 return 1; /* stmia */
5552 if (unsorted_offsets[order[0]] == 4)
5553 return 2; /* stmib */
5555 if (unsorted_offsets[order[nops - 1]] == 0)
5556 return 3; /* stmda */
5558 if (unsorted_offsets[order[nops - 1]] == -4)
5559 return 4; /* stmdb */
5561 return 0;
5564 const char *
5565 emit_stm_seq (rtx *operands, int nops)
5567 int regs[4];
5568 int base_reg;
5569 HOST_WIDE_INT offset;
5570 char buf[100];
5571 int i;
5573 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5575 case 1:
5576 strcpy (buf, "stm%?ia\t");
5577 break;
5579 case 2:
5580 strcpy (buf, "stm%?ib\t");
5581 break;
5583 case 3:
5584 strcpy (buf, "stm%?da\t");
5585 break;
5587 case 4:
5588 strcpy (buf, "stm%?db\t");
5589 break;
5591 default:
5592 abort ();
5595 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5596 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5598 for (i = 1; i < nops; i++)
5599 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5600 reg_names[regs[i]]);
5602 strcat (buf, "}\t%@ phole stm");
5604 output_asm_insn (buf, operands);
5605 return "";
5609 /* Routines for use in generating RTL. */
5612 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5613 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5615 HOST_WIDE_INT offset = *offsetp;
5616 int i = 0, j;
5617 rtx result;
5618 int sign = up ? 1 : -1;
5619 rtx mem, addr;
5621 /* XScale has load-store double instructions, but they have stricter
5622 alignment requirements than load-store multiple, so we cannot
5623 use them.
5625 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5626 the pipeline until completion.
5628 NREGS CYCLES
5634 An ldr instruction takes 1-3 cycles, but does not block the
5635 pipeline.
5637 NREGS CYCLES
5638 1 1-3
5639 2 2-6
5640 3 3-9
5641 4 4-12
5643 Best case ldr will always win. However, the more ldr instructions
5644 we issue, the less likely we are to be able to schedule them well.
5645 Using ldr instructions also increases code size.
5647 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5648 for counts of 3 or 4 regs. */
5649 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5651 rtx seq;
5653 start_sequence ();
5655 for (i = 0; i < count; i++)
5657 addr = plus_constant (from, i * 4 * sign);
5658 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5659 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5660 offset += 4 * sign;
5663 if (write_back)
5665 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5666 *offsetp = offset;
5669 seq = get_insns ();
5670 end_sequence ();
5672 return seq;
5675 result = gen_rtx_PARALLEL (VOIDmode,
5676 rtvec_alloc (count + (write_back ? 1 : 0)));
5677 if (write_back)
5679 XVECEXP (result, 0, 0)
5680 = gen_rtx_SET (GET_MODE (from), from,
5681 plus_constant (from, count * 4 * sign));
5682 i = 1;
5683 count++;
5686 for (j = 0; i < count; i++, j++)
5688 addr = plus_constant (from, j * 4 * sign);
5689 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5690 XVECEXP (result, 0, i)
5691 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5692 offset += 4 * sign;
5695 if (write_back)
5696 *offsetp = offset;
5698 return result;
5702 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5703 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5705 HOST_WIDE_INT offset = *offsetp;
5706 int i = 0, j;
5707 rtx result;
5708 int sign = up ? 1 : -1;
5709 rtx mem, addr;
5711 /* See arm_gen_load_multiple for discussion of
5712 the pros/cons of ldm/stm usage for XScale. */
5713 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5715 rtx seq;
5717 start_sequence ();
5719 for (i = 0; i < count; i++)
5721 addr = plus_constant (to, i * 4 * sign);
5722 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5723 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5724 offset += 4 * sign;
5727 if (write_back)
5729 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5730 *offsetp = offset;
5733 seq = get_insns ();
5734 end_sequence ();
5736 return seq;
5739 result = gen_rtx_PARALLEL (VOIDmode,
5740 rtvec_alloc (count + (write_back ? 1 : 0)));
5741 if (write_back)
5743 XVECEXP (result, 0, 0)
5744 = gen_rtx_SET (GET_MODE (to), to,
5745 plus_constant (to, count * 4 * sign));
5746 i = 1;
5747 count++;
5750 for (j = 0; i < count; i++, j++)
5752 addr = plus_constant (to, j * 4 * sign);
5753 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5754 XVECEXP (result, 0, i)
5755 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5756 offset += 4 * sign;
5759 if (write_back)
5760 *offsetp = offset;
5762 return result;
5766 arm_gen_movmemqi (rtx *operands)
5768 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5769 HOST_WIDE_INT srcoffset, dstoffset;
5770 int i;
5771 rtx src, dst, srcbase, dstbase;
5772 rtx part_bytes_reg = NULL;
5773 rtx mem;
5775 if (GET_CODE (operands[2]) != CONST_INT
5776 || GET_CODE (operands[3]) != CONST_INT
5777 || INTVAL (operands[2]) > 64
5778 || INTVAL (operands[3]) & 3)
5779 return 0;
5781 dstbase = operands[0];
5782 srcbase = operands[1];
5784 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5785 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5787 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5788 out_words_to_go = INTVAL (operands[2]) / 4;
5789 last_bytes = INTVAL (operands[2]) & 3;
5790 dstoffset = srcoffset = 0;
5792 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5793 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5795 for (i = 0; in_words_to_go >= 2; i+=4)
5797 if (in_words_to_go > 4)
5798 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5799 srcbase, &srcoffset));
5800 else
5801 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5802 FALSE, srcbase, &srcoffset));
5804 if (out_words_to_go)
5806 if (out_words_to_go > 4)
5807 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5808 dstbase, &dstoffset));
5809 else if (out_words_to_go != 1)
5810 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5811 dst, TRUE,
5812 (last_bytes == 0
5813 ? FALSE : TRUE),
5814 dstbase, &dstoffset));
5815 else
5817 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5818 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5819 if (last_bytes != 0)
5821 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5822 dstoffset += 4;
5827 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5828 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5831 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5832 if (out_words_to_go)
5834 rtx sreg;
5836 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5837 sreg = copy_to_reg (mem);
5839 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5840 emit_move_insn (mem, sreg);
5841 in_words_to_go--;
5843 if (in_words_to_go) /* Sanity check */
5844 abort ();
5847 if (in_words_to_go)
5849 if (in_words_to_go < 0)
5850 abort ();
5852 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5853 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5856 if (last_bytes && part_bytes_reg == NULL)
5857 abort ();
5859 if (BYTES_BIG_ENDIAN && last_bytes)
5861 rtx tmp = gen_reg_rtx (SImode);
5863 /* The bytes we want are in the top end of the word. */
5864 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5865 GEN_INT (8 * (4 - last_bytes))));
5866 part_bytes_reg = tmp;
5868 while (last_bytes)
5870 mem = adjust_automodify_address (dstbase, QImode,
5871 plus_constant (dst, last_bytes - 1),
5872 dstoffset + last_bytes - 1);
5873 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5875 if (--last_bytes)
5877 tmp = gen_reg_rtx (SImode);
5878 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5879 part_bytes_reg = tmp;
5884 else
5886 if (last_bytes > 1)
5888 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5889 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5890 last_bytes -= 2;
5891 if (last_bytes)
5893 rtx tmp = gen_reg_rtx (SImode);
5894 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5895 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5896 part_bytes_reg = tmp;
5897 dstoffset += 2;
5901 if (last_bytes)
5903 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5904 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5908 return 1;
5911 /* Generate a memory reference for a half word, such that it will be loaded
5912 into the top 16 bits of the word. We can assume that the address is
5913 known to be alignable and of the form reg, or plus (reg, const). */
5916 arm_gen_rotated_half_load (rtx memref)
5918 HOST_WIDE_INT offset = 0;
5919 rtx base = XEXP (memref, 0);
5921 if (GET_CODE (base) == PLUS)
5923 offset = INTVAL (XEXP (base, 1));
5924 base = XEXP (base, 0);
5927 /* If we aren't allowed to generate unaligned addresses, then fail. */
5928 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5929 return NULL;
5931 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5933 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5934 return base;
5936 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5939 /* Select a dominance comparison mode if possible for a test of the general
5940 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5941 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5942 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5943 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5944 In all cases OP will be either EQ or NE, but we don't need to know which
5945 here. If we are unable to support a dominance comparison we return
5946 CC mode. This will then fail to match for the RTL expressions that
5947 generate this call. */
5948 enum machine_mode
5949 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5951 enum rtx_code cond1, cond2;
5952 int swapped = 0;
5954 /* Currently we will probably get the wrong result if the individual
5955 comparisons are not simple. This also ensures that it is safe to
5956 reverse a comparison if necessary. */
5957 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5958 != CCmode)
5959 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5960 != CCmode))
5961 return CCmode;
5963 /* The if_then_else variant of this tests the second condition if the
5964 first passes, but is true if the first fails. Reverse the first
5965 condition to get a true "inclusive-or" expression. */
5966 if (cond_or == DOM_CC_NX_OR_Y)
5967 cond1 = reverse_condition (cond1);
5969 /* If the comparisons are not equal, and one doesn't dominate the other,
5970 then we can't do this. */
5971 if (cond1 != cond2
5972 && !comparison_dominates_p (cond1, cond2)
5973 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5974 return CCmode;
5976 if (swapped)
5978 enum rtx_code temp = cond1;
5979 cond1 = cond2;
5980 cond2 = temp;
5983 switch (cond1)
5985 case EQ:
5986 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5987 return CC_DEQmode;
5989 switch (cond2)
5991 case LE: return CC_DLEmode;
5992 case LEU: return CC_DLEUmode;
5993 case GE: return CC_DGEmode;
5994 case GEU: return CC_DGEUmode;
5995 default: break;
5998 break;
6000 case LT:
6001 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6002 return CC_DLTmode;
6003 if (cond2 == LE)
6004 return CC_DLEmode;
6005 if (cond2 == NE)
6006 return CC_DNEmode;
6007 break;
6009 case GT:
6010 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6011 return CC_DGTmode;
6012 if (cond2 == GE)
6013 return CC_DGEmode;
6014 if (cond2 == NE)
6015 return CC_DNEmode;
6016 break;
6018 case LTU:
6019 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6020 return CC_DLTUmode;
6021 if (cond2 == LEU)
6022 return CC_DLEUmode;
6023 if (cond2 == NE)
6024 return CC_DNEmode;
6025 break;
6027 case GTU:
6028 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6029 return CC_DGTUmode;
6030 if (cond2 == GEU)
6031 return CC_DGEUmode;
6032 if (cond2 == NE)
6033 return CC_DNEmode;
6034 break;
6036 /* The remaining cases only occur when both comparisons are the
6037 same. */
6038 case NE:
6039 return CC_DNEmode;
6041 case LE:
6042 return CC_DLEmode;
6044 case GE:
6045 return CC_DGEmode;
6047 case LEU:
6048 return CC_DLEUmode;
6050 case GEU:
6051 return CC_DGEUmode;
6053 default:
6054 break;
6057 abort ();
6060 enum machine_mode
6061 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6063 /* All floating point compares return CCFP if it is an equality
6064 comparison, and CCFPE otherwise. */
6065 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6067 switch (op)
6069 case EQ:
6070 case NE:
6071 case UNORDERED:
6072 case ORDERED:
6073 case UNLT:
6074 case UNLE:
6075 case UNGT:
6076 case UNGE:
6077 case UNEQ:
6078 case LTGT:
6079 return CCFPmode;
6081 case LT:
6082 case LE:
6083 case GT:
6084 case GE:
6085 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6086 return CCFPmode;
6087 return CCFPEmode;
6089 default:
6090 abort ();
6094 /* A compare with a shifted operand. Because of canonicalization, the
6095 comparison will have to be swapped when we emit the assembler. */
6096 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6097 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6098 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6099 || GET_CODE (x) == ROTATERT))
6100 return CC_SWPmode;
6102 /* This operation is performed swapped, but since we only rely on the Z
6103 flag we don't need an additional mode. */
6104 if (GET_MODE (y) == SImode && REG_P (y)
6105 && GET_CODE (x) == NEG
6106 && (op == EQ || op == NE))
6107 return CC_Zmode;
6109 /* This is a special case that is used by combine to allow a
6110 comparison of a shifted byte load to be split into a zero-extend
6111 followed by a comparison of the shifted integer (only valid for
6112 equalities and unsigned inequalities). */
6113 if (GET_MODE (x) == SImode
6114 && GET_CODE (x) == ASHIFT
6115 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6116 && GET_CODE (XEXP (x, 0)) == SUBREG
6117 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6118 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6119 && (op == EQ || op == NE
6120 || op == GEU || op == GTU || op == LTU || op == LEU)
6121 && GET_CODE (y) == CONST_INT)
6122 return CC_Zmode;
6124 /* A construct for a conditional compare, if the false arm contains
6125 0, then both conditions must be true, otherwise either condition
6126 must be true. Not all conditions are possible, so CCmode is
6127 returned if it can't be done. */
6128 if (GET_CODE (x) == IF_THEN_ELSE
6129 && (XEXP (x, 2) == const0_rtx
6130 || XEXP (x, 2) == const1_rtx)
6131 && COMPARISON_P (XEXP (x, 0))
6132 && COMPARISON_P (XEXP (x, 1)))
6133 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6134 INTVAL (XEXP (x, 2)));
6136 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6137 if (GET_CODE (x) == AND
6138 && COMPARISON_P (XEXP (x, 0))
6139 && COMPARISON_P (XEXP (x, 1)))
6140 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6141 DOM_CC_X_AND_Y);
6143 if (GET_CODE (x) == IOR
6144 && COMPARISON_P (XEXP (x, 0))
6145 && COMPARISON_P (XEXP (x, 1)))
6146 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6147 DOM_CC_X_OR_Y);
6149 /* An operation (on Thumb) where we want to test for a single bit.
6150 This is done by shifting that bit up into the top bit of a
6151 scratch register; we can then branch on the sign bit. */
6152 if (TARGET_THUMB
6153 && GET_MODE (x) == SImode
6154 && (op == EQ || op == NE)
6155 && (GET_CODE (x) == ZERO_EXTRACT))
6156 return CC_Nmode;
6158 /* An operation that sets the condition codes as a side-effect, the
6159 V flag is not set correctly, so we can only use comparisons where
6160 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6161 instead.) */
6162 if (GET_MODE (x) == SImode
6163 && y == const0_rtx
6164 && (op == EQ || op == NE || op == LT || op == GE)
6165 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6166 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6167 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6168 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6169 || GET_CODE (x) == LSHIFTRT
6170 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6171 || GET_CODE (x) == ROTATERT
6172 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6173 return CC_NOOVmode;
6175 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6176 return CC_Zmode;
6178 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6179 && GET_CODE (x) == PLUS
6180 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6181 return CC_Cmode;
6183 return CCmode;
6186 /* X and Y are two things to compare using CODE. Emit the compare insn and
6187 return the rtx for register 0 in the proper mode. FP means this is a
6188 floating point compare: I don't think that it is needed on the arm. */
6190 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6192 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6193 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6195 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6196 gen_rtx_COMPARE (mode, x, y)));
6198 return cc_reg;
6201 /* Generate a sequence of insns that will generate the correct return
6202 address mask depending on the physical architecture that the program
6203 is running on. */
6205 arm_gen_return_addr_mask (void)
6207 rtx reg = gen_reg_rtx (Pmode);
6209 emit_insn (gen_return_addr_mask (reg));
6210 return reg;
6213 void
6214 arm_reload_in_hi (rtx *operands)
6216 rtx ref = operands[1];
6217 rtx base, scratch;
6218 HOST_WIDE_INT offset = 0;
6220 if (GET_CODE (ref) == SUBREG)
6222 offset = SUBREG_BYTE (ref);
6223 ref = SUBREG_REG (ref);
6226 if (GET_CODE (ref) == REG)
6228 /* We have a pseudo which has been spilt onto the stack; there
6229 are two cases here: the first where there is a simple
6230 stack-slot replacement and a second where the stack-slot is
6231 out of range, or is used as a subreg. */
6232 if (reg_equiv_mem[REGNO (ref)])
6234 ref = reg_equiv_mem[REGNO (ref)];
6235 base = find_replacement (&XEXP (ref, 0));
6237 else
6238 /* The slot is out of range, or was dressed up in a SUBREG. */
6239 base = reg_equiv_address[REGNO (ref)];
6241 else
6242 base = find_replacement (&XEXP (ref, 0));
6244 /* Handle the case where the address is too complex to be offset by 1. */
6245 if (GET_CODE (base) == MINUS
6246 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6248 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6250 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6251 base = base_plus;
6253 else if (GET_CODE (base) == PLUS)
6255 /* The addend must be CONST_INT, or we would have dealt with it above. */
6256 HOST_WIDE_INT hi, lo;
6258 offset += INTVAL (XEXP (base, 1));
6259 base = XEXP (base, 0);
6261 /* Rework the address into a legal sequence of insns. */
6262 /* Valid range for lo is -4095 -> 4095 */
6263 lo = (offset >= 0
6264 ? (offset & 0xfff)
6265 : -((-offset) & 0xfff));
6267 /* Corner case, if lo is the max offset then we would be out of range
6268 once we have added the additional 1 below, so bump the msb into the
6269 pre-loading insn(s). */
6270 if (lo == 4095)
6271 lo &= 0x7ff;
6273 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6274 ^ (HOST_WIDE_INT) 0x80000000)
6275 - (HOST_WIDE_INT) 0x80000000);
6277 if (hi + lo != offset)
6278 abort ();
6280 if (hi != 0)
6282 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6284 /* Get the base address; addsi3 knows how to handle constants
6285 that require more than one insn. */
6286 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6287 base = base_plus;
6288 offset = lo;
6292 /* Operands[2] may overlap operands[0] (though it won't overlap
6293 operands[1]), that's why we asked for a DImode reg -- so we can
6294 use the bit that does not overlap. */
6295 if (REGNO (operands[2]) == REGNO (operands[0]))
6296 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6297 else
6298 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6300 emit_insn (gen_zero_extendqisi2 (scratch,
6301 gen_rtx_MEM (QImode,
6302 plus_constant (base,
6303 offset))));
6304 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6305 gen_rtx_MEM (QImode,
6306 plus_constant (base,
6307 offset + 1))));
6308 if (!BYTES_BIG_ENDIAN)
6309 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6310 gen_rtx_IOR (SImode,
6311 gen_rtx_ASHIFT
6312 (SImode,
6313 gen_rtx_SUBREG (SImode, operands[0], 0),
6314 GEN_INT (8)),
6315 scratch)));
6316 else
6317 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6318 gen_rtx_IOR (SImode,
6319 gen_rtx_ASHIFT (SImode, scratch,
6320 GEN_INT (8)),
6321 gen_rtx_SUBREG (SImode, operands[0],
6322 0))));
6325 /* Handle storing a half-word to memory during reload by synthesizing as two
6326 byte stores. Take care not to clobber the input values until after we
6327 have moved them somewhere safe. This code assumes that if the DImode
6328 scratch in operands[2] overlaps either the input value or output address
6329 in some way, then that value must die in this insn (we absolutely need
6330 two scratch registers for some corner cases). */
6331 void
6332 arm_reload_out_hi (rtx *operands)
6334 rtx ref = operands[0];
6335 rtx outval = operands[1];
6336 rtx base, scratch;
6337 HOST_WIDE_INT offset = 0;
6339 if (GET_CODE (ref) == SUBREG)
6341 offset = SUBREG_BYTE (ref);
6342 ref = SUBREG_REG (ref);
6345 if (GET_CODE (ref) == REG)
6347 /* We have a pseudo which has been spilt onto the stack; there
6348 are two cases here: the first where there is a simple
6349 stack-slot replacement and a second where the stack-slot is
6350 out of range, or is used as a subreg. */
6351 if (reg_equiv_mem[REGNO (ref)])
6353 ref = reg_equiv_mem[REGNO (ref)];
6354 base = find_replacement (&XEXP (ref, 0));
6356 else
6357 /* The slot is out of range, or was dressed up in a SUBREG. */
6358 base = reg_equiv_address[REGNO (ref)];
6360 else
6361 base = find_replacement (&XEXP (ref, 0));
6363 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6365 /* Handle the case where the address is too complex to be offset by 1. */
6366 if (GET_CODE (base) == MINUS
6367 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6369 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6371 /* Be careful not to destroy OUTVAL. */
6372 if (reg_overlap_mentioned_p (base_plus, outval))
6374 /* Updating base_plus might destroy outval, see if we can
6375 swap the scratch and base_plus. */
6376 if (!reg_overlap_mentioned_p (scratch, outval))
6378 rtx tmp = scratch;
6379 scratch = base_plus;
6380 base_plus = tmp;
6382 else
6384 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6386 /* Be conservative and copy OUTVAL into the scratch now,
6387 this should only be necessary if outval is a subreg
6388 of something larger than a word. */
6389 /* XXX Might this clobber base? I can't see how it can,
6390 since scratch is known to overlap with OUTVAL, and
6391 must be wider than a word. */
6392 emit_insn (gen_movhi (scratch_hi, outval));
6393 outval = scratch_hi;
6397 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6398 base = base_plus;
6400 else if (GET_CODE (base) == PLUS)
6402 /* The addend must be CONST_INT, or we would have dealt with it above. */
6403 HOST_WIDE_INT hi, lo;
6405 offset += INTVAL (XEXP (base, 1));
6406 base = XEXP (base, 0);
6408 /* Rework the address into a legal sequence of insns. */
6409 /* Valid range for lo is -4095 -> 4095 */
6410 lo = (offset >= 0
6411 ? (offset & 0xfff)
6412 : -((-offset) & 0xfff));
6414 /* Corner case, if lo is the max offset then we would be out of range
6415 once we have added the additional 1 below, so bump the msb into the
6416 pre-loading insn(s). */
6417 if (lo == 4095)
6418 lo &= 0x7ff;
6420 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6421 ^ (HOST_WIDE_INT) 0x80000000)
6422 - (HOST_WIDE_INT) 0x80000000);
6424 if (hi + lo != offset)
6425 abort ();
6427 if (hi != 0)
6429 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6431 /* Be careful not to destroy OUTVAL. */
6432 if (reg_overlap_mentioned_p (base_plus, outval))
6434 /* Updating base_plus might destroy outval, see if we
6435 can swap the scratch and base_plus. */
6436 if (!reg_overlap_mentioned_p (scratch, outval))
6438 rtx tmp = scratch;
6439 scratch = base_plus;
6440 base_plus = tmp;
6442 else
6444 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6446 /* Be conservative and copy outval into scratch now,
6447 this should only be necessary if outval is a
6448 subreg of something larger than a word. */
6449 /* XXX Might this clobber base? I can't see how it
6450 can, since scratch is known to overlap with
6451 outval. */
6452 emit_insn (gen_movhi (scratch_hi, outval));
6453 outval = scratch_hi;
6457 /* Get the base address; addsi3 knows how to handle constants
6458 that require more than one insn. */
6459 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6460 base = base_plus;
6461 offset = lo;
6465 if (BYTES_BIG_ENDIAN)
6467 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6468 plus_constant (base, offset + 1)),
6469 gen_lowpart (QImode, outval)));
6470 emit_insn (gen_lshrsi3 (scratch,
6471 gen_rtx_SUBREG (SImode, outval, 0),
6472 GEN_INT (8)));
6473 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6474 gen_lowpart (QImode, scratch)));
6476 else
6478 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6479 gen_lowpart (QImode, outval)));
6480 emit_insn (gen_lshrsi3 (scratch,
6481 gen_rtx_SUBREG (SImode, outval, 0),
6482 GEN_INT (8)));
6483 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6484 plus_constant (base, offset + 1)),
6485 gen_lowpart (QImode, scratch)));
6489 /* Print a symbolic form of X to the debug file, F. */
6490 static void
6491 arm_print_value (FILE *f, rtx x)
6493 switch (GET_CODE (x))
6495 case CONST_INT:
6496 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6497 return;
6499 case CONST_DOUBLE:
6500 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6501 return;
6503 case CONST_VECTOR:
6505 int i;
6507 fprintf (f, "<");
6508 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6510 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6511 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6512 fputc (',', f);
6514 fprintf (f, ">");
6516 return;
6518 case CONST_STRING:
6519 fprintf (f, "\"%s\"", XSTR (x, 0));
6520 return;
6522 case SYMBOL_REF:
6523 fprintf (f, "`%s'", XSTR (x, 0));
6524 return;
6526 case LABEL_REF:
6527 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6528 return;
6530 case CONST:
6531 arm_print_value (f, XEXP (x, 0));
6532 return;
6534 case PLUS:
6535 arm_print_value (f, XEXP (x, 0));
6536 fprintf (f, "+");
6537 arm_print_value (f, XEXP (x, 1));
6538 return;
6540 case PC:
6541 fprintf (f, "pc");
6542 return;
6544 default:
6545 fprintf (f, "????");
6546 return;
6550 /* Routines for manipulation of the constant pool. */
6552 /* Arm instructions cannot load a large constant directly into a
6553 register; they have to come from a pc relative load. The constant
6554 must therefore be placed in the addressable range of the pc
6555 relative load. Depending on the precise pc relative load
6556 instruction the range is somewhere between 256 bytes and 4k. This
6557 means that we often have to dump a constant inside a function, and
6558 generate code to branch around it.
6560 It is important to minimize this, since the branches will slow
6561 things down and make the code larger.
6563 Normally we can hide the table after an existing unconditional
6564 branch so that there is no interruption of the flow, but in the
6565 worst case the code looks like this:
6567 ldr rn, L1
6569 b L2
6570 align
6571 L1: .long value
6575 ldr rn, L3
6577 b L4
6578 align
6579 L3: .long value
6583 We fix this by performing a scan after scheduling, which notices
6584 which instructions need to have their operands fetched from the
6585 constant table and builds the table.
6587 The algorithm starts by building a table of all the constants that
6588 need fixing up and all the natural barriers in the function (places
6589 where a constant table can be dropped without breaking the flow).
6590 For each fixup we note how far the pc-relative replacement will be
6591 able to reach and the offset of the instruction into the function.
6593 Having built the table we then group the fixes together to form
6594 tables that are as large as possible (subject to addressing
6595 constraints) and emit each table of constants after the last
6596 barrier that is within range of all the instructions in the group.
6597 If a group does not contain a barrier, then we forcibly create one
6598 by inserting a jump instruction into the flow. Once the table has
6599 been inserted, the insns are then modified to reference the
6600 relevant entry in the pool.
6602 Possible enhancements to the algorithm (not implemented) are:
6604 1) For some processors and object formats, there may be benefit in
6605 aligning the pools to the start of cache lines; this alignment
6606 would need to be taken into account when calculating addressability
6607 of a pool. */
6609 /* These typedefs are located at the start of this file, so that
6610 they can be used in the prototypes there. This comment is to
6611 remind readers of that fact so that the following structures
6612 can be understood more easily.
6614 typedef struct minipool_node Mnode;
6615 typedef struct minipool_fixup Mfix; */
6617 struct minipool_node
6619 /* Doubly linked chain of entries. */
6620 Mnode * next;
6621 Mnode * prev;
6622 /* The maximum offset into the code that this entry can be placed. While
6623 pushing fixes for forward references, all entries are sorted in order
6624 of increasing max_address. */
6625 HOST_WIDE_INT max_address;
6626 /* Similarly for an entry inserted for a backwards ref. */
6627 HOST_WIDE_INT min_address;
6628 /* The number of fixes referencing this entry. This can become zero
6629 if we "unpush" an entry. In this case we ignore the entry when we
6630 come to emit the code. */
6631 int refcount;
6632 /* The offset from the start of the minipool. */
6633 HOST_WIDE_INT offset;
6634 /* The value in table. */
6635 rtx value;
6636 /* The mode of value. */
6637 enum machine_mode mode;
6638 /* The size of the value. With iWMMXt enabled
6639 sizes > 4 also imply an alignment of 8-bytes. */
6640 int fix_size;
6643 struct minipool_fixup
6645 Mfix * next;
6646 rtx insn;
6647 HOST_WIDE_INT address;
6648 rtx * loc;
6649 enum machine_mode mode;
6650 int fix_size;
6651 rtx value;
6652 Mnode * minipool;
6653 HOST_WIDE_INT forwards;
6654 HOST_WIDE_INT backwards;
6657 /* Fixes less than a word need padding out to a word boundary. */
6658 #define MINIPOOL_FIX_SIZE(mode) \
6659 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
6661 static Mnode * minipool_vector_head;
6662 static Mnode * minipool_vector_tail;
6663 static rtx minipool_vector_label;
6665 /* The linked list of all minipool fixes required for this function. */
6666 Mfix * minipool_fix_head;
6667 Mfix * minipool_fix_tail;
6668 /* The fix entry for the current minipool, once it has been placed. */
6669 Mfix * minipool_barrier;
6671 /* Determines if INSN is the start of a jump table. Returns the end
6672 of the TABLE or NULL_RTX. */
6673 static rtx
6674 is_jump_table (rtx insn)
6676 rtx table;
6678 if (GET_CODE (insn) == JUMP_INSN
6679 && JUMP_LABEL (insn) != NULL
6680 && ((table = next_real_insn (JUMP_LABEL (insn)))
6681 == next_real_insn (insn))
6682 && table != NULL
6683 && GET_CODE (table) == JUMP_INSN
6684 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6685 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6686 return table;
6688 return NULL_RTX;
6691 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6692 #define JUMP_TABLES_IN_TEXT_SECTION 0
6693 #endif
6695 static HOST_WIDE_INT
6696 get_jump_table_size (rtx insn)
6698 /* ADDR_VECs only take room if read-only data does into the text
6699 section. */
6700 if (JUMP_TABLES_IN_TEXT_SECTION
6701 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6702 || 1
6703 #endif
6706 rtx body = PATTERN (insn);
6707 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6709 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6712 return 0;
6715 /* Move a minipool fix MP from its current location to before MAX_MP.
6716 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6717 constraints may need updating. */
6718 static Mnode *
6719 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6720 HOST_WIDE_INT max_address)
6722 /* This should never be true and the code below assumes these are
6723 different. */
6724 if (mp == max_mp)
6725 abort ();
6727 if (max_mp == NULL)
6729 if (max_address < mp->max_address)
6730 mp->max_address = max_address;
6732 else
6734 if (max_address > max_mp->max_address - mp->fix_size)
6735 mp->max_address = max_mp->max_address - mp->fix_size;
6736 else
6737 mp->max_address = max_address;
6739 /* Unlink MP from its current position. Since max_mp is non-null,
6740 mp->prev must be non-null. */
6741 mp->prev->next = mp->next;
6742 if (mp->next != NULL)
6743 mp->next->prev = mp->prev;
6744 else
6745 minipool_vector_tail = mp->prev;
6747 /* Re-insert it before MAX_MP. */
6748 mp->next = max_mp;
6749 mp->prev = max_mp->prev;
6750 max_mp->prev = mp;
6752 if (mp->prev != NULL)
6753 mp->prev->next = mp;
6754 else
6755 minipool_vector_head = mp;
6758 /* Save the new entry. */
6759 max_mp = mp;
6761 /* Scan over the preceding entries and adjust their addresses as
6762 required. */
6763 while (mp->prev != NULL
6764 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6766 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6767 mp = mp->prev;
6770 return max_mp;
6773 /* Add a constant to the minipool for a forward reference. Returns the
6774 node added or NULL if the constant will not fit in this pool. */
6775 static Mnode *
6776 add_minipool_forward_ref (Mfix *fix)
6778 /* If set, max_mp is the first pool_entry that has a lower
6779 constraint than the one we are trying to add. */
6780 Mnode * max_mp = NULL;
6781 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6782 Mnode * mp;
6784 /* If this fix's address is greater than the address of the first
6785 entry, then we can't put the fix in this pool. We subtract the
6786 size of the current fix to ensure that if the table is fully
6787 packed we still have enough room to insert this value by suffling
6788 the other fixes forwards. */
6789 if (minipool_vector_head &&
6790 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6791 return NULL;
6793 /* Scan the pool to see if a constant with the same value has
6794 already been added. While we are doing this, also note the
6795 location where we must insert the constant if it doesn't already
6796 exist. */
6797 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6799 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6800 && fix->mode == mp->mode
6801 && (GET_CODE (fix->value) != CODE_LABEL
6802 || (CODE_LABEL_NUMBER (fix->value)
6803 == CODE_LABEL_NUMBER (mp->value)))
6804 && rtx_equal_p (fix->value, mp->value))
6806 /* More than one fix references this entry. */
6807 mp->refcount++;
6808 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6811 /* Note the insertion point if necessary. */
6812 if (max_mp == NULL
6813 && mp->max_address > max_address)
6814 max_mp = mp;
6816 /* If we are inserting an 8-bytes aligned quantity and
6817 we have not already found an insertion point, then
6818 make sure that all such 8-byte aligned quantities are
6819 placed at the start of the pool. */
6820 if (ARM_DOUBLEWORD_ALIGN
6821 && max_mp == NULL
6822 && fix->fix_size == 8
6823 && mp->fix_size != 8)
6825 max_mp = mp;
6826 max_address = mp->max_address;
6830 /* The value is not currently in the minipool, so we need to create
6831 a new entry for it. If MAX_MP is NULL, the entry will be put on
6832 the end of the list since the placement is less constrained than
6833 any existing entry. Otherwise, we insert the new fix before
6834 MAX_MP and, if necessary, adjust the constraints on the other
6835 entries. */
6836 mp = xmalloc (sizeof (* mp));
6837 mp->fix_size = fix->fix_size;
6838 mp->mode = fix->mode;
6839 mp->value = fix->value;
6840 mp->refcount = 1;
6841 /* Not yet required for a backwards ref. */
6842 mp->min_address = -65536;
6844 if (max_mp == NULL)
6846 mp->max_address = max_address;
6847 mp->next = NULL;
6848 mp->prev = minipool_vector_tail;
6850 if (mp->prev == NULL)
6852 minipool_vector_head = mp;
6853 minipool_vector_label = gen_label_rtx ();
6855 else
6856 mp->prev->next = mp;
6858 minipool_vector_tail = mp;
6860 else
6862 if (max_address > max_mp->max_address - mp->fix_size)
6863 mp->max_address = max_mp->max_address - mp->fix_size;
6864 else
6865 mp->max_address = max_address;
6867 mp->next = max_mp;
6868 mp->prev = max_mp->prev;
6869 max_mp->prev = mp;
6870 if (mp->prev != NULL)
6871 mp->prev->next = mp;
6872 else
6873 minipool_vector_head = mp;
6876 /* Save the new entry. */
6877 max_mp = mp;
6879 /* Scan over the preceding entries and adjust their addresses as
6880 required. */
6881 while (mp->prev != NULL
6882 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6884 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6885 mp = mp->prev;
6888 return max_mp;
6891 static Mnode *
6892 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6893 HOST_WIDE_INT min_address)
6895 HOST_WIDE_INT offset;
6897 /* This should never be true, and the code below assumes these are
6898 different. */
6899 if (mp == min_mp)
6900 abort ();
6902 if (min_mp == NULL)
6904 if (min_address > mp->min_address)
6905 mp->min_address = min_address;
6907 else
6909 /* We will adjust this below if it is too loose. */
6910 mp->min_address = min_address;
6912 /* Unlink MP from its current position. Since min_mp is non-null,
6913 mp->next must be non-null. */
6914 mp->next->prev = mp->prev;
6915 if (mp->prev != NULL)
6916 mp->prev->next = mp->next;
6917 else
6918 minipool_vector_head = mp->next;
6920 /* Reinsert it after MIN_MP. */
6921 mp->prev = min_mp;
6922 mp->next = min_mp->next;
6923 min_mp->next = mp;
6924 if (mp->next != NULL)
6925 mp->next->prev = mp;
6926 else
6927 minipool_vector_tail = mp;
6930 min_mp = mp;
6932 offset = 0;
6933 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6935 mp->offset = offset;
6936 if (mp->refcount > 0)
6937 offset += mp->fix_size;
6939 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6940 mp->next->min_address = mp->min_address + mp->fix_size;
6943 return min_mp;
6946 /* Add a constant to the minipool for a backward reference. Returns the
6947 node added or NULL if the constant will not fit in this pool.
6949 Note that the code for insertion for a backwards reference can be
6950 somewhat confusing because the calculated offsets for each fix do
6951 not take into account the size of the pool (which is still under
6952 construction. */
6953 static Mnode *
6954 add_minipool_backward_ref (Mfix *fix)
6956 /* If set, min_mp is the last pool_entry that has a lower constraint
6957 than the one we are trying to add. */
6958 Mnode *min_mp = NULL;
6959 /* This can be negative, since it is only a constraint. */
6960 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6961 Mnode *mp;
6963 /* If we can't reach the current pool from this insn, or if we can't
6964 insert this entry at the end of the pool without pushing other
6965 fixes out of range, then we don't try. This ensures that we
6966 can't fail later on. */
6967 if (min_address >= minipool_barrier->address
6968 || (minipool_vector_tail->min_address + fix->fix_size
6969 >= minipool_barrier->address))
6970 return NULL;
6972 /* Scan the pool to see if a constant with the same value has
6973 already been added. While we are doing this, also note the
6974 location where we must insert the constant if it doesn't already
6975 exist. */
6976 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6978 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6979 && fix->mode == mp->mode
6980 && (GET_CODE (fix->value) != CODE_LABEL
6981 || (CODE_LABEL_NUMBER (fix->value)
6982 == CODE_LABEL_NUMBER (mp->value)))
6983 && rtx_equal_p (fix->value, mp->value)
6984 /* Check that there is enough slack to move this entry to the
6985 end of the table (this is conservative). */
6986 && (mp->max_address
6987 > (minipool_barrier->address
6988 + minipool_vector_tail->offset
6989 + minipool_vector_tail->fix_size)))
6991 mp->refcount++;
6992 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6995 if (min_mp != NULL)
6996 mp->min_address += fix->fix_size;
6997 else
6999 /* Note the insertion point if necessary. */
7000 if (mp->min_address < min_address)
7002 /* For now, we do not allow the insertion of 8-byte alignment
7003 requiring nodes anywhere but at the start of the pool. */
7004 if (ARM_DOUBLEWORD_ALIGN
7005 && fix->fix_size == 8 && mp->fix_size != 8)
7006 return NULL;
7007 else
7008 min_mp = mp;
7010 else if (mp->max_address
7011 < minipool_barrier->address + mp->offset + fix->fix_size)
7013 /* Inserting before this entry would push the fix beyond
7014 its maximum address (which can happen if we have
7015 re-located a forwards fix); force the new fix to come
7016 after it. */
7017 min_mp = mp;
7018 min_address = mp->min_address + fix->fix_size;
7020 /* If we are inserting an 8-bytes aligned quantity and
7021 we have not already found an insertion point, then
7022 make sure that all such 8-byte aligned quantities are
7023 placed at the start of the pool. */
7024 else if (ARM_DOUBLEWORD_ALIGN
7025 && min_mp == NULL
7026 && fix->fix_size == 8
7027 && mp->fix_size < 8)
7029 min_mp = mp;
7030 min_address = mp->min_address + fix->fix_size;
7035 /* We need to create a new entry. */
7036 mp = xmalloc (sizeof (* mp));
7037 mp->fix_size = fix->fix_size;
7038 mp->mode = fix->mode;
7039 mp->value = fix->value;
7040 mp->refcount = 1;
7041 mp->max_address = minipool_barrier->address + 65536;
7043 mp->min_address = min_address;
7045 if (min_mp == NULL)
7047 mp->prev = NULL;
7048 mp->next = minipool_vector_head;
7050 if (mp->next == NULL)
7052 minipool_vector_tail = mp;
7053 minipool_vector_label = gen_label_rtx ();
7055 else
7056 mp->next->prev = mp;
7058 minipool_vector_head = mp;
7060 else
7062 mp->next = min_mp->next;
7063 mp->prev = min_mp;
7064 min_mp->next = mp;
7066 if (mp->next != NULL)
7067 mp->next->prev = mp;
7068 else
7069 minipool_vector_tail = mp;
7072 /* Save the new entry. */
7073 min_mp = mp;
7075 if (mp->prev)
7076 mp = mp->prev;
7077 else
7078 mp->offset = 0;
7080 /* Scan over the following entries and adjust their offsets. */
7081 while (mp->next != NULL)
7083 if (mp->next->min_address < mp->min_address + mp->fix_size)
7084 mp->next->min_address = mp->min_address + mp->fix_size;
7086 if (mp->refcount)
7087 mp->next->offset = mp->offset + mp->fix_size;
7088 else
7089 mp->next->offset = mp->offset;
7091 mp = mp->next;
7094 return min_mp;
7097 static void
7098 assign_minipool_offsets (Mfix *barrier)
7100 HOST_WIDE_INT offset = 0;
7101 Mnode *mp;
7103 minipool_barrier = barrier;
7105 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7107 mp->offset = offset;
7109 if (mp->refcount > 0)
7110 offset += mp->fix_size;
7114 /* Output the literal table */
7115 static void
7116 dump_minipool (rtx scan)
7118 Mnode * mp;
7119 Mnode * nmp;
7120 int align64 = 0;
7122 if (ARM_DOUBLEWORD_ALIGN)
7123 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7124 if (mp->refcount > 0 && mp->fix_size == 8)
7126 align64 = 1;
7127 break;
7130 if (dump_file)
7131 fprintf (dump_file,
7132 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7133 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7135 scan = emit_label_after (gen_label_rtx (), scan);
7136 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7137 scan = emit_label_after (minipool_vector_label, scan);
7139 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7141 if (mp->refcount > 0)
7143 if (dump_file)
7145 fprintf (dump_file,
7146 ";; Offset %u, min %ld, max %ld ",
7147 (unsigned) mp->offset, (unsigned long) mp->min_address,
7148 (unsigned long) mp->max_address);
7149 arm_print_value (dump_file, mp->value);
7150 fputc ('\n', dump_file);
7153 switch (mp->fix_size)
7155 #ifdef HAVE_consttable_1
7156 case 1:
7157 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7158 break;
7160 #endif
7161 #ifdef HAVE_consttable_2
7162 case 2:
7163 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7164 break;
7166 #endif
7167 #ifdef HAVE_consttable_4
7168 case 4:
7169 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7170 break;
7172 #endif
7173 #ifdef HAVE_consttable_8
7174 case 8:
7175 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7176 break;
7178 #endif
7179 default:
7180 abort ();
7181 break;
7185 nmp = mp->next;
7186 free (mp);
7189 minipool_vector_head = minipool_vector_tail = NULL;
7190 scan = emit_insn_after (gen_consttable_end (), scan);
7191 scan = emit_barrier_after (scan);
7194 /* Return the cost of forcibly inserting a barrier after INSN. */
7195 static int
7196 arm_barrier_cost (rtx insn)
7198 /* Basing the location of the pool on the loop depth is preferable,
7199 but at the moment, the basic block information seems to be
7200 corrupt by this stage of the compilation. */
7201 int base_cost = 50;
7202 rtx next = next_nonnote_insn (insn);
7204 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7205 base_cost -= 20;
7207 switch (GET_CODE (insn))
7209 case CODE_LABEL:
7210 /* It will always be better to place the table before the label, rather
7211 than after it. */
7212 return 50;
7214 case INSN:
7215 case CALL_INSN:
7216 return base_cost;
7218 case JUMP_INSN:
7219 return base_cost - 10;
7221 default:
7222 return base_cost + 10;
7226 /* Find the best place in the insn stream in the range
7227 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7228 Create the barrier by inserting a jump and add a new fix entry for
7229 it. */
7230 static Mfix *
7231 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7233 HOST_WIDE_INT count = 0;
7234 rtx barrier;
7235 rtx from = fix->insn;
7236 rtx selected = from;
7237 int selected_cost;
7238 HOST_WIDE_INT selected_address;
7239 Mfix * new_fix;
7240 HOST_WIDE_INT max_count = max_address - fix->address;
7241 rtx label = gen_label_rtx ();
7243 selected_cost = arm_barrier_cost (from);
7244 selected_address = fix->address;
7246 while (from && count < max_count)
7248 rtx tmp;
7249 int new_cost;
7251 /* This code shouldn't have been called if there was a natural barrier
7252 within range. */
7253 if (GET_CODE (from) == BARRIER)
7254 abort ();
7256 /* Count the length of this insn. */
7257 count += get_attr_length (from);
7259 /* If there is a jump table, add its length. */
7260 tmp = is_jump_table (from);
7261 if (tmp != NULL)
7263 count += get_jump_table_size (tmp);
7265 /* Jump tables aren't in a basic block, so base the cost on
7266 the dispatch insn. If we select this location, we will
7267 still put the pool after the table. */
7268 new_cost = arm_barrier_cost (from);
7270 if (count < max_count && new_cost <= selected_cost)
7272 selected = tmp;
7273 selected_cost = new_cost;
7274 selected_address = fix->address + count;
7277 /* Continue after the dispatch table. */
7278 from = NEXT_INSN (tmp);
7279 continue;
7282 new_cost = arm_barrier_cost (from);
7284 if (count < max_count && new_cost <= selected_cost)
7286 selected = from;
7287 selected_cost = new_cost;
7288 selected_address = fix->address + count;
7291 from = NEXT_INSN (from);
7294 /* Create a new JUMP_INSN that branches around a barrier. */
7295 from = emit_jump_insn_after (gen_jump (label), selected);
7296 JUMP_LABEL (from) = label;
7297 barrier = emit_barrier_after (from);
7298 emit_label_after (label, barrier);
7300 /* Create a minipool barrier entry for the new barrier. */
7301 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7302 new_fix->insn = barrier;
7303 new_fix->address = selected_address;
7304 new_fix->next = fix->next;
7305 fix->next = new_fix;
7307 return new_fix;
7310 /* Record that there is a natural barrier in the insn stream at
7311 ADDRESS. */
7312 static void
7313 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7315 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7317 fix->insn = insn;
7318 fix->address = address;
7320 fix->next = NULL;
7321 if (minipool_fix_head != NULL)
7322 minipool_fix_tail->next = fix;
7323 else
7324 minipool_fix_head = fix;
7326 minipool_fix_tail = fix;
7329 /* Record INSN, which will need fixing up to load a value from the
7330 minipool. ADDRESS is the offset of the insn since the start of the
7331 function; LOC is a pointer to the part of the insn which requires
7332 fixing; VALUE is the constant that must be loaded, which is of type
7333 MODE. */
7334 static void
7335 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7336 enum machine_mode mode, rtx value)
7338 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7340 #ifdef AOF_ASSEMBLER
7341 /* PIC symbol references need to be converted into offsets into the
7342 based area. */
7343 /* XXX This shouldn't be done here. */
7344 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7345 value = aof_pic_entry (value);
7346 #endif /* AOF_ASSEMBLER */
7348 fix->insn = insn;
7349 fix->address = address;
7350 fix->loc = loc;
7351 fix->mode = mode;
7352 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7353 fix->value = value;
7354 fix->forwards = get_attr_pool_range (insn);
7355 fix->backwards = get_attr_neg_pool_range (insn);
7356 fix->minipool = NULL;
7358 /* If an insn doesn't have a range defined for it, then it isn't
7359 expecting to be reworked by this code. Better to abort now than
7360 to generate duff assembly code. */
7361 if (fix->forwards == 0 && fix->backwards == 0)
7362 abort ();
7364 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7365 So there might be an empty word before the start of the pool.
7366 Hence we reduce the forward range by 4 to allow for this
7367 possibility. */
7368 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7369 fix->forwards -= 4;
7371 if (dump_file)
7373 fprintf (dump_file,
7374 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7375 GET_MODE_NAME (mode),
7376 INSN_UID (insn), (unsigned long) address,
7377 -1 * (long)fix->backwards, (long)fix->forwards);
7378 arm_print_value (dump_file, fix->value);
7379 fprintf (dump_file, "\n");
7382 /* Add it to the chain of fixes. */
7383 fix->next = NULL;
7385 if (minipool_fix_head != NULL)
7386 minipool_fix_tail->next = fix;
7387 else
7388 minipool_fix_head = fix;
7390 minipool_fix_tail = fix;
7393 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7394 Returns the number of insns needed, or 99 if we don't know how to
7395 do it. */
7397 arm_const_double_inline_cost (rtx val)
7399 rtx lowpart, highpart;
7400 enum machine_mode mode;
7402 mode = GET_MODE (val);
7404 if (mode == VOIDmode)
7405 mode = DImode;
7407 gcc_assert (GET_MODE_SIZE (mode) == 8);
7409 lowpart = gen_lowpart (SImode, val);
7410 highpart = gen_highpart_mode (SImode, mode, val);
7412 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7413 gcc_assert (GET_CODE (highpart) == CONST_INT);
7415 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7416 NULL_RTX, NULL_RTX, 0, 0)
7417 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7418 NULL_RTX, NULL_RTX, 0, 0));
7421 /* Return true if it is worthwile to split a 64-bit constant into two
7422 32-bit operations. This is the case if optimizing for size, or
7423 if we have load delay slots, or if one 32-bit part can be done with
7424 a single data operation. */
7425 bool
7426 arm_const_double_by_parts (rtx val)
7428 enum machine_mode mode = GET_MODE (val);
7429 rtx part;
7431 if (optimize_size || arm_ld_sched)
7432 return true;
7434 if (mode == VOIDmode)
7435 mode = DImode;
7437 part = gen_highpart_mode (SImode, mode, val);
7439 gcc_assert (GET_CODE (part) == CONST_INT);
7441 if (const_ok_for_arm (INTVAL (part))
7442 || const_ok_for_arm (~INTVAL (part)))
7443 return true;
7445 part = gen_lowpart (SImode, val);
7447 gcc_assert (GET_CODE (part) == CONST_INT);
7449 if (const_ok_for_arm (INTVAL (part))
7450 || const_ok_for_arm (~INTVAL (part)))
7451 return true;
7453 return false;
7456 /* Scan INSN and note any of its operands that need fixing.
7457 If DO_PUSHES is false we do not actually push any of the fixups
7458 needed. The function returns TRUE if any fixups were needed/pushed.
7459 This is used by arm_memory_load_p() which needs to know about loads
7460 of constants that will be converted into minipool loads. */
7461 static bool
7462 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7464 bool result = false;
7465 int opno;
7467 extract_insn (insn);
7469 if (!constrain_operands (1))
7470 fatal_insn_not_found (insn);
7472 if (recog_data.n_alternatives == 0)
7473 return false;
7475 /* Fill in recog_op_alt with information about the constraints of
7476 this insn. */
7477 preprocess_constraints ();
7479 for (opno = 0; opno < recog_data.n_operands; opno++)
7481 /* Things we need to fix can only occur in inputs. */
7482 if (recog_data.operand_type[opno] != OP_IN)
7483 continue;
7485 /* If this alternative is a memory reference, then any mention
7486 of constants in this alternative is really to fool reload
7487 into allowing us to accept one there. We need to fix them up
7488 now so that we output the right code. */
7489 if (recog_op_alt[opno][which_alternative].memory_ok)
7491 rtx op = recog_data.operand[opno];
7493 if (CONSTANT_P (op))
7495 if (do_pushes)
7496 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7497 recog_data.operand_mode[opno], op);
7498 result = true;
7500 else if (GET_CODE (op) == MEM
7501 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7502 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7504 if (do_pushes)
7506 rtx cop = avoid_constant_pool_reference (op);
7508 /* Casting the address of something to a mode narrower
7509 than a word can cause avoid_constant_pool_reference()
7510 to return the pool reference itself. That's no good to
7511 us here. Lets just hope that we can use the
7512 constant pool value directly. */
7513 if (op == cop)
7514 cop = get_pool_constant (XEXP (op, 0));
7516 push_minipool_fix (insn, address,
7517 recog_data.operand_loc[opno],
7518 recog_data.operand_mode[opno], cop);
7521 result = true;
7526 return result;
7529 /* Gcc puts the pool in the wrong place for ARM, since we can only
7530 load addresses a limited distance around the pc. We do some
7531 special munging to move the constant pool values to the correct
7532 point in the code. */
7533 static void
7534 arm_reorg (void)
7536 rtx insn;
7537 HOST_WIDE_INT address = 0;
7538 Mfix * fix;
7540 minipool_fix_head = minipool_fix_tail = NULL;
7542 /* The first insn must always be a note, or the code below won't
7543 scan it properly. */
7544 insn = get_insns ();
7545 if (GET_CODE (insn) != NOTE)
7546 abort ();
7548 /* Scan all the insns and record the operands that will need fixing. */
7549 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7551 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7552 && (arm_cirrus_insn_p (insn)
7553 || GET_CODE (insn) == JUMP_INSN
7554 || arm_memory_load_p (insn)))
7555 cirrus_reorg (insn);
7557 if (GET_CODE (insn) == BARRIER)
7558 push_minipool_barrier (insn, address);
7559 else if (INSN_P (insn))
7561 rtx table;
7563 note_invalid_constants (insn, address, true);
7564 address += get_attr_length (insn);
7566 /* If the insn is a vector jump, add the size of the table
7567 and skip the table. */
7568 if ((table = is_jump_table (insn)) != NULL)
7570 address += get_jump_table_size (table);
7571 insn = table;
7576 fix = minipool_fix_head;
7578 /* Now scan the fixups and perform the required changes. */
7579 while (fix)
7581 Mfix * ftmp;
7582 Mfix * fdel;
7583 Mfix * last_added_fix;
7584 Mfix * last_barrier = NULL;
7585 Mfix * this_fix;
7587 /* Skip any further barriers before the next fix. */
7588 while (fix && GET_CODE (fix->insn) == BARRIER)
7589 fix = fix->next;
7591 /* No more fixes. */
7592 if (fix == NULL)
7593 break;
7595 last_added_fix = NULL;
7597 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7599 if (GET_CODE (ftmp->insn) == BARRIER)
7601 if (ftmp->address >= minipool_vector_head->max_address)
7602 break;
7604 last_barrier = ftmp;
7606 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7607 break;
7609 last_added_fix = ftmp; /* Keep track of the last fix added. */
7612 /* If we found a barrier, drop back to that; any fixes that we
7613 could have reached but come after the barrier will now go in
7614 the next mini-pool. */
7615 if (last_barrier != NULL)
7617 /* Reduce the refcount for those fixes that won't go into this
7618 pool after all. */
7619 for (fdel = last_barrier->next;
7620 fdel && fdel != ftmp;
7621 fdel = fdel->next)
7623 fdel->minipool->refcount--;
7624 fdel->minipool = NULL;
7627 ftmp = last_barrier;
7629 else
7631 /* ftmp is first fix that we can't fit into this pool and
7632 there no natural barriers that we could use. Insert a
7633 new barrier in the code somewhere between the previous
7634 fix and this one, and arrange to jump around it. */
7635 HOST_WIDE_INT max_address;
7637 /* The last item on the list of fixes must be a barrier, so
7638 we can never run off the end of the list of fixes without
7639 last_barrier being set. */
7640 if (ftmp == NULL)
7641 abort ();
7643 max_address = minipool_vector_head->max_address;
7644 /* Check that there isn't another fix that is in range that
7645 we couldn't fit into this pool because the pool was
7646 already too large: we need to put the pool before such an
7647 instruction. */
7648 if (ftmp->address < max_address)
7649 max_address = ftmp->address;
7651 last_barrier = create_fix_barrier (last_added_fix, max_address);
7654 assign_minipool_offsets (last_barrier);
7656 while (ftmp)
7658 if (GET_CODE (ftmp->insn) != BARRIER
7659 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7660 == NULL))
7661 break;
7663 ftmp = ftmp->next;
7666 /* Scan over the fixes we have identified for this pool, fixing them
7667 up and adding the constants to the pool itself. */
7668 for (this_fix = fix; this_fix && ftmp != this_fix;
7669 this_fix = this_fix->next)
7670 if (GET_CODE (this_fix->insn) != BARRIER)
7672 rtx addr
7673 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7674 minipool_vector_label),
7675 this_fix->minipool->offset);
7676 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7679 dump_minipool (last_barrier->insn);
7680 fix = ftmp;
7683 /* From now on we must synthesize any constants that we can't handle
7684 directly. This can happen if the RTL gets split during final
7685 instruction generation. */
7686 after_arm_reorg = 1;
7688 /* Free the minipool memory. */
7689 obstack_free (&minipool_obstack, minipool_startobj);
7692 /* Routines to output assembly language. */
7694 /* If the rtx is the correct value then return the string of the number.
7695 In this way we can ensure that valid double constants are generated even
7696 when cross compiling. */
7697 const char *
7698 fp_immediate_constant (rtx x)
7700 REAL_VALUE_TYPE r;
7701 int i;
7703 if (!fp_consts_inited)
7704 init_fp_table ();
7706 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7707 for (i = 0; i < 8; i++)
7708 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7709 return strings_fp[i];
7711 abort ();
7714 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7715 static const char *
7716 fp_const_from_val (REAL_VALUE_TYPE *r)
7718 int i;
7720 if (!fp_consts_inited)
7721 init_fp_table ();
7723 for (i = 0; i < 8; i++)
7724 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7725 return strings_fp[i];
7727 abort ();
7730 /* Output the operands of a LDM/STM instruction to STREAM.
7731 MASK is the ARM register set mask of which only bits 0-15 are important.
7732 REG is the base register, either the frame pointer or the stack pointer,
7733 INSTR is the possibly suffixed load or store instruction. */
7735 static void
7736 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7737 unsigned long mask)
7739 unsigned i;
7740 bool not_first = FALSE;
7742 fputc ('\t', stream);
7743 asm_fprintf (stream, instr, reg);
7744 fputs (", {", stream);
7746 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7747 if (mask & (1 << i))
7749 if (not_first)
7750 fprintf (stream, ", ");
7752 asm_fprintf (stream, "%r", i);
7753 not_first = TRUE;
7756 fprintf (stream, "}\n");
7760 /* Output a FLDMX instruction to STREAM.
7761 BASE if the register containing the address.
7762 REG and COUNT specify the register range.
7763 Extra registers may be added to avoid hardware bugs. */
7765 static void
7766 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7768 int i;
7770 /* Workaround ARM10 VFPr1 bug. */
7771 if (count == 2 && !arm_arch6)
7773 if (reg == 15)
7774 reg--;
7775 count++;
7778 fputc ('\t', stream);
7779 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7781 for (i = reg; i < reg + count; i++)
7783 if (i > reg)
7784 fputs (", ", stream);
7785 asm_fprintf (stream, "d%d", i);
7787 fputs ("}\n", stream);
7792 /* Output the assembly for a store multiple. */
7794 const char *
7795 vfp_output_fstmx (rtx * operands)
7797 char pattern[100];
7798 int p;
7799 int base;
7800 int i;
7802 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7803 p = strlen (pattern);
7805 if (GET_CODE (operands[1]) != REG)
7806 abort ();
7808 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7809 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7811 p += sprintf (&pattern[p], ", d%d", base + i);
7813 strcpy (&pattern[p], "}");
7815 output_asm_insn (pattern, operands);
7816 return "";
7820 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
7821 number of bytes pushed. */
7823 static int
7824 vfp_emit_fstmx (int base_reg, int count)
7826 rtx par;
7827 rtx dwarf;
7828 rtx tmp, reg;
7829 int i;
7831 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
7832 register pairs are stored by a store multiple insn. We avoid this
7833 by pushing an extra pair. */
7834 if (count == 2 && !arm_arch6)
7836 if (base_reg == LAST_VFP_REGNUM - 3)
7837 base_reg -= 2;
7838 count++;
7841 /* ??? The frame layout is implementation defined. We describe
7842 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7843 We really need some way of representing the whole block so that the
7844 unwinder can figure it out at runtime. */
7845 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7846 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7848 reg = gen_rtx_REG (DFmode, base_reg);
7849 base_reg += 2;
7851 XVECEXP (par, 0, 0)
7852 = gen_rtx_SET (VOIDmode,
7853 gen_rtx_MEM (BLKmode,
7854 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7855 gen_rtx_UNSPEC (BLKmode,
7856 gen_rtvec (1, reg),
7857 UNSPEC_PUSH_MULT));
7859 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7860 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7861 GEN_INT (-(count * 8 + 4))));
7862 RTX_FRAME_RELATED_P (tmp) = 1;
7863 XVECEXP (dwarf, 0, 0) = tmp;
7865 tmp = gen_rtx_SET (VOIDmode,
7866 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7867 reg);
7868 RTX_FRAME_RELATED_P (tmp) = 1;
7869 XVECEXP (dwarf, 0, 1) = tmp;
7871 for (i = 1; i < count; i++)
7873 reg = gen_rtx_REG (DFmode, base_reg);
7874 base_reg += 2;
7875 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7877 tmp = gen_rtx_SET (VOIDmode,
7878 gen_rtx_MEM (DFmode,
7879 gen_rtx_PLUS (SImode,
7880 stack_pointer_rtx,
7881 GEN_INT (i * 8))),
7882 reg);
7883 RTX_FRAME_RELATED_P (tmp) = 1;
7884 XVECEXP (dwarf, 0, i + 1) = tmp;
7887 par = emit_insn (par);
7888 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7889 REG_NOTES (par));
7890 RTX_FRAME_RELATED_P (par) = 1;
7892 return count * 8 + 4;
7896 /* Output a 'call' insn. */
7897 const char *
7898 output_call (rtx *operands)
7900 if (arm_arch5)
7901 abort (); /* Patterns should call blx <reg> directly. */
7903 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7904 if (REGNO (operands[0]) == LR_REGNUM)
7906 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7907 output_asm_insn ("mov%?\t%0, %|lr", operands);
7910 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7912 if (TARGET_INTERWORK || arm_arch4t)
7913 output_asm_insn ("bx%?\t%0", operands);
7914 else
7915 output_asm_insn ("mov%?\t%|pc, %0", operands);
7917 return "";
7920 /* Output a 'call' insn that is a reference in memory. */
7921 const char *
7922 output_call_mem (rtx *operands)
7924 if (TARGET_INTERWORK && !arm_arch5)
7926 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7927 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7928 output_asm_insn ("bx%?\t%|ip", operands);
7930 else if (regno_use_in (LR_REGNUM, operands[0]))
7932 /* LR is used in the memory address. We load the address in the
7933 first instruction. It's safe to use IP as the target of the
7934 load since the call will kill it anyway. */
7935 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7936 if (arm_arch5)
7937 output_asm_insn ("blx%?\t%|ip", operands);
7938 else
7940 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7941 if (arm_arch4t)
7942 output_asm_insn ("bx%?\t%|ip", operands);
7943 else
7944 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7947 else
7949 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7950 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7953 return "";
7957 /* Output a move from arm registers to an fpa registers.
7958 OPERANDS[0] is an fpa register.
7959 OPERANDS[1] is the first registers of an arm register pair. */
7960 const char *
7961 output_mov_long_double_fpa_from_arm (rtx *operands)
7963 int arm_reg0 = REGNO (operands[1]);
7964 rtx ops[3];
7966 if (arm_reg0 == IP_REGNUM)
7967 abort ();
7969 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7970 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7971 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7973 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7974 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7976 return "";
7979 /* Output a move from an fpa register to arm registers.
7980 OPERANDS[0] is the first registers of an arm register pair.
7981 OPERANDS[1] is an fpa register. */
7982 const char *
7983 output_mov_long_double_arm_from_fpa (rtx *operands)
7985 int arm_reg0 = REGNO (operands[0]);
7986 rtx ops[3];
7988 if (arm_reg0 == IP_REGNUM)
7989 abort ();
7991 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7992 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7993 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7995 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7996 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7997 return "";
8000 /* Output a move from arm registers to arm registers of a long double
8001 OPERANDS[0] is the destination.
8002 OPERANDS[1] is the source. */
8003 const char *
8004 output_mov_long_double_arm_from_arm (rtx *operands)
8006 /* We have to be careful here because the two might overlap. */
8007 int dest_start = REGNO (operands[0]);
8008 int src_start = REGNO (operands[1]);
8009 rtx ops[2];
8010 int i;
8012 if (dest_start < src_start)
8014 for (i = 0; i < 3; i++)
8016 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8017 ops[1] = gen_rtx_REG (SImode, src_start + i);
8018 output_asm_insn ("mov%?\t%0, %1", ops);
8021 else
8023 for (i = 2; i >= 0; i--)
8025 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8026 ops[1] = gen_rtx_REG (SImode, src_start + i);
8027 output_asm_insn ("mov%?\t%0, %1", ops);
8031 return "";
8035 /* Output a move from arm registers to an fpa registers.
8036 OPERANDS[0] is an fpa register.
8037 OPERANDS[1] is the first registers of an arm register pair. */
8038 const char *
8039 output_mov_double_fpa_from_arm (rtx *operands)
8041 int arm_reg0 = REGNO (operands[1]);
8042 rtx ops[2];
8044 if (arm_reg0 == IP_REGNUM)
8045 abort ();
8047 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8048 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8049 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8050 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8051 return "";
8054 /* Output a move from an fpa register to arm registers.
8055 OPERANDS[0] is the first registers of an arm register pair.
8056 OPERANDS[1] is an fpa register. */
8057 const char *
8058 output_mov_double_arm_from_fpa (rtx *operands)
8060 int arm_reg0 = REGNO (operands[0]);
8061 rtx ops[2];
8063 if (arm_reg0 == IP_REGNUM)
8064 abort ();
8066 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8067 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8068 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8069 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8070 return "";
8073 /* Output a move between double words.
8074 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8075 or MEM<-REG and all MEMs must be offsettable addresses. */
8076 const char *
8077 output_move_double (rtx *operands)
8079 enum rtx_code code0 = GET_CODE (operands[0]);
8080 enum rtx_code code1 = GET_CODE (operands[1]);
8081 rtx otherops[3];
8083 if (code0 == REG)
8085 int reg0 = REGNO (operands[0]);
8087 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8089 if (code1 == MEM)
8091 switch (GET_CODE (XEXP (operands[1], 0)))
8093 case REG:
8094 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8095 break;
8097 case PRE_INC:
8098 if (!TARGET_LDRD)
8099 abort (); /* Should never happen now. */
8100 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8101 break;
8103 case PRE_DEC:
8104 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8105 break;
8107 case POST_INC:
8108 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8109 break;
8111 case POST_DEC:
8112 if (!TARGET_LDRD)
8113 abort (); /* Should never happen now. */
8114 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8115 break;
8117 case PRE_MODIFY:
8118 case POST_MODIFY:
8119 otherops[0] = operands[0];
8120 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8121 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8123 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8125 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8127 /* Registers overlap so split out the increment. */
8128 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8129 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8131 else
8132 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8134 else
8136 /* We only allow constant increments, so this is safe. */
8137 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8139 break;
8141 case LABEL_REF:
8142 case CONST:
8143 output_asm_insn ("adr%?\t%0, %1", operands);
8144 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8145 break;
8147 default:
8148 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8149 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8151 otherops[0] = operands[0];
8152 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8153 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8155 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8157 if (GET_CODE (otherops[2]) == CONST_INT)
8159 switch ((int) INTVAL (otherops[2]))
8161 case -8:
8162 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8163 return "";
8164 case -4:
8165 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8166 return "";
8167 case 4:
8168 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8169 return "";
8172 if (TARGET_LDRD
8173 && (GET_CODE (otherops[2]) == REG
8174 || (GET_CODE (otherops[2]) == CONST_INT
8175 && INTVAL (otherops[2]) > -256
8176 && INTVAL (otherops[2]) < 256)))
8178 if (reg_overlap_mentioned_p (otherops[0],
8179 otherops[2]))
8181 /* Swap base and index registers over to
8182 avoid a conflict. */
8183 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8184 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8187 /* If both registers conflict, it will usually
8188 have been fixed by a splitter. */
8189 if (reg_overlap_mentioned_p (otherops[0],
8190 otherops[2]))
8192 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8193 output_asm_insn ("ldr%?d\t%0, [%1]",
8194 otherops);
8195 return "";
8197 else
8199 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8200 otherops);
8201 return "";
8204 if (GET_CODE (otherops[2]) == CONST_INT)
8206 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8207 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8208 else
8209 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8211 else
8212 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8214 else
8215 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8217 return "ldm%?ia\t%0, %M0";
8219 else
8221 otherops[1] = adjust_address (operands[1], SImode, 4);
8222 /* Take care of overlapping base/data reg. */
8223 if (reg_mentioned_p (operands[0], operands[1]))
8225 output_asm_insn ("ldr%?\t%0, %1", otherops);
8226 output_asm_insn ("ldr%?\t%0, %1", operands);
8228 else
8230 output_asm_insn ("ldr%?\t%0, %1", operands);
8231 output_asm_insn ("ldr%?\t%0, %1", otherops);
8236 else
8237 abort (); /* Constraints should prevent this. */
8239 else if (code0 == MEM && code1 == REG)
8241 if (REGNO (operands[1]) == IP_REGNUM)
8242 abort ();
8244 switch (GET_CODE (XEXP (operands[0], 0)))
8246 case REG:
8247 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8248 break;
8250 case PRE_INC:
8251 if (!TARGET_LDRD)
8252 abort (); /* Should never happen now. */
8253 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8254 break;
8256 case PRE_DEC:
8257 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8258 break;
8260 case POST_INC:
8261 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8262 break;
8264 case POST_DEC:
8265 if (!TARGET_LDRD)
8266 abort (); /* Should never happen now. */
8267 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8268 break;
8270 case PRE_MODIFY:
8271 case POST_MODIFY:
8272 otherops[0] = operands[1];
8273 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8274 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8276 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8277 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8278 else
8279 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8280 break;
8282 case PLUS:
8283 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8284 if (GET_CODE (otherops[2]) == CONST_INT)
8286 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8288 case -8:
8289 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8290 return "";
8292 case -4:
8293 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8294 return "";
8296 case 4:
8297 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8298 return "";
8301 if (TARGET_LDRD
8302 && (GET_CODE (otherops[2]) == REG
8303 || (GET_CODE (otherops[2]) == CONST_INT
8304 && INTVAL (otherops[2]) > -256
8305 && INTVAL (otherops[2]) < 256)))
8307 otherops[0] = operands[1];
8308 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8309 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8310 return "";
8312 /* Fall through */
8314 default:
8315 otherops[0] = adjust_address (operands[0], SImode, 4);
8316 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8317 output_asm_insn ("str%?\t%1, %0", operands);
8318 output_asm_insn ("str%?\t%1, %0", otherops);
8321 else
8322 /* Constraints should prevent this. */
8323 abort ();
8325 return "";
8328 /* Output an ADD r, s, #n where n may be too big for one instruction.
8329 If adding zero to one register, output nothing. */
8330 const char *
8331 output_add_immediate (rtx *operands)
8333 HOST_WIDE_INT n = INTVAL (operands[2]);
8335 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8337 if (n < 0)
8338 output_multi_immediate (operands,
8339 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8340 -n);
8341 else
8342 output_multi_immediate (operands,
8343 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8347 return "";
8350 /* Output a multiple immediate operation.
8351 OPERANDS is the vector of operands referred to in the output patterns.
8352 INSTR1 is the output pattern to use for the first constant.
8353 INSTR2 is the output pattern to use for subsequent constants.
8354 IMMED_OP is the index of the constant slot in OPERANDS.
8355 N is the constant value. */
8356 static const char *
8357 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8358 int immed_op, HOST_WIDE_INT n)
8360 #if HOST_BITS_PER_WIDE_INT > 32
8361 n &= 0xffffffff;
8362 #endif
8364 if (n == 0)
8366 /* Quick and easy output. */
8367 operands[immed_op] = const0_rtx;
8368 output_asm_insn (instr1, operands);
8370 else
8372 int i;
8373 const char * instr = instr1;
8375 /* Note that n is never zero here (which would give no output). */
8376 for (i = 0; i < 32; i += 2)
8378 if (n & (3 << i))
8380 operands[immed_op] = GEN_INT (n & (255 << i));
8381 output_asm_insn (instr, operands);
8382 instr = instr2;
8383 i += 6;
8388 return "";
8391 /* Return the appropriate ARM instruction for the operation code.
8392 The returned result should not be overwritten. OP is the rtx of the
8393 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8394 was shifted. */
8395 const char *
8396 arithmetic_instr (rtx op, int shift_first_arg)
8398 switch (GET_CODE (op))
8400 case PLUS:
8401 return "add";
8403 case MINUS:
8404 return shift_first_arg ? "rsb" : "sub";
8406 case IOR:
8407 return "orr";
8409 case XOR:
8410 return "eor";
8412 case AND:
8413 return "and";
8415 default:
8416 abort ();
8420 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8421 for the operation code. The returned result should not be overwritten.
8422 OP is the rtx code of the shift.
8423 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
8424 shift. */
8425 static const char *
8426 shift_op (rtx op, HOST_WIDE_INT *amountp)
8428 const char * mnem;
8429 enum rtx_code code = GET_CODE (op);
8431 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8432 *amountp = -1;
8433 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8434 *amountp = INTVAL (XEXP (op, 1));
8435 else
8436 abort ();
8438 switch (code)
8440 case ASHIFT:
8441 mnem = "asl";
8442 break;
8444 case ASHIFTRT:
8445 mnem = "asr";
8446 break;
8448 case LSHIFTRT:
8449 mnem = "lsr";
8450 break;
8452 case ROTATE:
8453 if (*amountp == -1)
8454 abort ();
8455 *amountp = 32 - *amountp;
8457 /* Fall through. */
8459 case ROTATERT:
8460 mnem = "ror";
8461 break;
8463 case MULT:
8464 /* We never have to worry about the amount being other than a
8465 power of 2, since this case can never be reloaded from a reg. */
8466 if (*amountp != -1)
8467 *amountp = int_log2 (*amountp);
8468 else
8469 abort ();
8470 return "asl";
8472 default:
8473 abort ();
8476 if (*amountp != -1)
8478 /* This is not 100% correct, but follows from the desire to merge
8479 multiplication by a power of 2 with the recognizer for a
8480 shift. >=32 is not a valid shift for "asl", so we must try and
8481 output a shift that produces the correct arithmetical result.
8482 Using lsr #32 is identical except for the fact that the carry bit
8483 is not set correctly if we set the flags; but we never use the
8484 carry bit from such an operation, so we can ignore that. */
8485 if (code == ROTATERT)
8486 /* Rotate is just modulo 32. */
8487 *amountp &= 31;
8488 else if (*amountp != (*amountp & 31))
8490 if (code == ASHIFT)
8491 mnem = "lsr";
8492 *amountp = 32;
8495 /* Shifts of 0 are no-ops. */
8496 if (*amountp == 0)
8497 return NULL;
8500 return mnem;
8503 /* Obtain the shift from the POWER of two. */
8505 static HOST_WIDE_INT
8506 int_log2 (HOST_WIDE_INT power)
8508 HOST_WIDE_INT shift = 0;
8510 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8512 if (shift > 31)
8513 abort ();
8514 shift++;
8517 return shift;
8520 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8521 because /bin/as is horribly restrictive. The judgement about
8522 whether or not each character is 'printable' (and can be output as
8523 is) or not (and must be printed with an octal escape) must be made
8524 with reference to the *host* character set -- the situation is
8525 similar to that discussed in the comments above pp_c_char in
8526 c-pretty-print.c. */
8528 #define MAX_ASCII_LEN 51
8530 void
8531 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8533 int i;
8534 int len_so_far = 0;
8536 fputs ("\t.ascii\t\"", stream);
8538 for (i = 0; i < len; i++)
8540 int c = p[i];
8542 if (len_so_far >= MAX_ASCII_LEN)
8544 fputs ("\"\n\t.ascii\t\"", stream);
8545 len_so_far = 0;
8548 if (ISPRINT (c))
8550 if (c == '\\' || c == '\"')
8552 putc ('\\', stream);
8553 len_so_far++;
8555 putc (c, stream);
8556 len_so_far++;
8558 else
8560 fprintf (stream, "\\%03o", c);
8561 len_so_far += 4;
8565 fputs ("\"\n", stream);
8568 /* Compute the register save mask for registers 0 through 12
8569 inclusive. This code is used by arm_compute_save_reg_mask. */
8571 static unsigned long
8572 arm_compute_save_reg0_reg12_mask (void)
8574 unsigned long func_type = arm_current_func_type ();
8575 unsigned long save_reg_mask = 0;
8576 unsigned int reg;
8578 if (IS_INTERRUPT (func_type))
8580 unsigned int max_reg;
8581 /* Interrupt functions must not corrupt any registers,
8582 even call clobbered ones. If this is a leaf function
8583 we can just examine the registers used by the RTL, but
8584 otherwise we have to assume that whatever function is
8585 called might clobber anything, and so we have to save
8586 all the call-clobbered registers as well. */
8587 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8588 /* FIQ handlers have registers r8 - r12 banked, so
8589 we only need to check r0 - r7, Normal ISRs only
8590 bank r14 and r15, so we must check up to r12.
8591 r13 is the stack pointer which is always preserved,
8592 so we do not need to consider it here. */
8593 max_reg = 7;
8594 else
8595 max_reg = 12;
8597 for (reg = 0; reg <= max_reg; reg++)
8598 if (regs_ever_live[reg]
8599 || (! current_function_is_leaf && call_used_regs [reg]))
8600 save_reg_mask |= (1 << reg);
8602 /* Also save the pic base register if necessary. */
8603 if (flag_pic
8604 && !TARGET_SINGLE_PIC_BASE
8605 && current_function_uses_pic_offset_table)
8606 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8608 else
8610 /* In the normal case we only need to save those registers
8611 which are call saved and which are used by this function. */
8612 for (reg = 0; reg <= 10; reg++)
8613 if (regs_ever_live[reg] && ! call_used_regs [reg])
8614 save_reg_mask |= (1 << reg);
8616 /* Handle the frame pointer as a special case. */
8617 if (! TARGET_APCS_FRAME
8618 && ! frame_pointer_needed
8619 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8620 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8621 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8623 /* If we aren't loading the PIC register,
8624 don't stack it even though it may be live. */
8625 if (flag_pic
8626 && !TARGET_SINGLE_PIC_BASE
8627 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8628 || current_function_uses_pic_offset_table))
8629 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8632 /* Save registers so the exception handler can modify them. */
8633 if (current_function_calls_eh_return)
8635 unsigned int i;
8637 for (i = 0; ; i++)
8639 reg = EH_RETURN_DATA_REGNO (i);
8640 if (reg == INVALID_REGNUM)
8641 break;
8642 save_reg_mask |= 1 << reg;
8646 return save_reg_mask;
8649 /* Compute a bit mask of which registers need to be
8650 saved on the stack for the current function. */
8652 static unsigned long
8653 arm_compute_save_reg_mask (void)
8655 unsigned int save_reg_mask = 0;
8656 unsigned long func_type = arm_current_func_type ();
8658 if (IS_NAKED (func_type))
8659 /* This should never really happen. */
8660 return 0;
8662 /* If we are creating a stack frame, then we must save the frame pointer,
8663 IP (which will hold the old stack pointer), LR and the PC. */
8664 if (frame_pointer_needed)
8665 save_reg_mask |=
8666 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8667 | (1 << IP_REGNUM)
8668 | (1 << LR_REGNUM)
8669 | (1 << PC_REGNUM);
8671 /* Volatile functions do not return, so there
8672 is no need to save any other registers. */
8673 if (IS_VOLATILE (func_type))
8674 return save_reg_mask;
8676 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8678 /* Decide if we need to save the link register.
8679 Interrupt routines have their own banked link register,
8680 so they never need to save it.
8681 Otherwise if we do not use the link register we do not need to save
8682 it. If we are pushing other registers onto the stack however, we
8683 can save an instruction in the epilogue by pushing the link register
8684 now and then popping it back into the PC. This incurs extra memory
8685 accesses though, so we only do it when optimizing for size, and only
8686 if we know that we will not need a fancy return sequence. */
8687 if (regs_ever_live [LR_REGNUM]
8688 || (save_reg_mask
8689 && optimize_size
8690 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8691 && !current_function_calls_eh_return))
8692 save_reg_mask |= 1 << LR_REGNUM;
8694 if (cfun->machine->lr_save_eliminated)
8695 save_reg_mask &= ~ (1 << LR_REGNUM);
8697 if (TARGET_REALLY_IWMMXT
8698 && ((bit_count (save_reg_mask)
8699 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8701 unsigned int reg;
8703 /* The total number of registers that are going to be pushed
8704 onto the stack is odd. We need to ensure that the stack
8705 is 64-bit aligned before we start to save iWMMXt registers,
8706 and also before we start to create locals. (A local variable
8707 might be a double or long long which we will load/store using
8708 an iWMMXt instruction). Therefore we need to push another
8709 ARM register, so that the stack will be 64-bit aligned. We
8710 try to avoid using the arg registers (r0 -r3) as they might be
8711 used to pass values in a tail call. */
8712 for (reg = 4; reg <= 12; reg++)
8713 if ((save_reg_mask & (1 << reg)) == 0)
8714 break;
8716 if (reg <= 12)
8717 save_reg_mask |= (1 << reg);
8718 else
8720 cfun->machine->sibcall_blocked = 1;
8721 save_reg_mask |= (1 << 3);
8725 return save_reg_mask;
8729 /* Compute a bit mask of which registers need to be
8730 saved on the stack for the current function. */
8731 static unsigned long
8732 thumb_compute_save_reg_mask (void)
8734 unsigned long mask;
8735 unsigned reg;
8737 mask = 0;
8738 for (reg = 0; reg < 12; reg ++)
8739 if (regs_ever_live[reg] && !call_used_regs[reg])
8740 mask |= 1 << reg;
8742 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8743 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8745 if (TARGET_SINGLE_PIC_BASE)
8746 mask &= ~(1 << arm_pic_register);
8748 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8749 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8750 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8752 /* LR will also be pushed if any lo regs are pushed. */
8753 if (mask & 0xff || thumb_force_lr_save ())
8754 mask |= (1 << LR_REGNUM);
8756 /* Make sure we have a low work register if we need one.
8757 We will need one if we are going to push a high register,
8758 but we are not currently intending to push a low register. */
8759 if ((mask & 0xff) == 0
8760 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8762 /* Use thumb_find_work_register to choose which register
8763 we will use. If the register is live then we will
8764 have to push it. Use LAST_LO_REGNUM as our fallback
8765 choice for the register to select. */
8766 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8768 if (! call_used_regs[reg])
8769 mask |= 1 << reg;
8772 return mask;
8776 /* Return the number of bytes required to save VFP registers. */
8777 static int
8778 arm_get_vfp_saved_size (void)
8780 unsigned int regno;
8781 int count;
8782 int saved;
8784 saved = 0;
8785 /* Space for saved VFP registers. */
8786 if (TARGET_HARD_FLOAT && TARGET_VFP)
8788 count = 0;
8789 for (regno = FIRST_VFP_REGNUM;
8790 regno < LAST_VFP_REGNUM;
8791 regno += 2)
8793 if ((!regs_ever_live[regno] || call_used_regs[regno])
8794 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8796 if (count > 0)
8798 /* Workaround ARM10 VFPr1 bug. */
8799 if (count == 2 && !arm_arch6)
8800 count++;
8801 saved += count * 8 + 4;
8803 count = 0;
8805 else
8806 count++;
8808 if (count > 0)
8810 if (count == 2 && !arm_arch6)
8811 count++;
8812 saved += count * 8 + 4;
8815 return saved;
8819 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8820 everything bar the final return instruction. */
8821 const char *
8822 output_return_instruction (rtx operand, int really_return, int reverse)
8824 char conditional[10];
8825 char instr[100];
8826 unsigned reg;
8827 unsigned long live_regs_mask;
8828 unsigned long func_type;
8829 arm_stack_offsets *offsets;
8831 func_type = arm_current_func_type ();
8833 if (IS_NAKED (func_type))
8834 return "";
8836 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8838 /* If this function was declared non-returning, and we have
8839 found a tail call, then we have to trust that the called
8840 function won't return. */
8841 if (really_return)
8843 rtx ops[2];
8845 /* Otherwise, trap an attempted return by aborting. */
8846 ops[0] = operand;
8847 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8848 : "abort");
8849 assemble_external_libcall (ops[1]);
8850 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8853 return "";
8856 if (current_function_calls_alloca && !really_return)
8857 abort ();
8859 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8861 return_used_this_function = 1;
8863 live_regs_mask = arm_compute_save_reg_mask ();
8865 if (live_regs_mask)
8867 const char * return_reg;
8869 /* If we do not have any special requirements for function exit
8870 (e.g. interworking, or ISR) then we can load the return address
8871 directly into the PC. Otherwise we must load it into LR. */
8872 if (really_return
8873 && ! TARGET_INTERWORK)
8874 return_reg = reg_names[PC_REGNUM];
8875 else
8876 return_reg = reg_names[LR_REGNUM];
8878 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8880 /* There are three possible reasons for the IP register
8881 being saved. 1) a stack frame was created, in which case
8882 IP contains the old stack pointer, or 2) an ISR routine
8883 corrupted it, or 3) it was saved to align the stack on
8884 iWMMXt. In case 1, restore IP into SP, otherwise just
8885 restore IP. */
8886 if (frame_pointer_needed)
8888 live_regs_mask &= ~ (1 << IP_REGNUM);
8889 live_regs_mask |= (1 << SP_REGNUM);
8891 else
8893 if (! IS_INTERRUPT (func_type)
8894 && ! TARGET_REALLY_IWMMXT)
8895 abort ();
8899 /* On some ARM architectures it is faster to use LDR rather than
8900 LDM to load a single register. On other architectures, the
8901 cost is the same. In 26 bit mode, or for exception handlers,
8902 we have to use LDM to load the PC so that the CPSR is also
8903 restored. */
8904 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8905 if (live_regs_mask == (1U << reg))
8906 break;
8908 if (reg <= LAST_ARM_REGNUM
8909 && (reg != LR_REGNUM
8910 || ! really_return
8911 || ! IS_INTERRUPT (func_type)))
8913 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
8914 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
8916 else
8918 char *p;
8919 int first = 1;
8921 /* Generate the load multiple instruction to restore the
8922 registers. Note we can get here, even if
8923 frame_pointer_needed is true, but only if sp already
8924 points to the base of the saved core registers. */
8925 if (live_regs_mask & (1 << SP_REGNUM))
8927 unsigned HOST_WIDE_INT stack_adjust;
8929 offsets = arm_get_frame_offsets ();
8930 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
8931 if (stack_adjust != 0 && stack_adjust != 4)
8932 abort ();
8934 if (stack_adjust && arm_arch5)
8935 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
8936 else
8938 /* If we can't use ldmib (SA110 bug),
8939 then try to pop r3 instead. */
8940 if (stack_adjust)
8941 live_regs_mask |= 1 << 3;
8942 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
8945 else
8946 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
8948 p = instr + strlen (instr);
8950 for (reg = 0; reg <= SP_REGNUM; reg++)
8951 if (live_regs_mask & (1 << reg))
8953 int l = strlen (reg_names[reg]);
8955 if (first)
8956 first = 0;
8957 else
8959 memcpy (p, ", ", 2);
8960 p += 2;
8963 memcpy (p, "%|", 2);
8964 memcpy (p + 2, reg_names[reg], l);
8965 p += l + 2;
8968 if (live_regs_mask & (1 << LR_REGNUM))
8970 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
8971 /* If returning from an interrupt, restore the CPSR. */
8972 if (IS_INTERRUPT (func_type))
8973 strcat (p, "^");
8975 else
8976 strcpy (p, "}");
8979 output_asm_insn (instr, & operand);
8981 /* See if we need to generate an extra instruction to
8982 perform the actual function return. */
8983 if (really_return
8984 && func_type != ARM_FT_INTERWORKED
8985 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8987 /* The return has already been handled
8988 by loading the LR into the PC. */
8989 really_return = 0;
8993 if (really_return)
8995 switch ((int) ARM_FUNC_TYPE (func_type))
8997 case ARM_FT_ISR:
8998 case ARM_FT_FIQ:
8999 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9000 break;
9002 case ARM_FT_INTERWORKED:
9003 sprintf (instr, "bx%s\t%%|lr", conditional);
9004 break;
9006 case ARM_FT_EXCEPTION:
9007 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9008 break;
9010 default:
9011 /* Use bx if it's available. */
9012 if (arm_arch5 || arm_arch4t)
9013 sprintf (instr, "bx%s\t%%|lr", conditional);
9014 else
9015 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9016 break;
9019 output_asm_insn (instr, & operand);
9022 return "";
9025 /* Write the function name into the code section, directly preceding
9026 the function prologue.
9028 Code will be output similar to this:
9030 .ascii "arm_poke_function_name", 0
9031 .align
9033 .word 0xff000000 + (t1 - t0)
9034 arm_poke_function_name
9035 mov ip, sp
9036 stmfd sp!, {fp, ip, lr, pc}
9037 sub fp, ip, #4
9039 When performing a stack backtrace, code can inspect the value
9040 of 'pc' stored at 'fp' + 0. If the trace function then looks
9041 at location pc - 12 and the top 8 bits are set, then we know
9042 that there is a function name embedded immediately preceding this
9043 location and has length ((pc[-3]) & 0xff000000).
9045 We assume that pc is declared as a pointer to an unsigned long.
9047 It is of no benefit to output the function name if we are assembling
9048 a leaf function. These function types will not contain a stack
9049 backtrace structure, therefore it is not possible to determine the
9050 function name. */
9051 void
9052 arm_poke_function_name (FILE *stream, const char *name)
9054 unsigned long alignlength;
9055 unsigned long length;
9056 rtx x;
9058 length = strlen (name) + 1;
9059 alignlength = ROUND_UP_WORD (length);
9061 ASM_OUTPUT_ASCII (stream, name, length);
9062 ASM_OUTPUT_ALIGN (stream, 2);
9063 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9064 assemble_aligned_integer (UNITS_PER_WORD, x);
9067 /* Place some comments into the assembler stream
9068 describing the current function. */
9069 static void
9070 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9072 unsigned long func_type;
9074 if (!TARGET_ARM)
9076 thumb_output_function_prologue (f, frame_size);
9077 return;
9080 /* Sanity check. */
9081 if (arm_ccfsm_state || arm_target_insn)
9082 abort ();
9084 func_type = arm_current_func_type ();
9086 switch ((int) ARM_FUNC_TYPE (func_type))
9088 default:
9089 case ARM_FT_NORMAL:
9090 break;
9091 case ARM_FT_INTERWORKED:
9092 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9093 break;
9094 case ARM_FT_ISR:
9095 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9096 break;
9097 case ARM_FT_FIQ:
9098 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9099 break;
9100 case ARM_FT_EXCEPTION:
9101 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9102 break;
9105 if (IS_NAKED (func_type))
9106 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9108 if (IS_VOLATILE (func_type))
9109 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9111 if (IS_NESTED (func_type))
9112 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9114 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9115 current_function_args_size,
9116 current_function_pretend_args_size, frame_size);
9118 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9119 frame_pointer_needed,
9120 cfun->machine->uses_anonymous_args);
9122 if (cfun->machine->lr_save_eliminated)
9123 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9125 if (current_function_calls_eh_return)
9126 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9128 #ifdef AOF_ASSEMBLER
9129 if (flag_pic)
9130 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9131 #endif
9133 return_used_this_function = 0;
9136 const char *
9137 arm_output_epilogue (rtx sibling)
9139 int reg;
9140 unsigned long saved_regs_mask;
9141 unsigned long func_type;
9142 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9143 frame that is $fp + 4 for a non-variadic function. */
9144 int floats_offset = 0;
9145 rtx operands[3];
9146 FILE * f = asm_out_file;
9147 unsigned int lrm_count = 0;
9148 int really_return = (sibling == NULL);
9149 int start_reg;
9150 arm_stack_offsets *offsets;
9152 /* If we have already generated the return instruction
9153 then it is futile to generate anything else. */
9154 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9155 return "";
9157 func_type = arm_current_func_type ();
9159 if (IS_NAKED (func_type))
9160 /* Naked functions don't have epilogues. */
9161 return "";
9163 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9165 rtx op;
9167 /* A volatile function should never return. Call abort. */
9168 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9169 assemble_external_libcall (op);
9170 output_asm_insn ("bl\t%a0", &op);
9172 return "";
9175 if (current_function_calls_eh_return
9176 && ! really_return)
9177 /* If we are throwing an exception, then we really must
9178 be doing a return, so we can't tail-call. */
9179 abort ();
9181 offsets = arm_get_frame_offsets ();
9182 saved_regs_mask = arm_compute_save_reg_mask ();
9184 if (TARGET_IWMMXT)
9185 lrm_count = bit_count (saved_regs_mask);
9187 floats_offset = offsets->saved_args;
9188 /* Compute how far away the floats will be. */
9189 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9190 if (saved_regs_mask & (1 << reg))
9191 floats_offset += 4;
9193 if (frame_pointer_needed)
9195 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9196 int vfp_offset = offsets->frame;
9198 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9200 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9201 if (regs_ever_live[reg] && !call_used_regs[reg])
9203 floats_offset += 12;
9204 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9205 reg, FP_REGNUM, floats_offset - vfp_offset);
9208 else
9210 start_reg = LAST_FPA_REGNUM;
9212 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9214 if (regs_ever_live[reg] && !call_used_regs[reg])
9216 floats_offset += 12;
9218 /* We can't unstack more than four registers at once. */
9219 if (start_reg - reg == 3)
9221 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9222 reg, FP_REGNUM, floats_offset - vfp_offset);
9223 start_reg = reg - 1;
9226 else
9228 if (reg != start_reg)
9229 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9230 reg + 1, start_reg - reg,
9231 FP_REGNUM, floats_offset - vfp_offset);
9232 start_reg = reg - 1;
9236 /* Just in case the last register checked also needs unstacking. */
9237 if (reg != start_reg)
9238 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9239 reg + 1, start_reg - reg,
9240 FP_REGNUM, floats_offset - vfp_offset);
9243 if (TARGET_HARD_FLOAT && TARGET_VFP)
9245 int saved_size;
9247 /* The fldmx insn does not have base+offset addressing modes,
9248 so we use IP to hold the address. */
9249 saved_size = arm_get_vfp_saved_size ();
9251 if (saved_size > 0)
9253 floats_offset += saved_size;
9254 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9255 FP_REGNUM, floats_offset - vfp_offset);
9257 start_reg = FIRST_VFP_REGNUM;
9258 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9260 if ((!regs_ever_live[reg] || call_used_regs[reg])
9261 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9263 if (start_reg != reg)
9264 arm_output_fldmx (f, IP_REGNUM,
9265 (start_reg - FIRST_VFP_REGNUM) / 2,
9266 (reg - start_reg) / 2);
9267 start_reg = reg + 2;
9270 if (start_reg != reg)
9271 arm_output_fldmx (f, IP_REGNUM,
9272 (start_reg - FIRST_VFP_REGNUM) / 2,
9273 (reg - start_reg) / 2);
9276 if (TARGET_IWMMXT)
9278 /* The frame pointer is guaranteed to be non-double-word aligned.
9279 This is because it is set to (old_stack_pointer - 4) and the
9280 old_stack_pointer was double word aligned. Thus the offset to
9281 the iWMMXt registers to be loaded must also be non-double-word
9282 sized, so that the resultant address *is* double-word aligned.
9283 We can ignore floats_offset since that was already included in
9284 the live_regs_mask. */
9285 lrm_count += (lrm_count % 2 ? 2 : 1);
9287 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9288 if (regs_ever_live[reg] && !call_used_regs[reg])
9290 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9291 reg, FP_REGNUM, lrm_count * 4);
9292 lrm_count += 2;
9296 /* saved_regs_mask should contain the IP, which at the time of stack
9297 frame generation actually contains the old stack pointer. So a
9298 quick way to unwind the stack is just pop the IP register directly
9299 into the stack pointer. */
9300 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9301 abort ();
9302 saved_regs_mask &= ~ (1 << IP_REGNUM);
9303 saved_regs_mask |= (1 << SP_REGNUM);
9305 /* There are two registers left in saved_regs_mask - LR and PC. We
9306 only need to restore the LR register (the return address), but to
9307 save time we can load it directly into the PC, unless we need a
9308 special function exit sequence, or we are not really returning. */
9309 if (really_return
9310 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9311 && !current_function_calls_eh_return)
9312 /* Delete the LR from the register mask, so that the LR on
9313 the stack is loaded into the PC in the register mask. */
9314 saved_regs_mask &= ~ (1 << LR_REGNUM);
9315 else
9316 saved_regs_mask &= ~ (1 << PC_REGNUM);
9318 /* We must use SP as the base register, because SP is one of the
9319 registers being restored. If an interrupt or page fault
9320 happens in the ldm instruction, the SP might or might not
9321 have been restored. That would be bad, as then SP will no
9322 longer indicate the safe area of stack, and we can get stack
9323 corruption. Using SP as the base register means that it will
9324 be reset correctly to the original value, should an interrupt
9325 occur. If the stack pointer already points at the right
9326 place, then omit the subtraction. */
9327 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9328 || current_function_calls_alloca)
9329 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9330 4 * bit_count (saved_regs_mask));
9331 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9333 if (IS_INTERRUPT (func_type))
9334 /* Interrupt handlers will have pushed the
9335 IP onto the stack, so restore it now. */
9336 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9338 else
9340 /* Restore stack pointer if necessary. */
9341 if (offsets->outgoing_args != offsets->saved_regs)
9343 operands[0] = operands[1] = stack_pointer_rtx;
9344 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9345 output_add_immediate (operands);
9348 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9350 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9351 if (regs_ever_live[reg] && !call_used_regs[reg])
9352 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9353 reg, SP_REGNUM);
9355 else
9357 start_reg = FIRST_FPA_REGNUM;
9359 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9361 if (regs_ever_live[reg] && !call_used_regs[reg])
9363 if (reg - start_reg == 3)
9365 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9366 start_reg, SP_REGNUM);
9367 start_reg = reg + 1;
9370 else
9372 if (reg != start_reg)
9373 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9374 start_reg, reg - start_reg,
9375 SP_REGNUM);
9377 start_reg = reg + 1;
9381 /* Just in case the last register checked also needs unstacking. */
9382 if (reg != start_reg)
9383 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9384 start_reg, reg - start_reg, SP_REGNUM);
9387 if (TARGET_HARD_FLOAT && TARGET_VFP)
9389 start_reg = FIRST_VFP_REGNUM;
9390 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9392 if ((!regs_ever_live[reg] || call_used_regs[reg])
9393 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9395 if (start_reg != reg)
9396 arm_output_fldmx (f, SP_REGNUM,
9397 (start_reg - FIRST_VFP_REGNUM) / 2,
9398 (reg - start_reg) / 2);
9399 start_reg = reg + 2;
9402 if (start_reg != reg)
9403 arm_output_fldmx (f, SP_REGNUM,
9404 (start_reg - FIRST_VFP_REGNUM) / 2,
9405 (reg - start_reg) / 2);
9407 if (TARGET_IWMMXT)
9408 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9409 if (regs_ever_live[reg] && !call_used_regs[reg])
9410 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9412 /* If we can, restore the LR into the PC. */
9413 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9414 && really_return
9415 && current_function_pretend_args_size == 0
9416 && saved_regs_mask & (1 << LR_REGNUM)
9417 && !current_function_calls_eh_return)
9419 saved_regs_mask &= ~ (1 << LR_REGNUM);
9420 saved_regs_mask |= (1 << PC_REGNUM);
9423 /* Load the registers off the stack. If we only have one register
9424 to load use the LDR instruction - it is faster. */
9425 if (saved_regs_mask == (1 << LR_REGNUM))
9427 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9429 else if (saved_regs_mask)
9431 if (saved_regs_mask & (1 << SP_REGNUM))
9432 /* Note - write back to the stack register is not enabled
9433 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9434 in the list of registers and if we add writeback the
9435 instruction becomes UNPREDICTABLE. */
9436 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9437 else
9438 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9441 if (current_function_pretend_args_size)
9443 /* Unwind the pre-pushed regs. */
9444 operands[0] = operands[1] = stack_pointer_rtx;
9445 operands[2] = GEN_INT (current_function_pretend_args_size);
9446 output_add_immediate (operands);
9450 /* We may have already restored PC directly from the stack. */
9451 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9452 return "";
9454 /* Stack adjustment for exception handler. */
9455 if (current_function_calls_eh_return)
9456 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9457 ARM_EH_STACKADJ_REGNUM);
9459 /* Generate the return instruction. */
9460 switch ((int) ARM_FUNC_TYPE (func_type))
9462 case ARM_FT_ISR:
9463 case ARM_FT_FIQ:
9464 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9465 break;
9467 case ARM_FT_EXCEPTION:
9468 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9469 break;
9471 case ARM_FT_INTERWORKED:
9472 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9473 break;
9475 default:
9476 if (arm_arch5 || arm_arch4t)
9477 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9478 else
9479 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9480 break;
9483 return "";
9486 static void
9487 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9488 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9490 arm_stack_offsets *offsets;
9492 if (TARGET_THUMB)
9494 int regno;
9496 /* Emit any call-via-reg trampolines that are needed for v4t support
9497 of call_reg and call_value_reg type insns. */
9498 for (regno = 0; regno < LR_REGNUM; regno++)
9500 rtx label = cfun->machine->call_via[regno];
9502 if (label != NULL)
9504 function_section (current_function_decl);
9505 targetm.asm_out.internal_label (asm_out_file, "L",
9506 CODE_LABEL_NUMBER (label));
9507 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9511 /* ??? Probably not safe to set this here, since it assumes that a
9512 function will be emitted as assembly immediately after we generate
9513 RTL for it. This does not happen for inline functions. */
9514 return_used_this_function = 0;
9516 else
9518 /* We need to take into account any stack-frame rounding. */
9519 offsets = arm_get_frame_offsets ();
9521 if (use_return_insn (FALSE, NULL)
9522 && return_used_this_function
9523 && offsets->saved_regs != offsets->outgoing_args
9524 && !frame_pointer_needed)
9525 abort ();
9527 /* Reset the ARM-specific per-function variables. */
9528 after_arm_reorg = 0;
9532 /* Generate and emit an insn that we will recognize as a push_multi.
9533 Unfortunately, since this insn does not reflect very well the actual
9534 semantics of the operation, we need to annotate the insn for the benefit
9535 of DWARF2 frame unwind information. */
9536 static rtx
9537 emit_multi_reg_push (unsigned long mask)
9539 int num_regs = 0;
9540 int num_dwarf_regs;
9541 int i, j;
9542 rtx par;
9543 rtx dwarf;
9544 int dwarf_par_index;
9545 rtx tmp, reg;
9547 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9548 if (mask & (1 << i))
9549 num_regs++;
9551 if (num_regs == 0 || num_regs > 16)
9552 abort ();
9554 /* We don't record the PC in the dwarf frame information. */
9555 num_dwarf_regs = num_regs;
9556 if (mask & (1 << PC_REGNUM))
9557 num_dwarf_regs--;
9559 /* For the body of the insn we are going to generate an UNSPEC in
9560 parallel with several USEs. This allows the insn to be recognized
9561 by the push_multi pattern in the arm.md file. The insn looks
9562 something like this:
9564 (parallel [
9565 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9566 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9567 (use (reg:SI 11 fp))
9568 (use (reg:SI 12 ip))
9569 (use (reg:SI 14 lr))
9570 (use (reg:SI 15 pc))
9573 For the frame note however, we try to be more explicit and actually
9574 show each register being stored into the stack frame, plus a (single)
9575 decrement of the stack pointer. We do it this way in order to be
9576 friendly to the stack unwinding code, which only wants to see a single
9577 stack decrement per instruction. The RTL we generate for the note looks
9578 something like this:
9580 (sequence [
9581 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9582 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9583 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9584 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9585 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9588 This sequence is used both by the code to support stack unwinding for
9589 exceptions handlers and the code to generate dwarf2 frame debugging. */
9591 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9592 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9593 dwarf_par_index = 1;
9595 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9597 if (mask & (1 << i))
9599 reg = gen_rtx_REG (SImode, i);
9601 XVECEXP (par, 0, 0)
9602 = gen_rtx_SET (VOIDmode,
9603 gen_rtx_MEM (BLKmode,
9604 gen_rtx_PRE_DEC (BLKmode,
9605 stack_pointer_rtx)),
9606 gen_rtx_UNSPEC (BLKmode,
9607 gen_rtvec (1, reg),
9608 UNSPEC_PUSH_MULT));
9610 if (i != PC_REGNUM)
9612 tmp = gen_rtx_SET (VOIDmode,
9613 gen_rtx_MEM (SImode, stack_pointer_rtx),
9614 reg);
9615 RTX_FRAME_RELATED_P (tmp) = 1;
9616 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9617 dwarf_par_index++;
9620 break;
9624 for (j = 1, i++; j < num_regs; i++)
9626 if (mask & (1 << i))
9628 reg = gen_rtx_REG (SImode, i);
9630 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9632 if (i != PC_REGNUM)
9634 tmp = gen_rtx_SET (VOIDmode,
9635 gen_rtx_MEM (SImode,
9636 plus_constant (stack_pointer_rtx,
9637 4 * j)),
9638 reg);
9639 RTX_FRAME_RELATED_P (tmp) = 1;
9640 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9643 j++;
9647 par = emit_insn (par);
9649 tmp = gen_rtx_SET (SImode,
9650 stack_pointer_rtx,
9651 gen_rtx_PLUS (SImode,
9652 stack_pointer_rtx,
9653 GEN_INT (-4 * num_regs)));
9654 RTX_FRAME_RELATED_P (tmp) = 1;
9655 XVECEXP (dwarf, 0, 0) = tmp;
9657 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9658 REG_NOTES (par));
9659 return par;
9662 static rtx
9663 emit_sfm (int base_reg, int count)
9665 rtx par;
9666 rtx dwarf;
9667 rtx tmp, reg;
9668 int i;
9670 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9671 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9673 reg = gen_rtx_REG (XFmode, base_reg++);
9675 XVECEXP (par, 0, 0)
9676 = gen_rtx_SET (VOIDmode,
9677 gen_rtx_MEM (BLKmode,
9678 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9679 gen_rtx_UNSPEC (BLKmode,
9680 gen_rtvec (1, reg),
9681 UNSPEC_PUSH_MULT));
9682 tmp = gen_rtx_SET (VOIDmode,
9683 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9684 RTX_FRAME_RELATED_P (tmp) = 1;
9685 XVECEXP (dwarf, 0, 1) = tmp;
9687 for (i = 1; i < count; i++)
9689 reg = gen_rtx_REG (XFmode, base_reg++);
9690 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9692 tmp = gen_rtx_SET (VOIDmode,
9693 gen_rtx_MEM (XFmode,
9694 plus_constant (stack_pointer_rtx,
9695 i * 12)),
9696 reg);
9697 RTX_FRAME_RELATED_P (tmp) = 1;
9698 XVECEXP (dwarf, 0, i + 1) = tmp;
9701 tmp = gen_rtx_SET (VOIDmode,
9702 stack_pointer_rtx,
9703 gen_rtx_PLUS (SImode,
9704 stack_pointer_rtx,
9705 GEN_INT (-12 * count)));
9706 RTX_FRAME_RELATED_P (tmp) = 1;
9707 XVECEXP (dwarf, 0, 0) = tmp;
9709 par = emit_insn (par);
9710 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9711 REG_NOTES (par));
9712 return par;
9716 /* Return true if the current function needs to save/restore LR. */
9718 static bool
9719 thumb_force_lr_save (void)
9721 return !cfun->machine->lr_save_eliminated
9722 && (!leaf_function_p ()
9723 || thumb_far_jump_used_p ()
9724 || regs_ever_live [LR_REGNUM]);
9728 /* Compute the distance from register FROM to register TO.
9729 These can be the arg pointer (26), the soft frame pointer (25),
9730 the stack pointer (13) or the hard frame pointer (11).
9731 In thumb mode r7 is used as the soft frame pointer, if needed.
9732 Typical stack layout looks like this:
9734 old stack pointer -> | |
9735 ----
9736 | | \
9737 | | saved arguments for
9738 | | vararg functions
9739 | | /
9741 hard FP & arg pointer -> | | \
9742 | | stack
9743 | | frame
9744 | | /
9746 | | \
9747 | | call saved
9748 | | registers
9749 soft frame pointer -> | | /
9751 | | \
9752 | | local
9753 | | variables
9754 | | /
9756 | | \
9757 | | outgoing
9758 | | arguments
9759 current stack pointer -> | | /
9762 For a given function some or all of these stack components
9763 may not be needed, giving rise to the possibility of
9764 eliminating some of the registers.
9766 The values returned by this function must reflect the behavior
9767 of arm_expand_prologue() and arm_compute_save_reg_mask().
9769 The sign of the number returned reflects the direction of stack
9770 growth, so the values are positive for all eliminations except
9771 from the soft frame pointer to the hard frame pointer.
9773 SFP may point just inside the local variables block to ensure correct
9774 alignment. */
9777 /* Calculate stack offsets. These are used to calculate register elimination
9778 offsets and in prologue/epilogue code. */
9780 static arm_stack_offsets *
9781 arm_get_frame_offsets (void)
9783 struct arm_stack_offsets *offsets;
9784 unsigned long func_type;
9785 int leaf;
9786 int saved;
9787 HOST_WIDE_INT frame_size;
9789 offsets = &cfun->machine->stack_offsets;
9791 /* We need to know if we are a leaf function. Unfortunately, it
9792 is possible to be called after start_sequence has been called,
9793 which causes get_insns to return the insns for the sequence,
9794 not the function, which will cause leaf_function_p to return
9795 the incorrect result.
9797 to know about leaf functions once reload has completed, and the
9798 frame size cannot be changed after that time, so we can safely
9799 use the cached value. */
9801 if (reload_completed)
9802 return offsets;
9804 /* Initially this is the size of the local variables. It will translated
9805 into an offset once we have determined the size of preceding data. */
9806 frame_size = ROUND_UP_WORD (get_frame_size ());
9808 leaf = leaf_function_p ();
9810 /* Space for variadic functions. */
9811 offsets->saved_args = current_function_pretend_args_size;
9813 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9815 if (TARGET_ARM)
9817 unsigned int regno;
9819 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9821 /* We know that SP will be doubleword aligned on entry, and we must
9822 preserve that condition at any subroutine call. We also require the
9823 soft frame pointer to be doubleword aligned. */
9825 if (TARGET_REALLY_IWMMXT)
9827 /* Check for the call-saved iWMMXt registers. */
9828 for (regno = FIRST_IWMMXT_REGNUM;
9829 regno <= LAST_IWMMXT_REGNUM;
9830 regno++)
9831 if (regs_ever_live [regno] && ! call_used_regs [regno])
9832 saved += 8;
9835 func_type = arm_current_func_type ();
9836 if (! IS_VOLATILE (func_type))
9838 /* Space for saved FPA registers. */
9839 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9840 if (regs_ever_live[regno] && ! call_used_regs[regno])
9841 saved += 12;
9843 /* Space for saved VFP registers. */
9844 if (TARGET_HARD_FLOAT && TARGET_VFP)
9845 saved += arm_get_vfp_saved_size ();
9848 else /* TARGET_THUMB */
9850 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
9851 if (TARGET_BACKTRACE)
9852 saved += 16;
9855 /* Saved registers include the stack frame. */
9856 offsets->saved_regs = offsets->saved_args + saved;
9857 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
9858 /* A leaf function does not need any stack alignment if it has nothing
9859 on the stack. */
9860 if (leaf && frame_size == 0)
9862 offsets->outgoing_args = offsets->soft_frame;
9863 return offsets;
9866 /* Ensure SFP has the correct alignment. */
9867 if (ARM_DOUBLEWORD_ALIGN
9868 && (offsets->soft_frame & 7))
9869 offsets->soft_frame += 4;
9871 offsets->outgoing_args = offsets->soft_frame + frame_size
9872 + current_function_outgoing_args_size;
9874 if (ARM_DOUBLEWORD_ALIGN)
9876 /* Ensure SP remains doubleword aligned. */
9877 if (offsets->outgoing_args & 7)
9878 offsets->outgoing_args += 4;
9879 if (offsets->outgoing_args & 7)
9880 abort ();
9883 return offsets;
9887 /* Calculate the relative offsets for the different stack pointers. Positive
9888 offsets are in the direction of stack growth. */
9890 HOST_WIDE_INT
9891 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9893 arm_stack_offsets *offsets;
9895 offsets = arm_get_frame_offsets ();
9897 /* OK, now we have enough information to compute the distances.
9898 There must be an entry in these switch tables for each pair
9899 of registers in ELIMINABLE_REGS, even if some of the entries
9900 seem to be redundant or useless. */
9901 switch (from)
9903 case ARG_POINTER_REGNUM:
9904 switch (to)
9906 case THUMB_HARD_FRAME_POINTER_REGNUM:
9907 return 0;
9909 case FRAME_POINTER_REGNUM:
9910 /* This is the reverse of the soft frame pointer
9911 to hard frame pointer elimination below. */
9912 return offsets->soft_frame - offsets->saved_args;
9914 case ARM_HARD_FRAME_POINTER_REGNUM:
9915 /* If there is no stack frame then the hard
9916 frame pointer and the arg pointer coincide. */
9917 if (offsets->frame == offsets->saved_regs)
9918 return 0;
9919 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
9920 return (frame_pointer_needed
9921 && cfun->static_chain_decl != NULL
9922 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
9924 case STACK_POINTER_REGNUM:
9925 /* If nothing has been pushed on the stack at all
9926 then this will return -4. This *is* correct! */
9927 return offsets->outgoing_args - (offsets->saved_args + 4);
9929 default:
9930 abort ();
9932 break;
9934 case FRAME_POINTER_REGNUM:
9935 switch (to)
9937 case THUMB_HARD_FRAME_POINTER_REGNUM:
9938 return 0;
9940 case ARM_HARD_FRAME_POINTER_REGNUM:
9941 /* The hard frame pointer points to the top entry in the
9942 stack frame. The soft frame pointer to the bottom entry
9943 in the stack frame. If there is no stack frame at all,
9944 then they are identical. */
9946 return offsets->frame - offsets->soft_frame;
9948 case STACK_POINTER_REGNUM:
9949 return offsets->outgoing_args - offsets->soft_frame;
9951 default:
9952 abort ();
9954 break;
9956 default:
9957 /* You cannot eliminate from the stack pointer.
9958 In theory you could eliminate from the hard frame
9959 pointer to the stack pointer, but this will never
9960 happen, since if a stack frame is not needed the
9961 hard frame pointer will never be used. */
9962 abort ();
9967 /* Generate the prologue instructions for entry into an ARM function. */
9968 void
9969 arm_expand_prologue (void)
9971 int reg;
9972 rtx amount;
9973 rtx insn;
9974 rtx ip_rtx;
9975 unsigned long live_regs_mask;
9976 unsigned long func_type;
9977 int fp_offset = 0;
9978 int saved_pretend_args = 0;
9979 int saved_regs = 0;
9980 unsigned HOST_WIDE_INT args_to_push;
9981 arm_stack_offsets *offsets;
9983 func_type = arm_current_func_type ();
9985 /* Naked functions don't have prologues. */
9986 if (IS_NAKED (func_type))
9987 return;
9989 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
9990 args_to_push = current_function_pretend_args_size;
9992 /* Compute which register we will have to save onto the stack. */
9993 live_regs_mask = arm_compute_save_reg_mask ();
9995 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
9997 if (frame_pointer_needed)
9999 if (IS_INTERRUPT (func_type))
10001 /* Interrupt functions must not corrupt any registers.
10002 Creating a frame pointer however, corrupts the IP
10003 register, so we must push it first. */
10004 insn = emit_multi_reg_push (1 << IP_REGNUM);
10006 /* Do not set RTX_FRAME_RELATED_P on this insn.
10007 The dwarf stack unwinding code only wants to see one
10008 stack decrement per function, and this is not it. If
10009 this instruction is labeled as being part of the frame
10010 creation sequence then dwarf2out_frame_debug_expr will
10011 abort when it encounters the assignment of IP to FP
10012 later on, since the use of SP here establishes SP as
10013 the CFA register and not IP.
10015 Anyway this instruction is not really part of the stack
10016 frame creation although it is part of the prologue. */
10018 else if (IS_NESTED (func_type))
10020 /* The Static chain register is the same as the IP register
10021 used as a scratch register during stack frame creation.
10022 To get around this need to find somewhere to store IP
10023 whilst the frame is being created. We try the following
10024 places in order:
10026 1. The last argument register.
10027 2. A slot on the stack above the frame. (This only
10028 works if the function is not a varargs function).
10029 3. Register r3, after pushing the argument registers
10030 onto the stack.
10032 Note - we only need to tell the dwarf2 backend about the SP
10033 adjustment in the second variant; the static chain register
10034 doesn't need to be unwound, as it doesn't contain a value
10035 inherited from the caller. */
10037 if (regs_ever_live[3] == 0)
10039 insn = gen_rtx_REG (SImode, 3);
10040 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10041 insn = emit_insn (insn);
10043 else if (args_to_push == 0)
10045 rtx dwarf;
10046 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10047 insn = gen_rtx_MEM (SImode, insn);
10048 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10049 insn = emit_insn (insn);
10051 fp_offset = 4;
10053 /* Just tell the dwarf backend that we adjusted SP. */
10054 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10055 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10056 GEN_INT (-fp_offset)));
10057 RTX_FRAME_RELATED_P (insn) = 1;
10058 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10059 dwarf, REG_NOTES (insn));
10061 else
10063 /* Store the args on the stack. */
10064 if (cfun->machine->uses_anonymous_args)
10065 insn = emit_multi_reg_push
10066 ((0xf0 >> (args_to_push / 4)) & 0xf);
10067 else
10068 insn = emit_insn
10069 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10070 GEN_INT (- args_to_push)));
10072 RTX_FRAME_RELATED_P (insn) = 1;
10074 saved_pretend_args = 1;
10075 fp_offset = args_to_push;
10076 args_to_push = 0;
10078 /* Now reuse r3 to preserve IP. */
10079 insn = gen_rtx_REG (SImode, 3);
10080 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10081 (void) emit_insn (insn);
10085 if (fp_offset)
10087 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10088 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10090 else
10091 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10093 insn = emit_insn (insn);
10094 RTX_FRAME_RELATED_P (insn) = 1;
10097 if (args_to_push)
10099 /* Push the argument registers, or reserve space for them. */
10100 if (cfun->machine->uses_anonymous_args)
10101 insn = emit_multi_reg_push
10102 ((0xf0 >> (args_to_push / 4)) & 0xf);
10103 else
10104 insn = emit_insn
10105 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10106 GEN_INT (- args_to_push)));
10107 RTX_FRAME_RELATED_P (insn) = 1;
10110 /* If this is an interrupt service routine, and the link register
10111 is going to be pushed, and we are not creating a stack frame,
10112 (which would involve an extra push of IP and a pop in the epilogue)
10113 subtracting four from LR now will mean that the function return
10114 can be done with a single instruction. */
10115 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10116 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10117 && ! frame_pointer_needed)
10118 emit_insn (gen_rtx_SET (SImode,
10119 gen_rtx_REG (SImode, LR_REGNUM),
10120 gen_rtx_PLUS (SImode,
10121 gen_rtx_REG (SImode, LR_REGNUM),
10122 GEN_INT (-4))));
10124 if (live_regs_mask)
10126 insn = emit_multi_reg_push (live_regs_mask);
10127 saved_regs += bit_count (live_regs_mask) * 4;
10128 RTX_FRAME_RELATED_P (insn) = 1;
10131 if (TARGET_IWMMXT)
10132 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10133 if (regs_ever_live[reg] && ! call_used_regs [reg])
10135 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10136 insn = gen_rtx_MEM (V2SImode, insn);
10137 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10138 gen_rtx_REG (V2SImode, reg)));
10139 RTX_FRAME_RELATED_P (insn) = 1;
10140 saved_regs += 8;
10143 if (! IS_VOLATILE (func_type))
10145 int start_reg;
10147 /* Save any floating point call-saved registers used by this
10148 function. */
10149 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10151 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10152 if (regs_ever_live[reg] && !call_used_regs[reg])
10154 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10155 insn = gen_rtx_MEM (XFmode, insn);
10156 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10157 gen_rtx_REG (XFmode, reg)));
10158 RTX_FRAME_RELATED_P (insn) = 1;
10159 saved_regs += 12;
10162 else
10164 start_reg = LAST_FPA_REGNUM;
10166 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10168 if (regs_ever_live[reg] && !call_used_regs[reg])
10170 if (start_reg - reg == 3)
10172 insn = emit_sfm (reg, 4);
10173 RTX_FRAME_RELATED_P (insn) = 1;
10174 saved_regs += 48;
10175 start_reg = reg - 1;
10178 else
10180 if (start_reg != reg)
10182 insn = emit_sfm (reg + 1, start_reg - reg);
10183 RTX_FRAME_RELATED_P (insn) = 1;
10184 saved_regs += (start_reg - reg) * 12;
10186 start_reg = reg - 1;
10190 if (start_reg != reg)
10192 insn = emit_sfm (reg + 1, start_reg - reg);
10193 saved_regs += (start_reg - reg) * 12;
10194 RTX_FRAME_RELATED_P (insn) = 1;
10197 if (TARGET_HARD_FLOAT && TARGET_VFP)
10199 start_reg = FIRST_VFP_REGNUM;
10201 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10203 if ((!regs_ever_live[reg] || call_used_regs[reg])
10204 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10206 if (start_reg != reg)
10207 saved_regs += vfp_emit_fstmx (start_reg,
10208 (reg - start_reg) / 2);
10209 start_reg = reg + 2;
10212 if (start_reg != reg)
10213 saved_regs += vfp_emit_fstmx (start_reg,
10214 (reg - start_reg) / 2);
10218 if (frame_pointer_needed)
10220 /* Create the new frame pointer. */
10221 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10222 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10223 RTX_FRAME_RELATED_P (insn) = 1;
10225 if (IS_NESTED (func_type))
10227 /* Recover the static chain register. */
10228 if (regs_ever_live [3] == 0
10229 || saved_pretend_args)
10230 insn = gen_rtx_REG (SImode, 3);
10231 else /* if (current_function_pretend_args_size == 0) */
10233 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10234 GEN_INT (4));
10235 insn = gen_rtx_MEM (SImode, insn);
10238 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10239 /* Add a USE to stop propagate_one_insn() from barfing. */
10240 emit_insn (gen_prologue_use (ip_rtx));
10244 offsets = arm_get_frame_offsets ();
10245 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10247 /* This add can produce multiple insns for a large constant, so we
10248 need to get tricky. */
10249 rtx last = get_last_insn ();
10251 amount = GEN_INT (offsets->saved_args + saved_regs
10252 - offsets->outgoing_args);
10254 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10255 amount));
10258 last = last ? NEXT_INSN (last) : get_insns ();
10259 RTX_FRAME_RELATED_P (last) = 1;
10261 while (last != insn);
10263 /* If the frame pointer is needed, emit a special barrier that
10264 will prevent the scheduler from moving stores to the frame
10265 before the stack adjustment. */
10266 if (frame_pointer_needed)
10267 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10268 hard_frame_pointer_rtx));
10272 if (flag_pic)
10273 arm_load_pic_register (INVALID_REGNUM);
10275 /* If we are profiling, make sure no instructions are scheduled before
10276 the call to mcount. Similarly if the user has requested no
10277 scheduling in the prolog. */
10278 if (current_function_profile || TARGET_NO_SCHED_PRO)
10279 emit_insn (gen_blockage ());
10281 /* If the link register is being kept alive, with the return address in it,
10282 then make sure that it does not get reused by the ce2 pass. */
10283 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10285 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10286 cfun->machine->lr_save_eliminated = 1;
10290 /* If CODE is 'd', then the X is a condition operand and the instruction
10291 should only be executed if the condition is true.
10292 if CODE is 'D', then the X is a condition operand and the instruction
10293 should only be executed if the condition is false: however, if the mode
10294 of the comparison is CCFPEmode, then always execute the instruction -- we
10295 do this because in these circumstances !GE does not necessarily imply LT;
10296 in these cases the instruction pattern will take care to make sure that
10297 an instruction containing %d will follow, thereby undoing the effects of
10298 doing this instruction unconditionally.
10299 If CODE is 'N' then X is a floating point operand that must be negated
10300 before output.
10301 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10302 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10303 void
10304 arm_print_operand (FILE *stream, rtx x, int code)
10306 switch (code)
10308 case '@':
10309 fputs (ASM_COMMENT_START, stream);
10310 return;
10312 case '_':
10313 fputs (user_label_prefix, stream);
10314 return;
10316 case '|':
10317 fputs (REGISTER_PREFIX, stream);
10318 return;
10320 case '?':
10321 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10323 if (TARGET_THUMB)
10325 output_operand_lossage ("predicated Thumb instruction");
10326 break;
10328 if (current_insn_predicate != NULL)
10330 output_operand_lossage
10331 ("predicated instruction in conditional sequence");
10332 break;
10335 fputs (arm_condition_codes[arm_current_cc], stream);
10337 else if (current_insn_predicate)
10339 enum arm_cond_code code;
10341 if (TARGET_THUMB)
10343 output_operand_lossage ("predicated Thumb instruction");
10344 break;
10347 code = get_arm_condition_code (current_insn_predicate);
10348 fputs (arm_condition_codes[code], stream);
10350 return;
10352 case 'N':
10354 REAL_VALUE_TYPE r;
10355 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10356 r = REAL_VALUE_NEGATE (r);
10357 fprintf (stream, "%s", fp_const_from_val (&r));
10359 return;
10361 case 'B':
10362 if (GET_CODE (x) == CONST_INT)
10364 HOST_WIDE_INT val;
10365 val = ARM_SIGN_EXTEND (~INTVAL (x));
10366 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10368 else
10370 putc ('~', stream);
10371 output_addr_const (stream, x);
10373 return;
10375 case 'i':
10376 fprintf (stream, "%s", arithmetic_instr (x, 1));
10377 return;
10379 /* Truncate Cirrus shift counts. */
10380 case 's':
10381 if (GET_CODE (x) == CONST_INT)
10383 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10384 return;
10386 arm_print_operand (stream, x, 0);
10387 return;
10389 case 'I':
10390 fprintf (stream, "%s", arithmetic_instr (x, 0));
10391 return;
10393 case 'S':
10395 HOST_WIDE_INT val;
10396 const char * shift = shift_op (x, &val);
10398 if (shift)
10400 fprintf (stream, ", %s ", shift_op (x, &val));
10401 if (val == -1)
10402 arm_print_operand (stream, XEXP (x, 1), 0);
10403 else
10404 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10407 return;
10409 /* An explanation of the 'Q', 'R' and 'H' register operands:
10411 In a pair of registers containing a DI or DF value the 'Q'
10412 operand returns the register number of the register containing
10413 the least significant part of the value. The 'R' operand returns
10414 the register number of the register containing the most
10415 significant part of the value.
10417 The 'H' operand returns the higher of the two register numbers.
10418 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10419 same as the 'Q' operand, since the most significant part of the
10420 value is held in the lower number register. The reverse is true
10421 on systems where WORDS_BIG_ENDIAN is false.
10423 The purpose of these operands is to distinguish between cases
10424 where the endian-ness of the values is important (for example
10425 when they are added together), and cases where the endian-ness
10426 is irrelevant, but the order of register operations is important.
10427 For example when loading a value from memory into a register
10428 pair, the endian-ness does not matter. Provided that the value
10429 from the lower memory address is put into the lower numbered
10430 register, and the value from the higher address is put into the
10431 higher numbered register, the load will work regardless of whether
10432 the value being loaded is big-wordian or little-wordian. The
10433 order of the two register loads can matter however, if the address
10434 of the memory location is actually held in one of the registers
10435 being overwritten by the load. */
10436 case 'Q':
10437 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10439 output_operand_lossage ("invalid operand for code '%c'", code);
10440 return;
10443 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10444 return;
10446 case 'R':
10447 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10449 output_operand_lossage ("invalid operand for code '%c'", code);
10450 return;
10453 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10454 return;
10456 case 'H':
10457 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10459 output_operand_lossage ("invalid operand for code '%c'", code);
10460 return;
10463 asm_fprintf (stream, "%r", REGNO (x) + 1);
10464 return;
10466 case 'm':
10467 asm_fprintf (stream, "%r",
10468 GET_CODE (XEXP (x, 0)) == REG
10469 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10470 return;
10472 case 'M':
10473 asm_fprintf (stream, "{%r-%r}",
10474 REGNO (x),
10475 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10476 return;
10478 case 'd':
10479 /* CONST_TRUE_RTX means always -- that's the default. */
10480 if (x == const_true_rtx)
10481 return;
10483 if (!COMPARISON_P (x))
10485 output_operand_lossage ("invalid operand for code '%c'", code);
10486 return;
10489 fputs (arm_condition_codes[get_arm_condition_code (x)],
10490 stream);
10491 return;
10493 case 'D':
10494 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10495 want to do that. */
10496 if (x == const_true_rtx)
10498 output_operand_lossage ("instruction never exectued");
10499 return;
10501 if (!COMPARISON_P (x))
10503 output_operand_lossage ("invalid operand for code '%c'", code);
10504 return;
10507 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10508 (get_arm_condition_code (x))],
10509 stream);
10510 return;
10512 /* Cirrus registers can be accessed in a variety of ways:
10513 single floating point (f)
10514 double floating point (d)
10515 32bit integer (fx)
10516 64bit integer (dx). */
10517 case 'W': /* Cirrus register in F mode. */
10518 case 'X': /* Cirrus register in D mode. */
10519 case 'Y': /* Cirrus register in FX mode. */
10520 case 'Z': /* Cirrus register in DX mode. */
10521 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10522 abort ();
10524 fprintf (stream, "mv%s%s",
10525 code == 'W' ? "f"
10526 : code == 'X' ? "d"
10527 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10529 return;
10531 /* Print cirrus register in the mode specified by the register's mode. */
10532 case 'V':
10534 int mode = GET_MODE (x);
10536 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10538 output_operand_lossage ("invalid operand for code '%c'", code);
10539 return;
10542 fprintf (stream, "mv%s%s",
10543 mode == DFmode ? "d"
10544 : mode == SImode ? "fx"
10545 : mode == DImode ? "dx"
10546 : "f", reg_names[REGNO (x)] + 2);
10548 return;
10551 case 'U':
10552 if (GET_CODE (x) != REG
10553 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10554 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10555 /* Bad value for wCG register number. */
10557 output_operand_lossage ("invalid operand for code '%c'", code);
10558 return;
10561 else
10562 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10563 return;
10565 /* Print an iWMMXt control register name. */
10566 case 'w':
10567 if (GET_CODE (x) != CONST_INT
10568 || INTVAL (x) < 0
10569 || INTVAL (x) >= 16)
10570 /* Bad value for wC register number. */
10572 output_operand_lossage ("invalid operand for code '%c'", code);
10573 return;
10576 else
10578 static const char * wc_reg_names [16] =
10580 "wCID", "wCon", "wCSSF", "wCASF",
10581 "wC4", "wC5", "wC6", "wC7",
10582 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10583 "wC12", "wC13", "wC14", "wC15"
10586 fprintf (stream, wc_reg_names [INTVAL (x)]);
10588 return;
10590 /* Print a VFP double precision register name. */
10591 case 'P':
10593 int mode = GET_MODE (x);
10594 int num;
10596 if (mode != DImode && mode != DFmode)
10598 output_operand_lossage ("invalid operand for code '%c'", code);
10599 return;
10602 if (GET_CODE (x) != REG
10603 || !IS_VFP_REGNUM (REGNO (x)))
10605 output_operand_lossage ("invalid operand for code '%c'", code);
10606 return;
10609 num = REGNO(x) - FIRST_VFP_REGNUM;
10610 if (num & 1)
10612 output_operand_lossage ("invalid operand for code '%c'", code);
10613 return;
10616 fprintf (stream, "d%d", num >> 1);
10618 return;
10620 default:
10621 if (x == 0)
10623 output_operand_lossage ("missing operand");
10624 return;
10627 if (GET_CODE (x) == REG)
10628 asm_fprintf (stream, "%r", REGNO (x));
10629 else if (GET_CODE (x) == MEM)
10631 output_memory_reference_mode = GET_MODE (x);
10632 output_address (XEXP (x, 0));
10634 else if (GET_CODE (x) == CONST_DOUBLE)
10635 fprintf (stream, "#%s", fp_immediate_constant (x));
10636 else if (GET_CODE (x) == NEG)
10637 abort (); /* This should never happen now. */
10638 else
10640 fputc ('#', stream);
10641 output_addr_const (stream, x);
10646 #ifndef AOF_ASSEMBLER
10647 /* Target hook for assembling integer objects. The ARM version needs to
10648 handle word-sized values specially. */
10649 static bool
10650 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10652 if (size == UNITS_PER_WORD && aligned_p)
10654 fputs ("\t.word\t", asm_out_file);
10655 output_addr_const (asm_out_file, x);
10657 /* Mark symbols as position independent. We only do this in the
10658 .text segment, not in the .data segment. */
10659 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10660 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10662 if (GET_CODE (x) == SYMBOL_REF
10663 && (CONSTANT_POOL_ADDRESS_P (x)
10664 || SYMBOL_REF_LOCAL_P (x)))
10665 fputs ("(GOTOFF)", asm_out_file);
10666 else if (GET_CODE (x) == LABEL_REF)
10667 fputs ("(GOTOFF)", asm_out_file);
10668 else
10669 fputs ("(GOT)", asm_out_file);
10671 fputc ('\n', asm_out_file);
10672 return true;
10675 if (arm_vector_mode_supported_p (GET_MODE (x)))
10677 int i, units;
10679 if (GET_CODE (x) != CONST_VECTOR)
10680 abort ();
10682 units = CONST_VECTOR_NUNITS (x);
10684 switch (GET_MODE (x))
10686 case V2SImode: size = 4; break;
10687 case V4HImode: size = 2; break;
10688 case V8QImode: size = 1; break;
10689 default:
10690 abort ();
10693 for (i = 0; i < units; i++)
10695 rtx elt;
10697 elt = CONST_VECTOR_ELT (x, i);
10698 assemble_integer
10699 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10702 return true;
10705 return default_assemble_integer (x, size, aligned_p);
10707 #endif
10709 /* A finite state machine takes care of noticing whether or not instructions
10710 can be conditionally executed, and thus decrease execution time and code
10711 size by deleting branch instructions. The fsm is controlled by
10712 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10714 /* The state of the fsm controlling condition codes are:
10715 0: normal, do nothing special
10716 1: make ASM_OUTPUT_OPCODE not output this instruction
10717 2: make ASM_OUTPUT_OPCODE not output this instruction
10718 3: make instructions conditional
10719 4: make instructions conditional
10721 State transitions (state->state by whom under condition):
10722 0 -> 1 final_prescan_insn if the `target' is a label
10723 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10724 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10725 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10726 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10727 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10728 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10729 (the target insn is arm_target_insn).
10731 If the jump clobbers the conditions then we use states 2 and 4.
10733 A similar thing can be done with conditional return insns.
10735 XXX In case the `target' is an unconditional branch, this conditionalising
10736 of the instructions always reduces code size, but not always execution
10737 time. But then, I want to reduce the code size to somewhere near what
10738 /bin/cc produces. */
10740 /* Returns the index of the ARM condition code string in
10741 `arm_condition_codes'. COMPARISON should be an rtx like
10742 `(eq (...) (...))'. */
10743 static enum arm_cond_code
10744 get_arm_condition_code (rtx comparison)
10746 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10747 int code;
10748 enum rtx_code comp_code = GET_CODE (comparison);
10750 if (GET_MODE_CLASS (mode) != MODE_CC)
10751 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10752 XEXP (comparison, 1));
10754 switch (mode)
10756 case CC_DNEmode: code = ARM_NE; goto dominance;
10757 case CC_DEQmode: code = ARM_EQ; goto dominance;
10758 case CC_DGEmode: code = ARM_GE; goto dominance;
10759 case CC_DGTmode: code = ARM_GT; goto dominance;
10760 case CC_DLEmode: code = ARM_LE; goto dominance;
10761 case CC_DLTmode: code = ARM_LT; goto dominance;
10762 case CC_DGEUmode: code = ARM_CS; goto dominance;
10763 case CC_DGTUmode: code = ARM_HI; goto dominance;
10764 case CC_DLEUmode: code = ARM_LS; goto dominance;
10765 case CC_DLTUmode: code = ARM_CC;
10767 dominance:
10768 if (comp_code != EQ && comp_code != NE)
10769 abort ();
10771 if (comp_code == EQ)
10772 return ARM_INVERSE_CONDITION_CODE (code);
10773 return code;
10775 case CC_NOOVmode:
10776 switch (comp_code)
10778 case NE: return ARM_NE;
10779 case EQ: return ARM_EQ;
10780 case GE: return ARM_PL;
10781 case LT: return ARM_MI;
10782 default: abort ();
10785 case CC_Zmode:
10786 switch (comp_code)
10788 case NE: return ARM_NE;
10789 case EQ: return ARM_EQ;
10790 default: abort ();
10793 case CC_Nmode:
10794 switch (comp_code)
10796 case NE: return ARM_MI;
10797 case EQ: return ARM_PL;
10798 default: abort ();
10801 case CCFPEmode:
10802 case CCFPmode:
10803 /* These encodings assume that AC=1 in the FPA system control
10804 byte. This allows us to handle all cases except UNEQ and
10805 LTGT. */
10806 switch (comp_code)
10808 case GE: return ARM_GE;
10809 case GT: return ARM_GT;
10810 case LE: return ARM_LS;
10811 case LT: return ARM_MI;
10812 case NE: return ARM_NE;
10813 case EQ: return ARM_EQ;
10814 case ORDERED: return ARM_VC;
10815 case UNORDERED: return ARM_VS;
10816 case UNLT: return ARM_LT;
10817 case UNLE: return ARM_LE;
10818 case UNGT: return ARM_HI;
10819 case UNGE: return ARM_PL;
10820 /* UNEQ and LTGT do not have a representation. */
10821 case UNEQ: /* Fall through. */
10822 case LTGT: /* Fall through. */
10823 default: abort ();
10826 case CC_SWPmode:
10827 switch (comp_code)
10829 case NE: return ARM_NE;
10830 case EQ: return ARM_EQ;
10831 case GE: return ARM_LE;
10832 case GT: return ARM_LT;
10833 case LE: return ARM_GE;
10834 case LT: return ARM_GT;
10835 case GEU: return ARM_LS;
10836 case GTU: return ARM_CC;
10837 case LEU: return ARM_CS;
10838 case LTU: return ARM_HI;
10839 default: abort ();
10842 case CC_Cmode:
10843 switch (comp_code)
10845 case LTU: return ARM_CS;
10846 case GEU: return ARM_CC;
10847 default: abort ();
10850 case CCmode:
10851 switch (comp_code)
10853 case NE: return ARM_NE;
10854 case EQ: return ARM_EQ;
10855 case GE: return ARM_GE;
10856 case GT: return ARM_GT;
10857 case LE: return ARM_LE;
10858 case LT: return ARM_LT;
10859 case GEU: return ARM_CS;
10860 case GTU: return ARM_HI;
10861 case LEU: return ARM_LS;
10862 case LTU: return ARM_CC;
10863 default: abort ();
10866 default: abort ();
10869 abort ();
10872 void
10873 arm_final_prescan_insn (rtx insn)
10875 /* BODY will hold the body of INSN. */
10876 rtx body = PATTERN (insn);
10878 /* This will be 1 if trying to repeat the trick, and things need to be
10879 reversed if it appears to fail. */
10880 int reverse = 0;
10882 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
10883 taken are clobbered, even if the rtl suggests otherwise. It also
10884 means that we have to grub around within the jump expression to find
10885 out what the conditions are when the jump isn't taken. */
10886 int jump_clobbers = 0;
10888 /* If we start with a return insn, we only succeed if we find another one. */
10889 int seeking_return = 0;
10891 /* START_INSN will hold the insn from where we start looking. This is the
10892 first insn after the following code_label if REVERSE is true. */
10893 rtx start_insn = insn;
10895 /* If in state 4, check if the target branch is reached, in order to
10896 change back to state 0. */
10897 if (arm_ccfsm_state == 4)
10899 if (insn == arm_target_insn)
10901 arm_target_insn = NULL;
10902 arm_ccfsm_state = 0;
10904 return;
10907 /* If in state 3, it is possible to repeat the trick, if this insn is an
10908 unconditional branch to a label, and immediately following this branch
10909 is the previous target label which is only used once, and the label this
10910 branch jumps to is not too far off. */
10911 if (arm_ccfsm_state == 3)
10913 if (simplejump_p (insn))
10915 start_insn = next_nonnote_insn (start_insn);
10916 if (GET_CODE (start_insn) == BARRIER)
10918 /* XXX Isn't this always a barrier? */
10919 start_insn = next_nonnote_insn (start_insn);
10921 if (GET_CODE (start_insn) == CODE_LABEL
10922 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10923 && LABEL_NUSES (start_insn) == 1)
10924 reverse = TRUE;
10925 else
10926 return;
10928 else if (GET_CODE (body) == RETURN)
10930 start_insn = next_nonnote_insn (start_insn);
10931 if (GET_CODE (start_insn) == BARRIER)
10932 start_insn = next_nonnote_insn (start_insn);
10933 if (GET_CODE (start_insn) == CODE_LABEL
10934 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10935 && LABEL_NUSES (start_insn) == 1)
10937 reverse = TRUE;
10938 seeking_return = 1;
10940 else
10941 return;
10943 else
10944 return;
10947 if (arm_ccfsm_state != 0 && !reverse)
10948 abort ();
10949 if (GET_CODE (insn) != JUMP_INSN)
10950 return;
10952 /* This jump might be paralleled with a clobber of the condition codes
10953 the jump should always come first */
10954 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
10955 body = XVECEXP (body, 0, 0);
10957 if (reverse
10958 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
10959 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
10961 int insns_skipped;
10962 int fail = FALSE, succeed = FALSE;
10963 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
10964 int then_not_else = TRUE;
10965 rtx this_insn = start_insn, label = 0;
10967 /* If the jump cannot be done with one instruction, we cannot
10968 conditionally execute the instruction in the inverse case. */
10969 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
10971 jump_clobbers = 1;
10972 return;
10975 /* Register the insn jumped to. */
10976 if (reverse)
10978 if (!seeking_return)
10979 label = XEXP (SET_SRC (body), 0);
10981 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
10982 label = XEXP (XEXP (SET_SRC (body), 1), 0);
10983 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
10985 label = XEXP (XEXP (SET_SRC (body), 2), 0);
10986 then_not_else = FALSE;
10988 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
10989 seeking_return = 1;
10990 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
10992 seeking_return = 1;
10993 then_not_else = FALSE;
10995 else
10996 abort ();
10998 /* See how many insns this branch skips, and what kind of insns. If all
10999 insns are okay, and the label or unconditional branch to the same
11000 label is not too far away, succeed. */
11001 for (insns_skipped = 0;
11002 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11004 rtx scanbody;
11006 this_insn = next_nonnote_insn (this_insn);
11007 if (!this_insn)
11008 break;
11010 switch (GET_CODE (this_insn))
11012 case CODE_LABEL:
11013 /* Succeed if it is the target label, otherwise fail since
11014 control falls in from somewhere else. */
11015 if (this_insn == label)
11017 if (jump_clobbers)
11019 arm_ccfsm_state = 2;
11020 this_insn = next_nonnote_insn (this_insn);
11022 else
11023 arm_ccfsm_state = 1;
11024 succeed = TRUE;
11026 else
11027 fail = TRUE;
11028 break;
11030 case BARRIER:
11031 /* Succeed if the following insn is the target label.
11032 Otherwise fail.
11033 If return insns are used then the last insn in a function
11034 will be a barrier. */
11035 this_insn = next_nonnote_insn (this_insn);
11036 if (this_insn && this_insn == label)
11038 if (jump_clobbers)
11040 arm_ccfsm_state = 2;
11041 this_insn = next_nonnote_insn (this_insn);
11043 else
11044 arm_ccfsm_state = 1;
11045 succeed = TRUE;
11047 else
11048 fail = TRUE;
11049 break;
11051 case CALL_INSN:
11052 /* The AAPCS says that conditional calls should not be
11053 used since they make interworking inefficient (the
11054 linker can't transform BL<cond> into BLX). That's
11055 only a problem if the machine has BLX. */
11056 if (arm_arch5)
11058 fail = TRUE;
11059 break;
11062 /* Succeed if the following insn is the target label, or
11063 if the following two insns are a barrier and the
11064 target label. */
11065 this_insn = next_nonnote_insn (this_insn);
11066 if (this_insn && GET_CODE (this_insn) == BARRIER)
11067 this_insn = next_nonnote_insn (this_insn);
11069 if (this_insn && this_insn == label
11070 && insns_skipped < max_insns_skipped)
11072 if (jump_clobbers)
11074 arm_ccfsm_state = 2;
11075 this_insn = next_nonnote_insn (this_insn);
11077 else
11078 arm_ccfsm_state = 1;
11079 succeed = TRUE;
11081 else
11082 fail = TRUE;
11083 break;
11085 case JUMP_INSN:
11086 /* If this is an unconditional branch to the same label, succeed.
11087 If it is to another label, do nothing. If it is conditional,
11088 fail. */
11089 /* XXX Probably, the tests for SET and the PC are
11090 unnecessary. */
11092 scanbody = PATTERN (this_insn);
11093 if (GET_CODE (scanbody) == SET
11094 && GET_CODE (SET_DEST (scanbody)) == PC)
11096 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11097 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11099 arm_ccfsm_state = 2;
11100 succeed = TRUE;
11102 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11103 fail = TRUE;
11105 /* Fail if a conditional return is undesirable (e.g. on a
11106 StrongARM), but still allow this if optimizing for size. */
11107 else if (GET_CODE (scanbody) == RETURN
11108 && !use_return_insn (TRUE, NULL)
11109 && !optimize_size)
11110 fail = TRUE;
11111 else if (GET_CODE (scanbody) == RETURN
11112 && seeking_return)
11114 arm_ccfsm_state = 2;
11115 succeed = TRUE;
11117 else if (GET_CODE (scanbody) == PARALLEL)
11119 switch (get_attr_conds (this_insn))
11121 case CONDS_NOCOND:
11122 break;
11123 default:
11124 fail = TRUE;
11125 break;
11128 else
11129 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11131 break;
11133 case INSN:
11134 /* Instructions using or affecting the condition codes make it
11135 fail. */
11136 scanbody = PATTERN (this_insn);
11137 if (!(GET_CODE (scanbody) == SET
11138 || GET_CODE (scanbody) == PARALLEL)
11139 || get_attr_conds (this_insn) != CONDS_NOCOND)
11140 fail = TRUE;
11142 /* A conditional cirrus instruction must be followed by
11143 a non Cirrus instruction. However, since we
11144 conditionalize instructions in this function and by
11145 the time we get here we can't add instructions
11146 (nops), because shorten_branches() has already been
11147 called, we will disable conditionalizing Cirrus
11148 instructions to be safe. */
11149 if (GET_CODE (scanbody) != USE
11150 && GET_CODE (scanbody) != CLOBBER
11151 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11152 fail = TRUE;
11153 break;
11155 default:
11156 break;
11159 if (succeed)
11161 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11162 arm_target_label = CODE_LABEL_NUMBER (label);
11163 else if (seeking_return || arm_ccfsm_state == 2)
11165 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11167 this_insn = next_nonnote_insn (this_insn);
11168 if (this_insn && (GET_CODE (this_insn) == BARRIER
11169 || GET_CODE (this_insn) == CODE_LABEL))
11170 abort ();
11172 if (!this_insn)
11174 /* Oh, dear! we ran off the end.. give up. */
11175 recog (PATTERN (insn), insn, NULL);
11176 arm_ccfsm_state = 0;
11177 arm_target_insn = NULL;
11178 return;
11180 arm_target_insn = this_insn;
11182 else
11183 abort ();
11184 if (jump_clobbers)
11186 if (reverse)
11187 abort ();
11188 arm_current_cc =
11189 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11190 0), 0), 1));
11191 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11192 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11193 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11194 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11196 else
11198 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11199 what it was. */
11200 if (!reverse)
11201 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11202 0));
11205 if (reverse || then_not_else)
11206 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11209 /* Restore recog_data (getting the attributes of other insns can
11210 destroy this array, but final.c assumes that it remains intact
11211 across this call; since the insn has been recognized already we
11212 call recog direct). */
11213 recog (PATTERN (insn), insn, NULL);
11217 /* Returns true if REGNO is a valid register
11218 for holding a quantity of type MODE. */
11220 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11222 if (GET_MODE_CLASS (mode) == MODE_CC)
11223 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11225 if (TARGET_THUMB)
11226 /* For the Thumb we only allow values bigger than SImode in
11227 registers 0 - 6, so that there is always a second low
11228 register available to hold the upper part of the value.
11229 We probably we ought to ensure that the register is the
11230 start of an even numbered register pair. */
11231 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11233 if (IS_CIRRUS_REGNUM (regno))
11234 /* We have outlawed SI values in Cirrus registers because they
11235 reside in the lower 32 bits, but SF values reside in the
11236 upper 32 bits. This causes gcc all sorts of grief. We can't
11237 even split the registers into pairs because Cirrus SI values
11238 get sign extended to 64bits-- aldyh. */
11239 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11241 if (IS_VFP_REGNUM (regno))
11243 if (mode == SFmode || mode == SImode)
11244 return TRUE;
11246 /* DFmode values are only valid in even register pairs. */
11247 if (mode == DFmode)
11248 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11249 return FALSE;
11252 if (IS_IWMMXT_GR_REGNUM (regno))
11253 return mode == SImode;
11255 if (IS_IWMMXT_REGNUM (regno))
11256 return VALID_IWMMXT_REG_MODE (mode);
11258 /* We allow any value to be stored in the general registers.
11259 Restrict doubleword quantities to even register pairs so that we can
11260 use ldrd. */
11261 if (regno <= LAST_ARM_REGNUM)
11262 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11264 if ( regno == FRAME_POINTER_REGNUM
11265 || regno == ARG_POINTER_REGNUM)
11266 /* We only allow integers in the fake hard registers. */
11267 return GET_MODE_CLASS (mode) == MODE_INT;
11269 /* The only registers left are the FPA registers
11270 which we only allow to hold FP values. */
11271 return GET_MODE_CLASS (mode) == MODE_FLOAT
11272 && regno >= FIRST_FPA_REGNUM
11273 && regno <= LAST_FPA_REGNUM;
11277 arm_regno_class (int regno)
11279 if (TARGET_THUMB)
11281 if (regno == STACK_POINTER_REGNUM)
11282 return STACK_REG;
11283 if (regno == CC_REGNUM)
11284 return CC_REG;
11285 if (regno < 8)
11286 return LO_REGS;
11287 return HI_REGS;
11290 if ( regno <= LAST_ARM_REGNUM
11291 || regno == FRAME_POINTER_REGNUM
11292 || regno == ARG_POINTER_REGNUM)
11293 return GENERAL_REGS;
11295 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11296 return NO_REGS;
11298 if (IS_CIRRUS_REGNUM (regno))
11299 return CIRRUS_REGS;
11301 if (IS_VFP_REGNUM (regno))
11302 return VFP_REGS;
11304 if (IS_IWMMXT_REGNUM (regno))
11305 return IWMMXT_REGS;
11307 if (IS_IWMMXT_GR_REGNUM (regno))
11308 return IWMMXT_GR_REGS;
11310 return FPA_REGS;
11313 /* Handle a special case when computing the offset
11314 of an argument from the frame pointer. */
11316 arm_debugger_arg_offset (int value, rtx addr)
11318 rtx insn;
11320 /* We are only interested if dbxout_parms() failed to compute the offset. */
11321 if (value != 0)
11322 return 0;
11324 /* We can only cope with the case where the address is held in a register. */
11325 if (GET_CODE (addr) != REG)
11326 return 0;
11328 /* If we are using the frame pointer to point at the argument, then
11329 an offset of 0 is correct. */
11330 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11331 return 0;
11333 /* If we are using the stack pointer to point at the
11334 argument, then an offset of 0 is correct. */
11335 if ((TARGET_THUMB || !frame_pointer_needed)
11336 && REGNO (addr) == SP_REGNUM)
11337 return 0;
11339 /* Oh dear. The argument is pointed to by a register rather
11340 than being held in a register, or being stored at a known
11341 offset from the frame pointer. Since GDB only understands
11342 those two kinds of argument we must translate the address
11343 held in the register into an offset from the frame pointer.
11344 We do this by searching through the insns for the function
11345 looking to see where this register gets its value. If the
11346 register is initialized from the frame pointer plus an offset
11347 then we are in luck and we can continue, otherwise we give up.
11349 This code is exercised by producing debugging information
11350 for a function with arguments like this:
11352 double func (double a, double b, int c, double d) {return d;}
11354 Without this code the stab for parameter 'd' will be set to
11355 an offset of 0 from the frame pointer, rather than 8. */
11357 /* The if() statement says:
11359 If the insn is a normal instruction
11360 and if the insn is setting the value in a register
11361 and if the register being set is the register holding the address of the argument
11362 and if the address is computing by an addition
11363 that involves adding to a register
11364 which is the frame pointer
11365 a constant integer
11367 then... */
11369 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11371 if ( GET_CODE (insn) == INSN
11372 && GET_CODE (PATTERN (insn)) == SET
11373 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11374 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11375 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11376 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11377 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11380 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11382 break;
11386 if (value == 0)
11388 debug_rtx (addr);
11389 warning ("unable to compute real location of stacked parameter");
11390 value = 8; /* XXX magic hack */
11393 return value;
11396 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11397 do \
11399 if ((MASK) & insn_flags) \
11400 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11401 BUILT_IN_MD, NULL, NULL_TREE); \
11403 while (0)
11405 struct builtin_description
11407 const unsigned int mask;
11408 const enum insn_code icode;
11409 const char * const name;
11410 const enum arm_builtins code;
11411 const enum rtx_code comparison;
11412 const unsigned int flag;
11415 static const struct builtin_description bdesc_2arg[] =
11417 #define IWMMXT_BUILTIN(code, string, builtin) \
11418 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11419 ARM_BUILTIN_##builtin, 0, 0 },
11421 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11422 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11423 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11424 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11425 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11426 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11427 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11428 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11429 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11430 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11431 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11432 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11433 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11434 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11435 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11436 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11437 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11438 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11439 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11440 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11441 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11442 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11443 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11444 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11445 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11446 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11447 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11448 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11449 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11450 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11451 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11452 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11453 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11454 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11455 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11456 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11457 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11458 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11459 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11460 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11461 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11462 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11463 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11464 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11465 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11466 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11467 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11468 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11469 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11470 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11471 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11472 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11473 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11474 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11475 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11476 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11477 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11478 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11480 #define IWMMXT_BUILTIN2(code, builtin) \
11481 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11483 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11484 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11485 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11486 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11487 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11488 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11489 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11490 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11491 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11492 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11493 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11494 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11495 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11496 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11497 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11498 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11499 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11500 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11501 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11502 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11503 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11504 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11505 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11506 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11507 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11508 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11509 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11510 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11511 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11512 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11513 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11514 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11517 static const struct builtin_description bdesc_1arg[] =
11519 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11520 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11521 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11522 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11523 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11524 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11525 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11526 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11527 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11528 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11529 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11530 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11531 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11532 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11533 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11534 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11535 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11536 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11539 /* Set up all the iWMMXt builtins. This is
11540 not called if TARGET_IWMMXT is zero. */
11542 static void
11543 arm_init_iwmmxt_builtins (void)
11545 const struct builtin_description * d;
11546 size_t i;
11547 tree endlink = void_list_node;
11549 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11550 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11551 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11553 tree int_ftype_int
11554 = build_function_type (integer_type_node,
11555 tree_cons (NULL_TREE, integer_type_node, endlink));
11556 tree v8qi_ftype_v8qi_v8qi_int
11557 = build_function_type (V8QI_type_node,
11558 tree_cons (NULL_TREE, V8QI_type_node,
11559 tree_cons (NULL_TREE, V8QI_type_node,
11560 tree_cons (NULL_TREE,
11561 integer_type_node,
11562 endlink))));
11563 tree v4hi_ftype_v4hi_int
11564 = build_function_type (V4HI_type_node,
11565 tree_cons (NULL_TREE, V4HI_type_node,
11566 tree_cons (NULL_TREE, integer_type_node,
11567 endlink)));
11568 tree v2si_ftype_v2si_int
11569 = build_function_type (V2SI_type_node,
11570 tree_cons (NULL_TREE, V2SI_type_node,
11571 tree_cons (NULL_TREE, integer_type_node,
11572 endlink)));
11573 tree v2si_ftype_di_di
11574 = build_function_type (V2SI_type_node,
11575 tree_cons (NULL_TREE, long_long_integer_type_node,
11576 tree_cons (NULL_TREE, long_long_integer_type_node,
11577 endlink)));
11578 tree di_ftype_di_int
11579 = build_function_type (long_long_integer_type_node,
11580 tree_cons (NULL_TREE, long_long_integer_type_node,
11581 tree_cons (NULL_TREE, integer_type_node,
11582 endlink)));
11583 tree di_ftype_di_int_int
11584 = build_function_type (long_long_integer_type_node,
11585 tree_cons (NULL_TREE, long_long_integer_type_node,
11586 tree_cons (NULL_TREE, integer_type_node,
11587 tree_cons (NULL_TREE,
11588 integer_type_node,
11589 endlink))));
11590 tree int_ftype_v8qi
11591 = build_function_type (integer_type_node,
11592 tree_cons (NULL_TREE, V8QI_type_node,
11593 endlink));
11594 tree int_ftype_v4hi
11595 = build_function_type (integer_type_node,
11596 tree_cons (NULL_TREE, V4HI_type_node,
11597 endlink));
11598 tree int_ftype_v2si
11599 = build_function_type (integer_type_node,
11600 tree_cons (NULL_TREE, V2SI_type_node,
11601 endlink));
11602 tree int_ftype_v8qi_int
11603 = build_function_type (integer_type_node,
11604 tree_cons (NULL_TREE, V8QI_type_node,
11605 tree_cons (NULL_TREE, integer_type_node,
11606 endlink)));
11607 tree int_ftype_v4hi_int
11608 = build_function_type (integer_type_node,
11609 tree_cons (NULL_TREE, V4HI_type_node,
11610 tree_cons (NULL_TREE, integer_type_node,
11611 endlink)));
11612 tree int_ftype_v2si_int
11613 = build_function_type (integer_type_node,
11614 tree_cons (NULL_TREE, V2SI_type_node,
11615 tree_cons (NULL_TREE, integer_type_node,
11616 endlink)));
11617 tree v8qi_ftype_v8qi_int_int
11618 = build_function_type (V8QI_type_node,
11619 tree_cons (NULL_TREE, V8QI_type_node,
11620 tree_cons (NULL_TREE, integer_type_node,
11621 tree_cons (NULL_TREE,
11622 integer_type_node,
11623 endlink))));
11624 tree v4hi_ftype_v4hi_int_int
11625 = build_function_type (V4HI_type_node,
11626 tree_cons (NULL_TREE, V4HI_type_node,
11627 tree_cons (NULL_TREE, integer_type_node,
11628 tree_cons (NULL_TREE,
11629 integer_type_node,
11630 endlink))));
11631 tree v2si_ftype_v2si_int_int
11632 = build_function_type (V2SI_type_node,
11633 tree_cons (NULL_TREE, V2SI_type_node,
11634 tree_cons (NULL_TREE, integer_type_node,
11635 tree_cons (NULL_TREE,
11636 integer_type_node,
11637 endlink))));
11638 /* Miscellaneous. */
11639 tree v8qi_ftype_v4hi_v4hi
11640 = build_function_type (V8QI_type_node,
11641 tree_cons (NULL_TREE, V4HI_type_node,
11642 tree_cons (NULL_TREE, V4HI_type_node,
11643 endlink)));
11644 tree v4hi_ftype_v2si_v2si
11645 = build_function_type (V4HI_type_node,
11646 tree_cons (NULL_TREE, V2SI_type_node,
11647 tree_cons (NULL_TREE, V2SI_type_node,
11648 endlink)));
11649 tree v2si_ftype_v4hi_v4hi
11650 = build_function_type (V2SI_type_node,
11651 tree_cons (NULL_TREE, V4HI_type_node,
11652 tree_cons (NULL_TREE, V4HI_type_node,
11653 endlink)));
11654 tree v2si_ftype_v8qi_v8qi
11655 = build_function_type (V2SI_type_node,
11656 tree_cons (NULL_TREE, V8QI_type_node,
11657 tree_cons (NULL_TREE, V8QI_type_node,
11658 endlink)));
11659 tree v4hi_ftype_v4hi_di
11660 = build_function_type (V4HI_type_node,
11661 tree_cons (NULL_TREE, V4HI_type_node,
11662 tree_cons (NULL_TREE,
11663 long_long_integer_type_node,
11664 endlink)));
11665 tree v2si_ftype_v2si_di
11666 = build_function_type (V2SI_type_node,
11667 tree_cons (NULL_TREE, V2SI_type_node,
11668 tree_cons (NULL_TREE,
11669 long_long_integer_type_node,
11670 endlink)));
11671 tree void_ftype_int_int
11672 = build_function_type (void_type_node,
11673 tree_cons (NULL_TREE, integer_type_node,
11674 tree_cons (NULL_TREE, integer_type_node,
11675 endlink)));
11676 tree di_ftype_void
11677 = build_function_type (long_long_unsigned_type_node, endlink);
11678 tree di_ftype_v8qi
11679 = build_function_type (long_long_integer_type_node,
11680 tree_cons (NULL_TREE, V8QI_type_node,
11681 endlink));
11682 tree di_ftype_v4hi
11683 = build_function_type (long_long_integer_type_node,
11684 tree_cons (NULL_TREE, V4HI_type_node,
11685 endlink));
11686 tree di_ftype_v2si
11687 = build_function_type (long_long_integer_type_node,
11688 tree_cons (NULL_TREE, V2SI_type_node,
11689 endlink));
11690 tree v2si_ftype_v4hi
11691 = build_function_type (V2SI_type_node,
11692 tree_cons (NULL_TREE, V4HI_type_node,
11693 endlink));
11694 tree v4hi_ftype_v8qi
11695 = build_function_type (V4HI_type_node,
11696 tree_cons (NULL_TREE, V8QI_type_node,
11697 endlink));
11699 tree di_ftype_di_v4hi_v4hi
11700 = build_function_type (long_long_unsigned_type_node,
11701 tree_cons (NULL_TREE,
11702 long_long_unsigned_type_node,
11703 tree_cons (NULL_TREE, V4HI_type_node,
11704 tree_cons (NULL_TREE,
11705 V4HI_type_node,
11706 endlink))));
11708 tree di_ftype_v4hi_v4hi
11709 = build_function_type (long_long_unsigned_type_node,
11710 tree_cons (NULL_TREE, V4HI_type_node,
11711 tree_cons (NULL_TREE, V4HI_type_node,
11712 endlink)));
11714 /* Normal vector binops. */
11715 tree v8qi_ftype_v8qi_v8qi
11716 = build_function_type (V8QI_type_node,
11717 tree_cons (NULL_TREE, V8QI_type_node,
11718 tree_cons (NULL_TREE, V8QI_type_node,
11719 endlink)));
11720 tree v4hi_ftype_v4hi_v4hi
11721 = build_function_type (V4HI_type_node,
11722 tree_cons (NULL_TREE, V4HI_type_node,
11723 tree_cons (NULL_TREE, V4HI_type_node,
11724 endlink)));
11725 tree v2si_ftype_v2si_v2si
11726 = build_function_type (V2SI_type_node,
11727 tree_cons (NULL_TREE, V2SI_type_node,
11728 tree_cons (NULL_TREE, V2SI_type_node,
11729 endlink)));
11730 tree di_ftype_di_di
11731 = build_function_type (long_long_unsigned_type_node,
11732 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11733 tree_cons (NULL_TREE,
11734 long_long_unsigned_type_node,
11735 endlink)));
11737 /* Add all builtins that are more or less simple operations on two
11738 operands. */
11739 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11741 /* Use one of the operands; the target can have a different mode for
11742 mask-generating compares. */
11743 enum machine_mode mode;
11744 tree type;
11746 if (d->name == 0)
11747 continue;
11749 mode = insn_data[d->icode].operand[1].mode;
11751 switch (mode)
11753 case V8QImode:
11754 type = v8qi_ftype_v8qi_v8qi;
11755 break;
11756 case V4HImode:
11757 type = v4hi_ftype_v4hi_v4hi;
11758 break;
11759 case V2SImode:
11760 type = v2si_ftype_v2si_v2si;
11761 break;
11762 case DImode:
11763 type = di_ftype_di_di;
11764 break;
11766 default:
11767 abort ();
11770 def_mbuiltin (d->mask, d->name, type, d->code);
11773 /* Add the remaining MMX insns with somewhat more complicated types. */
11774 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11775 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11776 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11778 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11779 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11780 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11781 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11782 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11783 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11785 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11786 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11787 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11788 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11789 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11790 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11792 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11793 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11794 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11795 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11796 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11797 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11799 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11800 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11801 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11802 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11803 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11804 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11806 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11808 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11809 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11810 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11811 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11813 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11814 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11815 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11816 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11817 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11818 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11819 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11820 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11821 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11823 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11824 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11825 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11827 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11828 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11829 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11831 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11832 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11833 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11834 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11835 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11836 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11838 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11839 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11840 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11841 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11842 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11843 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11844 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11845 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11846 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11847 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11848 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11849 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11851 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11852 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11853 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11854 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11856 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11857 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11858 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11859 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11860 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11861 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11862 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
11865 static void
11866 arm_init_builtins (void)
11868 if (TARGET_REALLY_IWMMXT)
11869 arm_init_iwmmxt_builtins ();
11872 /* Errors in the source file can cause expand_expr to return const0_rtx
11873 where we expect a vector. To avoid crashing, use one of the vector
11874 clear instructions. */
11876 static rtx
11877 safe_vector_operand (rtx x, enum machine_mode mode)
11879 if (x != const0_rtx)
11880 return x;
11881 x = gen_reg_rtx (mode);
11883 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11884 : gen_rtx_SUBREG (DImode, x, 0)));
11885 return x;
11888 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11890 static rtx
11891 arm_expand_binop_builtin (enum insn_code icode,
11892 tree arglist, rtx target)
11894 rtx pat;
11895 tree arg0 = TREE_VALUE (arglist);
11896 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11897 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11898 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11899 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11900 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11901 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11903 if (VECTOR_MODE_P (mode0))
11904 op0 = safe_vector_operand (op0, mode0);
11905 if (VECTOR_MODE_P (mode1))
11906 op1 = safe_vector_operand (op1, mode1);
11908 if (! target
11909 || GET_MODE (target) != tmode
11910 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11911 target = gen_reg_rtx (tmode);
11913 /* In case the insn wants input operands in modes different from
11914 the result, abort. */
11915 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
11916 abort ();
11918 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11919 op0 = copy_to_mode_reg (mode0, op0);
11920 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11921 op1 = copy_to_mode_reg (mode1, op1);
11923 pat = GEN_FCN (icode) (target, op0, op1);
11924 if (! pat)
11925 return 0;
11926 emit_insn (pat);
11927 return target;
11930 /* Subroutine of arm_expand_builtin to take care of unop insns. */
11932 static rtx
11933 arm_expand_unop_builtin (enum insn_code icode,
11934 tree arglist, rtx target, int do_load)
11936 rtx pat;
11937 tree arg0 = TREE_VALUE (arglist);
11938 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11939 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11940 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11942 if (! target
11943 || GET_MODE (target) != tmode
11944 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11945 target = gen_reg_rtx (tmode);
11946 if (do_load)
11947 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11948 else
11950 if (VECTOR_MODE_P (mode0))
11951 op0 = safe_vector_operand (op0, mode0);
11953 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11954 op0 = copy_to_mode_reg (mode0, op0);
11957 pat = GEN_FCN (icode) (target, op0);
11958 if (! pat)
11959 return 0;
11960 emit_insn (pat);
11961 return target;
11964 /* Expand an expression EXP that calls a built-in function,
11965 with result going to TARGET if that's convenient
11966 (and in mode MODE if that's convenient).
11967 SUBTARGET may be used as the target for computing one of EXP's operands.
11968 IGNORE is nonzero if the value is to be ignored. */
11970 static rtx
11971 arm_expand_builtin (tree exp,
11972 rtx target,
11973 rtx subtarget ATTRIBUTE_UNUSED,
11974 enum machine_mode mode ATTRIBUTE_UNUSED,
11975 int ignore ATTRIBUTE_UNUSED)
11977 const struct builtin_description * d;
11978 enum insn_code icode;
11979 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
11980 tree arglist = TREE_OPERAND (exp, 1);
11981 tree arg0;
11982 tree arg1;
11983 tree arg2;
11984 rtx op0;
11985 rtx op1;
11986 rtx op2;
11987 rtx pat;
11988 int fcode = DECL_FUNCTION_CODE (fndecl);
11989 size_t i;
11990 enum machine_mode tmode;
11991 enum machine_mode mode0;
11992 enum machine_mode mode1;
11993 enum machine_mode mode2;
11995 switch (fcode)
11997 case ARM_BUILTIN_TEXTRMSB:
11998 case ARM_BUILTIN_TEXTRMUB:
11999 case ARM_BUILTIN_TEXTRMSH:
12000 case ARM_BUILTIN_TEXTRMUH:
12001 case ARM_BUILTIN_TEXTRMSW:
12002 case ARM_BUILTIN_TEXTRMUW:
12003 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12004 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12005 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12006 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12007 : CODE_FOR_iwmmxt_textrmw);
12009 arg0 = TREE_VALUE (arglist);
12010 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12011 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12012 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12013 tmode = insn_data[icode].operand[0].mode;
12014 mode0 = insn_data[icode].operand[1].mode;
12015 mode1 = insn_data[icode].operand[2].mode;
12017 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12018 op0 = copy_to_mode_reg (mode0, op0);
12019 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12021 /* @@@ better error message */
12022 error ("selector must be an immediate");
12023 return gen_reg_rtx (tmode);
12025 if (target == 0
12026 || GET_MODE (target) != tmode
12027 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12028 target = gen_reg_rtx (tmode);
12029 pat = GEN_FCN (icode) (target, op0, op1);
12030 if (! pat)
12031 return 0;
12032 emit_insn (pat);
12033 return target;
12035 case ARM_BUILTIN_TINSRB:
12036 case ARM_BUILTIN_TINSRH:
12037 case ARM_BUILTIN_TINSRW:
12038 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12039 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12040 : CODE_FOR_iwmmxt_tinsrw);
12041 arg0 = TREE_VALUE (arglist);
12042 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12043 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12044 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12045 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12046 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12047 tmode = insn_data[icode].operand[0].mode;
12048 mode0 = insn_data[icode].operand[1].mode;
12049 mode1 = insn_data[icode].operand[2].mode;
12050 mode2 = insn_data[icode].operand[3].mode;
12052 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12053 op0 = copy_to_mode_reg (mode0, op0);
12054 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12055 op1 = copy_to_mode_reg (mode1, op1);
12056 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12058 /* @@@ better error message */
12059 error ("selector must be an immediate");
12060 return const0_rtx;
12062 if (target == 0
12063 || GET_MODE (target) != tmode
12064 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12065 target = gen_reg_rtx (tmode);
12066 pat = GEN_FCN (icode) (target, op0, op1, op2);
12067 if (! pat)
12068 return 0;
12069 emit_insn (pat);
12070 return target;
12072 case ARM_BUILTIN_SETWCX:
12073 arg0 = TREE_VALUE (arglist);
12074 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12075 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12076 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12077 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12078 return 0;
12080 case ARM_BUILTIN_GETWCX:
12081 arg0 = TREE_VALUE (arglist);
12082 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12083 target = gen_reg_rtx (SImode);
12084 emit_insn (gen_iwmmxt_tmrc (target, op0));
12085 return target;
12087 case ARM_BUILTIN_WSHUFH:
12088 icode = CODE_FOR_iwmmxt_wshufh;
12089 arg0 = TREE_VALUE (arglist);
12090 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12091 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12092 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12093 tmode = insn_data[icode].operand[0].mode;
12094 mode1 = insn_data[icode].operand[1].mode;
12095 mode2 = insn_data[icode].operand[2].mode;
12097 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12098 op0 = copy_to_mode_reg (mode1, op0);
12099 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12101 /* @@@ better error message */
12102 error ("mask must be an immediate");
12103 return const0_rtx;
12105 if (target == 0
12106 || GET_MODE (target) != tmode
12107 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12108 target = gen_reg_rtx (tmode);
12109 pat = GEN_FCN (icode) (target, op0, op1);
12110 if (! pat)
12111 return 0;
12112 emit_insn (pat);
12113 return target;
12115 case ARM_BUILTIN_WSADB:
12116 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12117 case ARM_BUILTIN_WSADH:
12118 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12119 case ARM_BUILTIN_WSADBZ:
12120 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12121 case ARM_BUILTIN_WSADHZ:
12122 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12124 /* Several three-argument builtins. */
12125 case ARM_BUILTIN_WMACS:
12126 case ARM_BUILTIN_WMACU:
12127 case ARM_BUILTIN_WALIGN:
12128 case ARM_BUILTIN_TMIA:
12129 case ARM_BUILTIN_TMIAPH:
12130 case ARM_BUILTIN_TMIATT:
12131 case ARM_BUILTIN_TMIATB:
12132 case ARM_BUILTIN_TMIABT:
12133 case ARM_BUILTIN_TMIABB:
12134 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12135 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12136 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12137 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12138 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12139 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12140 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12141 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12142 : CODE_FOR_iwmmxt_walign);
12143 arg0 = TREE_VALUE (arglist);
12144 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12145 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12146 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12147 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12148 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12149 tmode = insn_data[icode].operand[0].mode;
12150 mode0 = insn_data[icode].operand[1].mode;
12151 mode1 = insn_data[icode].operand[2].mode;
12152 mode2 = insn_data[icode].operand[3].mode;
12154 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12155 op0 = copy_to_mode_reg (mode0, op0);
12156 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12157 op1 = copy_to_mode_reg (mode1, op1);
12158 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12159 op2 = copy_to_mode_reg (mode2, op2);
12160 if (target == 0
12161 || GET_MODE (target) != tmode
12162 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12163 target = gen_reg_rtx (tmode);
12164 pat = GEN_FCN (icode) (target, op0, op1, op2);
12165 if (! pat)
12166 return 0;
12167 emit_insn (pat);
12168 return target;
12170 case ARM_BUILTIN_WZERO:
12171 target = gen_reg_rtx (DImode);
12172 emit_insn (gen_iwmmxt_clrdi (target));
12173 return target;
12175 default:
12176 break;
12179 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12180 if (d->code == (const enum arm_builtins) fcode)
12181 return arm_expand_binop_builtin (d->icode, arglist, target);
12183 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12184 if (d->code == (const enum arm_builtins) fcode)
12185 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12187 /* @@@ Should really do something sensible here. */
12188 return NULL_RTX;
12191 /* Return the number (counting from 0) of
12192 the least significant set bit in MASK. */
12194 inline static int
12195 number_of_first_bit_set (unsigned mask)
12197 int bit;
12199 for (bit = 0;
12200 (mask & (1 << bit)) == 0;
12201 ++bit)
12202 continue;
12204 return bit;
12207 /* Emit code to push or pop registers to or from the stack. F is the
12208 assembly file. MASK is the registers to push or pop. PUSH is
12209 nonzero if we should push, and zero if we should pop. For debugging
12210 output, if pushing, adjust CFA_OFFSET by the amount of space added
12211 to the stack. REAL_REGS should have the same number of bits set as
12212 MASK, and will be used instead (in the same order) to describe which
12213 registers were saved - this is used to mark the save slots when we
12214 push high registers after moving them to low registers. */
12215 static void
12216 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12217 unsigned long real_regs)
12219 int regno;
12220 int lo_mask = mask & 0xFF;
12221 int pushed_words = 0;
12223 if (mask == 0)
12224 abort ();
12226 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12228 /* Special case. Do not generate a POP PC statement here, do it in
12229 thumb_exit() */
12230 thumb_exit (f, -1);
12231 return;
12234 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12236 /* Look at the low registers first. */
12237 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12239 if (lo_mask & 1)
12241 asm_fprintf (f, "%r", regno);
12243 if ((lo_mask & ~1) != 0)
12244 fprintf (f, ", ");
12246 pushed_words++;
12250 if (push && (mask & (1 << LR_REGNUM)))
12252 /* Catch pushing the LR. */
12253 if (mask & 0xFF)
12254 fprintf (f, ", ");
12256 asm_fprintf (f, "%r", LR_REGNUM);
12258 pushed_words++;
12260 else if (!push && (mask & (1 << PC_REGNUM)))
12262 /* Catch popping the PC. */
12263 if (TARGET_INTERWORK || TARGET_BACKTRACE
12264 || current_function_calls_eh_return)
12266 /* The PC is never poped directly, instead
12267 it is popped into r3 and then BX is used. */
12268 fprintf (f, "}\n");
12270 thumb_exit (f, -1);
12272 return;
12274 else
12276 if (mask & 0xFF)
12277 fprintf (f, ", ");
12279 asm_fprintf (f, "%r", PC_REGNUM);
12283 fprintf (f, "}\n");
12285 if (push && pushed_words && dwarf2out_do_frame ())
12287 char *l = dwarf2out_cfi_label ();
12288 int pushed_mask = real_regs;
12290 *cfa_offset += pushed_words * 4;
12291 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12293 pushed_words = 0;
12294 pushed_mask = real_regs;
12295 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12297 if (pushed_mask & 1)
12298 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
12303 /* Generate code to return from a thumb function.
12304 If 'reg_containing_return_addr' is -1, then the return address is
12305 actually on the stack, at the stack pointer. */
12306 static void
12307 thumb_exit (FILE *f, int reg_containing_return_addr)
12309 unsigned regs_available_for_popping;
12310 unsigned regs_to_pop;
12311 int pops_needed;
12312 unsigned available;
12313 unsigned required;
12314 int mode;
12315 int size;
12316 int restore_a4 = FALSE;
12318 /* Compute the registers we need to pop. */
12319 regs_to_pop = 0;
12320 pops_needed = 0;
12322 if (reg_containing_return_addr == -1)
12324 regs_to_pop |= 1 << LR_REGNUM;
12325 ++pops_needed;
12328 if (TARGET_BACKTRACE)
12330 /* Restore the (ARM) frame pointer and stack pointer. */
12331 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12332 pops_needed += 2;
12335 /* If there is nothing to pop then just emit the BX instruction and
12336 return. */
12337 if (pops_needed == 0)
12339 if (current_function_calls_eh_return)
12340 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12342 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12343 return;
12345 /* Otherwise if we are not supporting interworking and we have not created
12346 a backtrace structure and the function was not entered in ARM mode then
12347 just pop the return address straight into the PC. */
12348 else if (!TARGET_INTERWORK
12349 && !TARGET_BACKTRACE
12350 && !is_called_in_ARM_mode (current_function_decl)
12351 && !current_function_calls_eh_return)
12353 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12354 return;
12357 /* Find out how many of the (return) argument registers we can corrupt. */
12358 regs_available_for_popping = 0;
12360 /* If returning via __builtin_eh_return, the bottom three registers
12361 all contain information needed for the return. */
12362 if (current_function_calls_eh_return)
12363 size = 12;
12364 else
12366 /* If we can deduce the registers used from the function's
12367 return value. This is more reliable that examining
12368 regs_ever_live[] because that will be set if the register is
12369 ever used in the function, not just if the register is used
12370 to hold a return value. */
12372 if (current_function_return_rtx != 0)
12373 mode = GET_MODE (current_function_return_rtx);
12374 else
12375 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12377 size = GET_MODE_SIZE (mode);
12379 if (size == 0)
12381 /* In a void function we can use any argument register.
12382 In a function that returns a structure on the stack
12383 we can use the second and third argument registers. */
12384 if (mode == VOIDmode)
12385 regs_available_for_popping =
12386 (1 << ARG_REGISTER (1))
12387 | (1 << ARG_REGISTER (2))
12388 | (1 << ARG_REGISTER (3));
12389 else
12390 regs_available_for_popping =
12391 (1 << ARG_REGISTER (2))
12392 | (1 << ARG_REGISTER (3));
12394 else if (size <= 4)
12395 regs_available_for_popping =
12396 (1 << ARG_REGISTER (2))
12397 | (1 << ARG_REGISTER (3));
12398 else if (size <= 8)
12399 regs_available_for_popping =
12400 (1 << ARG_REGISTER (3));
12403 /* Match registers to be popped with registers into which we pop them. */
12404 for (available = regs_available_for_popping,
12405 required = regs_to_pop;
12406 required != 0 && available != 0;
12407 available &= ~(available & - available),
12408 required &= ~(required & - required))
12409 -- pops_needed;
12411 /* If we have any popping registers left over, remove them. */
12412 if (available > 0)
12413 regs_available_for_popping &= ~available;
12415 /* Otherwise if we need another popping register we can use
12416 the fourth argument register. */
12417 else if (pops_needed)
12419 /* If we have not found any free argument registers and
12420 reg a4 contains the return address, we must move it. */
12421 if (regs_available_for_popping == 0
12422 && reg_containing_return_addr == LAST_ARG_REGNUM)
12424 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12425 reg_containing_return_addr = LR_REGNUM;
12427 else if (size > 12)
12429 /* Register a4 is being used to hold part of the return value,
12430 but we have dire need of a free, low register. */
12431 restore_a4 = TRUE;
12433 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12436 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12438 /* The fourth argument register is available. */
12439 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12441 --pops_needed;
12445 /* Pop as many registers as we can. */
12446 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12447 regs_available_for_popping);
12449 /* Process the registers we popped. */
12450 if (reg_containing_return_addr == -1)
12452 /* The return address was popped into the lowest numbered register. */
12453 regs_to_pop &= ~(1 << LR_REGNUM);
12455 reg_containing_return_addr =
12456 number_of_first_bit_set (regs_available_for_popping);
12458 /* Remove this register for the mask of available registers, so that
12459 the return address will not be corrupted by further pops. */
12460 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12463 /* If we popped other registers then handle them here. */
12464 if (regs_available_for_popping)
12466 int frame_pointer;
12468 /* Work out which register currently contains the frame pointer. */
12469 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12471 /* Move it into the correct place. */
12472 asm_fprintf (f, "\tmov\t%r, %r\n",
12473 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12475 /* (Temporarily) remove it from the mask of popped registers. */
12476 regs_available_for_popping &= ~(1 << frame_pointer);
12477 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12479 if (regs_available_for_popping)
12481 int stack_pointer;
12483 /* We popped the stack pointer as well,
12484 find the register that contains it. */
12485 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12487 /* Move it into the stack register. */
12488 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12490 /* At this point we have popped all necessary registers, so
12491 do not worry about restoring regs_available_for_popping
12492 to its correct value:
12494 assert (pops_needed == 0)
12495 assert (regs_available_for_popping == (1 << frame_pointer))
12496 assert (regs_to_pop == (1 << STACK_POINTER)) */
12498 else
12500 /* Since we have just move the popped value into the frame
12501 pointer, the popping register is available for reuse, and
12502 we know that we still have the stack pointer left to pop. */
12503 regs_available_for_popping |= (1 << frame_pointer);
12507 /* If we still have registers left on the stack, but we no longer have
12508 any registers into which we can pop them, then we must move the return
12509 address into the link register and make available the register that
12510 contained it. */
12511 if (regs_available_for_popping == 0 && pops_needed > 0)
12513 regs_available_for_popping |= 1 << reg_containing_return_addr;
12515 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12516 reg_containing_return_addr);
12518 reg_containing_return_addr = LR_REGNUM;
12521 /* If we have registers left on the stack then pop some more.
12522 We know that at most we will want to pop FP and SP. */
12523 if (pops_needed > 0)
12525 int popped_into;
12526 int move_to;
12528 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12529 regs_available_for_popping);
12531 /* We have popped either FP or SP.
12532 Move whichever one it is into the correct register. */
12533 popped_into = number_of_first_bit_set (regs_available_for_popping);
12534 move_to = number_of_first_bit_set (regs_to_pop);
12536 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12538 regs_to_pop &= ~(1 << move_to);
12540 --pops_needed;
12543 /* If we still have not popped everything then we must have only
12544 had one register available to us and we are now popping the SP. */
12545 if (pops_needed > 0)
12547 int popped_into;
12549 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12550 regs_available_for_popping);
12552 popped_into = number_of_first_bit_set (regs_available_for_popping);
12554 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12556 assert (regs_to_pop == (1 << STACK_POINTER))
12557 assert (pops_needed == 1)
12561 /* If necessary restore the a4 register. */
12562 if (restore_a4)
12564 if (reg_containing_return_addr != LR_REGNUM)
12566 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12567 reg_containing_return_addr = LR_REGNUM;
12570 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12573 if (current_function_calls_eh_return)
12574 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12576 /* Return to caller. */
12577 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12581 void
12582 thumb_final_prescan_insn (rtx insn)
12584 if (flag_print_asm_name)
12585 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12586 INSN_ADDRESSES (INSN_UID (insn)));
12590 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12592 unsigned HOST_WIDE_INT mask = 0xff;
12593 int i;
12595 if (val == 0) /* XXX */
12596 return 0;
12598 for (i = 0; i < 25; i++)
12599 if ((val & (mask << i)) == val)
12600 return 1;
12602 return 0;
12605 /* Returns nonzero if the current function contains,
12606 or might contain a far jump. */
12607 static int
12608 thumb_far_jump_used_p (void)
12610 rtx insn;
12612 /* This test is only important for leaf functions. */
12613 /* assert (!leaf_function_p ()); */
12615 /* If we have already decided that far jumps may be used,
12616 do not bother checking again, and always return true even if
12617 it turns out that they are not being used. Once we have made
12618 the decision that far jumps are present (and that hence the link
12619 register will be pushed onto the stack) we cannot go back on it. */
12620 if (cfun->machine->far_jump_used)
12621 return 1;
12623 /* If this function is not being called from the prologue/epilogue
12624 generation code then it must be being called from the
12625 INITIAL_ELIMINATION_OFFSET macro. */
12626 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12628 /* In this case we know that we are being asked about the elimination
12629 of the arg pointer register. If that register is not being used,
12630 then there are no arguments on the stack, and we do not have to
12631 worry that a far jump might force the prologue to push the link
12632 register, changing the stack offsets. In this case we can just
12633 return false, since the presence of far jumps in the function will
12634 not affect stack offsets.
12636 If the arg pointer is live (or if it was live, but has now been
12637 eliminated and so set to dead) then we do have to test to see if
12638 the function might contain a far jump. This test can lead to some
12639 false negatives, since before reload is completed, then length of
12640 branch instructions is not known, so gcc defaults to returning their
12641 longest length, which in turn sets the far jump attribute to true.
12643 A false negative will not result in bad code being generated, but it
12644 will result in a needless push and pop of the link register. We
12645 hope that this does not occur too often.
12647 If we need doubleword stack alignment this could affect the other
12648 elimination offsets so we can't risk getting it wrong. */
12649 if (regs_ever_live [ARG_POINTER_REGNUM])
12650 cfun->machine->arg_pointer_live = 1;
12651 else if (!cfun->machine->arg_pointer_live)
12652 return 0;
12655 /* Check to see if the function contains a branch
12656 insn with the far jump attribute set. */
12657 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12659 if (GET_CODE (insn) == JUMP_INSN
12660 /* Ignore tablejump patterns. */
12661 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12662 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12663 && get_attr_far_jump (insn) == FAR_JUMP_YES
12666 /* Record the fact that we have decided that
12667 the function does use far jumps. */
12668 cfun->machine->far_jump_used = 1;
12669 return 1;
12673 return 0;
12676 /* Return nonzero if FUNC must be entered in ARM mode. */
12678 is_called_in_ARM_mode (tree func)
12680 if (TREE_CODE (func) != FUNCTION_DECL)
12681 abort ();
12683 /* Ignore the problem about functions whoes address is taken. */
12684 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12685 return TRUE;
12687 #ifdef ARM_PE
12688 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12689 #else
12690 return FALSE;
12691 #endif
12694 /* The bits which aren't usefully expanded as rtl. */
12695 const char *
12696 thumb_unexpanded_epilogue (void)
12698 int regno;
12699 unsigned long live_regs_mask = 0;
12700 int high_regs_pushed = 0;
12701 int had_to_push_lr;
12702 int size;
12703 int mode;
12705 if (return_used_this_function)
12706 return "";
12708 if (IS_NAKED (arm_current_func_type ()))
12709 return "";
12711 live_regs_mask = thumb_compute_save_reg_mask ();
12712 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12714 /* If we can deduce the registers used from the function's return value.
12715 This is more reliable that examining regs_ever_live[] because that
12716 will be set if the register is ever used in the function, not just if
12717 the register is used to hold a return value. */
12719 if (current_function_return_rtx != 0)
12720 mode = GET_MODE (current_function_return_rtx);
12721 else
12722 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12724 size = GET_MODE_SIZE (mode);
12726 /* The prolog may have pushed some high registers to use as
12727 work registers. e.g. the testsuite file:
12728 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12729 compiles to produce:
12730 push {r4, r5, r6, r7, lr}
12731 mov r7, r9
12732 mov r6, r8
12733 push {r6, r7}
12734 as part of the prolog. We have to undo that pushing here. */
12736 if (high_regs_pushed)
12738 unsigned long mask = live_regs_mask & 0xff;
12739 int next_hi_reg;
12741 /* The available low registers depend on the size of the value we are
12742 returning. */
12743 if (size <= 12)
12744 mask |= 1 << 3;
12745 if (size <= 8)
12746 mask |= 1 << 2;
12748 if (mask == 0)
12749 /* Oh dear! We have no low registers into which we can pop
12750 high registers! */
12751 internal_error
12752 ("no low registers available for popping high registers");
12754 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12755 if (live_regs_mask & (1 << next_hi_reg))
12756 break;
12758 while (high_regs_pushed)
12760 /* Find lo register(s) into which the high register(s) can
12761 be popped. */
12762 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12764 if (mask & (1 << regno))
12765 high_regs_pushed--;
12766 if (high_regs_pushed == 0)
12767 break;
12770 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12772 /* Pop the values into the low register(s). */
12773 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12775 /* Move the value(s) into the high registers. */
12776 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12778 if (mask & (1 << regno))
12780 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12781 regno);
12783 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12784 if (live_regs_mask & (1 << next_hi_reg))
12785 break;
12789 live_regs_mask &= ~0x0f00;
12792 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12793 live_regs_mask &= 0xff;
12795 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12797 /* Pop the return address into the PC. */
12798 if (had_to_push_lr)
12799 live_regs_mask |= 1 << PC_REGNUM;
12801 /* Either no argument registers were pushed or a backtrace
12802 structure was created which includes an adjusted stack
12803 pointer, so just pop everything. */
12804 if (live_regs_mask)
12805 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12806 live_regs_mask);
12808 /* We have either just popped the return address into the
12809 PC or it is was kept in LR for the entire function. */
12810 if (!had_to_push_lr)
12811 thumb_exit (asm_out_file, LR_REGNUM);
12813 else
12815 /* Pop everything but the return address. */
12816 if (live_regs_mask)
12817 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12818 live_regs_mask);
12820 if (had_to_push_lr)
12822 if (size > 12)
12824 /* We have no free low regs, so save one. */
12825 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
12826 LAST_ARG_REGNUM);
12829 /* Get the return address into a temporary register. */
12830 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12831 1 << LAST_ARG_REGNUM);
12833 if (size > 12)
12835 /* Move the return address to lr. */
12836 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
12837 LAST_ARG_REGNUM);
12838 /* Restore the low register. */
12839 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
12840 IP_REGNUM);
12841 regno = LR_REGNUM;
12843 else
12844 regno = LAST_ARG_REGNUM;
12846 else
12847 regno = LR_REGNUM;
12849 /* Remove the argument registers that were pushed onto the stack. */
12850 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12851 SP_REGNUM, SP_REGNUM,
12852 current_function_pretend_args_size);
12854 thumb_exit (asm_out_file, regno);
12857 return "";
12860 /* Functions to save and restore machine-specific function data. */
12861 static struct machine_function *
12862 arm_init_machine_status (void)
12864 struct machine_function *machine;
12865 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12867 #if ARM_FT_UNKNOWN != 0
12868 machine->func_type = ARM_FT_UNKNOWN;
12869 #endif
12870 return machine;
12873 /* Return an RTX indicating where the return address to the
12874 calling function can be found. */
12876 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12878 if (count != 0)
12879 return NULL_RTX;
12881 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
12884 /* Do anything needed before RTL is emitted for each function. */
12885 void
12886 arm_init_expanders (void)
12888 /* Arrange to initialize and mark the machine per-function status. */
12889 init_machine_status = arm_init_machine_status;
12891 /* This is to stop the combine pass optimizing away the alignment
12892 adjustment of va_arg. */
12893 /* ??? It is claimed that this should not be necessary. */
12894 if (cfun)
12895 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
12899 /* Like arm_compute_initial_elimination offset. Simpler because
12900 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
12902 HOST_WIDE_INT
12903 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
12905 arm_stack_offsets *offsets;
12907 offsets = arm_get_frame_offsets ();
12909 switch (from)
12911 case ARG_POINTER_REGNUM:
12912 switch (to)
12914 case STACK_POINTER_REGNUM:
12915 return offsets->outgoing_args - offsets->saved_args;
12917 case FRAME_POINTER_REGNUM:
12918 return offsets->soft_frame - offsets->saved_args;
12920 case THUMB_HARD_FRAME_POINTER_REGNUM:
12921 case ARM_HARD_FRAME_POINTER_REGNUM:
12922 return offsets->saved_regs - offsets->saved_args;
12924 default:
12925 abort();
12927 break;
12929 case FRAME_POINTER_REGNUM:
12930 switch (to)
12932 case STACK_POINTER_REGNUM:
12933 return offsets->outgoing_args - offsets->soft_frame;
12935 case THUMB_HARD_FRAME_POINTER_REGNUM:
12936 case ARM_HARD_FRAME_POINTER_REGNUM:
12937 return offsets->saved_regs - offsets->soft_frame;
12939 default:
12940 abort();
12942 break;
12944 default:
12945 abort ();
12950 /* Generate the rest of a function's prologue. */
12951 void
12952 thumb_expand_prologue (void)
12954 rtx insn, dwarf;
12956 HOST_WIDE_INT amount;
12957 arm_stack_offsets *offsets;
12958 unsigned long func_type;
12959 int regno;
12960 unsigned long live_regs_mask;
12962 func_type = arm_current_func_type ();
12964 /* Naked functions don't have prologues. */
12965 if (IS_NAKED (func_type))
12966 return;
12968 if (IS_INTERRUPT (func_type))
12970 error ("interrupt Service Routines cannot be coded in Thumb mode");
12971 return;
12974 live_regs_mask = thumb_compute_save_reg_mask ();
12975 /* Load the pic register before setting the frame pointer,
12976 so we can use r7 as a temporary work register. */
12977 if (flag_pic)
12978 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
12980 offsets = arm_get_frame_offsets ();
12982 if (frame_pointer_needed)
12984 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
12985 stack_pointer_rtx));
12986 RTX_FRAME_RELATED_P (insn) = 1;
12988 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
12989 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
12990 stack_pointer_rtx);
12992 amount = offsets->outgoing_args - offsets->saved_regs;
12993 if (amount)
12995 if (amount < 512)
12997 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
12998 GEN_INT (- amount)));
12999 RTX_FRAME_RELATED_P (insn) = 1;
13001 else
13003 rtx reg;
13005 /* The stack decrement is too big for an immediate value in a single
13006 insn. In theory we could issue multiple subtracts, but after
13007 three of them it becomes more space efficient to place the full
13008 value in the constant pool and load into a register. (Also the
13009 ARM debugger really likes to see only one stack decrement per
13010 function). So instead we look for a scratch register into which
13011 we can load the decrement, and then we subtract this from the
13012 stack pointer. Unfortunately on the thumb the only available
13013 scratch registers are the argument registers, and we cannot use
13014 these as they may hold arguments to the function. Instead we
13015 attempt to locate a call preserved register which is used by this
13016 function. If we can find one, then we know that it will have
13017 been pushed at the start of the prologue and so we can corrupt
13018 it now. */
13019 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13020 if (live_regs_mask & (1 << regno)
13021 && !(frame_pointer_needed
13022 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13023 break;
13025 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13027 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13029 /* Choose an arbitrary, non-argument low register. */
13030 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13032 /* Save it by copying it into a high, scratch register. */
13033 emit_insn (gen_movsi (spare, reg));
13034 /* Add a USE to stop propagate_one_insn() from barfing. */
13035 emit_insn (gen_prologue_use (spare));
13037 /* Decrement the stack. */
13038 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13039 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13040 stack_pointer_rtx, reg));
13041 RTX_FRAME_RELATED_P (insn) = 1;
13042 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13043 plus_constant (stack_pointer_rtx,
13044 -amount));
13045 RTX_FRAME_RELATED_P (dwarf) = 1;
13046 REG_NOTES (insn)
13047 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13048 REG_NOTES (insn));
13050 /* Restore the low register's original value. */
13051 emit_insn (gen_movsi (reg, spare));
13053 /* Emit a USE of the restored scratch register, so that flow
13054 analysis will not consider the restore redundant. The
13055 register won't be used again in this function and isn't
13056 restored by the epilogue. */
13057 emit_insn (gen_prologue_use (reg));
13059 else
13061 reg = gen_rtx_REG (SImode, regno);
13063 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13065 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13066 stack_pointer_rtx, reg));
13067 RTX_FRAME_RELATED_P (insn) = 1;
13068 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13069 plus_constant (stack_pointer_rtx,
13070 -amount));
13071 RTX_FRAME_RELATED_P (dwarf) = 1;
13072 REG_NOTES (insn)
13073 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13074 REG_NOTES (insn));
13077 /* If the frame pointer is needed, emit a special barrier that
13078 will prevent the scheduler from moving stores to the frame
13079 before the stack adjustment. */
13080 if (frame_pointer_needed)
13081 emit_insn (gen_stack_tie (stack_pointer_rtx,
13082 hard_frame_pointer_rtx));
13085 if (current_function_profile || TARGET_NO_SCHED_PRO)
13086 emit_insn (gen_blockage ());
13088 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13089 if (live_regs_mask & 0xff)
13090 cfun->machine->lr_save_eliminated = 0;
13092 /* If the link register is being kept alive, with the return address in it,
13093 then make sure that it does not get reused by the ce2 pass. */
13094 if (cfun->machine->lr_save_eliminated)
13095 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13099 void
13100 thumb_expand_epilogue (void)
13102 HOST_WIDE_INT amount;
13103 arm_stack_offsets *offsets;
13104 int regno;
13106 /* Naked functions don't have prologues. */
13107 if (IS_NAKED (arm_current_func_type ()))
13108 return;
13110 offsets = arm_get_frame_offsets ();
13111 amount = offsets->outgoing_args - offsets->saved_regs;
13113 if (frame_pointer_needed)
13114 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13115 else if (amount)
13117 if (amount < 512)
13118 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13119 GEN_INT (amount)));
13120 else
13122 /* r3 is always free in the epilogue. */
13123 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13125 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13126 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13130 /* Emit a USE (stack_pointer_rtx), so that
13131 the stack adjustment will not be deleted. */
13132 emit_insn (gen_prologue_use (stack_pointer_rtx));
13134 if (current_function_profile || TARGET_NO_SCHED_PRO)
13135 emit_insn (gen_blockage ());
13137 /* Emit a clobber for each insn that will be restored in the epilogue,
13138 so that flow2 will get register lifetimes correct. */
13139 for (regno = 0; regno < 13; regno++)
13140 if (regs_ever_live[regno] && !call_used_regs[regno])
13141 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13143 if (! regs_ever_live[LR_REGNUM])
13144 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13147 static void
13148 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13150 unsigned long live_regs_mask = 0;
13151 unsigned long l_mask;
13152 unsigned high_regs_pushed = 0;
13153 int cfa_offset = 0;
13154 int regno;
13156 if (IS_NAKED (arm_current_func_type ()))
13157 return;
13159 if (is_called_in_ARM_mode (current_function_decl))
13161 const char * name;
13163 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13164 abort ();
13165 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13166 abort ();
13167 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13169 /* Generate code sequence to switch us into Thumb mode. */
13170 /* The .code 32 directive has already been emitted by
13171 ASM_DECLARE_FUNCTION_NAME. */
13172 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13173 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13175 /* Generate a label, so that the debugger will notice the
13176 change in instruction sets. This label is also used by
13177 the assembler to bypass the ARM code when this function
13178 is called from a Thumb encoded function elsewhere in the
13179 same file. Hence the definition of STUB_NAME here must
13180 agree with the definition in gas/config/tc-arm.c. */
13182 #define STUB_NAME ".real_start_of"
13184 fprintf (f, "\t.code\t16\n");
13185 #ifdef ARM_PE
13186 if (arm_dllexport_name_p (name))
13187 name = arm_strip_name_encoding (name);
13188 #endif
13189 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13190 fprintf (f, "\t.thumb_func\n");
13191 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13194 if (current_function_pretend_args_size)
13196 if (cfun->machine->uses_anonymous_args)
13198 int num_pushes;
13200 fprintf (f, "\tpush\t{");
13202 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13204 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13205 regno <= LAST_ARG_REGNUM;
13206 regno++)
13207 asm_fprintf (f, "%r%s", regno,
13208 regno == LAST_ARG_REGNUM ? "" : ", ");
13210 fprintf (f, "}\n");
13212 else
13213 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13214 SP_REGNUM, SP_REGNUM,
13215 current_function_pretend_args_size);
13217 /* We don't need to record the stores for unwinding (would it
13218 help the debugger any if we did?), but record the change in
13219 the stack pointer. */
13220 if (dwarf2out_do_frame ())
13222 char *l = dwarf2out_cfi_label ();
13224 cfa_offset = cfa_offset + current_function_pretend_args_size;
13225 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13229 /* Get the registers we are going to push. */
13230 live_regs_mask = thumb_compute_save_reg_mask ();
13231 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13232 l_mask = live_regs_mask & 0x40ff;
13233 /* Then count how many other high registers will need to be pushed. */
13234 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13236 if (TARGET_BACKTRACE)
13238 unsigned offset;
13239 unsigned work_register;
13241 /* We have been asked to create a stack backtrace structure.
13242 The code looks like this:
13244 0 .align 2
13245 0 func:
13246 0 sub SP, #16 Reserve space for 4 registers.
13247 2 push {R7} Push low registers.
13248 4 add R7, SP, #20 Get the stack pointer before the push.
13249 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13250 8 mov R7, PC Get hold of the start of this code plus 12.
13251 10 str R7, [SP, #16] Store it.
13252 12 mov R7, FP Get hold of the current frame pointer.
13253 14 str R7, [SP, #4] Store it.
13254 16 mov R7, LR Get hold of the current return address.
13255 18 str R7, [SP, #12] Store it.
13256 20 add R7, SP, #16 Point at the start of the backtrace structure.
13257 22 mov FP, R7 Put this value into the frame pointer. */
13259 work_register = thumb_find_work_register (live_regs_mask);
13261 asm_fprintf
13262 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13263 SP_REGNUM, SP_REGNUM);
13265 if (dwarf2out_do_frame ())
13267 char *l = dwarf2out_cfi_label ();
13269 cfa_offset = cfa_offset + 16;
13270 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13273 if (l_mask)
13275 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13276 offset = bit_count (l_mask);
13278 else
13279 offset = 0;
13281 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13282 offset + 16 + current_function_pretend_args_size);
13284 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13285 offset + 4);
13287 /* Make sure that the instruction fetching the PC is in the right place
13288 to calculate "start of backtrace creation code + 12". */
13289 if (l_mask)
13291 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13292 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13293 offset + 12);
13294 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13295 ARM_HARD_FRAME_POINTER_REGNUM);
13296 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13297 offset);
13299 else
13301 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13302 ARM_HARD_FRAME_POINTER_REGNUM);
13303 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13304 offset);
13305 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13306 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13307 offset + 12);
13310 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13311 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13312 offset + 8);
13313 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13314 offset + 12);
13315 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13316 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13318 /* Optimisation: If we are not pushing any low registers but we are going
13319 to push some high registers then delay our first push. This will just
13320 be a push of LR and we can combine it with the push of the first high
13321 register. */
13322 else if ((l_mask & 0xff) != 0
13323 || (high_regs_pushed == 0 && l_mask))
13324 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13326 if (high_regs_pushed)
13328 unsigned pushable_regs;
13329 unsigned next_hi_reg;
13331 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13332 if (live_regs_mask & (1 << next_hi_reg))
13333 break;
13335 pushable_regs = l_mask & 0xff;
13337 if (pushable_regs == 0)
13338 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13340 while (high_regs_pushed > 0)
13342 unsigned long real_regs_mask = 0;
13344 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13346 if (pushable_regs & (1 << regno))
13348 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13350 high_regs_pushed --;
13351 real_regs_mask |= (1 << next_hi_reg);
13353 if (high_regs_pushed)
13355 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13356 next_hi_reg --)
13357 if (live_regs_mask & (1 << next_hi_reg))
13358 break;
13360 else
13362 pushable_regs &= ~((1 << regno) - 1);
13363 break;
13368 /* If we had to find a work register and we have not yet
13369 saved the LR then add it to the list of regs to push. */
13370 if (l_mask == (1 << LR_REGNUM))
13372 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13373 1, &cfa_offset,
13374 real_regs_mask | (1 << LR_REGNUM));
13375 l_mask = 0;
13377 else
13378 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13383 /* Handle the case of a double word load into a low register from
13384 a computed memory address. The computed address may involve a
13385 register which is overwritten by the load. */
13386 const char *
13387 thumb_load_double_from_address (rtx *operands)
13389 rtx addr;
13390 rtx base;
13391 rtx offset;
13392 rtx arg1;
13393 rtx arg2;
13395 if (GET_CODE (operands[0]) != REG)
13396 abort ();
13398 if (GET_CODE (operands[1]) != MEM)
13399 abort ();
13401 /* Get the memory address. */
13402 addr = XEXP (operands[1], 0);
13404 /* Work out how the memory address is computed. */
13405 switch (GET_CODE (addr))
13407 case REG:
13408 operands[2] = gen_rtx_MEM (SImode,
13409 plus_constant (XEXP (operands[1], 0), 4));
13411 if (REGNO (operands[0]) == REGNO (addr))
13413 output_asm_insn ("ldr\t%H0, %2", operands);
13414 output_asm_insn ("ldr\t%0, %1", operands);
13416 else
13418 output_asm_insn ("ldr\t%0, %1", operands);
13419 output_asm_insn ("ldr\t%H0, %2", operands);
13421 break;
13423 case CONST:
13424 /* Compute <address> + 4 for the high order load. */
13425 operands[2] = gen_rtx_MEM (SImode,
13426 plus_constant (XEXP (operands[1], 0), 4));
13428 output_asm_insn ("ldr\t%0, %1", operands);
13429 output_asm_insn ("ldr\t%H0, %2", operands);
13430 break;
13432 case PLUS:
13433 arg1 = XEXP (addr, 0);
13434 arg2 = XEXP (addr, 1);
13436 if (CONSTANT_P (arg1))
13437 base = arg2, offset = arg1;
13438 else
13439 base = arg1, offset = arg2;
13441 if (GET_CODE (base) != REG)
13442 abort ();
13444 /* Catch the case of <address> = <reg> + <reg> */
13445 if (GET_CODE (offset) == REG)
13447 int reg_offset = REGNO (offset);
13448 int reg_base = REGNO (base);
13449 int reg_dest = REGNO (operands[0]);
13451 /* Add the base and offset registers together into the
13452 higher destination register. */
13453 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13454 reg_dest + 1, reg_base, reg_offset);
13456 /* Load the lower destination register from the address in
13457 the higher destination register. */
13458 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13459 reg_dest, reg_dest + 1);
13461 /* Load the higher destination register from its own address
13462 plus 4. */
13463 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13464 reg_dest + 1, reg_dest + 1);
13466 else
13468 /* Compute <address> + 4 for the high order load. */
13469 operands[2] = gen_rtx_MEM (SImode,
13470 plus_constant (XEXP (operands[1], 0), 4));
13472 /* If the computed address is held in the low order register
13473 then load the high order register first, otherwise always
13474 load the low order register first. */
13475 if (REGNO (operands[0]) == REGNO (base))
13477 output_asm_insn ("ldr\t%H0, %2", operands);
13478 output_asm_insn ("ldr\t%0, %1", operands);
13480 else
13482 output_asm_insn ("ldr\t%0, %1", operands);
13483 output_asm_insn ("ldr\t%H0, %2", operands);
13486 break;
13488 case LABEL_REF:
13489 /* With no registers to worry about we can just load the value
13490 directly. */
13491 operands[2] = gen_rtx_MEM (SImode,
13492 plus_constant (XEXP (operands[1], 0), 4));
13494 output_asm_insn ("ldr\t%H0, %2", operands);
13495 output_asm_insn ("ldr\t%0, %1", operands);
13496 break;
13498 default:
13499 abort ();
13500 break;
13503 return "";
13506 const char *
13507 thumb_output_move_mem_multiple (int n, rtx *operands)
13509 rtx tmp;
13511 switch (n)
13513 case 2:
13514 if (REGNO (operands[4]) > REGNO (operands[5]))
13516 tmp = operands[4];
13517 operands[4] = operands[5];
13518 operands[5] = tmp;
13520 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13521 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13522 break;
13524 case 3:
13525 if (REGNO (operands[4]) > REGNO (operands[5]))
13527 tmp = operands[4];
13528 operands[4] = operands[5];
13529 operands[5] = tmp;
13531 if (REGNO (operands[5]) > REGNO (operands[6]))
13533 tmp = operands[5];
13534 operands[5] = operands[6];
13535 operands[6] = tmp;
13537 if (REGNO (operands[4]) > REGNO (operands[5]))
13539 tmp = operands[4];
13540 operands[4] = operands[5];
13541 operands[5] = tmp;
13544 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13545 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13546 break;
13548 default:
13549 abort ();
13552 return "";
13555 /* Output a call-via instruction for thumb state. */
13556 const char *
13557 thumb_call_via_reg (rtx reg)
13559 int regno = REGNO (reg);
13560 rtx *labelp;
13562 gcc_assert (regno < LR_REGNUM);
13564 /* If we are in the normal text section we can use a single instance
13565 per compilation unit. If we are doing function sections, then we need
13566 an entry per section, since we can't rely on reachability. */
13567 if (in_text_section ())
13569 thumb_call_reg_needed = 1;
13571 if (thumb_call_via_label[regno] == NULL)
13572 thumb_call_via_label[regno] = gen_label_rtx ();
13573 labelp = thumb_call_via_label + regno;
13575 else
13577 if (cfun->machine->call_via[regno] == NULL)
13578 cfun->machine->call_via[regno] = gen_label_rtx ();
13579 labelp = cfun->machine->call_via + regno;
13582 output_asm_insn ("bl\t%a0", labelp);
13583 return "";
13586 /* Routines for generating rtl. */
13587 void
13588 thumb_expand_movmemqi (rtx *operands)
13590 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13591 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13592 HOST_WIDE_INT len = INTVAL (operands[2]);
13593 HOST_WIDE_INT offset = 0;
13595 while (len >= 12)
13597 emit_insn (gen_movmem12b (out, in, out, in));
13598 len -= 12;
13601 if (len >= 8)
13603 emit_insn (gen_movmem8b (out, in, out, in));
13604 len -= 8;
13607 if (len >= 4)
13609 rtx reg = gen_reg_rtx (SImode);
13610 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13611 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13612 len -= 4;
13613 offset += 4;
13616 if (len >= 2)
13618 rtx reg = gen_reg_rtx (HImode);
13619 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13620 plus_constant (in, offset))));
13621 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13622 reg));
13623 len -= 2;
13624 offset += 2;
13627 if (len)
13629 rtx reg = gen_reg_rtx (QImode);
13630 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13631 plus_constant (in, offset))));
13632 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13633 reg));
13637 void
13638 thumb_reload_out_hi (rtx *operands)
13640 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13643 /* Handle reading a half-word from memory during reload. */
13644 void
13645 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13647 abort ();
13650 /* Return the length of a function name prefix
13651 that starts with the character 'c'. */
13652 static int
13653 arm_get_strip_length (int c)
13655 switch (c)
13657 ARM_NAME_ENCODING_LENGTHS
13658 default: return 0;
13662 /* Return a pointer to a function's name with any
13663 and all prefix encodings stripped from it. */
13664 const char *
13665 arm_strip_name_encoding (const char *name)
13667 int skip;
13669 while ((skip = arm_get_strip_length (* name)))
13670 name += skip;
13672 return name;
13675 /* If there is a '*' anywhere in the name's prefix, then
13676 emit the stripped name verbatim, otherwise prepend an
13677 underscore if leading underscores are being used. */
13678 void
13679 arm_asm_output_labelref (FILE *stream, const char *name)
13681 int skip;
13682 int verbatim = 0;
13684 while ((skip = arm_get_strip_length (* name)))
13686 verbatim |= (*name == '*');
13687 name += skip;
13690 if (verbatim)
13691 fputs (name, stream);
13692 else
13693 asm_fprintf (stream, "%U%s", name);
13696 static void
13697 arm_file_end (void)
13699 int regno;
13701 if (! thumb_call_reg_needed)
13702 return;
13704 text_section ();
13705 asm_fprintf (asm_out_file, "\t.code 16\n");
13706 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13708 for (regno = 0; regno < LR_REGNUM; regno++)
13710 rtx label = thumb_call_via_label[regno];
13712 if (label != 0)
13714 targetm.asm_out.internal_label (asm_out_file, "L",
13715 CODE_LABEL_NUMBER (label));
13716 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
13721 rtx aof_pic_label;
13723 #ifdef AOF_ASSEMBLER
13724 /* Special functions only needed when producing AOF syntax assembler. */
13726 struct pic_chain
13728 struct pic_chain * next;
13729 const char * symname;
13732 static struct pic_chain * aof_pic_chain = NULL;
13735 aof_pic_entry (rtx x)
13737 struct pic_chain ** chainp;
13738 int offset;
13740 if (aof_pic_label == NULL_RTX)
13742 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13745 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13746 offset += 4, chainp = &(*chainp)->next)
13747 if ((*chainp)->symname == XSTR (x, 0))
13748 return plus_constant (aof_pic_label, offset);
13750 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13751 (*chainp)->next = NULL;
13752 (*chainp)->symname = XSTR (x, 0);
13753 return plus_constant (aof_pic_label, offset);
13756 void
13757 aof_dump_pic_table (FILE *f)
13759 struct pic_chain * chain;
13761 if (aof_pic_chain == NULL)
13762 return;
13764 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13765 PIC_OFFSET_TABLE_REGNUM,
13766 PIC_OFFSET_TABLE_REGNUM);
13767 fputs ("|x$adcons|\n", f);
13769 for (chain = aof_pic_chain; chain; chain = chain->next)
13771 fputs ("\tDCD\t", f);
13772 assemble_name (f, chain->symname);
13773 fputs ("\n", f);
13777 int arm_text_section_count = 1;
13779 char *
13780 aof_text_section (void )
13782 static char buf[100];
13783 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13784 arm_text_section_count++);
13785 if (flag_pic)
13786 strcat (buf, ", PIC, REENTRANT");
13787 return buf;
13790 static int arm_data_section_count = 1;
13792 char *
13793 aof_data_section (void)
13795 static char buf[100];
13796 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13797 return buf;
13800 /* The AOF assembler is religiously strict about declarations of
13801 imported and exported symbols, so that it is impossible to declare
13802 a function as imported near the beginning of the file, and then to
13803 export it later on. It is, however, possible to delay the decision
13804 until all the functions in the file have been compiled. To get
13805 around this, we maintain a list of the imports and exports, and
13806 delete from it any that are subsequently defined. At the end of
13807 compilation we spit the remainder of the list out before the END
13808 directive. */
13810 struct import
13812 struct import * next;
13813 const char * name;
13816 static struct import * imports_list = NULL;
13818 void
13819 aof_add_import (const char *name)
13821 struct import * new;
13823 for (new = imports_list; new; new = new->next)
13824 if (new->name == name)
13825 return;
13827 new = (struct import *) xmalloc (sizeof (struct import));
13828 new->next = imports_list;
13829 imports_list = new;
13830 new->name = name;
13833 void
13834 aof_delete_import (const char *name)
13836 struct import ** old;
13838 for (old = &imports_list; *old; old = & (*old)->next)
13840 if ((*old)->name == name)
13842 *old = (*old)->next;
13843 return;
13848 int arm_main_function = 0;
13850 static void
13851 aof_dump_imports (FILE *f)
13853 /* The AOF assembler needs this to cause the startup code to be extracted
13854 from the library. Brining in __main causes the whole thing to work
13855 automagically. */
13856 if (arm_main_function)
13858 text_section ();
13859 fputs ("\tIMPORT __main\n", f);
13860 fputs ("\tDCD __main\n", f);
13863 /* Now dump the remaining imports. */
13864 while (imports_list)
13866 fprintf (f, "\tIMPORT\t");
13867 assemble_name (f, imports_list->name);
13868 fputc ('\n', f);
13869 imports_list = imports_list->next;
13873 static void
13874 aof_globalize_label (FILE *stream, const char *name)
13876 default_globalize_label (stream, name);
13877 if (! strcmp (name, "main"))
13878 arm_main_function = 1;
13881 static void
13882 aof_file_start (void)
13884 fputs ("__r0\tRN\t0\n", asm_out_file);
13885 fputs ("__a1\tRN\t0\n", asm_out_file);
13886 fputs ("__a2\tRN\t1\n", asm_out_file);
13887 fputs ("__a3\tRN\t2\n", asm_out_file);
13888 fputs ("__a4\tRN\t3\n", asm_out_file);
13889 fputs ("__v1\tRN\t4\n", asm_out_file);
13890 fputs ("__v2\tRN\t5\n", asm_out_file);
13891 fputs ("__v3\tRN\t6\n", asm_out_file);
13892 fputs ("__v4\tRN\t7\n", asm_out_file);
13893 fputs ("__v5\tRN\t8\n", asm_out_file);
13894 fputs ("__v6\tRN\t9\n", asm_out_file);
13895 fputs ("__sl\tRN\t10\n", asm_out_file);
13896 fputs ("__fp\tRN\t11\n", asm_out_file);
13897 fputs ("__ip\tRN\t12\n", asm_out_file);
13898 fputs ("__sp\tRN\t13\n", asm_out_file);
13899 fputs ("__lr\tRN\t14\n", asm_out_file);
13900 fputs ("__pc\tRN\t15\n", asm_out_file);
13901 fputs ("__f0\tFN\t0\n", asm_out_file);
13902 fputs ("__f1\tFN\t1\n", asm_out_file);
13903 fputs ("__f2\tFN\t2\n", asm_out_file);
13904 fputs ("__f3\tFN\t3\n", asm_out_file);
13905 fputs ("__f4\tFN\t4\n", asm_out_file);
13906 fputs ("__f5\tFN\t5\n", asm_out_file);
13907 fputs ("__f6\tFN\t6\n", asm_out_file);
13908 fputs ("__f7\tFN\t7\n", asm_out_file);
13909 text_section ();
13912 static void
13913 aof_file_end (void)
13915 if (flag_pic)
13916 aof_dump_pic_table (asm_out_file);
13917 arm_file_end ();
13918 aof_dump_imports (asm_out_file);
13919 fputs ("\tEND\n", asm_out_file);
13921 #endif /* AOF_ASSEMBLER */
13923 #ifndef ARM_PE
13924 /* Symbols in the text segment can be accessed without indirecting via the
13925 constant pool; it may take an extra binary operation, but this is still
13926 faster than indirecting via memory. Don't do this when not optimizing,
13927 since we won't be calculating al of the offsets necessary to do this
13928 simplification. */
13930 static void
13931 arm_encode_section_info (tree decl, rtx rtl, int first)
13933 /* This doesn't work with AOF syntax, since the string table may be in
13934 a different AREA. */
13935 #ifndef AOF_ASSEMBLER
13936 if (optimize > 0 && TREE_CONSTANT (decl))
13937 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
13938 #endif
13940 /* If we are referencing a function that is weak then encode a long call
13941 flag in the function name, otherwise if the function is static or
13942 or known to be defined in this file then encode a short call flag. */
13943 if (first && DECL_P (decl))
13945 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
13946 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
13947 else if (! TREE_PUBLIC (decl))
13948 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
13951 #endif /* !ARM_PE */
13953 static void
13954 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
13956 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
13957 && !strcmp (prefix, "L"))
13959 arm_ccfsm_state = 0;
13960 arm_target_insn = NULL;
13962 default_internal_label (stream, prefix, labelno);
13965 /* Output code to add DELTA to the first argument, and then jump
13966 to FUNCTION. Used for C++ multiple inheritance. */
13967 static void
13968 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
13969 HOST_WIDE_INT delta,
13970 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
13971 tree function)
13973 static int thunk_label = 0;
13974 char label[256];
13975 int mi_delta = delta;
13976 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
13977 int shift = 0;
13978 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
13979 ? 1 : 0);
13980 if (mi_delta < 0)
13981 mi_delta = - mi_delta;
13982 if (TARGET_THUMB)
13984 int labelno = thunk_label++;
13985 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
13986 fputs ("\tldr\tr12, ", file);
13987 assemble_name (file, label);
13988 fputc ('\n', file);
13990 while (mi_delta != 0)
13992 if ((mi_delta & (3 << shift)) == 0)
13993 shift += 2;
13994 else
13996 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
13997 mi_op, this_regno, this_regno,
13998 mi_delta & (0xff << shift));
13999 mi_delta &= ~(0xff << shift);
14000 shift += 8;
14003 if (TARGET_THUMB)
14005 fprintf (file, "\tbx\tr12\n");
14006 ASM_OUTPUT_ALIGN (file, 2);
14007 assemble_name (file, label);
14008 fputs (":\n", file);
14009 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14011 else
14013 fputs ("\tb\t", file);
14014 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14015 if (NEED_PLT_RELOC)
14016 fputs ("(PLT)", file);
14017 fputc ('\n', file);
14022 arm_emit_vector_const (FILE *file, rtx x)
14024 int i;
14025 const char * pattern;
14027 if (GET_CODE (x) != CONST_VECTOR)
14028 abort ();
14030 switch (GET_MODE (x))
14032 case V2SImode: pattern = "%08x"; break;
14033 case V4HImode: pattern = "%04x"; break;
14034 case V8QImode: pattern = "%02x"; break;
14035 default: abort ();
14038 fprintf (file, "0x");
14039 for (i = CONST_VECTOR_NUNITS (x); i--;)
14041 rtx element;
14043 element = CONST_VECTOR_ELT (x, i);
14044 fprintf (file, pattern, INTVAL (element));
14047 return 1;
14050 const char *
14051 arm_output_load_gr (rtx *operands)
14053 rtx reg;
14054 rtx offset;
14055 rtx wcgr;
14056 rtx sum;
14058 if (GET_CODE (operands [1]) != MEM
14059 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14060 || GET_CODE (reg = XEXP (sum, 0)) != REG
14061 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14062 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14063 return "wldrw%?\t%0, %1";
14065 /* Fix up an out-of-range load of a GR register. */
14066 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14067 wcgr = operands[0];
14068 operands[0] = reg;
14069 output_asm_insn ("ldr%?\t%0, %1", operands);
14071 operands[0] = wcgr;
14072 operands[1] = reg;
14073 output_asm_insn ("tmcr%?\t%0, %1", operands);
14074 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14076 return "";
14079 static rtx
14080 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14081 int incoming ATTRIBUTE_UNUSED)
14083 #if 0
14084 /* FIXME: The ARM backend has special code to handle structure
14085 returns, and will reserve its own hidden first argument. So
14086 if this macro is enabled a *second* hidden argument will be
14087 reserved, which will break binary compatibility with old
14088 toolchains and also thunk handling. One day this should be
14089 fixed. */
14090 return 0;
14091 #else
14092 /* Register in which address to store a structure value
14093 is passed to a function. */
14094 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14095 #endif
14098 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14100 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14101 named arg and all anonymous args onto the stack.
14102 XXX I know the prologue shouldn't be pushing registers, but it is faster
14103 that way. */
14105 static void
14106 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14107 enum machine_mode mode ATTRIBUTE_UNUSED,
14108 tree type ATTRIBUTE_UNUSED,
14109 int *pretend_size,
14110 int second_time ATTRIBUTE_UNUSED)
14112 cfun->machine->uses_anonymous_args = 1;
14113 if (cum->nregs < NUM_ARG_REGS)
14114 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14117 /* Return nonzero if the CONSUMER instruction (a store) does not need
14118 PRODUCER's value to calculate the address. */
14121 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14123 rtx value = PATTERN (producer);
14124 rtx addr = PATTERN (consumer);
14126 if (GET_CODE (value) == COND_EXEC)
14127 value = COND_EXEC_CODE (value);
14128 if (GET_CODE (value) == PARALLEL)
14129 value = XVECEXP (value, 0, 0);
14130 value = XEXP (value, 0);
14131 if (GET_CODE (addr) == COND_EXEC)
14132 addr = COND_EXEC_CODE (addr);
14133 if (GET_CODE (addr) == PARALLEL)
14134 addr = XVECEXP (addr, 0, 0);
14135 addr = XEXP (addr, 0);
14137 return !reg_overlap_mentioned_p (value, addr);
14140 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14141 have an early register shift value or amount dependency on the
14142 result of PRODUCER. */
14145 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14147 rtx value = PATTERN (producer);
14148 rtx op = PATTERN (consumer);
14149 rtx early_op;
14151 if (GET_CODE (value) == COND_EXEC)
14152 value = COND_EXEC_CODE (value);
14153 if (GET_CODE (value) == PARALLEL)
14154 value = XVECEXP (value, 0, 0);
14155 value = XEXP (value, 0);
14156 if (GET_CODE (op) == COND_EXEC)
14157 op = COND_EXEC_CODE (op);
14158 if (GET_CODE (op) == PARALLEL)
14159 op = XVECEXP (op, 0, 0);
14160 op = XEXP (op, 1);
14162 early_op = XEXP (op, 0);
14163 /* This is either an actual independent shift, or a shift applied to
14164 the first operand of another operation. We want the whole shift
14165 operation. */
14166 if (GET_CODE (early_op) == REG)
14167 early_op = op;
14169 return !reg_overlap_mentioned_p (value, early_op);
14172 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14173 have an early register shift value dependency on the result of
14174 PRODUCER. */
14177 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14179 rtx value = PATTERN (producer);
14180 rtx op = PATTERN (consumer);
14181 rtx early_op;
14183 if (GET_CODE (value) == COND_EXEC)
14184 value = COND_EXEC_CODE (value);
14185 if (GET_CODE (value) == PARALLEL)
14186 value = XVECEXP (value, 0, 0);
14187 value = XEXP (value, 0);
14188 if (GET_CODE (op) == COND_EXEC)
14189 op = COND_EXEC_CODE (op);
14190 if (GET_CODE (op) == PARALLEL)
14191 op = XVECEXP (op, 0, 0);
14192 op = XEXP (op, 1);
14194 early_op = XEXP (op, 0);
14196 /* This is either an actual independent shift, or a shift applied to
14197 the first operand of another operation. We want the value being
14198 shifted, in either case. */
14199 if (GET_CODE (early_op) != REG)
14200 early_op = XEXP (early_op, 0);
14202 return !reg_overlap_mentioned_p (value, early_op);
14205 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14206 have an early register mult dependency on the result of
14207 PRODUCER. */
14210 arm_no_early_mul_dep (rtx producer, rtx consumer)
14212 rtx value = PATTERN (producer);
14213 rtx op = PATTERN (consumer);
14215 if (GET_CODE (value) == COND_EXEC)
14216 value = COND_EXEC_CODE (value);
14217 if (GET_CODE (value) == PARALLEL)
14218 value = XVECEXP (value, 0, 0);
14219 value = XEXP (value, 0);
14220 if (GET_CODE (op) == COND_EXEC)
14221 op = COND_EXEC_CODE (op);
14222 if (GET_CODE (op) == PARALLEL)
14223 op = XVECEXP (op, 0, 0);
14224 op = XEXP (op, 1);
14226 return (GET_CODE (op) == PLUS
14227 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14231 /* We can't rely on the caller doing the proper promotion when
14232 using APCS or ATPCS. */
14234 static bool
14235 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14237 return !TARGET_AAPCS_BASED;
14241 /* AAPCS based ABIs use short enums by default. */
14243 static bool
14244 arm_default_short_enums (void)
14246 return TARGET_AAPCS_BASED;
14250 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14252 static bool
14253 arm_align_anon_bitfield (void)
14255 return TARGET_AAPCS_BASED;
14259 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14261 static tree
14262 arm_cxx_guard_type (void)
14264 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14268 /* The EABI says test the least significan bit of a guard variable. */
14270 static bool
14271 arm_cxx_guard_mask_bit (void)
14273 return TARGET_AAPCS_BASED;
14277 /* The EABI specifies that all array cookies are 8 bytes long. */
14279 static tree
14280 arm_get_cookie_size (tree type)
14282 tree size;
14284 if (!TARGET_AAPCS_BASED)
14285 return default_cxx_get_cookie_size (type);
14287 size = build_int_cst (sizetype, 8);
14288 return size;
14292 /* The EABI says that array cookies should also contain the element size. */
14294 static bool
14295 arm_cookie_has_size (void)
14297 return TARGET_AAPCS_BASED;
14301 /* The EABI says constructors and destructors should return a pointer to
14302 the object constructed/destroyed. */
14304 static bool
14305 arm_cxx_cdtor_returns_this (void)
14307 return TARGET_AAPCS_BASED;
14310 /* The EABI says that an inline function may never be the key
14311 method. */
14313 static bool
14314 arm_cxx_key_method_may_be_inline (void)
14316 return !TARGET_AAPCS_BASED;
14319 /* The EABI says that the virtual table, etc., for a class must be
14320 exported if it has a key method. The EABI does not specific the
14321 behavior if there is no key method, but there is no harm in
14322 exporting the class data in that case too. */
14324 static bool
14325 arm_cxx_export_class_data (void)
14327 return TARGET_AAPCS_BASED;
14330 void
14331 arm_set_return_address (rtx source, rtx scratch)
14333 arm_stack_offsets *offsets;
14334 HOST_WIDE_INT delta;
14335 rtx addr;
14336 unsigned long saved_regs;
14338 saved_regs = arm_compute_save_reg_mask ();
14340 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14341 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14342 else
14344 if (frame_pointer_needed)
14345 addr = plus_constant(hard_frame_pointer_rtx, -4);
14346 else
14348 /* LR will be the first saved register. */
14349 offsets = arm_get_frame_offsets ();
14350 delta = offsets->outgoing_args - (offsets->frame + 4);
14353 if (delta >= 4096)
14355 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14356 GEN_INT (delta & ~4095)));
14357 addr = scratch;
14358 delta &= 4095;
14360 else
14361 addr = stack_pointer_rtx;
14363 addr = plus_constant (addr, delta);
14365 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14370 void
14371 thumb_set_return_address (rtx source, rtx scratch)
14373 arm_stack_offsets *offsets;
14374 HOST_WIDE_INT delta;
14375 int reg;
14376 rtx addr;
14377 unsigned long mask;
14379 emit_insn (gen_rtx_USE (VOIDmode, source));
14381 mask = thumb_compute_save_reg_mask ();
14382 if (mask & (1 << LR_REGNUM))
14384 offsets = arm_get_frame_offsets ();
14386 /* Find the saved regs. */
14387 if (frame_pointer_needed)
14389 delta = offsets->soft_frame - offsets->saved_args;
14390 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14392 else
14394 delta = offsets->outgoing_args - offsets->saved_args;
14395 reg = SP_REGNUM;
14397 /* Allow for the stack frame. */
14398 if (TARGET_BACKTRACE)
14399 delta -= 16;
14400 /* The link register is always the first saved register. */
14401 delta -= 4;
14403 /* Construct the address. */
14404 addr = gen_rtx_REG (SImode, reg);
14405 if ((reg != SP_REGNUM && delta >= 128)
14406 || delta >= 1024)
14408 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14409 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14410 addr = scratch;
14412 else
14413 addr = plus_constant (addr, delta);
14415 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14417 else
14418 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14421 /* Implements target hook vector_mode_supported_p. */
14422 bool
14423 arm_vector_mode_supported_p (enum machine_mode mode)
14425 if ((mode == V2SImode)
14426 || (mode == V4HImode)
14427 || (mode == V8QImode))
14428 return true;
14430 return false;
14433 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14434 ARM insns and therefore guarantee that the shift count is modulo 256.
14435 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14436 guarantee no particular behavior for out-of-range counts. */
14438 static unsigned HOST_WIDE_INT
14439 arm_shift_truncation_mask (enum machine_mode mode)
14441 return mode == SImode ? 255 : 0;
14445 /* Map internal gcc register numbers to DWARF2 register numbers. */
14447 unsigned int
14448 arm_dbx_register_number (unsigned int regno)
14450 if (regno < 16)
14451 return regno;
14453 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14454 compatibility. The EABI defines them as registers 96-103. */
14455 if (IS_FPA_REGNUM (regno))
14456 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14458 if (IS_VFP_REGNUM (regno))
14459 return 64 + regno - FIRST_VFP_REGNUM;
14461 if (IS_IWMMXT_GR_REGNUM (regno))
14462 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14464 if (IS_IWMMXT_REGNUM (regno))
14465 return 112 + regno - FIRST_IWMMXT_REGNUM;
14467 abort ();