/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static unsigned long thumb_compute_save_reg_mask (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_multi_reg_push (int);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static void print_multi_reg (FILE *, const char *, int, int);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static int number_of_first_bit_set (int);
static void replace_symbols_in_block (tree, rtx, rtx);
static void thumb_exit (FILE *, int);
static void thumb_pushpop (FILE *, int, int, int *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);

#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif
#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static bool arm_cxx_export_class_data (void);
static void arm_init_libfuncs (void);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);

/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_EXPORT_CLASS_DATA
#define TARGET_CXX_EXPORT_CLASS_DATA arm_cxx_export_class_data

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Set by the -mabi=... option.  */
const char * target_abi_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus.  */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply.  */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support.  */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support.  */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4.  */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5.  */
#define FL_THUMB      (1 << 6)        /* Thumb aware.  */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary.  */
#define FL_STRONG     (1 << 8)        /* StrongARM.  */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5.  */
#define FL_XSCALE     (1 << 10)       /* XScale.  */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   FL_FOR_ARCH6
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6
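
/* As an example of how the FL_FOR_ARCH* macros compose: FL_FOR_ARCH5TE
   expands to FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E
   | FL_THUMB, i.e. each architecture level inherits the capability
   bits of its predecessors.  */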

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};
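
/* For example, the "armv5te" entry above names arm1026ejs as a
   representative core for that architecture, and its flag word
   (FL_CO_PROC | FL_FOR_ARCH5TE) bounds the instructions the compiler
   may generate when -march=armv5te is given.  */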

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};
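
/* For instance, compiling with "-mcpu=arm7tdmi" leaves arm_select[0].string
   pointing at "arm7tdmi" while the other two entries keep NULL strings;
   arm_override_options below walks this array to resolve the selection.  */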

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",  ARM_ABI_APCS},
  {"atpcs",     ARM_ABI_ATPCS},
  {"aapcs",     ARM_ABI_AAPCS},
  {"iwmmxt",    ARM_ABI_IWMMXT}
};

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
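
/* Worked example of the clear-lowest-set-bit trick above: for
   VALUE = 0x2c (binary 101100), the loop sees 0x2c -> 0x28 -> 0x20 -> 0,
   i.e. one iteration per set bit, giving a count of 3.  */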

/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
}
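
/* So, for example, a plain C division such as

       int quot (int a, int b) { return a / b; }

   compiles on a BPABI target to a call to __aeabi_idivmod; the quotient
   is taken from r0 and the remainder (returned in r1) is simply ignored.
   (Illustrative sketch; the exact code emitted depends on options.)  */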

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != 2)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == 0
                    /* If -march= is used, and -mcpu= has not been used,
                       assume that we should tune for a representative
                       CPU from that architecture.  */
                    || i == 1
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == 2)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                if (i != 2)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning ("switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switch that requires certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }

      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int) arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int) arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
                    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~ARM_FLAG_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }

  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        abort ();
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    {
      /* Use the soft-float target flag.  */
      if (target_flags & ARM_FLAG_SOFT_FLOAT)
        arm_float_abi = ARM_FLOAT_ABI_SOFT;
      else
        arm_float_abi = ARM_FLOAT_ABI_HARD;
    }

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning ("structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (tune_flags & FL_LDSCHED)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_is_strong)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
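
/* These names are matched (case-sensitively, hence both spellings) against
   the argument of the "isr"/"interrupt" function attribute; for example:

       void my_handler (void) __attribute__ ((interrupt ("IRQ")));

   (Illustrative declaration; "my_handler" is just a placeholder name.)  */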

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}
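
/* For instance, a function declared as

       void reset (void) __attribute__ ((naked, interrupt ("FIQ")));

   would be assigned ARM_FT_NAKED | ARM_FT_FIQ here (a hypothetical
   declaration, for illustration only).  */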

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return ()  */
      || current_function_calls_eh_return
      /* Or if the function calls alloca  */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value (always true
         until we implement return-in-regs), or for a tail-call
         argument ...  */
      if (sibling)
        {
          if (GET_CODE (sibling) != CALL_INSN)
            abort ();

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
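
/* In the simplest case (a leaf function with no saved registers and no
   stack adjustment) the "single instruction" return is just
   "mov pc, lr" (or "bx lr" when interworking); when LR was pushed, the
   return instead folds into the final "ldmfd sp!, {..., pc}".  */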

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT) 0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2.  */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
        return TRUE;
      mask =
        (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
                       >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
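
/* An ARM data-processing immediate is an 8-bit value rotated right by an
   even amount, which is what the rotating-mask loop above tests for.
   For example, 0xFF and 0x3FC (0xFF << 2) are valid immediates, while
   0x101 is not: its set bits span nine positions, so no 8-bit window at
   an even rotation can cover them.  */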

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb  */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}
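
/* The PLUS and AND cases work because the constant can be flipped into a
   sibling instruction: for instance, "x + (-255)" can be emitted as SUB
   with immediate 255, and "x & 0xFFFFFF00" as BIC (bit-clear) with
   immediate 0xFF, even though neither -255 nor 0xFFFFFF00 is itself a
   valid ARM immediate.  */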

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c  */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are diadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode,
                                                        source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source,
                           subtargets, 1);
}

static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
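
/* The loop above mirrors the chunking strategy used by arm_gen_constant:
   the constant is consumed in 8-bit windows aligned to even bit positions,
   one instruction per window.  For example, 0x00FF00FF counts as two
   insns (roughly, MOV with immediate 0xFF followed by ORR with immediate
   0x00FF0000), since each 0xFF group fits a single rotated 8-bit
   immediate.  */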

/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}

/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NOT (mode, source)));
          return 1;
        }

      /* We don't know how to handle this yet below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_MINUS (mode, GEN_INT (val),
                                                            source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      abort ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         (source
                                          ? gen_rtx_fmt_ee (code, mode, source,
                                                            GEN_INT (val))
                                          : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
        clear_sign_bit_copies++;
      else
        break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
        set_sign_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
        clear_zero_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
        set_zero_bit_copies++;
      else
        break;
    }
1763 switch (code)
1765 case SET:
1766 /* See if we can do this by sign_extending a constant that is known
1767 to be negative. This is a good way of doing it, since the shift
1768 may well merge into a subsequent insn. */
1769 if (set_sign_bit_copies > 1)
1771 if (const_ok_for_arm
1772 (temp1 = ARM_SIGN_EXTEND (remainder
1773 << (set_sign_bit_copies - 1))))
1775 if (generate)
1777 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1778 emit_constant_insn (cond,
1779 gen_rtx_SET (VOIDmode, new_src,
1780 GEN_INT (temp1)));
1781 emit_constant_insn (cond,
1782 gen_ashrsi3 (target, new_src,
1783 GEN_INT (set_sign_bit_copies - 1)));
1785 return 2;
1787 /* For an inverted constant, we will need to set the low bits;
1788 these will be shifted out of harm's way. */
1789 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1790 if (const_ok_for_arm (~temp1))
1792 if (generate)
1794 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1795 emit_constant_insn (cond,
1796 gen_rtx_SET (VOIDmode, new_src,
1797 GEN_INT (temp1)));
1798 emit_constant_insn (cond,
1799 gen_ashrsi3 (target, new_src,
1800 GEN_INT (set_sign_bit_copies - 1)));
1802 return 2;
1806 /* See if we can generate this by setting the bottom (or the top)
1807 16 bits, and then shifting these into the other half of the
1808 word. We only look for the simplest cases; to do more would cost
1809 too much. Be careful, however, not to generate this when the
1810 alternative would take fewer insns. */
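      /* For example, VAL == 0x12341234: 0x1234 is not a valid
	 immediate, but it can be built in two insns, after which
	 something like

		orr	target, rT, rT, lsl #16

	 (rT being the scratch holding 0x1234) completes the constant
	 in three insns rather than the four a byte-at-a-time
	 synthesis would need.  */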
1811 if (val & 0xffff0000)
1813 temp1 = remainder & 0xffff0000;
1814 temp2 = remainder & 0x0000ffff;
1816 /* Overlaps outside this range are best done using other methods. */
1817 for (i = 9; i < 24; i++)
1819 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1820 && !const_ok_for_arm (temp2))
1822 rtx new_src = (subtargets
1823 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1824 : target);
1825 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1826 source, subtargets, generate);
1827 source = new_src;
1828 if (generate)
1829 emit_constant_insn
1830 (cond,
1831 gen_rtx_SET
1832 (VOIDmode, target,
1833 gen_rtx_IOR (mode,
1834 gen_rtx_ASHIFT (mode, source,
1835 GEN_INT (i)),
1836 source)));
1837 return insns + 1;
1841 /* Don't duplicate cases already considered. */
1842 for (i = 17; i < 24; i++)
1844 if (((temp1 | (temp1 >> i)) == remainder)
1845 && !const_ok_for_arm (temp1))
1847 rtx new_src = (subtargets
1848 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1849 : target);
1850 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1851 source, subtargets, generate);
1852 source = new_src;
1853 if (generate)
1854 emit_constant_insn
1855 (cond,
1856 gen_rtx_SET (VOIDmode, target,
1857 gen_rtx_IOR
1858 (mode,
1859 gen_rtx_LSHIFTRT (mode, source,
1860 GEN_INT (i)),
1861 source)));
1862 return insns + 1;
1866 break;
1868 case IOR:
1869 case XOR:
1870 /* If we have IOR or XOR, and the constant can be loaded in a
1871 single instruction, and we can find a temporary to put it in,
1872 then this can be done in two instructions instead of 3-4. */
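    /* A sketch: with code == IOR and VAL == 0xffff00ff, ~VAL is the
       valid immediate 0x0000ff00, so the constant loads with a
       single MVN:

		mvn	rT, #0x0000ff00
		orr	target, source, rT

       two insns instead of synthesizing 0xffff00ff from scratch.  */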
1873 if (subtargets
1874 /* TARGET can't be NULL if SUBTARGETS is 0 */
1875 || (reload_completed && !reg_mentioned_p (target, source)))
1877 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1879 if (generate)
1881 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1883 emit_constant_insn (cond,
1884 gen_rtx_SET (VOIDmode, sub,
1885 GEN_INT (val)));
1886 emit_constant_insn (cond,
1887 gen_rtx_SET (VOIDmode, target,
1888 gen_rtx_fmt_ee (code, mode,
1889 source, sub)));
1891 return 2;
1895 if (code == XOR)
1896 break;
1898 if (set_sign_bit_copies > 8
1899 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1901 if (generate)
1903 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1904 rtx shift = GEN_INT (set_sign_bit_copies);
1906 emit_constant_insn
1907 (cond,
1908 gen_rtx_SET (VOIDmode, sub,
1909 gen_rtx_NOT (mode,
1910 gen_rtx_ASHIFT (mode,
1911 source,
1912 shift))));
1913 emit_constant_insn
1914 (cond,
1915 gen_rtx_SET (VOIDmode, target,
1916 gen_rtx_NOT (mode,
1917 gen_rtx_LSHIFTRT (mode, sub,
1918 shift))));
1920 return 2;
1923 if (set_zero_bit_copies > 8
1924 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1926 if (generate)
1928 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1929 rtx shift = GEN_INT (set_zero_bit_copies);
1931 emit_constant_insn
1932 (cond,
1933 gen_rtx_SET (VOIDmode, sub,
1934 gen_rtx_NOT (mode,
1935 gen_rtx_LSHIFTRT (mode,
1936 source,
1937 shift))));
1938 emit_constant_insn
1939 (cond,
1940 gen_rtx_SET (VOIDmode, target,
1941 gen_rtx_NOT (mode,
1942 gen_rtx_ASHIFT (mode, sub,
1943 shift))));
1945 return 2;
1948 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1950 if (generate)
1952 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1953 emit_constant_insn (cond,
1954 gen_rtx_SET (VOIDmode, sub,
1955 gen_rtx_NOT (mode, source)));
1956 source = sub;
1957 if (subtargets)
1958 sub = gen_reg_rtx (mode);
1959 emit_constant_insn (cond,
1960 gen_rtx_SET (VOIDmode, sub,
1961 gen_rtx_AND (mode, source,
1962 GEN_INT (temp1))));
1963 emit_constant_insn (cond,
1964 gen_rtx_SET (VOIDmode, target,
1965 gen_rtx_NOT (mode, sub)));
1967 return 3;
1969 break;
1971 case AND:
1972 /* See if two shifts will do two or more insns' worth of work. */
1973 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1975 HOST_WIDE_INT shift_mask = ((0xffffffff
1976 << (32 - clear_sign_bit_copies))
1977 & 0xffffffff);
1979 if ((remainder | shift_mask) != 0xffffffff)
1981 if (generate)
1983 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1984 insns = arm_gen_constant (AND, mode, cond,
1985 remainder | shift_mask,
1986 new_src, source, subtargets, 1);
1987 source = new_src;
1989 else
1991 rtx targ = subtargets ? NULL_RTX : target;
1992 insns = arm_gen_constant (AND, mode, cond,
1993 remainder | shift_mask,
1994 targ, source, subtargets, 0);
1998 if (generate)
2000 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2001 rtx shift = GEN_INT (clear_sign_bit_copies);
2003 emit_insn (gen_ashlsi3 (new_src, source, shift));
2004 emit_insn (gen_lshrsi3 (target, new_src, shift));
2007 return insns + 2;
2010 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2012 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2014 if ((remainder | shift_mask) != 0xffffffff)
2016 if (generate)
2018 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2020 insns = arm_gen_constant (AND, mode, cond,
2021 remainder | shift_mask,
2022 new_src, source, subtargets, 1);
2023 source = new_src;
2025 else
2027 rtx targ = subtargets ? NULL_RTX : target;
2029 insns = arm_gen_constant (AND, mode, cond,
2030 remainder | shift_mask,
2031 targ, source, subtargets, 0);
2035 if (generate)
2037 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2038 rtx shift = GEN_INT (clear_zero_bit_copies);
2040 emit_insn (gen_lshrsi3 (new_src, source, shift));
2041 emit_insn (gen_ashlsi3 (target, new_src, shift));
2044 return insns + 2;
2047 break;
2049 default:
2050 break;
2053 for (i = 0; i < 32; i++)
2054 if (remainder & (1 << i))
2055 num_bits_set++;
2057 if (code == AND || (can_invert && num_bits_set > 16))
2058 remainder = (~remainder) & 0xffffffff;
2059 else if (code == PLUS && num_bits_set > 16)
2060 remainder = (-remainder) & 0xffffffff;
2061 else
2063 can_invert = 0;
2064 can_negate = 0;
2067 /* Now try and find a way of doing the job in either two or three
2068 instructions.
2069 We start by looking for the largest block of zeros that are aligned on
2070 a 2-bit boundary; we then fill up the temps, wrapping around to the
2071 top of the word when we drop off the bottom.
2072 In the worst case this code should produce no more than four insns. */
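  /* For example, remainder == 0xff0000ff has its largest aligned
     block of zeros in bits 8..23, giving best_start == 8 and a
     two-insn sequence along the lines of

	mov	rD, #0xff
	orr	rD, rD, #0xff000000

     (the heuristic below may still prefer to start at bit 0 when
     that is no worse).  */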
2074 int best_start = 0;
2075 int best_consecutive_zeros = 0;
2077 for (i = 0; i < 32; i += 2)
2079 int consecutive_zeros = 0;
2081 if (!(remainder & (3 << i)))
2083 while ((i < 32) && !(remainder & (3 << i)))
2085 consecutive_zeros += 2;
2086 i += 2;
2088 if (consecutive_zeros > best_consecutive_zeros)
2090 best_consecutive_zeros = consecutive_zeros;
2091 best_start = i - consecutive_zeros;
2093 i -= 2;
2097 /* So long as it won't require any more insns to do so, it's
2098 desirable to emit a small constant (in bits 0...9) in the last
2099 insn. This way there is more chance that it can be combined with
2100 a later addressing insn to form a pre-indexed load or store
2101 operation. Consider:
2103 *((volatile int *)0xe0000100) = 1;
2104 *((volatile int *)0xe0000110) = 2;
2106 We want this to wind up as:
2108 mov rA, #0xe0000000
2109 mov rB, #1
2110 str rB, [rA, #0x100]
2111 mov rB, #2
2112 str rB, [rA, #0x110]
2114 rather than having to synthesize both large constants from scratch.
2116 Therefore, we calculate how many insns would be required to emit
2117 the constant starting from `best_start', and also starting from
2118 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2119 yield a shorter sequence, we may as well use zero. */
2120 if (best_start != 0
2121 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2122 && (count_insns_for_constant (remainder, 0) <=
2123 count_insns_for_constant (remainder, best_start)))
2124 best_start = 0;
2126 /* Now start emitting the insns. */
2127 i = best_start;
2130 int end;
2132 if (i <= 0)
2133 i += 32;
2134 if (remainder & (3 << (i - 2)))
2136 end = i - 8;
2137 if (end < 0)
2138 end += 32;
2139 temp1 = remainder & ((0x0ff << end)
2140 | ((i < end) ? (0xff >> (32 - end)) : 0));
2141 remainder &= ~temp1;
2143 if (generate)
2145 rtx new_src, temp1_rtx;
2147 if (code == SET || code == MINUS)
2149 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2150 if (can_invert && code != MINUS)
2151 temp1 = ~temp1;
2153 else
2155 if (remainder && subtargets)
2156 new_src = gen_reg_rtx (mode);
2157 else
2158 new_src = target;
2159 if (can_invert)
2160 temp1 = ~temp1;
2161 else if (can_negate)
2162 temp1 = -temp1;
2165 temp1 = trunc_int_for_mode (temp1, mode);
2166 temp1_rtx = GEN_INT (temp1);
2168 if (code == SET)
2170 else if (code == MINUS)
2171 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2172 else
2173 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2175 emit_constant_insn (cond,
2176 gen_rtx_SET (VOIDmode, new_src,
2177 temp1_rtx));
2178 source = new_src;
2181 if (code == SET)
2183 can_invert = 0;
2184 code = PLUS;
2186 else if (code == MINUS)
2187 code = PLUS;
2189 insns++;
2190 i -= 6;
2192 i -= 2;
2194 while (remainder);
2197 return insns;
2200 /* Canonicalize a comparison so that we are more likely to recognize it.
2201 This can be done for a few constant compares, where we can make the
2202 immediate value easier to load. */
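/* For example, (x > 0xfff) would need the invalid immediate 0xfff,
   but canonicalizes to (x >= 0x1000), and 0x1000 loads in one insn.  */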
2204 enum rtx_code
2205 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2207 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2209 switch (code)
2211 case EQ:
2212 case NE:
2213 return code;
2215 case GT:
2216 case LE:
2217 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2218 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2220 *op1 = GEN_INT (i + 1);
2221 return code == GT ? GE : LT;
2223 break;
2225 case GE:
2226 case LT:
2227 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2228 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2230 *op1 = GEN_INT (i - 1);
2231 return code == GE ? GT : LE;
2233 break;
2235 case GTU:
2236 case LEU:
2237 if (i != ~((unsigned HOST_WIDE_INT) 0)
2238 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2240 *op1 = GEN_INT (i + 1);
2241 return code == GTU ? GEU : LTU;
2243 break;
2245 case GEU:
2246 case LTU:
2247 if (i != 0
2248 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2250 *op1 = GEN_INT (i - 1);
2251 return code == GEU ? GTU : LEU;
2253 break;
2255 default:
2256 abort ();
2259 return code;
2263 /* Define how to find the value returned by a function. */
2265 rtx arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2267 enum machine_mode mode;
2268 int unsignedp ATTRIBUTE_UNUSED;
2269 rtx r ATTRIBUTE_UNUSED;
2272 mode = TYPE_MODE (type);
2273 /* Promote integer types. */
2274 if (INTEGRAL_TYPE_P (type))
2275 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2276 return LIBCALL_VALUE (mode);
2280 /* Decide whether a type should be returned in memory (true)
2281 or in a register (false). This is called by the macro
2282 RETURN_IN_MEMORY. */
2284 arm_return_in_memory (tree type)
2286 HOST_WIDE_INT size;
2288 if (!AGGREGATE_TYPE_P (type))
2289 /* All simple types are returned in registers. */
2290 return 0;
2292 size = int_size_in_bytes (type);
2294 if (arm_abi != ARM_ABI_APCS)
2296 /* ATPCS and later return aggregate types in memory only if they are
2297 larger than a word (or are variable size). */
2298 return (size < 0 || size > UNITS_PER_WORD);
2301 /* For the arm-wince targets we choose to be compatible with Microsoft's
2302 ARM and Thumb compilers, which always return aggregates in memory. */
2303 #ifndef ARM_WINCE
2304 /* All structures/unions bigger than one word are returned in memory.
2305 Also catch the case where int_size_in_bytes returns -1. In this case
2306 the aggregate is either huge or of variable size, and in either case
2307 we will want to return it via memory and not in a register. */
2308 if (size < 0 || size > UNITS_PER_WORD)
2309 return 1;
2311 if (TREE_CODE (type) == RECORD_TYPE)
2313 tree field;
2315 /* For a struct the APCS says that we only return in a register
2316 if the type is 'integer like' and every addressable element
2317 has an offset of zero. For practical purposes this means
2318 that the structure can have at most one non-bit-field element
2319 and that this element must be the first one in the structure. */
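      /* For instance, under the APCS:
	     struct { int a; }		-- returned in a register
	     struct { int a; int b; }	-- returned in memory (two words)
	     struct { float f; }	-- returned in memory.  */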
2321 /* Find the first field, ignoring non FIELD_DECL things which will
2322 have been created by C++. */
2323 for (field = TYPE_FIELDS (type);
2324 field && TREE_CODE (field) != FIELD_DECL;
2325 field = TREE_CHAIN (field))
2326 continue;
2328 if (field == NULL)
2329 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2331 /* Check that the first field is valid for returning in a register. */
2333 /* ... Floats are not allowed */
2334 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2335 return 1;
2337 /* ... Aggregates that are not themselves valid for returning in
2338 a register are not allowed. */
2339 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2340 return 1;
2342 /* Now check the remaining fields, if any. Only bitfields are allowed,
2343 since they are not addressable. */
2344 for (field = TREE_CHAIN (field);
2345 field;
2346 field = TREE_CHAIN (field))
2348 if (TREE_CODE (field) != FIELD_DECL)
2349 continue;
2351 if (!DECL_BIT_FIELD_TYPE (field))
2352 return 1;
2355 return 0;
2358 if (TREE_CODE (type) == UNION_TYPE)
2360 tree field;
2362 /* Unions can be returned in registers if every element is
2363 integral, or can be returned in an integer register. */
2364 for (field = TYPE_FIELDS (type);
2365 field;
2366 field = TREE_CHAIN (field))
2368 if (TREE_CODE (field) != FIELD_DECL)
2369 continue;
2371 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2372 return 1;
2374 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2375 return 1;
2378 return 0;
2380 #endif /* not ARM_WINCE */
2382 /* Return all other types in memory. */
2383 return 1;
2386 /* Indicate whether or not words of a double are in big-endian order. */
2389 arm_float_words_big_endian (void)
2391 if (TARGET_MAVERICK)
2392 return 0;
2394 /* For FPA, float words are always big-endian. For VFP, float words
2395 follow the memory system mode. */
2397 if (TARGET_FPA)
2399 return 1;
2402 if (TARGET_VFP)
2403 return (TARGET_BIG_END ? 1 : 0);
2405 return 1;
2408 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2409 for a call to a function whose data type is FNTYPE.
2410 For a library call, FNTYPE is NULL. */
2411 void
2412 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2413 rtx libname ATTRIBUTE_UNUSED,
2414 tree fndecl ATTRIBUTE_UNUSED)
2416 /* On the ARM, the offset starts at 0. */
2417 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2418 pcum->iwmmxt_nregs = 0;
2419 pcum->can_split = true;
2421 pcum->call_cookie = CALL_NORMAL;
2423 if (TARGET_LONG_CALLS)
2424 pcum->call_cookie = CALL_LONG;
2426 /* Check for long call/short call attributes. The attributes
2427 override any command line option. */
2428 if (fntype)
2430 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2431 pcum->call_cookie = CALL_SHORT;
2432 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2433 pcum->call_cookie = CALL_LONG;
2436 /* Varargs vectors are treated the same as long long.
2437 named_count avoids having to change the way ARM handles 'named'. */
2438 pcum->named_count = 0;
2439 pcum->nargs = 0;
2441 if (TARGET_REALLY_IWMMXT && fntype)
2443 tree fn_arg;
2445 for (fn_arg = TYPE_ARG_TYPES (fntype);
2446 fn_arg;
2447 fn_arg = TREE_CHAIN (fn_arg))
2448 pcum->named_count += 1;
2450 if (! pcum->named_count)
2451 pcum->named_count = INT_MAX;
2456 /* Return true if mode/type need doubleword alignment. */
2457 bool
2458 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2460 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2461 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2465 /* Determine where to put an argument to a function.
2466 Value is zero to push the argument on the stack,
2467 or a hard register in which to store the argument.
2469 MODE is the argument's machine mode.
2470 TYPE is the data type of the argument (as a tree).
2471 This is null for libcalls where that information may
2472 not be available.
2473 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2474 the preceding args and about the function being called.
2475 NAMED is nonzero if this argument is a named parameter
2476 (otherwise it is an extra parameter matching an ellipsis). */
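/* For instance, when ARM_DOUBLEWORD_ALIGN is in effect, for
	f (int a, long long b)
   A is passed in r0, r1 is skipped, and B is passed in r2/r3.  */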
2479 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2480 tree type, int named)
2482 int nregs;
2484 /* Varargs vectors are treated the same as long long.
2485 named_count avoids having to change the way ARM handles 'named'. */
2486 if (TARGET_IWMMXT_ABI
2487 && arm_vector_mode_supported_p (mode)
2488 && pcum->named_count > pcum->nargs + 1)
2490 if (pcum->iwmmxt_nregs <= 9)
2491 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2492 else
2494 pcum->can_split = false;
2495 return NULL_RTX;
2499 /* Put doubleword aligned quantities in even register pairs. */
2500 if (pcum->nregs & 1
2501 && ARM_DOUBLEWORD_ALIGN
2502 && arm_needs_doubleword_align (mode, type))
2503 pcum->nregs++;
2505 if (mode == VOIDmode)
2506 /* Compute operand 2 of the call insn. */
2507 return GEN_INT (pcum->call_cookie);
2509 /* Only allow splitting an arg between regs and memory if all preceding
2510 args were allocated to regs. For args passed by reference we only count
2511 the reference pointer. */
2512 if (pcum->can_split)
2513 nregs = 1;
2514 else
2515 nregs = ARM_NUM_REGS2 (mode, type);
2517 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2518 return NULL_RTX;
2520 return gen_rtx_REG (mode, pcum->nregs);
2523 /* Variable sized types are passed by reference. This is a GCC
2524 extension to the ARM ABI. */
2526 static bool
2527 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2528 enum machine_mode mode ATTRIBUTE_UNUSED,
2529 tree type, bool named ATTRIBUTE_UNUSED)
2531 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2534 /* Encode the current state of the #pragma [no_]long_calls. */
2535 typedef enum
2537 OFF, /* No #pragma [no_]long_calls is in effect. */
2538 LONG, /* #pragma long_calls is in effect. */
2539 SHORT /* #pragma no_long_calls is in effect. */
2540 } arm_pragma_enum;
2542 static arm_pragma_enum arm_pragma_long_calls = OFF;
2544 void
2545 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2547 arm_pragma_long_calls = LONG;
2550 void
2551 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2553 arm_pragma_long_calls = SHORT;
2556 void
2557 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2559 arm_pragma_long_calls = OFF;
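/* A sketch of the intended usage:

	#pragma long_calls
	void far_func (void);	-- gets the long_call attribute
	#pragma no_long_calls
	void near_func (void);	-- gets the short_call attribute
	#pragma long_calls_off
	void plain_func (void);	-- back to the command-line default  */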
2562 /* Table of machine attributes. */
2563 const struct attribute_spec arm_attribute_table[] =
2565 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2566 /* Function calls made to this symbol must be done indirectly, because
2567 it may lie outside of the 26 bit addressing range of a normal function
2568 call. */
2569 { "long_call", 0, 0, false, true, true, NULL },
2570 /* Whereas these functions are always known to reside within the 26 bit
2571 addressing range. */
2572 { "short_call", 0, 0, false, true, true, NULL },
2573 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2574 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2575 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2576 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2577 #ifdef ARM_PE
2578 /* ARM/PE has three new attributes:
2579 interfacearm - ?
2580 dllexport - for exporting a function/variable that will live in a dll
2581 dllimport - for importing a function/variable from a dll
2583 Microsoft allows multiple declspecs in one __declspec, separating
2584 them with spaces. We do NOT support this. Instead, use __declspec
2585 multiple times.
2587 { "dllimport", 0, 0, true, false, false, NULL },
2588 { "dllexport", 0, 0, true, false, false, NULL },
2589 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2590 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2591 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2592 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2593 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2594 #endif
2595 { NULL, 0, 0, false, false, false, NULL }
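/* Example declarations using the attributes above (illustrative):

	void f (void) __attribute__ ((long_call));
	void g (void) __attribute__ ((interrupt ("IRQ")));
	void h (void) __attribute__ ((naked));  */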
2598 /* Handle an attribute requiring a FUNCTION_DECL;
2599 arguments as in struct attribute_spec.handler. */
2600 static tree
2601 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2602 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2604 if (TREE_CODE (*node) != FUNCTION_DECL)
2606 warning ("`%s' attribute only applies to functions",
2607 IDENTIFIER_POINTER (name));
2608 *no_add_attrs = true;
2611 return NULL_TREE;
2614 /* Handle an "interrupt" or "isr" attribute;
2615 arguments as in struct attribute_spec.handler. */
2616 static tree
2617 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2618 bool *no_add_attrs)
2620 if (DECL_P (*node))
2622 if (TREE_CODE (*node) != FUNCTION_DECL)
2624 warning ("`%s' attribute only applies to functions",
2625 IDENTIFIER_POINTER (name));
2626 *no_add_attrs = true;
2628 /* FIXME: the argument, if any, is checked for type attributes;
2629 should it be checked for decl ones? */
2631 else
2633 if (TREE_CODE (*node) == FUNCTION_TYPE
2634 || TREE_CODE (*node) == METHOD_TYPE)
2636 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2638 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2639 *no_add_attrs = true;
2642 else if (TREE_CODE (*node) == POINTER_TYPE
2643 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2644 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2645 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2647 *node = build_variant_type_copy (*node);
2648 TREE_TYPE (*node) = build_type_attribute_variant
2649 (TREE_TYPE (*node),
2650 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2651 *no_add_attrs = true;
2653 else
2655 /* Possibly pass this attribute on from the type to a decl. */
2656 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2657 | (int) ATTR_FLAG_FUNCTION_NEXT
2658 | (int) ATTR_FLAG_ARRAY_NEXT))
2660 *no_add_attrs = true;
2661 return tree_cons (name, args, NULL_TREE);
2663 else
2665 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2670 return NULL_TREE;
2673 /* Handle the "notshared" attribute. This attribute is another way of
2674 requesting hidden visibility. ARM's compiler supports
2675 "__declspec(notshared)"; we support the same thing via an
2676 attribute. */
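/* For example, in C++ something like

	class __attribute__ ((notshared)) C { virtual void f (); };

   requests hidden visibility for C's vtable and RTTI data.  */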
2678 static tree
2679 arm_handle_notshared_attribute (tree *node,
2680 tree name ATTRIBUTE_UNUSED,
2681 tree args ATTRIBUTE_UNUSED,
2682 int flags ATTRIBUTE_UNUSED,
2683 bool *no_add_attrs)
2685 tree decl = TYPE_NAME (*node);
2687 if (decl)
2689 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2690 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2691 *no_add_attrs = false;
2693 return NULL_TREE;
2696 /* Return 0 if the attributes for two types are incompatible, 1 if they
2697 are compatible, and 2 if they are nearly compatible (which causes a
2698 warning to be generated). */
2699 static int
2700 arm_comp_type_attributes (tree type1, tree type2)
2702 int l1, l2, s1, s2;
2704 /* Check for mismatch of non-default calling convention. */
2705 if (TREE_CODE (type1) != FUNCTION_TYPE)
2706 return 1;
2708 /* Check for mismatched call attributes. */
2709 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2710 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2711 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2712 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2714 /* Only bother to check if an attribute is defined. */
2715 if (l1 | l2 | s1 | s2)
2717 /* If one type has an attribute, the other must have the same attribute. */
2718 if ((l1 != l2) || (s1 != s2))
2719 return 0;
2721 /* Disallow mixed attributes. */
2722 if ((l1 & s2) || (l2 & s1))
2723 return 0;
2726 /* Check for mismatched ISR attribute. */
2727 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2728 if (! l1)
2729 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2730 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2731 if (! l2)
2732 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2733 if (l1 != l2)
2734 return 0;
2736 return 1;
2739 /* Encode long_call or short_call attribute by prefixing
2740 symbol name in DECL with a special character FLAG. */
2741 void
2742 arm_encode_call_attribute (tree decl, int flag)
2744 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2745 int len = strlen (str);
2746 char * newstr;
2748 /* Do not allow weak functions to be treated as short call. */
2749 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2750 return;
2752 newstr = alloca (len + 2);
2753 newstr[0] = flag;
2754 strcpy (newstr + 1, str);
2756 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2757 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2760 /* Assigns default attributes to a newly defined type. This is used to
2761 set short_call/long_call attributes for function types of
2762 functions defined inside corresponding #pragma scopes. */
2763 static void
2764 arm_set_default_type_attributes (tree type)
2766 /* Add __attribute__ ((long_call)) to all functions when inside
2767 #pragma long_calls, or __attribute__ ((short_call)) when inside
2768 #pragma no_long_calls. */
2769 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2771 tree type_attr_list, attr_name;
2772 type_attr_list = TYPE_ATTRIBUTES (type);
2774 if (arm_pragma_long_calls == LONG)
2775 attr_name = get_identifier ("long_call");
2776 else if (arm_pragma_long_calls == SHORT)
2777 attr_name = get_identifier ("short_call");
2778 else
2779 return;
2781 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2782 TYPE_ATTRIBUTES (type) = type_attr_list;
2786 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2787 defined within the current compilation unit. If this cannot be
2788 determined, then 0 is returned. */
2789 static int
2790 current_file_function_operand (rtx sym_ref)
2792 /* This is a bit of a fib. A function will have a short call flag
2793 applied to its name if it has the short call attribute, or it has
2794 already been defined within the current compilation unit. */
2795 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2796 return 1;
2798 /* The current function is always defined within the current compilation
2799 unit. If it is a weak definition, however, then this may not be the real
2800 definition of the function, and so we have to say no. */
2801 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2802 && !DECL_WEAK (current_function_decl))
2803 return 1;
2805 /* We cannot make the determination - default to returning 0. */
2806 return 0;
2809 /* Return nonzero if a 32 bit "long_call" should be generated for
2810 this call. We generate a long_call if the function:
2812 a. has an __attribute__ ((long_call))
2813 or b. is within the scope of a #pragma long_calls
2814 or c. the -mlong-calls command line switch has been specified,
2815 and either:
2816 1. -ffunction-sections is in effect
2817 or 2. the current function has __attribute__ ((section))
2818 or 3. the target function has __attribute__ ((section))
2820 However we do not generate a long call if the function:
2822 d. has an __attribute__ ((short_call))
2823 or e. is inside the scope of a #pragma no_long_calls
2824 or f. is defined within the current compilation unit.
2826 This function will be called by C fragments contained in the machine
2827 description file. SYM_REF and CALL_COOKIE correspond to the matched
2828 rtl operands. CALL_SYMBOL is used to distinguish between
2829 two different callers of the function. It is set to 1 in the
2830 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2831 and "call_value" patterns. This is because of the difference in the
2832 SYM_REFs passed by these patterns. */
2834 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2836 if (!call_symbol)
2838 if (GET_CODE (sym_ref) != MEM)
2839 return 0;
2841 sym_ref = XEXP (sym_ref, 0);
2844 if (GET_CODE (sym_ref) != SYMBOL_REF)
2845 return 0;
2847 if (call_cookie & CALL_SHORT)
2848 return 0;
2850 if (TARGET_LONG_CALLS)
2852 if (flag_function_sections
2853 || DECL_SECTION_NAME (current_function_decl))
2854 /* c.3 is handled by the definition of the
2855 ARM_DECLARE_FUNCTION_SIZE macro. */
2856 return 1;
2859 if (current_file_function_operand (sym_ref))
2860 return 0;
2862 return (call_cookie & CALL_LONG)
2863 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2864 || TARGET_LONG_CALLS;
2867 /* Return nonzero if it is ok to make a tail-call to DECL. */
2868 static bool
2869 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2871 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2873 if (cfun->machine->sibcall_blocked)
2874 return false;
2876 /* Never tailcall something for which we have no decl, or if we
2877 are in Thumb mode. */
2878 if (decl == NULL || TARGET_THUMB)
2879 return false;
2881 /* Get the calling method. */
2882 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2883 call_type = CALL_SHORT;
2884 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2885 call_type = CALL_LONG;
2887 /* Cannot tail-call to long calls, since these are out of range of
2888 a branch instruction. However, if not compiling PIC, we know
2889 we can reach the symbol if it is in this compilation unit. */
2890 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2891 return false;
2893 /* If we are interworking and the function is not declared static
2894 then we can't tail-call it unless we know that it exists in this
2895 compilation unit (since it might be a Thumb routine). */
2896 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2897 return false;
2899 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2900 if (IS_INTERRUPT (arm_current_func_type ()))
2901 return false;
2903 /* Everything else is ok. */
2904 return true;
2908 /* Addressing mode support functions. */
2910 /* Return nonzero if X is a legitimate immediate operand when compiling
2911 for PIC. */
2913 legitimate_pic_operand_p (rtx x)
2915 if (CONSTANT_P (x)
2916 && flag_pic
2917 && (GET_CODE (x) == SYMBOL_REF
2918 || (GET_CODE (x) == CONST
2919 && GET_CODE (XEXP (x, 0)) == PLUS
2920 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2921 return 0;
2923 return 1;
2927 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2929 if (GET_CODE (orig) == SYMBOL_REF
2930 || GET_CODE (orig) == LABEL_REF)
2932 #ifndef AOF_ASSEMBLER
2933 rtx pic_ref, address;
2934 #endif
2935 rtx insn;
2936 int subregs = 0;
2938 if (reg == 0)
2940 if (no_new_pseudos)
2941 abort ();
2942 else
2943 reg = gen_reg_rtx (Pmode);
2945 subregs = 1;
2948 #ifdef AOF_ASSEMBLER
2949 /* The AOF assembler can generate relocations for these directly, and
2950 understands that the PIC register has to be added into the offset. */
2951 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2952 #else
2953 if (subregs)
2954 address = gen_reg_rtx (Pmode);
2955 else
2956 address = reg;
2958 if (TARGET_ARM)
2959 emit_insn (gen_pic_load_addr_arm (address, orig));
2960 else
2961 emit_insn (gen_pic_load_addr_thumb (address, orig));
2963 if ((GET_CODE (orig) == LABEL_REF
2964 || (GET_CODE (orig) == SYMBOL_REF &&
2965 SYMBOL_REF_LOCAL_P (orig)))
2966 && NEED_GOT_RELOC)
2967 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2968 else
2970 pic_ref = gen_const_mem (Pmode,
2971 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2972 address));
2975 insn = emit_move_insn (reg, pic_ref);
2976 #endif
2977 current_function_uses_pic_offset_table = 1;
2978 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2979 by the loop optimizer. */
2980 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2981 REG_NOTES (insn));
2982 return reg;
2984 else if (GET_CODE (orig) == CONST)
2986 rtx base, offset;
2988 if (GET_CODE (XEXP (orig, 0)) == PLUS
2989 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2990 return orig;
2992 if (reg == 0)
2994 if (no_new_pseudos)
2995 abort ();
2996 else
2997 reg = gen_reg_rtx (Pmode);
3000 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3002 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3003 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3004 base == reg ? 0 : reg);
3006 else
3007 abort ();
3009 if (GET_CODE (offset) == CONST_INT)
3011 /* The base register doesn't really matter; we only want to
3012 test the index for the appropriate mode. */
3013 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3015 if (!no_new_pseudos)
3016 offset = force_reg (Pmode, offset);
3017 else
3018 abort ();
3021 if (GET_CODE (offset) == CONST_INT)
3022 return plus_constant (base, INTVAL (offset));
3025 if (GET_MODE_SIZE (mode) > 4
3026 && (GET_MODE_CLASS (mode) == MODE_INT
3027 || TARGET_SOFT_FLOAT))
3029 emit_insn (gen_addsi3 (reg, base, offset));
3030 return reg;
3033 return gen_rtx_PLUS (Pmode, base, offset);
3036 return orig;
3040 /* Find a spare low register. */
3042 static int
3043 thumb_find_work_register (int live_regs_mask)
3045 int reg;
3047 /* Use a spare arg register. */
3048 if (!regs_ever_live[LAST_ARG_REGNUM])
3049 return LAST_ARG_REGNUM;
3051 /* Look for a pushed register. This is used before the frame pointer is
3052 set up, so r7 is a candidate. */
3053 for (reg = LAST_LO_REGNUM; reg >= 0; reg--)
3054 if (live_regs_mask & (1 << reg))
3055 return reg;
3057 /* Something went wrong. */
3058 abort ();
3062 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3063 low register. */
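/* Roughly, the ARM-mode sequence emitted below is (a sketch):

	ldr	rPIC, =_GLOBAL_OFFSET_TABLE_ - (L1 + 8)
     L1: add	rPIC, pc, rPIC

   where the +8 accounts for the ARM pipeline offset of the PC
   (Thumb uses +4).  */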
3065 void
3066 arm_load_pic_register (unsigned int scratch)
3068 #ifndef AOF_ASSEMBLER
3069 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3070 rtx global_offset_table;
3072 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3073 return;
3075 if (!flag_pic)
3076 abort ();
3078 l1 = gen_label_rtx ();
3080 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3081 /* On the ARM the PC register contains 'dot + 8' at the time of the
3082 addition; on the Thumb it is 'dot + 4'. */
3083 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3084 if (GOT_PCREL)
3085 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3086 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3087 else
3088 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3090 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3092 if (TARGET_ARM)
3094 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3095 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3097 else
3099 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3101 /* We will have pushed the PIC register, so we should always be
3102 able to find a work register. */
3103 pic_tmp = gen_rtx_REG (SImode, scratch);
3104 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3105 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3107 else
3108 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3109 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3112 /* Need to emit this whether or not we obey regdecls,
3113 since setjmp/longjmp can cause life info to screw up. */
3114 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3115 #endif /* AOF_ASSEMBLER */
3119 /* Return nonzero if X is valid as an ARM state addressing register. */
3120 static int
3121 arm_address_register_rtx_p (rtx x, int strict_p)
3123 int regno;
3125 if (GET_CODE (x) != REG)
3126 return 0;
3128 regno = REGNO (x);
3130 if (strict_p)
3131 return ARM_REGNO_OK_FOR_BASE_P (regno);
3133 return (regno <= LAST_ARM_REGNUM
3134 || regno >= FIRST_PSEUDO_REGISTER
3135 || regno == FRAME_POINTER_REGNUM
3136 || regno == ARG_POINTER_REGNUM);
3139 /* Return nonzero if X is a valid ARM state address operand. */
3141 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3142 int strict_p)
3144 bool use_ldrd;
3145 enum rtx_code code = GET_CODE (x);
3147 if (arm_address_register_rtx_p (x, strict_p))
3148 return 1;
3150 use_ldrd = (TARGET_LDRD
3151 && (mode == DImode
3152 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3154 if (code == POST_INC || code == PRE_DEC
3155 || ((code == PRE_INC || code == POST_DEC)
3156 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3157 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3159 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3160 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3161 && GET_CODE (XEXP (x, 1)) == PLUS
3162 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3164 rtx addend = XEXP (XEXP (x, 1), 1);
3166 /* Don't allow ldrd post-increment by register because it's hard
3167 to fix up invalid register choices. */
3168 if (use_ldrd
3169 && GET_CODE (x) == POST_MODIFY
3170 && GET_CODE (addend) == REG)
3171 return 0;
3173 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3174 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3177 /* After reload constants split into minipools will have addresses
3178 from a LABEL_REF. */
3179 else if (reload_completed
3180 && (code == LABEL_REF
3181 || (code == CONST
3182 && GET_CODE (XEXP (x, 0)) == PLUS
3183 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3184 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3185 return 1;
3187 else if (mode == TImode)
3188 return 0;
3190 else if (code == PLUS)
3192 rtx xop0 = XEXP (x, 0);
3193 rtx xop1 = XEXP (x, 1);
3195 return ((arm_address_register_rtx_p (xop0, strict_p)
3196 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3197 || (arm_address_register_rtx_p (xop1, strict_p)
3198 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3201 #if 0
3202 /* Reload currently can't handle MINUS, so disable this for now */
3203 else if (GET_CODE (x) == MINUS)
3205 rtx xop0 = XEXP (x, 0);
3206 rtx xop1 = XEXP (x, 1);
3208 return (arm_address_register_rtx_p (xop0, strict_p)
3209 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3211 #endif
3213 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3214 && code == SYMBOL_REF
3215 && CONSTANT_POOL_ADDRESS_P (x)
3216 && ! (flag_pic
3217 && symbol_mentioned_p (get_pool_constant (x))))
3218 return 1;
3220 return 0;
3223 /* Return nonzero if INDEX is valid for an address index operand in
3224 ARM state. */
3225 static int
3226 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3227 int strict_p)
3229 HOST_WIDE_INT range;
3230 enum rtx_code code = GET_CODE (index);
3232 /* Standard coprocessor addressing modes. */
3233 if (TARGET_HARD_FLOAT
3234 && (TARGET_FPA || TARGET_MAVERICK)
3235 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3236 || (TARGET_MAVERICK && mode == DImode)))
3237 return (code == CONST_INT && INTVAL (index) < 1024
3238 && INTVAL (index) > -1024
3239 && (INTVAL (index) & 3) == 0);
3241 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3242 return (code == CONST_INT
3243 && INTVAL (index) < 1024
3244 && INTVAL (index) > -1024
3245 && (INTVAL (index) & 3) == 0);
3247 if (arm_address_register_rtx_p (index, strict_p)
3248 && (GET_MODE_SIZE (mode) <= 4))
3249 return 1;
3251 if (mode == DImode || mode == DFmode)
3253 if (code == CONST_INT)
3255 HOST_WIDE_INT val = INTVAL (index);
3257 if (TARGET_LDRD)
3258 return val > -256 && val < 256;
3259 else
3260 return val > -4096 && val < 4092;
3263 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3266 if (GET_MODE_SIZE (mode) <= 4
3267 && ! (arm_arch4
3268 && (mode == HImode
3269 || (mode == QImode && outer == SIGN_EXTEND))))
3271 if (code == MULT)
3273 rtx xiop0 = XEXP (index, 0);
3274 rtx xiop1 = XEXP (index, 1);
3276 return ((arm_address_register_rtx_p (xiop0, strict_p)
3277 && power_of_two_operand (xiop1, SImode))
3278 || (arm_address_register_rtx_p (xiop1, strict_p)
3279 && power_of_two_operand (xiop0, SImode)));
3281 else if (code == LSHIFTRT || code == ASHIFTRT
3282 || code == ASHIFT || code == ROTATERT)
3284 rtx op = XEXP (index, 1);
3286 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3287 && GET_CODE (op) == CONST_INT
3288 && INTVAL (op) > 0
3289 && INTVAL (op) <= 31);
3293 /* For ARM v4 we may be doing a sign-extend operation during the
3294 load. */
3295 if (arm_arch4)
3297 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3298 range = 256;
3299 else
3300 range = 4096;
3302 else
3303 range = (mode == HImode) ? 4095 : 4096;
3305 return (code == CONST_INT
3306 && INTVAL (index) < range
3307 && INTVAL (index) > -range);
3310 /* Return nonzero if X is valid as a Thumb state base register. */
3311 static int
3312 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3314 int regno;
3316 if (GET_CODE (x) != REG)
3317 return 0;
3319 regno = REGNO (x);
3321 if (strict_p)
3322 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3324 return (regno <= LAST_LO_REGNUM
3325 || regno > LAST_VIRTUAL_REGISTER
3326 || regno == FRAME_POINTER_REGNUM
3327 || (GET_MODE_SIZE (mode) >= 4
3328 && (regno == STACK_POINTER_REGNUM
3329 || regno >= FIRST_PSEUDO_REGISTER
3330 || x == hard_frame_pointer_rtx
3331 || x == arg_pointer_rtx)));
3334 /* Return nonzero if X is a legitimate index register. This is the case
3335 for any base register that can access a QImode object. */
3336 inline static int
3337 thumb_index_register_rtx_p (rtx x, int strict_p)
3339 return thumb_base_register_rtx_p (x, QImode, strict_p);
3342 /* Return nonzero if X is a legitimate Thumb-state address.
3344 The AP may be eliminated to either the SP or the FP, so we use the
3345 least common denominator, e.g. SImode, and offsets from 0 to 64.
3347 ??? Verify whether the above is the right approach.
3349 ??? Also, the FP may be eliminated to the SP, so perhaps that
3350 needs special handling also.
3352 ??? Look at how the mips16 port solves this problem. It probably uses
3353 better ways to solve some of these problems.
3355 Although it is not incorrect, we don't accept QImode and HImode
3356 addresses based on the frame pointer or arg pointer until the
3357 reload pass starts. This is so that eliminating such addresses
3358 into stack based ones won't produce impossible code. */
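/* Examples of addresses accepted here (illustrative):

	[rN, rM]	-- any two index registers (QImode upwards)
	[rN, #off]	-- scaled 5-bit offset, e.g. 0..124 for SImode
	[sp, #off]	-- 0..1020, word-aligned, SImode or larger  */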
3360 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3362 /* ??? Not clear if this is right. Experiment. */
3363 if (GET_MODE_SIZE (mode) < 4
3364 && !(reload_in_progress || reload_completed)
3365 && (reg_mentioned_p (frame_pointer_rtx, x)
3366 || reg_mentioned_p (arg_pointer_rtx, x)
3367 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3368 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3369 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3370 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3371 return 0;
3373 /* Accept any base register. SP only in SImode or larger. */
3374 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3375 return 1;
3377 /* This is PC relative data before arm_reorg runs. */
3378 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3379 && GET_CODE (x) == SYMBOL_REF
3380 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3381 return 1;
3383 /* This is PC relative data after arm_reorg runs. */
3384 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3385 && (GET_CODE (x) == LABEL_REF
3386 || (GET_CODE (x) == CONST
3387 && GET_CODE (XEXP (x, 0)) == PLUS
3388 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3389 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3390 return 1;
3392 /* Post-inc indexing only supported for SImode and larger. */
3393 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3394 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3395 return 1;
3397 else if (GET_CODE (x) == PLUS)
3399 /* REG+REG address can be any two index registers. */
3400 /* We disallow FRAME+REG addressing since we know that FRAME
3401 will be replaced with STACK, and SP relative addressing only
3402 permits SP+OFFSET. */
3403 if (GET_MODE_SIZE (mode) <= 4
3404 && XEXP (x, 0) != frame_pointer_rtx
3405 && XEXP (x, 1) != frame_pointer_rtx
3406 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3407 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3408 return 1;
3410 /* REG+const has 5-7 bit offset for non-SP registers. */
3411 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3412 || XEXP (x, 0) == arg_pointer_rtx)
3413 && GET_CODE (XEXP (x, 1)) == CONST_INT
3414 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3415 return 1;
3417 /* REG+const has 10 bit offset for SP, but only SImode and
3418 larger is supported. */
3419 /* ??? Should probably check for DI/DFmode overflow here
3420 just like GO_IF_LEGITIMATE_OFFSET does. */
3421 else if (GET_CODE (XEXP (x, 0)) == REG
3422 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3423 && GET_MODE_SIZE (mode) >= 4
3424 && GET_CODE (XEXP (x, 1)) == CONST_INT
3425 && INTVAL (XEXP (x, 1)) >= 0
3426 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3427 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3428 return 1;
3430 else if (GET_CODE (XEXP (x, 0)) == REG
3431 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3432 && GET_MODE_SIZE (mode) >= 4
3433 && GET_CODE (XEXP (x, 1)) == CONST_INT
3434 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3435 return 1;
3438 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3439 && GET_MODE_SIZE (mode) == 4
3440 && GET_CODE (x) == SYMBOL_REF
3441 && CONSTANT_POOL_ADDRESS_P (x)
3442 && !(flag_pic
3443 && symbol_mentioned_p (get_pool_constant (x))))
3444 return 1;
3446 return 0;
3449 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3450 instruction of mode MODE. */
3452 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3454 switch (GET_MODE_SIZE (mode))
3456 case 1:
3457 return val >= 0 && val < 32;
3459 case 2:
3460 return val >= 0 && val < 64 && (val & 1) == 0;
3462 default:
3463 return (val >= 0
3464 && (val + GET_MODE_SIZE (mode)) <= 128
3465 && (val & 3) == 0);
3469 /* Try machine-dependent ways of modifying an illegitimate address
3470 to be legitimate. If we find one, return the new, valid address. */
3472 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3474 if (GET_CODE (x) == PLUS)
3476 rtx xop0 = XEXP (x, 0);
3477 rtx xop1 = XEXP (x, 1);
3479 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3480 xop0 = force_reg (SImode, xop0);
3482 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3483 xop1 = force_reg (SImode, xop1);
3485 if (ARM_BASE_REGISTER_RTX_P (xop0)
3486 && GET_CODE (xop1) == CONST_INT)
3488 HOST_WIDE_INT n, low_n;
3489 rtx base_reg, val;
3490 n = INTVAL (xop1);
3492 /* VFP addressing modes actually allow greater offsets, but for
3493 now we just stick with the lowest common denominator. */
3494 if (mode == DImode
3495 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3497 low_n = n & 0x0f;
3498 n &= ~0x0f;
3499 if (low_n > 4)
3501 n += 16;
3502 low_n -= 16;
3505 else
3507 low_n = ((mode) == TImode ? 0
3508 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3509 n -= low_n;
3512 base_reg = gen_reg_rtx (SImode);
3513 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3514 GEN_INT (n)), NULL_RTX);
3515 emit_move_insn (base_reg, val);
3516 x = (low_n == 0 ? base_reg
3517 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3519 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3520 x = gen_rtx_PLUS (SImode, xop0, xop1);
3523 /* XXX We don't allow MINUS any more -- see comment in
3524 arm_legitimate_address_p (). */
3525 else if (GET_CODE (x) == MINUS)
3527 rtx xop0 = XEXP (x, 0);
3528 rtx xop1 = XEXP (x, 1);
3530 if (CONSTANT_P (xop0))
3531 xop0 = force_reg (SImode, xop0);
3533 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3534 xop1 = force_reg (SImode, xop1);
3536 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3537 x = gen_rtx_MINUS (SImode, xop0, xop1);
3540 if (flag_pic)
3542 /* We need to find and carefully transform any SYMBOL and LABEL
3543 references; so go back to the original address expression. */
3544 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3546 if (new_x != orig_x)
3547 x = new_x;
3550 return x;
3554 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3555 to be legitimate. If we find one, return the new, valid address. */
3557 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3559 if (GET_CODE (x) == PLUS
3560 && GET_CODE (XEXP (x, 1)) == CONST_INT
3561 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3562 || INTVAL (XEXP (x, 1)) < 0))
3564 rtx xop0 = XEXP (x, 0);
3565 rtx xop1 = XEXP (x, 1);
3566 HOST_WIDE_INT offset = INTVAL (xop1);
3568 /* Try and fold the offset into a biasing of the base register and
3569 then offsetting that. Don't do this when optimizing for space
3570 since it can cause too many CSEs. */
3571 if (optimize_size && offset >= 0
3572 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3574 HOST_WIDE_INT delta;
3576 if (offset >= 256)
3577 delta = offset - (256 - GET_MODE_SIZE (mode));
3578 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3579 delta = 31 * GET_MODE_SIZE (mode);
3580 else
3581 delta = offset & (~31 * GET_MODE_SIZE (mode));
3583 xop0 = force_operand (plus_constant (xop0, offset - delta),
3584 NULL_RTX);
3585 x = plus_constant (xop0, delta);
3587 else if (offset < 0 && offset > -256)
3588 /* Small negative offsets are best done with a subtract before the
3589 dereference; forcing these into a register normally takes two
3590 instructions. */
3591 x = force_operand (x, NULL_RTX);
3592 else
3594 /* For the remaining cases, force the constant into a register. */
3595 xop1 = force_reg (SImode, xop1);
3596 x = gen_rtx_PLUS (SImode, xop0, xop1);
3599 else if (GET_CODE (x) == PLUS
3600 && s_register_operand (XEXP (x, 1), SImode)
3601 && !s_register_operand (XEXP (x, 0), SImode))
3603 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3605 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3608 if (flag_pic)
3610 /* We need to find and carefully transform any SYMBOL and LABEL
3611 references; so go back to the original address expression. */
3612 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3614 if (new_x != orig_x)
3615 x = new_x;
3618 return x;
3623 #define REG_OR_SUBREG_REG(X) \
3624 (GET_CODE (X) == REG \
3625 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3627 #define REG_OR_SUBREG_RTX(X) \
3628 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3630 #ifndef COSTS_N_INSNS
3631 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3632 #endif
3633 static inline int
3634 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3636 enum machine_mode mode = GET_MODE (x);
3638 switch (code)
3640 case ASHIFT:
3641 case ASHIFTRT:
3642 case LSHIFTRT:
3643 case ROTATERT:
3644 case PLUS:
3645 case MINUS:
3646 case COMPARE:
3647 case NEG:
3648 case NOT:
3649 return COSTS_N_INSNS (1);
3651 case MULT:
3652 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3654 int cycles = 0;
3655 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3657 while (i)
3659 i >>= 2;
3660 cycles++;
3662 return COSTS_N_INSNS (2) + cycles;
3664 return COSTS_N_INSNS (1) + 16;
3666 case SET:
3667 return (COSTS_N_INSNS (1)
3668 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3669 + GET_CODE (SET_DEST (x)) == MEM));
3671 case CONST_INT:
3672 if (outer == SET)
3674 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3675 return 0;
3676 if (thumb_shiftable_const (INTVAL (x)))
3677 return COSTS_N_INSNS (2);
3678 return COSTS_N_INSNS (3);
3680 else if ((outer == PLUS || outer == COMPARE)
3681 && INTVAL (x) < 256 && INTVAL (x) > -256)
3682 return 0;
3683 else if (outer == AND
3684 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3685 return COSTS_N_INSNS (1);
3686 else if (outer == ASHIFT || outer == ASHIFTRT
3687 || outer == LSHIFTRT)
3688 return 0;
3689 return COSTS_N_INSNS (2);
3691 case CONST:
3692 case CONST_DOUBLE:
3693 case LABEL_REF:
3694 case SYMBOL_REF:
3695 return COSTS_N_INSNS (3);
3697 case UDIV:
3698 case UMOD:
3699 case DIV:
3700 case MOD:
3701 return 100;
3703 case TRUNCATE:
3704 return 99;
3706 case AND:
3707 case XOR:
3708 case IOR:
3709 /* XXX guess. */
3710 return 8;
3712 case MEM:
3713 /* XXX another guess. */
3714 /* Memory costs quite a lot for the first word, but subsequent words
3715 load at the equivalent of a single insn each. */
3716 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3717 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3718 ? 4 : 0));
3720 case IF_THEN_ELSE:
3721 /* XXX a guess. */
3722 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3723 return 14;
3724 return 2;
3726 case ZERO_EXTEND:
3727 /* XXX still guessing. */
3728 switch (GET_MODE (XEXP (x, 0)))
3730 case QImode:
3731 return (1 + (mode == DImode ? 4 : 0)
3732 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3734 case HImode:
3735 return (4 + (mode == DImode ? 4 : 0)
3736 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3738 case SImode:
3739 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3741 default:
3742 return 99;
3745 default:
3746 return 99;
3751 /* Worker routine for arm_rtx_costs. */
3752 static inline int
3753 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3755 enum machine_mode mode = GET_MODE (x);
3756 enum rtx_code subcode;
3757 int extra_cost;
3759 switch (code)
3761 case MEM:
3762 /* Memory costs quite a lot for the first word, but subsequent words
3763 load at the equivalent of a single insn each. */
3764 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3765 + (GET_CODE (x) == SYMBOL_REF
3766 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3768 case DIV:
3769 case MOD:
3770 case UDIV:
3771 case UMOD:
3772 return optimize_size ? COSTS_N_INSNS (2) : 100;
3774 case ROTATE:
3775 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3776 return 4;
3777 /* Fall through */
3778 case ROTATERT:
3779 if (mode != SImode)
3780 return 8;
3781 /* Fall through */
3782 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3783 if (mode == DImode)
3784 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3785 + ((GET_CODE (XEXP (x, 0)) == REG
3786 || (GET_CODE (XEXP (x, 0)) == SUBREG
3787 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3788 ? 0 : 8));
3789 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3790 || (GET_CODE (XEXP (x, 0)) == SUBREG
3791 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3792 ? 0 : 4)
3793 + ((GET_CODE (XEXP (x, 1)) == REG
3794 || (GET_CODE (XEXP (x, 1)) == SUBREG
3795 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3796 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3797 ? 0 : 4));
3799 case MINUS:
3800 if (mode == DImode)
3801 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3802 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3803 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3804 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3805 ? 0 : 8));
3807 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3808 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3809 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3810 && arm_const_double_rtx (XEXP (x, 1))))
3811 ? 0 : 8)
3812 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3813 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3814 && arm_const_double_rtx (XEXP (x, 0))))
3815 ? 0 : 8));
3817 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3818 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3819 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3820 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3821 || subcode == ASHIFTRT || subcode == LSHIFTRT
3822 || subcode == ROTATE || subcode == ROTATERT
3823 || (subcode == MULT
3824 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3825 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3826 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3827 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3828 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3829 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3830 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3831 return 1;
3832 /* Fall through */
3834 case PLUS:
3835 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3836 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3837 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3838 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3839 && arm_const_double_rtx (XEXP (x, 1))))
3840 ? 0 : 8));
3842 /* Fall through */
3843 case AND: case XOR: case IOR:
3844 extra_cost = 0;
3846 /* Normally the frame registers will be spilt into reg+const during
3847 reload, so it is a bad idea to combine them with other instructions,
3848 since then they might not be moved outside of loops. As a compromise
3849 we allow integration with ops that have a constant as their second
3850 operand. */
3851 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3852 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3853 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3854 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3855 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3856 extra_cost = 4;
3858 if (mode == DImode)
3859 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3860 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3861 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3862 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3863 ? 0 : 8));
3865 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3866 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3867 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3868 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3869 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3870 ? 0 : 4));
3872 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3873 return (1 + extra_cost
3874 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3875 || subcode == LSHIFTRT || subcode == ASHIFTRT
3876 || subcode == ROTATE || subcode == ROTATERT
3877 || (subcode == MULT
3878 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3879 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3880 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3881 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3882 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3883 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3884 ? 0 : 4));
3886 return 8;
3888 case MULT:
3889 /* This should have been handled by the CPU specific routines. */
3890 abort ();
3892 case TRUNCATE:
3893 if (arm_arch3m && mode == SImode
3894 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3895 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3896 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3897 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3898 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3899 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3900 return 8;
3901 return 99;
3903 case NEG:
3904 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3905 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3906 /* Fall through */
3907 case NOT:
3908 if (mode == DImode)
3909 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3911 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3913 case IF_THEN_ELSE:
3914 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3915 return 14;
3916 return 2;
3918 case COMPARE:
3919 return 1;
3921 case ABS:
3922 return 4 + (mode == DImode ? 4 : 0);
3924 case SIGN_EXTEND:
3925 if (GET_MODE (XEXP (x, 0)) == QImode)
3926 return (4 + (mode == DImode ? 4 : 0)
3927 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3928 /* Fall through */
3929 case ZERO_EXTEND:
3930 switch (GET_MODE (XEXP (x, 0)))
3932 case QImode:
3933 return (1 + (mode == DImode ? 4 : 0)
3934 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3936 case HImode:
3937 return (4 + (mode == DImode ? 4 : 0)
3938 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3940 case SImode:
3941 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3943 case V8QImode:
3944 case V4HImode:
3945 case V2SImode:
3946 case V4QImode:
3947 case V2HImode:
3948 return 1;
3950 default:
3951 break;
3953 abort ();
3955 case CONST_INT:
3956 if (const_ok_for_arm (INTVAL (x)))
3957 return outer == SET ? 2 : -1;
3958 else if (outer == AND
3959 && const_ok_for_arm (~INTVAL (x)))
3960 return -1;
3961 else if ((outer == COMPARE
3962 || outer == PLUS || outer == MINUS)
3963 && const_ok_for_arm (-INTVAL (x)))
3964 return -1;
3965 else
3966 return 5;
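/* The -1 returns above deliberately make such constants cheaper than
   free: for instance (plus (reg) (const_int -1)) can be emitted as a
   single "sub rd, rn, #1", so it is worth steering combine towards
   keeping the constant inline.  */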
3968 case CONST:
3969 case LABEL_REF:
3970 case SYMBOL_REF:
3971 return 6;
3973 case CONST_DOUBLE:
3974 if (arm_const_double_rtx (x))
3975 return outer == SET ? 2 : -1;
3976 else if ((outer == COMPARE || outer == PLUS)
3977 && neg_const_double_rtx_ok_for_fpa (x))
3978 return -1;
3979 return 7;
3981 default:
3982 return 99;
3986 /* RTX costs when optimizing for size. */
3987 static bool
3988 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
3990 enum machine_mode mode = GET_MODE (x);
3992 if (TARGET_THUMB)
3994 /* XXX TBD. For now, use the standard costs. */
3995 *total = thumb_rtx_costs (x, code, outer_code);
3996 return true;
3999 switch (code)
4001 case MEM:
4002 /* A memory access costs 1 insn if the mode is small, or the address is
4003 a single register; otherwise it costs one insn per word. */
4004 if (REG_P (XEXP (x, 0)))
4005 *total = COSTS_N_INSNS (1);
4006 else
4007 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4008 return true;
4010 case DIV:
4011 case MOD:
4012 case UDIV:
4013 case UMOD:
4014 /* Needs a libcall, so it costs about this. */
4015 *total = COSTS_N_INSNS (2);
4016 return false;
4018 case ROTATE:
4019 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4021 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4022 return true;
4024 /* Fall through */
4025 case ROTATERT:
4026 case ASHIFT:
4027 case LSHIFTRT:
4028 case ASHIFTRT:
4029 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4031 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4032 return true;
4034 else if (mode == SImode)
4036 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4037 /* Slightly disparage register shifts, but not by much. */
4038 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4039 *total += 1 + rtx_cost (XEXP (x, 1), code);
4040 return true;
4043 /* Needs a libcall. */
4044 *total = COSTS_N_INSNS (2);
4045 return false;
4047 case MINUS:
4048 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4050 *total = COSTS_N_INSNS (1);
4051 return false;
4054 if (mode == SImode)
4056 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4057 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4059 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4060 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4061 || subcode1 == ROTATE || subcode1 == ROTATERT
4062 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4063 || subcode1 == ASHIFTRT)
4065 /* It's just the cost of the two operands. */
4066 *total = 0;
4067 return false;
4070 *total = COSTS_N_INSNS (1);
4071 return false;
4074 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4075 return false;
4077 case PLUS:
4078 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4080 *total = COSTS_N_INSNS (1);
4081 return false;
4084 /* Fall through */
4085 case AND: case XOR: case IOR:
4086 if (mode == SImode)
4088 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4090 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4091 || subcode == LSHIFTRT || subcode == ASHIFTRT
4092 || (code == AND && subcode == NOT))
4094 /* It's just the cost of the two operands. */
4095 *total = 0;
4096 return false;
4100 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4101 return false;
4103 case MULT:
4104 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4105 return false;
4107 case NEG:
4108 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4109 *total = COSTS_N_INSNS (1);
4110 /* Fall through */
4111 case NOT:
4112 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4114 return false;
4116 case IF_THEN_ELSE:
4117 *total = 0;
4118 return false;
4120 case COMPARE:
4121 if (cc_register (XEXP (x, 0), VOIDmode))
4122 *total = 0;
4123 else
4124 *total = COSTS_N_INSNS (1);
4125 return false;
4127 case ABS:
4128 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4129 *total = COSTS_N_INSNS (1);
4130 else
4131 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4132 return false;
4134 case SIGN_EXTEND:
4135 *total = 0;
4136 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4138 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4139 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4141 if (mode == DImode)
4142 *total += COSTS_N_INSNS (1);
4143 return false;
4145 case ZERO_EXTEND:
4146 *total = 0;
4147 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4149 switch (GET_MODE (XEXP (x, 0)))
4151 case QImode:
4152 *total += COSTS_N_INSNS (1);
4153 break;
4155 case HImode:
4156 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4158 case SImode:
4159 break;
4161 default:
4162 *total += COSTS_N_INSNS (2);
4166 if (mode == DImode)
4167 *total += COSTS_N_INSNS (1);
4169 return false;
4171 case CONST_INT:
4172 if (const_ok_for_arm (INTVAL (x)))
4173 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4174 else if (const_ok_for_arm (~INTVAL (x)))
4175 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4176 else if (const_ok_for_arm (-INTVAL (x)))
4178 if (outer_code == COMPARE || outer_code == PLUS
4179 || outer_code == MINUS)
4180 *total = 0;
4181 else
4182 *total = COSTS_N_INSNS (1);
4184 else
4185 *total = COSTS_N_INSNS (2);
4186 return true;
4188 case CONST:
4189 case LABEL_REF:
4190 case SYMBOL_REF:
4191 *total = COSTS_N_INSNS (2);
4192 return true;
4194 case CONST_DOUBLE:
4195 *total = COSTS_N_INSNS (4);
4196 return true;
4198 default:
4199 if (mode != VOIDmode)
4200 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4201 else
4202 *total = COSTS_N_INSNS (4); /* Who knows? */
4203 return false;
4207 /* RTX costs for cores with a slow MUL implementation. */
4209 static bool
4210 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4212 enum machine_mode mode = GET_MODE (x);
4214 if (TARGET_THUMB)
4216 *total = thumb_rtx_costs (x, code, outer_code);
4217 return true;
4220 switch (code)
4222 case MULT:
4223 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4224 || mode == DImode)
4226 *total = 30;
4227 return true;
4230 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4232 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4233 & (unsigned HOST_WIDE_INT) 0xffffffff);
4234 int cost, const_ok = const_ok_for_arm (i);
4235 int j, booth_unit_size;
4237 /* Tune as appropriate. */
4238 cost = const_ok ? 4 : 8;
4239 booth_unit_size = 2;
4240 for (j = 0; i && j < 32; j += booth_unit_size)
4242 i >>= booth_unit_size;
4243 cost += 2;
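/* A worked example of the loop above: a multiplier of 0x55 is
   const_ok, so the base cost is 4; the loop then consumes two bits
   per iteration and terminates after four iterations, giving
   4 + 4 * 2 = 12.  */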
4246 *total = cost;
4247 return true;
4250 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4251 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4252 return true;
4254 default:
4255 *total = arm_rtx_costs_1 (x, code, outer_code);
4256 return true;
4261 /* RTX cost for cores with a fast multiply unit (M variants). */
4263 static bool
4264 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4266 enum machine_mode mode = GET_MODE (x);
4268 if (TARGET_THUMB)
4270 *total = thumb_rtx_costs (x, code, outer_code);
4271 return true;
4274 switch (code)
4276 case MULT:
4277 /* There is no point basing this on the tuning, since it is always the
4278 fast variant if it exists at all. */
4279 if (mode == DImode
4280 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4281 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4282 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4284 *total = 8;
4285 return true;
4289 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4290 || mode == DImode)
4292 *total = 30;
4293 return true;
4296 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4298 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4299 & (unsigned HOST_WIDE_INT) 0xffffffff);
4300 int cost, const_ok = const_ok_for_arm (i);
4301 int j, booth_unit_size;
4303 /* Tune as appropriate. */
4304 cost = const_ok ? 4 : 8;
4305 booth_unit_size = 8;
4306 for (j = 0; i && j < 32; j += booth_unit_size)
4308 i >>= booth_unit_size;
4309 cost += 2;
4312 *total = cost;
4313 return true;
4316 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4317 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4318 return true;
4320 default:
4321 *total = arm_rtx_costs_1 (x, code, outer_code);
4322 return true;
4327 /* RTX cost for XScale CPUs. */
4329 static bool
4330 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4332 enum machine_mode mode = GET_MODE (x);
4334 if (TARGET_THUMB)
4336 *total = thumb_rtx_costs (x, code, outer_code);
4337 return true;
4340 switch (code)
4342 case MULT:
4343 /* There is no point basing this on the tuning, since it is always the
4344 fast variant if it exists at all. */
4345 if (mode == DImode
4346 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4347 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4348 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4350 *total = 8;
4351 return true;
4355 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4356 || mode == DImode)
4358 *total = 30;
4359 return true;
4362 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4364 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4365 & (unsigned HOST_WIDE_INT) 0xffffffff);
4366 int cost, const_ok = const_ok_for_arm (i);
4367 unsigned HOST_WIDE_INT masked_const;
4369 /* The cost will be related to two insns.
4370 First a load of the constant (MOV or LDR), then a multiply. */
4371 cost = 2;
4372 if (! const_ok)
4373 cost += 1; /* LDR is probably more expensive because
4374 of longer result latency. */
4375 masked_const = i & 0xffff8000;
4376 if (masked_const != 0 && masked_const != 0xffff8000)
4378 masked_const = i & 0xf8000000;
4379 if (masked_const == 0 || masked_const == 0xf8000000)
4380 cost += 1;
4381 else
4382 cost += 2;
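/* The net effect of the masking above (a sketch of this cost model,
   not of the hardware manual): multipliers representable in 15 bits,
   positive or sign-extended negative, cost 2; those needing up to 27
   bits cost 3; wider ones cost 4; and a further 1 is added whenever
   the constant itself needs an LDR.  */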
4384 *total = cost;
4385 return true;
4388 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4389 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4390 return true;
4392 default:
4393 *total = arm_rtx_costs_1 (x, code, outer_code);
4394 return true;
4399 /* RTX costs for 9e (and later) cores. */
4401 static bool
4402 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4404 enum machine_mode mode = GET_MODE (x);
4405 int nonreg_cost;
4406 int cost;
4408 if (TARGET_THUMB)
4410 switch (code)
4412 case MULT:
4413 *total = COSTS_N_INSNS (3);
4414 return true;
4416 default:
4417 *total = thumb_rtx_costs (x, code, outer_code);
4418 return true;
4422 switch (code)
4424 case MULT:
4425 /* There is no point basing this on the tuning, since it is always the
4426 fast variant if it exists at all. */
4427 if (mode == DImode
4428 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4429 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4430 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4432 *total = 3;
4433 return true;
4437 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4439 *total = 30;
4440 return true;
4442 if (mode == DImode)
4444 cost = 7;
4445 nonreg_cost = 8;
4447 else
4449 cost = 2;
4450 nonreg_cost = 4;
4454 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4455 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4456 return true;
4458 default:
4459 *total = arm_rtx_costs_1 (x, code, outer_code);
4460 return true;
4463 /* All address computations that can be done are free, but rtx cost returns
4464 the same for practically all of them. So we weight the different types
4465 of address here in the order (most pref first):
4466 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4467 static inline int
4468 arm_arm_address_cost (rtx x)
4470 enum rtx_code c = GET_CODE (x);
4472 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4473 return 0;
4474 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4475 return 10;
4477 if (c == PLUS || c == MINUS)
4479 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4480 return 2;
4482 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4483 return 3;
4485 return 4;
4488 return 6;
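/* So, as an illustration of the weighting: a post-increment address
   such as [r3], #4 scores 0, a shifted sum like [r3, r4, lsl #2]
   scores 3, a reg+imm address like [r3, #8] scores 4, a bare [r3]
   scores 6, and a literal reference scores 10.  */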
4491 static inline int
4492 arm_thumb_address_cost (rtx x)
4494 enum rtx_code c = GET_CODE (x);
4496 if (c == REG)
4497 return 1;
4498 if (c == PLUS
4499 && GET_CODE (XEXP (x, 0)) == REG
4500 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4501 return 1;
4503 return 2;
4506 static int
4507 arm_address_cost (rtx x)
4509 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4512 static int
4513 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4515 rtx i_pat, d_pat;
4517 /* Some true dependencies can have a higher cost depending
4518 on precisely how certain input operands are used. */
4519 if (arm_tune_xscale
4520 && REG_NOTE_KIND (link) == 0
4521 && recog_memoized (insn) >= 0
4522 && recog_memoized (dep) >= 0)
4524 int shift_opnum = get_attr_shift (insn);
4525 enum attr_type attr_type = get_attr_type (dep);
4527 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4528 operand for INSN. If we have a shifted input operand and the
4529 instruction we depend on is another ALU instruction, then we may
4530 have to account for an additional stall. */
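/* For instance (an illustrative pairing): if INSN is
   "add r3, r3, r2, asr #5" and DEP is the ALU-shift insn that wrote
   r2, the shifted use of r2 incurs an extra stall on XScale, which is
   modelled below by returning a cost of 2.  */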
4531 if (shift_opnum != 0
4532 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4534 rtx shifted_operand;
4535 int opno;
4537 /* Get the shifted operand. */
4538 extract_insn (insn);
4539 shifted_operand = recog_data.operand[shift_opnum];
4541 /* Iterate over all the operands in DEP. If we write an operand
4542 that overlaps with SHIFTED_OPERAND, then we have to increase the
4543 cost of this dependency. */
4544 extract_insn (dep);
4545 preprocess_constraints ();
4546 for (opno = 0; opno < recog_data.n_operands; opno++)
4548 /* We can ignore strict inputs. */
4549 if (recog_data.operand_type[opno] == OP_IN)
4550 continue;
4552 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4553 shifted_operand))
4554 return 2;
4559 /* XXX This is not strictly true for the FPA. */
4560 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4561 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4562 return 0;
4564 /* Call insns don't incur a stall, even if they follow a load. */
4565 if (REG_NOTE_KIND (link) == 0
4566 && GET_CODE (insn) == CALL_INSN)
4567 return 1;
4569 if ((i_pat = single_set (insn)) != NULL
4570 && GET_CODE (SET_SRC (i_pat)) == MEM
4571 && (d_pat = single_set (dep)) != NULL
4572 && GET_CODE (SET_DEST (d_pat)) == MEM)
4574 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4575 /* This is a load after a store; there is no conflict if the load reads
4576 from a cached area. Assume that loads from the stack and from the
4577 constant pool are cached, and that others will miss. This is a
4578 hack. */
4580 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4581 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4582 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4583 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4584 return 1;
4587 return cost;
4590 static int fp_consts_inited = 0;
4592 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4593 static const char * const strings_fp[8] =
4595 "0", "1", "2", "3",
4596 "4", "5", "0.5", "10"
4599 static REAL_VALUE_TYPE values_fp[8];
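/* The strings above are the eight values that the FPA can encode
   directly as instruction immediates, e.g. "mvfd f0, #0.5" (an
   illustrative use).  */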
4601 static void
4602 init_fp_table (void)
4604 int i;
4605 REAL_VALUE_TYPE r;
4607 if (TARGET_VFP)
4608 fp_consts_inited = 1;
4609 else
4610 fp_consts_inited = 8;
4612 for (i = 0; i < fp_consts_inited; i++)
4614 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4615 values_fp[i] = r;
4619 /* Return TRUE if rtx X is a valid immediate FP constant. */
4621 arm_const_double_rtx (rtx x)
4623 REAL_VALUE_TYPE r;
4624 int i;
4626 if (!fp_consts_inited)
4627 init_fp_table ();
4629 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4630 if (REAL_VALUE_MINUS_ZERO (r))
4631 return 0;
4633 for (i = 0; i < fp_consts_inited; i++)
4634 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4635 return 1;
4637 return 0;
4640 /* Return TRUE if the negation of rtx X is a valid immediate FPA constant. */
4642 neg_const_double_rtx_ok_for_fpa (rtx x)
4644 REAL_VALUE_TYPE r;
4645 int i;
4647 if (!fp_consts_inited)
4648 init_fp_table ();
4650 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4651 r = REAL_VALUE_NEGATE (r);
4652 if (REAL_VALUE_MINUS_ZERO (r))
4653 return 0;
4655 for (i = 0; i < 8; i++)
4656 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4657 return 1;
4659 return 0;
4662 /* Predicates for `match_operand' and `match_operator'. */
4664 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4666 cirrus_memory_offset (rtx op)
4668 /* Reject eliminable registers. */
4669 if (! (reload_in_progress || reload_completed)
4670 && ( reg_mentioned_p (frame_pointer_rtx, op)
4671 || reg_mentioned_p (arg_pointer_rtx, op)
4672 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4673 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4674 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4675 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4676 return 0;
4678 if (GET_CODE (op) == MEM)
4680 rtx ind;
4682 ind = XEXP (op, 0);
4684 /* Match: (mem (reg)). */
4685 if (GET_CODE (ind) == REG)
4686 return 1;
4688 /* Match:
4689 (mem (plus (reg)
4690 (const))). */
4691 if (GET_CODE (ind) == PLUS
4692 && GET_CODE (XEXP (ind, 0)) == REG
4693 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4694 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4695 return 1;
4698 return 0;
4701 /* Return TRUE if OP is a valid VFP memory address pattern.
4702 WB is true if writeback address modes are allowed. */
4705 arm_coproc_mem_operand (rtx op, bool wb)
4707 rtx ind;
4709 /* Reject eliminable registers. */
4710 if (! (reload_in_progress || reload_completed)
4711 && ( reg_mentioned_p (frame_pointer_rtx, op)
4712 || reg_mentioned_p (arg_pointer_rtx, op)
4713 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4714 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4715 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4716 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4717 return FALSE;
4719 /* Constants are converted into offsets from labels. */
4720 if (GET_CODE (op) != MEM)
4721 return FALSE;
4723 ind = XEXP (op, 0);
4725 if (reload_completed
4726 && (GET_CODE (ind) == LABEL_REF
4727 || (GET_CODE (ind) == CONST
4728 && GET_CODE (XEXP (ind, 0)) == PLUS
4729 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4730 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4731 return TRUE;
4733 /* Match: (mem (reg)). */
4734 if (GET_CODE (ind) == REG)
4735 return arm_address_register_rtx_p (ind, 0);
4737 /* Autoincrement addressing modes. */
4738 if (wb
4739 && (GET_CODE (ind) == PRE_INC
4740 || GET_CODE (ind) == POST_INC
4741 || GET_CODE (ind) == PRE_DEC
4742 || GET_CODE (ind) == POST_DEC))
4743 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4745 if (wb
4746 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4747 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4748 && GET_CODE (XEXP (ind, 1)) == PLUS
4749 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4750 ind = XEXP (ind, 1);
4752 /* Match:
4753 (plus (reg)
4754 (const)). */
4755 if (GET_CODE (ind) == PLUS
4756 && GET_CODE (XEXP (ind, 0)) == REG
4757 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4758 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4759 && INTVAL (XEXP (ind, 1)) > -1024
4760 && INTVAL (XEXP (ind, 1)) < 1024
4761 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4762 return TRUE;
4764 return FALSE;
4768 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4769 VFP registers. Otherwise return NO_REGS. */
4771 enum reg_class
4772 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4774 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4775 return NO_REGS;
4777 return GENERAL_REGS;
4781 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4782 Used by the Cirrus Maverick code, which has to work around
4783 a hardware bug triggered by such instructions. */
4784 static bool
4785 arm_memory_load_p (rtx insn)
4787 rtx body, lhs, rhs;
4789 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4790 return false;
4792 body = PATTERN (insn);
4794 if (GET_CODE (body) != SET)
4795 return false;
4797 lhs = XEXP (body, 0);
4798 rhs = XEXP (body, 1);
4800 lhs = REG_OR_SUBREG_RTX (lhs);
4802 /* If the destination is not a general purpose
4803 register we do not have to worry. */
4804 if (GET_CODE (lhs) != REG
4805 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4806 return false;
4808 /* As well as loads from memory we also have to react
4809 to loads of invalid constants which will be turned
4810 into loads from the minipool. */
4811 return (GET_CODE (rhs) == MEM
4812 || GET_CODE (rhs) == SYMBOL_REF
4813 || note_invalid_constants (insn, -1, false));
4816 /* Return TRUE if INSN is a Cirrus instruction. */
4817 static bool
4818 arm_cirrus_insn_p (rtx insn)
4820 enum attr_cirrus attr;
4822 /* get_attr aborts on USE and CLOBBER. */
4823 if (!insn
4824 || GET_CODE (insn) != INSN
4825 || GET_CODE (PATTERN (insn)) == USE
4826 || GET_CODE (PATTERN (insn)) == CLOBBER)
4827 return 0;
4829 attr = get_attr_cirrus (insn);
4831 return attr != CIRRUS_NOT;
4834 /* Cirrus reorg for invalid instruction combinations. */
4835 static void
4836 cirrus_reorg (rtx first)
4838 enum attr_cirrus attr;
4839 rtx body = PATTERN (first);
4840 rtx t;
4841 int nops;
4843 /* Any branch must be followed by 2 non-Cirrus instructions. */
4844 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4846 nops = 0;
4847 t = next_nonnote_insn (first);
4849 if (arm_cirrus_insn_p (t))
4850 ++ nops;
4852 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4853 ++ nops;
4855 while (nops --)
4856 emit_insn_after (gen_nop (), first);
4858 return;
4861 /* (float (blah)) is in parallel with a clobber. */
4862 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4863 body = XVECEXP (body, 0, 0);
4865 if (GET_CODE (body) == SET)
4867 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4869 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4870 be followed by a non-Cirrus insn. */
4871 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4873 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4874 emit_insn_after (gen_nop (), first);
4876 return;
4878 else if (arm_memory_load_p (first))
4880 unsigned int arm_regno;
4882 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4883 ldr/cfmv64hr combination where the Rd field is the same
4884 in both instructions must be split with a non-Cirrus
4885 insn. Example:
4887 ldr r0, blah
4889 cfmvsr mvf0, r0. */
4891 /* Get Arm register number for ldr insn. */
4892 if (GET_CODE (lhs) == REG)
4893 arm_regno = REGNO (lhs);
4894 else if (GET_CODE (rhs) == REG)
4895 arm_regno = REGNO (rhs);
4896 else
4897 abort ();
4899 /* Next insn. */
4900 first = next_nonnote_insn (first);
4902 if (! arm_cirrus_insn_p (first))
4903 return;
4905 body = PATTERN (first);
4907 /* (float (blah)) is in parallel with a clobber. */
4908 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4909 body = XVECEXP (body, 0, 0);
4911 if (GET_CODE (body) == FLOAT)
4912 body = XEXP (body, 0);
4914 if (get_attr_cirrus (first) == CIRRUS_MOVE
4915 && GET_CODE (XEXP (body, 1)) == REG
4916 && arm_regno == REGNO (XEXP (body, 1)))
4917 emit_insn_after (gen_nop (), first);
4919 return;
4923 /* get_attr aborts on USE and CLOBBER. */
4924 if (!first
4925 || GET_CODE (first) != INSN
4926 || GET_CODE (PATTERN (first)) == USE
4927 || GET_CODE (PATTERN (first)) == CLOBBER)
4928 return;
4930 attr = get_attr_cirrus (first);
4932 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4933 must be followed by a non-coprocessor instruction. */
4934 if (attr == CIRRUS_COMPARE)
4936 nops = 0;
4938 t = next_nonnote_insn (first);
4940 if (arm_cirrus_insn_p (t))
4941 ++ nops;
4943 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4944 ++ nops;
4946 while (nops --)
4947 emit_insn_after (gen_nop (), first);
4949 return;
4953 /* Return TRUE if X references a SYMBOL_REF. */
4955 symbol_mentioned_p (rtx x)
4957 const char * fmt;
4958 int i;
4960 if (GET_CODE (x) == SYMBOL_REF)
4961 return 1;
4963 fmt = GET_RTX_FORMAT (GET_CODE (x));
4965 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4967 if (fmt[i] == 'E')
4969 int j;
4971 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4972 if (symbol_mentioned_p (XVECEXP (x, i, j)))
4973 return 1;
4975 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
4976 return 1;
4979 return 0;
4982 /* Return TRUE if X references a LABEL_REF. */
4984 label_mentioned_p (rtx x)
4986 const char * fmt;
4987 int i;
4989 if (GET_CODE (x) == LABEL_REF)
4990 return 1;
4992 fmt = GET_RTX_FORMAT (GET_CODE (x));
4993 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4995 if (fmt[i] == 'E')
4997 int j;
4999 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5000 if (label_mentioned_p (XVECEXP (x, i, j)))
5001 return 1;
5003 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5004 return 1;
5007 return 0;
5010 enum rtx_code
5011 minmax_code (rtx x)
5013 enum rtx_code code = GET_CODE (x);
5015 if (code == SMAX)
5016 return GE;
5017 else if (code == SMIN)
5018 return LE;
5019 else if (code == UMIN)
5020 return LEU;
5021 else if (code == UMAX)
5022 return GEU;
5024 abort ();
5027 /* Return 1 if memory locations are adjacent. */
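/* For instance, (mem (plus r4 (const_int 8))) and
   (mem (plus r4 (const_int 12))) are adjacent: same base register,
   offsets exactly one word apart.  Either ordering of the pair is
   accepted; different base registers are not.  */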
5029 adjacent_mem_locations (rtx a, rtx b)
5031 if ((GET_CODE (XEXP (a, 0)) == REG
5032 || (GET_CODE (XEXP (a, 0)) == PLUS
5033 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5034 && (GET_CODE (XEXP (b, 0)) == REG
5035 || (GET_CODE (XEXP (b, 0)) == PLUS
5036 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5038 int val0 = 0, val1 = 0;
5039 int reg0, reg1;
5041 if (GET_CODE (XEXP (a, 0)) == PLUS)
5043 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
5044 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5046 else
5047 reg0 = REGNO (XEXP (a, 0));
5049 if (GET_CODE (XEXP (b, 0)) == PLUS)
5051 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
5052 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5054 else
5055 reg1 = REGNO (XEXP (b, 0));
5057 /* Don't accept any offset that will require multiple
5058 instructions to handle, since this would cause the
5059 arith_adjacentmem pattern to output an overlong sequence. */
5060 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5061 return 0;
5063 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
5065 return 0;
5069 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5070 HOST_WIDE_INT *load_offset)
5072 int unsorted_regs[4];
5073 HOST_WIDE_INT unsorted_offsets[4];
5074 int order[4];
5075 int base_reg = -1;
5076 int i;
5078 /* Can only handle 2, 3, or 4 insns at present,
5079 though could be easily extended if required. */
5080 if (nops < 2 || nops > 4)
5081 abort ();
5083 /* Loop over the operands and check that the memory references are
5084 suitable (i.e. immediate offsets from the same base register). At
5085 the same time, extract the target register, and the memory
5086 offsets. */
5087 for (i = 0; i < nops; i++)
5089 rtx reg;
5090 rtx offset;
5092 /* Convert a subreg of a mem into the mem itself. */
5093 if (GET_CODE (operands[nops + i]) == SUBREG)
5094 operands[nops + i] = alter_subreg (operands + (nops + i));
5096 if (GET_CODE (operands[nops + i]) != MEM)
5097 abort ();
5099 /* Don't reorder volatile memory references; it doesn't seem worth
5100 looking for the case where the order is ok anyway. */
5101 if (MEM_VOLATILE_P (operands[nops + i]))
5102 return 0;
5104 offset = const0_rtx;
5106 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5107 || (GET_CODE (reg) == SUBREG
5108 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5109 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5110 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5111 == REG)
5112 || (GET_CODE (reg) == SUBREG
5113 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5114 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5115 == CONST_INT)))
5117 if (i == 0)
5119 base_reg = REGNO (reg);
5120 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5121 ? REGNO (operands[i])
5122 : REGNO (SUBREG_REG (operands[i])));
5123 order[0] = 0;
5125 else
5127 if (base_reg != (int) REGNO (reg))
5128 /* Not addressed from the same base register. */
5129 return 0;
5131 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5132 ? REGNO (operands[i])
5133 : REGNO (SUBREG_REG (operands[i])));
5134 if (unsorted_regs[i] < unsorted_regs[order[0]])
5135 order[0] = i;
5138 /* If it isn't an integer register, or if it overwrites the
5139 base register but isn't the last insn in the list, then
5140 we can't do this. */
5141 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5142 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5143 return 0;
5145 unsorted_offsets[i] = INTVAL (offset);
5147 else
5148 /* Not a suitable memory address. */
5149 return 0;
5152 /* All the useful information has now been extracted from the
5153 operands into unsorted_regs and unsorted_offsets; additionally,
5154 order[0] has been set to the lowest numbered register in the
5155 list. Sort the registers into order, and check that the memory
5156 offsets are ascending and adjacent. */
5158 for (i = 1; i < nops; i++)
5160 int j;
5162 order[i] = order[i - 1];
5163 for (j = 0; j < nops; j++)
5164 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5165 && (order[i] == order[i - 1]
5166 || unsorted_regs[j] < unsorted_regs[order[i]]))
5167 order[i] = j;
5169 /* Have we found a suitable register? If not, one must be used more
5170 than once. */
5171 if (order[i] == order[i - 1])
5172 return 0;
5174 /* Is the memory address adjacent and ascending? */
5175 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5176 return 0;
5179 if (base)
5181 *base = base_reg;
5183 for (i = 0; i < nops; i++)
5184 regs[i] = unsorted_regs[order[i]];
5186 *load_offset = unsorted_offsets[order[0]];
5189 if (unsorted_offsets[order[0]] == 0)
5190 return 1; /* ldmia */
5192 if (unsorted_offsets[order[0]] == 4)
5193 return 2; /* ldmib */
5195 if (unsorted_offsets[order[nops - 1]] == 0)
5196 return 3; /* ldmda */
5198 if (unsorted_offsets[order[nops - 1]] == -4)
5199 return 4; /* ldmdb */
5201 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5202 if the offset isn't small enough. The reason 2 ldrs are faster
5203 is because these ARMs are able to do more than one cache access
5204 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5205 whilst the ARM8 has a double bandwidth cache. This means that
5206 these cores can do both an instruction fetch and a data fetch in
5207 a single cycle, so the trick of calculating the address into a
5208 scratch register (one of the result regs) and then doing a load
5209 multiple actually becomes slower (and no smaller in code size).
5210 That is the transformation
5212 ldr rd1, [rbase + offset]
5213 ldr rd2, [rbase + offset + 4]
5215 to
5217 add rd1, rbase, offset
5218 ldmia rd1, {rd1, rd2}
5220 produces worse code -- '3 cycles + any stalls on rd2' instead of
5221 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5222 access per cycle, the first sequence could never complete in less
5223 than 6 cycles, whereas the ldm sequence would only take 5 and
5224 would make better use of sequential accesses if not hitting the
5225 cache.
5227 We cheat here and test 'arm_ld_sched' which we currently know to
5228 only be true for the ARM8, ARM9 and StrongARM. If this ever
5229 changes, then the test below needs to be reworked. */
5230 if (nops == 2 && arm_ld_sched)
5231 return 0;
5233 /* Can't do it without setting up the offset, only do this if it takes
5234 no more than one insn. */
5235 return (const_ok_for_arm (unsorted_offsets[order[0]])
5236 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
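/* To summarize the return values above with an example: for three
   registers addressed off r4, offsets 0/4/8 give 1 (ldmia), 4/8/12
   give 2 (ldmib), -8/-4/0 give 3 (ldmda), -12/-8/-4 give 4 (ldmdb),
   and any other run of adjacent offsets reachable with a single
   add/sub gives 5.  */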
5239 const char *
5240 emit_ldm_seq (rtx *operands, int nops)
5242 int regs[4];
5243 int base_reg;
5244 HOST_WIDE_INT offset;
5245 char buf[100];
5246 int i;
5248 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5250 case 1:
5251 strcpy (buf, "ldm%?ia\t");
5252 break;
5254 case 2:
5255 strcpy (buf, "ldm%?ib\t");
5256 break;
5258 case 3:
5259 strcpy (buf, "ldm%?da\t");
5260 break;
5262 case 4:
5263 strcpy (buf, "ldm%?db\t");
5264 break;
5266 case 5:
5267 if (offset >= 0)
5268 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5269 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5270 (long) offset);
5271 else
5272 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5273 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5274 (long) -offset);
5275 output_asm_insn (buf, operands);
5276 base_reg = regs[0];
5277 strcpy (buf, "ldm%?ia\t");
5278 break;
5280 default:
5281 abort ();
5284 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5285 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5287 for (i = 1; i < nops; i++)
5288 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5289 reg_names[regs[i]]);
5291 strcat (buf, "}\t%@ phole ldm");
5293 output_asm_insn (buf, operands);
5294 return "";
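/* As an illustration (with an empty REGISTER_PREFIX): a case-1
   sequence loading r0-r2 from r4 is emitted as
   "ldmia r4, {r0, r1, r2} @ phole ldm", the %? and %@ escapes
   expanding to the condition suffix and the assembler comment marker
   respectively.  */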
5298 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5299 HOST_WIDE_INT * load_offset)
5301 int unsorted_regs[4];
5302 HOST_WIDE_INT unsorted_offsets[4];
5303 int order[4];
5304 int base_reg = -1;
5305 int i;
5307 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5308 extended if required. */
5309 if (nops < 2 || nops > 4)
5310 abort ();
5312 /* Loop over the operands and check that the memory references are
5313 suitable (i.e. immediate offsets from the same base register). At
5314 the same time, extract the target register, and the memory
5315 offsets. */
5316 for (i = 0; i < nops; i++)
5318 rtx reg;
5319 rtx offset;
5321 /* Convert a subreg of a mem into the mem itself. */
5322 if (GET_CODE (operands[nops + i]) == SUBREG)
5323 operands[nops + i] = alter_subreg (operands + (nops + i));
5325 if (GET_CODE (operands[nops + i]) != MEM)
5326 abort ();
5328 /* Don't reorder volatile memory references; it doesn't seem worth
5329 looking for the case where the order is ok anyway. */
5330 if (MEM_VOLATILE_P (operands[nops + i]))
5331 return 0;
5333 offset = const0_rtx;
5335 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5336 || (GET_CODE (reg) == SUBREG
5337 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5338 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5339 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5340 == REG)
5341 || (GET_CODE (reg) == SUBREG
5342 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5343 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5344 == CONST_INT)))
5346 if (i == 0)
5348 base_reg = REGNO (reg);
5349 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5350 ? REGNO (operands[i])
5351 : REGNO (SUBREG_REG (operands[i])));
5352 order[0] = 0;
5354 else
5356 if (base_reg != (int) REGNO (reg))
5357 /* Not addressed from the same base register. */
5358 return 0;
5360 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5361 ? REGNO (operands[i])
5362 : REGNO (SUBREG_REG (operands[i])));
5363 if (unsorted_regs[i] < unsorted_regs[order[0]])
5364 order[0] = i;
5367 /* If it isn't an integer register, then we can't do this. */
5368 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5369 return 0;
5371 unsorted_offsets[i] = INTVAL (offset);
5373 else
5374 /* Not a suitable memory address. */
5375 return 0;
5378 /* All the useful information has now been extracted from the
5379 operands into unsorted_regs and unsorted_offsets; additionally,
5380 order[0] has been set to the lowest numbered register in the
5381 list. Sort the registers into order, and check that the memory
5382 offsets are ascending and adjacent. */
5384 for (i = 1; i < nops; i++)
5386 int j;
5388 order[i] = order[i - 1];
5389 for (j = 0; j < nops; j++)
5390 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5391 && (order[i] == order[i - 1]
5392 || unsorted_regs[j] < unsorted_regs[order[i]]))
5393 order[i] = j;
5395 /* Have we found a suitable register? If not, one must be used more
5396 than once. */
5397 if (order[i] == order[i - 1])
5398 return 0;
5400 /* Is the memory address adjacent and ascending? */
5401 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5402 return 0;
5405 if (base)
5407 *base = base_reg;
5409 for (i = 0; i < nops; i++)
5410 regs[i] = unsorted_regs[order[i]];
5412 *load_offset = unsorted_offsets[order[0]];
5415 if (unsorted_offsets[order[0]] == 0)
5416 return 1; /* stmia */
5418 if (unsorted_offsets[order[0]] == 4)
5419 return 2; /* stmib */
5421 if (unsorted_offsets[order[nops - 1]] == 0)
5422 return 3; /* stmda */
5424 if (unsorted_offsets[order[nops - 1]] == -4)
5425 return 4; /* stmdb */
5427 return 0;
5430 const char *
5431 emit_stm_seq (rtx *operands, int nops)
5433 int regs[4];
5434 int base_reg;
5435 HOST_WIDE_INT offset;
5436 char buf[100];
5437 int i;
5439 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5441 case 1:
5442 strcpy (buf, "stm%?ia\t");
5443 break;
5445 case 2:
5446 strcpy (buf, "stm%?ib\t");
5447 break;
5449 case 3:
5450 strcpy (buf, "stm%?da\t");
5451 break;
5453 case 4:
5454 strcpy (buf, "stm%?db\t");
5455 break;
5457 default:
5458 abort ();
5461 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5462 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5464 for (i = 1; i < nops; i++)
5465 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5466 reg_names[regs[i]]);
5468 strcat (buf, "}\t%@ phole stm");
5470 output_asm_insn (buf, operands);
5471 return "";
5475 /* Routines for use in generating RTL. */
5478 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5479 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5481 HOST_WIDE_INT offset = *offsetp;
5482 int i = 0, j;
5483 rtx result;
5484 int sign = up ? 1 : -1;
5485 rtx mem, addr;
5487 /* XScale has load-store double instructions, but they have stricter
5488 alignment requirements than load-store multiple, so we cannot
5489 use them.
5491 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5492 the pipeline until completion.
5494 NREGS CYCLES
5495 1 3
5496 2 4
5497 3 5
5498 4 6
5500 An ldr instruction takes 1-3 cycles, but does not block the
5501 pipeline.
5503 NREGS CYCLES
5504 1 1-3
5505 2 2-6
5506 3 3-9
5507 4 4-12
5509 Best case ldr will always win. However, the more ldr instructions
5510 we issue, the less likely we are to be able to schedule them well.
5511 Using ldr instructions also increases code size.
5513 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5514 for counts of 3 or 4 regs. */
5515 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5517 rtx seq;
5519 start_sequence ();
5521 for (i = 0; i < count; i++)
5523 addr = plus_constant (from, i * 4 * sign);
5524 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5525 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5526 offset += 4 * sign;
5529 if (write_back)
5531 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5532 *offsetp = offset;
5535 seq = get_insns ();
5536 end_sequence ();
5538 return seq;
5541 result = gen_rtx_PARALLEL (VOIDmode,
5542 rtvec_alloc (count + (write_back ? 1 : 0)));
5543 if (write_back)
5545 XVECEXP (result, 0, 0)
5546 = gen_rtx_SET (GET_MODE (from), from,
5547 plus_constant (from, count * 4 * sign));
5548 i = 1;
5549 count++;
5552 for (j = 0; i < count; i++, j++)
5554 addr = plus_constant (from, j * 4 * sign);
5555 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5556 XVECEXP (result, 0, i)
5557 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5558 offset += 4 * sign;
5561 if (write_back)
5562 *offsetp = offset;
5564 return result;
5568 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5569 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5571 HOST_WIDE_INT offset = *offsetp;
5572 int i = 0, j;
5573 rtx result;
5574 int sign = up ? 1 : -1;
5575 rtx mem, addr;
5577 /* See arm_gen_load_multiple for discussion of
5578 the pros/cons of ldm/stm usage for XScale. */
5579 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5581 rtx seq;
5583 start_sequence ();
5585 for (i = 0; i < count; i++)
5587 addr = plus_constant (to, i * 4 * sign);
5588 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5589 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5590 offset += 4 * sign;
5593 if (write_back)
5595 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5596 *offsetp = offset;
5599 seq = get_insns ();
5600 end_sequence ();
5602 return seq;
5605 result = gen_rtx_PARALLEL (VOIDmode,
5606 rtvec_alloc (count + (write_back ? 1 : 0)));
5607 if (write_back)
5609 XVECEXP (result, 0, 0)
5610 = gen_rtx_SET (GET_MODE (to), to,
5611 plus_constant (to, count * 4 * sign));
5612 i = 1;
5613 count++;
5616 for (j = 0; i < count; i++, j++)
5618 addr = plus_constant (to, j * 4 * sign);
5619 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5620 XVECEXP (result, 0, i)
5621 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5622 offset += 4 * sign;
5625 if (write_back)
5626 *offsetp = offset;
5628 return result;
5632 arm_gen_movmemqi (rtx *operands)
5634 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5635 HOST_WIDE_INT srcoffset, dstoffset;
5636 int i;
5637 rtx src, dst, srcbase, dstbase;
5638 rtx part_bytes_reg = NULL;
5639 rtx mem;
5641 if (GET_CODE (operands[2]) != CONST_INT
5642 || GET_CODE (operands[3]) != CONST_INT
5643 || INTVAL (operands[2]) > 64
5644 || INTVAL (operands[3]) & 3)
5645 return 0;
5647 dstbase = operands[0];
5648 srcbase = operands[1];
5650 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5651 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5653 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5654 out_words_to_go = INTVAL (operands[2]) / 4;
5655 last_bytes = INTVAL (operands[2]) & 3;
5656 dstoffset = srcoffset = 0;
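/* A worked example of the bookkeeping above: a 14-byte copy gives
   in_words_to_go = 4, out_words_to_go = 3 and last_bytes = 2; three
   whole words are stored directly, while the trailing halfword is
   extracted from the fourth loaded word via part_bytes_reg.  */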
5658 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5659 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5661 for (i = 0; in_words_to_go >= 2; i+=4)
5663 if (in_words_to_go > 4)
5664 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5665 srcbase, &srcoffset));
5666 else
5667 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5668 FALSE, srcbase, &srcoffset));
5670 if (out_words_to_go)
5672 if (out_words_to_go > 4)
5673 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5674 dstbase, &dstoffset));
5675 else if (out_words_to_go != 1)
5676 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5677 dst, TRUE,
5678 (last_bytes == 0
5679 ? FALSE : TRUE),
5680 dstbase, &dstoffset));
5681 else
5683 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5684 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5685 if (last_bytes != 0)
5687 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5688 dstoffset += 4;
5693 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5694 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5697 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5698 if (out_words_to_go)
5700 rtx sreg;
5702 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5703 sreg = copy_to_reg (mem);
5705 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5706 emit_move_insn (mem, sreg);
5707 in_words_to_go--;
5709 if (in_words_to_go) /* Sanity check */
5710 abort ();
5713 if (in_words_to_go)
5715 if (in_words_to_go < 0)
5716 abort ();
5718 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5719 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5722 if (last_bytes && part_bytes_reg == NULL)
5723 abort ();
5725 if (BYTES_BIG_ENDIAN && last_bytes)
5727 rtx tmp = gen_reg_rtx (SImode);
5729 /* The bytes we want are in the top end of the word. */
5730 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5731 GEN_INT (8 * (4 - last_bytes))));
5732 part_bytes_reg = tmp;
5734 while (last_bytes)
5736 mem = adjust_automodify_address (dstbase, QImode,
5737 plus_constant (dst, last_bytes - 1),
5738 dstoffset + last_bytes - 1);
5739 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5741 if (--last_bytes)
5743 tmp = gen_reg_rtx (SImode);
5744 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5745 part_bytes_reg = tmp;
5750 else
5752 if (last_bytes > 1)
5754 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5755 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5756 last_bytes -= 2;
5757 if (last_bytes)
5759 rtx tmp = gen_reg_rtx (SImode);
5760 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5761 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5762 part_bytes_reg = tmp;
5763 dstoffset += 2;
5767 if (last_bytes)
5769 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5770 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5774 return 1;
5777 /* Generate a memory reference for a half word, such that it will be loaded
5778 into the top 16 bits of the word. We can assume that the address is
5779 known to be alignable and of the form reg, or plus (reg, const). */
5782 arm_gen_rotated_half_load (rtx memref)
5784 HOST_WIDE_INT offset = 0;
5785 rtx base = XEXP (memref, 0);
5787 if (GET_CODE (base) == PLUS)
5789 offset = INTVAL (XEXP (base, 1));
5790 base = XEXP (base, 0);
5793 /* If we aren't allowed to generate unaligned addresses, then fail. */
5794 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5795 return NULL;
5797 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5799 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5800 return base;
5802 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
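/* A little-endian example of the above: a halfword at offset 0 sits
   in the low half of its containing word, so the word is loaded and
   rotated by 16 to move it into bits 16-31; a halfword at offset 2 is
   already in the top half, so the plain SImode load is returned.  */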
5805 /* Select a dominance comparison mode if possible for a test of the general
5806 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5807 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5808 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5809 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5810 In all cases OP will be either EQ or NE, but we don't need to know which
5811 here. If we are unable to support a dominance comparison we return
5812 CC mode. This will then fail to match for the RTL expressions that
5813 generate this call. */
5814 enum machine_mode
5815 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5817 enum rtx_code cond1, cond2;
5818 int swapped = 0;
5820 /* Currently we will probably get the wrong result if the individual
5821 comparisons are not simple. This also ensures that it is safe to
5822 reverse a comparison if necessary. */
5823 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5824 != CCmode)
5825 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5826 != CCmode))
5827 return CCmode;
5829 /* The if_then_else variant of this tests the second condition if the
5830 first passes, but is true if the first fails. Reverse the first
5831 condition to get a true "inclusive-or" expression. */
5832 if (cond_or == DOM_CC_NX_OR_Y)
5833 cond1 = reverse_condition (cond1);
5835 /* If the comparisons are not equal, and one doesn't dominate the other,
5836 then we can't do this. */
5837 if (cond1 != cond2
5838 && !comparison_dominates_p (cond1, cond2)
5839 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5840 return CCmode;
5842 if (swapped)
5844 enum rtx_code temp = cond1;
5845 cond1 = cond2;
5846 cond2 = temp;
5849 switch (cond1)
5851 case EQ:
5852 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5853 return CC_DEQmode;
5855 switch (cond2)
5857 case LE: return CC_DLEmode;
5858 case LEU: return CC_DLEUmode;
5859 case GE: return CC_DGEmode;
5860 case GEU: return CC_DGEUmode;
5861 default: break;
5864 break;
5866 case LT:
5867 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
5868 return CC_DLTmode;
5869 if (cond2 == LE)
5870 return CC_DLEmode;
5871 if (cond2 == NE)
5872 return CC_DNEmode;
5873 break;
5875 case GT:
5876 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
5877 return CC_DGTmode;
5878 if (cond2 == GE)
5879 return CC_DGEmode;
5880 if (cond2 == NE)
5881 return CC_DNEmode;
5882 break;
5884 case LTU:
5885 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
5886 return CC_DLTUmode;
5887 if (cond2 == LEU)
5888 return CC_DLEUmode;
5889 if (cond2 == NE)
5890 return CC_DNEmode;
5891 break;
5893 case GTU:
5894 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
5895 return CC_DGTUmode;
5896 if (cond2 == GEU)
5897 return CC_DGEUmode;
5898 if (cond2 == NE)
5899 return CC_DNEmode;
5900 break;
5902 /* The remaining cases only occur when both comparisons are the
5903 same. */
5904 case NE:
5905 return CC_DNEmode;
5907 case LE:
5908 return CC_DLEmode;
5910 case GE:
5911 return CC_DGEmode;
5913 case LEU:
5914 return CC_DLEUmode;
5916 case GEU:
5917 return CC_DGEUmode;
5919 default:
5920 break;
5923 abort ();
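/* An example of the dominance rules above: for
   (ior (eq r0 (const_int 0)) (ge r1 r2)) combine requests
   DOM_CC_X_OR_Y; since EQ implies GE here, the pair collapses to
   CC_DGEmode and the branch tests the single dominating condition.  */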
5926 enum machine_mode
5927 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
5929 /* All floating point compares return CCFP if it is an equality
5930 comparison, and CCFPE otherwise. */
5931 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5933 switch (op)
5935 case EQ:
5936 case NE:
5937 case UNORDERED:
5938 case ORDERED:
5939 case UNLT:
5940 case UNLE:
5941 case UNGT:
5942 case UNGE:
5943 case UNEQ:
5944 case LTGT:
5945 return CCFPmode;
5947 case LT:
5948 case LE:
5949 case GT:
5950 case GE:
5951 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
5952 return CCFPmode;
5953 return CCFPEmode;
5955 default:
5956 abort ();
5960 /* A compare with a shifted operand. Because of canonicalization, the
5961 comparison will have to be swapped when we emit the assembler. */
5962 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
5963 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
5964 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
5965 || GET_CODE (x) == ROTATERT))
5966 return CC_SWPmode;
5968 /* This is a special case that is used by combine to allow a
5969 comparison of a shifted byte load to be split into a zero-extend
5970 followed by a comparison of the shifted integer (only valid for
5971 equalities and unsigned inequalities). */
5972 if (GET_MODE (x) == SImode
5973 && GET_CODE (x) == ASHIFT
5974 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
5975 && GET_CODE (XEXP (x, 0)) == SUBREG
5976 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
5977 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
5978 && (op == EQ || op == NE
5979 || op == GEU || op == GTU || op == LTU || op == LEU)
5980 && GET_CODE (y) == CONST_INT)
5981 return CC_Zmode;
5983 /* A construct for a conditional compare: if the false arm contains
5984 0, then both conditions must be true; otherwise either condition
5985 must be true. Not all conditions are possible, so CCmode is
5986 returned if it can't be done. */
5987 if (GET_CODE (x) == IF_THEN_ELSE
5988 && (XEXP (x, 2) == const0_rtx
5989 || XEXP (x, 2) == const1_rtx)
5990 && COMPARISON_P (XEXP (x, 0))
5991 && COMPARISON_P (XEXP (x, 1)))
5992 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5993 INTVAL (XEXP (x, 2)));
5995 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
5996 if (GET_CODE (x) == AND
5997 && COMPARISON_P (XEXP (x, 0))
5998 && COMPARISON_P (XEXP (x, 1)))
5999 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6000 DOM_CC_X_AND_Y);
6002 if (GET_CODE (x) == IOR
6003 && COMPARISON_P (XEXP (x, 0))
6004 && COMPARISON_P (XEXP (x, 1)))
6005 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6006 DOM_CC_X_OR_Y);
6008 /* An operation (on Thumb) where we want to test for a single bit.
6009 This is done by shifting that bit up into the top bit of a
6010 scratch register; we can then branch on the sign bit. */
6011 if (TARGET_THUMB
6012 && GET_MODE (x) == SImode
6013 && (op == EQ || op == NE)
6014 && (GET_CODE (x) == ZERO_EXTRACT))
6015 return CC_Nmode;
6017 /* For an operation that sets the condition codes as a side-effect, the
6018 V flag is not set correctly, so we can only use comparisons where
6019 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6020 instead.) */
6021 if (GET_MODE (x) == SImode
6022 && y == const0_rtx
6023 && (op == EQ || op == NE || op == LT || op == GE)
6024 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6025 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6026 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6027 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6028 || GET_CODE (x) == LSHIFTRT
6029 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6030 || GET_CODE (x) == ROTATERT
6031 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6032 return CC_NOOVmode;
6034 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6035 return CC_Zmode;
6037 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6038 && GET_CODE (x) == PLUS
6039 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6040 return CC_Cmode;
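   /* Illustrative example (added): for C code like `if (a + b < b)' the
      comparison (ltu (plus:SI a b) b) tests the carry out of the
      addition, which is exactly the information CC_Cmode preserves.  */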
6042 return CCmode;
6045 /* X and Y are two things to compare using CODE. Emit the compare insn
6046 and return the rtx for the condition code register in the proper
6047 mode. */
6049 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6051 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6052 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6054 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6055 gen_rtx_COMPARE (mode, x, y)));
6057 return cc_reg;
6060 /* Generate a sequence of insns that will generate the correct return
6061 address mask depending on the physical architecture that the program
6062 is running on. */
6064 arm_gen_return_addr_mask (void)
6066 rtx reg = gen_reg_rtx (Pmode);
6068 emit_insn (gen_return_addr_mask (reg));
6069 return reg;
6072 void
6073 arm_reload_in_hi (rtx *operands)
6075 rtx ref = operands[1];
6076 rtx base, scratch;
6077 HOST_WIDE_INT offset = 0;
6079 if (GET_CODE (ref) == SUBREG)
6081 offset = SUBREG_BYTE (ref);
6082 ref = SUBREG_REG (ref);
6085 if (GET_CODE (ref) == REG)
6087 /* We have a pseudo which has been spilt onto the stack; there
6088 are two cases here: the first where there is a simple
6089 stack-slot replacement and a second where the stack-slot is
6090 out of range, or is used as a subreg. */
6091 if (reg_equiv_mem[REGNO (ref)])
6093 ref = reg_equiv_mem[REGNO (ref)];
6094 base = find_replacement (&XEXP (ref, 0));
6096 else
6097 /* The slot is out of range, or was dressed up in a SUBREG. */
6098 base = reg_equiv_address[REGNO (ref)];
6100 else
6101 base = find_replacement (&XEXP (ref, 0));
6103 /* Handle the case where the address is too complex to be offset by 1. */
6104 if (GET_CODE (base) == MINUS
6105 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6107 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6109 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6110 base = base_plus;
6112 else if (GET_CODE (base) == PLUS)
6114 /* The addend must be CONST_INT, or we would have dealt with it above. */
6115 HOST_WIDE_INT hi, lo;
6117 offset += INTVAL (XEXP (base, 1));
6118 base = XEXP (base, 0);
6120 /* Rework the address into a legal sequence of insns. */
6121 /* Valid range for lo is -4095 -> 4095 */
6122 lo = (offset >= 0
6123 ? (offset & 0xfff)
6124 : -((-offset) & 0xfff));
6126 /* Corner case: if lo is the max offset, then we would be out of range
6127 once we have added the additional 1 below, so bump the msb into the
6128 pre-loading insn(s). */
6129 if (lo == 4095)
6130 lo &= 0x7ff;
6132 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6133 ^ (HOST_WIDE_INT) 0x80000000)
6134 - (HOST_WIDE_INT) 0x80000000);
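   /* Worked example (added for illustration): offset = 0x12345 gives
      lo = 0x345 and hi = 0x12000, so hi + lo == offset and lo stays
      within the +/-4095 byte-load range.  The XOR/subtract pair
      sign-extends bit 31 of (offset - lo), even when HOST_WIDE_INT is
      wider than 32 bits.  */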
6136 if (hi + lo != offset)
6137 abort ();
6139 if (hi != 0)
6141 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6143 /* Get the base address; addsi3 knows how to handle constants
6144 that require more than one insn. */
6145 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6146 base = base_plus;
6147 offset = lo;
6151 /* Operands[2] may overlap operands[0] (though it won't overlap
6152 operands[1]); that's why we asked for a DImode reg -- so we can
6153 use the half that does not overlap. */
6154 if (REGNO (operands[2]) == REGNO (operands[0]))
6155 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6156 else
6157 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6159 emit_insn (gen_zero_extendqisi2 (scratch,
6160 gen_rtx_MEM (QImode,
6161 plus_constant (base,
6162 offset))));
6163 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6164 gen_rtx_MEM (QImode,
6165 plus_constant (base,
6166 offset + 1))));
6167 if (!BYTES_BIG_ENDIAN)
6168 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6169 gen_rtx_IOR (SImode,
6170 gen_rtx_ASHIFT
6171 (SImode,
6172 gen_rtx_SUBREG (SImode, operands[0], 0),
6173 GEN_INT (8)),
6174 scratch)));
6175 else
6176 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6177 gen_rtx_IOR (SImode,
6178 gen_rtx_ASHIFT (SImode, scratch,
6179 GEN_INT (8)),
6180 gen_rtx_SUBREG (SImode, operands[0],
6181 0))));
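   /* In summary (added for illustration), on a little-endian target the
      emitted sequence is roughly
         ldrb  scratch, [base, #offset]
         ldrb  out,     [base, #offset + 1]
         orr   out, scratch, out, lsl #8
      i.e. out = (mem[offset + 1] << 8) | mem[offset].  */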
6184 /* Handle storing a half-word to memory during reload by synthesizing as two
6185 byte stores. Take care not to clobber the input values until after we
6186 have moved them somewhere safe. This code assumes that if the DImode
6187 scratch in operands[2] overlaps either the input value or output address
6188 in some way, then that value must die in this insn (we absolutely need
6189 two scratch registers for some corner cases). */
6190 void
6191 arm_reload_out_hi (rtx *operands)
6193 rtx ref = operands[0];
6194 rtx outval = operands[1];
6195 rtx base, scratch;
6196 HOST_WIDE_INT offset = 0;
6198 if (GET_CODE (ref) == SUBREG)
6200 offset = SUBREG_BYTE (ref);
6201 ref = SUBREG_REG (ref);
6204 if (GET_CODE (ref) == REG)
6206 /* We have a pseudo which has been spilt onto the stack; there
6207 are two cases here: the first where there is a simple
6208 stack-slot replacement and a second where the stack-slot is
6209 out of range, or is used as a subreg. */
6210 if (reg_equiv_mem[REGNO (ref)])
6212 ref = reg_equiv_mem[REGNO (ref)];
6213 base = find_replacement (&XEXP (ref, 0));
6215 else
6216 /* The slot is out of range, or was dressed up in a SUBREG. */
6217 base = reg_equiv_address[REGNO (ref)];
6219 else
6220 base = find_replacement (&XEXP (ref, 0));
6222 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6224 /* Handle the case where the address is too complex to be offset by 1. */
6225 if (GET_CODE (base) == MINUS
6226 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6228 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6230 /* Be careful not to destroy OUTVAL. */
6231 if (reg_overlap_mentioned_p (base_plus, outval))
6233 /* Updating base_plus might destroy outval, see if we can
6234 swap the scratch and base_plus. */
6235 if (!reg_overlap_mentioned_p (scratch, outval))
6237 rtx tmp = scratch;
6238 scratch = base_plus;
6239 base_plus = tmp;
6241 else
6243 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6245 /* Be conservative and copy OUTVAL into the scratch now,
6246 this should only be necessary if outval is a subreg
6247 of something larger than a word. */
6248 /* XXX Might this clobber base? I can't see how it can,
6249 since scratch is known to overlap with OUTVAL, and
6250 must be wider than a word. */
6251 emit_insn (gen_movhi (scratch_hi, outval));
6252 outval = scratch_hi;
6256 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6257 base = base_plus;
6259 else if (GET_CODE (base) == PLUS)
6261 /* The addend must be CONST_INT, or we would have dealt with it above. */
6262 HOST_WIDE_INT hi, lo;
6264 offset += INTVAL (XEXP (base, 1));
6265 base = XEXP (base, 0);
6267 /* Rework the address into a legal sequence of insns. */
6268 /* Valid range for lo is -4095 -> 4095 */
6269 lo = (offset >= 0
6270 ? (offset & 0xfff)
6271 : -((-offset) & 0xfff));
6273 /* Corner case: if lo is the max offset, then we would be out of range
6274 once we have added the additional 1 below, so bump the msb into the
6275 pre-loading insn(s). */
6276 if (lo == 4095)
6277 lo &= 0x7ff;
6279 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6280 ^ (HOST_WIDE_INT) 0x80000000)
6281 - (HOST_WIDE_INT) 0x80000000);
6283 if (hi + lo != offset)
6284 abort ();
6286 if (hi != 0)
6288 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6290 /* Be careful not to destroy OUTVAL. */
6291 if (reg_overlap_mentioned_p (base_plus, outval))
6293 /* Updating base_plus might destroy outval, see if we
6294 can swap the scratch and base_plus. */
6295 if (!reg_overlap_mentioned_p (scratch, outval))
6297 rtx tmp = scratch;
6298 scratch = base_plus;
6299 base_plus = tmp;
6301 else
6303 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6305 /* Be conservative and copy outval into scratch now,
6306 this should only be necessary if outval is a
6307 subreg of something larger than a word. */
6308 /* XXX Might this clobber base? I can't see how it
6309 can, since scratch is known to overlap with
6310 outval. */
6311 emit_insn (gen_movhi (scratch_hi, outval));
6312 outval = scratch_hi;
6316 /* Get the base address; addsi3 knows how to handle constants
6317 that require more than one insn. */
6318 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6319 base = base_plus;
6320 offset = lo;
6324 if (BYTES_BIG_ENDIAN)
6326 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6327 plus_constant (base, offset + 1)),
6328 gen_lowpart (QImode, outval)));
6329 emit_insn (gen_lshrsi3 (scratch,
6330 gen_rtx_SUBREG (SImode, outval, 0),
6331 GEN_INT (8)));
6332 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6333 gen_lowpart (QImode, scratch)));
6335 else
6337 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6338 gen_lowpart (QImode, outval)));
6339 emit_insn (gen_lshrsi3 (scratch,
6340 gen_rtx_SUBREG (SImode, outval, 0),
6341 GEN_INT (8)));
6342 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6343 plus_constant (base, offset + 1)),
6344 gen_lowpart (QImode, scratch)));
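   /* Sketch of the little-endian case (added for illustration):
         strb  outval,  [base, #offset]          @ low byte
         mov   scratch, outval, lsr #8
         strb  scratch, [base, #offset + 1]      @ high byte
      The big-endian case simply swaps the two byte addresses.  */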
6348 /* Print a symbolic form of X to the debug file, F. */
6349 static void
6350 arm_print_value (FILE *f, rtx x)
6352 switch (GET_CODE (x))
6354 case CONST_INT:
6355 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6356 return;
6358 case CONST_DOUBLE:
6359 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6360 return;
6362 case CONST_VECTOR:
6364 int i;
6366 fprintf (f, "<");
6367 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6369 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6370 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6371 fputc (',', f);
6373 fprintf (f, ">");
6375 return;
6377 case CONST_STRING:
6378 fprintf (f, "\"%s\"", XSTR (x, 0));
6379 return;
6381 case SYMBOL_REF:
6382 fprintf (f, "`%s'", XSTR (x, 0));
6383 return;
6385 case LABEL_REF:
6386 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6387 return;
6389 case CONST:
6390 arm_print_value (f, XEXP (x, 0));
6391 return;
6393 case PLUS:
6394 arm_print_value (f, XEXP (x, 0));
6395 fprintf (f, "+");
6396 arm_print_value (f, XEXP (x, 1));
6397 return;
6399 case PC:
6400 fprintf (f, "pc");
6401 return;
6403 default:
6404 fprintf (f, "????");
6405 return;
6409 /* Routines for manipulation of the constant pool. */
6411 /* Arm instructions cannot load a large constant directly into a
6412 register; they have to come from a pc relative load. The constant
6413 must therefore be placed in the addressable range of the pc
6414 relative load. Depending on the precise pc relative load
6415 instruction the range is somewhere between 256 bytes and 4k. This
6416 means that we often have to dump a constant inside a function, and
6417 generate code to branch around it.
6419 It is important to minimize this, since the branches will slow
6420 things down and make the code larger.
6422 Normally we can hide the table after an existing unconditional
6423 branch so that there is no interruption of the flow, but in the
6424 worst case the code looks like this:
6426 ldr rn, L1
6427 ...
6428 b L2
6429 align
6430 L1: .long value
6431 L2:
6432 ...
6434 ldr rn, L3
6435 ...
6436 b L4
6437 align
6438 L3: .long value
6439 L4:
6442 We fix this by performing a scan after scheduling, which notices
6443 which instructions need to have their operands fetched from the
6444 constant table and builds the table.
6446 The algorithm starts by building a table of all the constants that
6447 need fixing up and all the natural barriers in the function (places
6448 where a constant table can be dropped without breaking the flow).
6449 For each fixup we note how far the pc-relative replacement will be
6450 able to reach and the offset of the instruction into the function.
6452 Having built the table we then group the fixes together to form
6453 tables that are as large as possible (subject to addressing
6454 constraints) and emit each table of constants after the last
6455 barrier that is within range of all the instructions in the group.
6456 If a group does not contain a barrier, then we forcibly create one
6457 by inserting a jump instruction into the flow. Once the table has
6458 been inserted, the insns are then modified to reference the
6459 relevant entry in the pool.
6461 Possible enhancements to the algorithm (not implemented) are:
6463 1) For some processors and object formats, there may be benefit in
6464 aligning the pools to the start of cache lines; this alignment
6465 would need to be taken into account when calculating addressability
6466 of a pool. */
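/* Illustration (added; not part of the original comment): each fix
   below records an ADDRESS plus FORWARDS and BACKWARDS ranges, so its
   pool entry must be placed somewhere in the window
   [ADDRESS - BACKWARDS, ADDRESS + FORWARDS].  Grouping fixes into one
   table amounts to intersecting these windows and emitting the table
   after the last barrier that lies inside all of them.  */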
6468 /* These typedefs are located at the start of this file, so that
6469 they can be used in the prototypes there. This comment is to
6470 remind readers of that fact so that the following structures
6471 can be understood more easily.
6473 typedef struct minipool_node Mnode;
6474 typedef struct minipool_fixup Mfix; */
6476 struct minipool_node
6478 /* Doubly linked chain of entries. */
6479 Mnode * next;
6480 Mnode * prev;
6481 /* The maximum offset into the code that this entry can be placed. While
6482 pushing fixes for forward references, all entries are sorted in order
6483 of increasing max_address. */
6484 HOST_WIDE_INT max_address;
6485 /* Similarly for an entry inserted for a backwards ref. */
6486 HOST_WIDE_INT min_address;
6487 /* The number of fixes referencing this entry. This can become zero
6488 if we "unpush" an entry. In this case we ignore the entry when we
6489 come to emit the code. */
6490 int refcount;
6491 /* The offset from the start of the minipool. */
6492 HOST_WIDE_INT offset;
6493 /* The value in the table. */
6494 rtx value;
6495 /* The mode of value. */
6496 enum machine_mode mode;
6497 /* The size of the value. With iWMMXt enabled
6498 sizes > 4 also imply an alignment of 8 bytes. */
6499 int fix_size;
6502 struct minipool_fixup
6504 Mfix * next; /* Singly linked chain of fixes. */
6505 rtx insn; /* The insn requiring the fixup. */
6506 HOST_WIDE_INT address; /* Offset of the insn from the function start. */
6507 rtx * loc; /* Pointer to the operand needing fixing. */
6508 enum machine_mode mode; /* The mode of the value. */
6509 int fix_size; /* Size of the pool entry, see MINIPOOL_FIX_SIZE. */
6510 rtx value; /* The constant to be loaded. */
6511 Mnode * minipool; /* The pool entry serving this fix. */
6512 HOST_WIDE_INT forwards; /* Forward reach of the pc-relative load. */
6513 HOST_WIDE_INT backwards; /* Backward reach of the pc-relative load. */
6516 /* Fixes less than a word need padding out to a word boundary. */
6517 #define MINIPOOL_FIX_SIZE(mode) \
6518 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
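/* For example (illustrative): MINIPOOL_FIX_SIZE (QImode) and
   MINIPOOL_FIX_SIZE (HImode) are both padded up to 4, while SImode
   gives 4 and DImode or DFmode give 8.  */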
6520 static Mnode * minipool_vector_head;
6521 static Mnode * minipool_vector_tail;
6522 static rtx minipool_vector_label;
6524 /* The linked list of all minipool fixes required for this function. */
6525 Mfix * minipool_fix_head;
6526 Mfix * minipool_fix_tail;
6527 /* The fix entry for the current minipool, once it has been placed. */
6528 Mfix * minipool_barrier;
6530 /* Determines if INSN is the start of a jump table. Returns the end
6531 of the TABLE or NULL_RTX. */
6532 static rtx
6533 is_jump_table (rtx insn)
6535 rtx table;
6537 if (GET_CODE (insn) == JUMP_INSN
6538 && JUMP_LABEL (insn) != NULL
6539 && ((table = next_real_insn (JUMP_LABEL (insn)))
6540 == next_real_insn (insn))
6541 && table != NULL
6542 && GET_CODE (table) == JUMP_INSN
6543 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6544 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6545 return table;
6547 return NULL_RTX;
6550 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6551 #define JUMP_TABLES_IN_TEXT_SECTION 0
6552 #endif
6554 static HOST_WIDE_INT
6555 get_jump_table_size (rtx insn)
6557 /* ADDR_VECs only take room if read-only data goes into the text
6558 section. */
6559 if (JUMP_TABLES_IN_TEXT_SECTION
6560 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6561 || 1
6562 #endif
6565 rtx body = PATTERN (insn);
6566 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6568 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6571 return 0;
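   /* Example (added for illustration): an 8-entry ADDR_VEC whose vector
      mode is SImode occupies 8 * 4 = 32 bytes.  For an ADDR_DIFF_VEC
      the element vector is operand 1 because operand 0 holds the base
      label.  */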
6574 /* Move a minipool fix MP from its current location to before MAX_MP.
6575 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6576 constraints may need updating. */
6577 static Mnode *
6578 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6579 HOST_WIDE_INT max_address)
6581 /* This should never be true and the code below assumes these are
6582 different. */
6583 if (mp == max_mp)
6584 abort ();
6586 if (max_mp == NULL)
6588 if (max_address < mp->max_address)
6589 mp->max_address = max_address;
6591 else
6593 if (max_address > max_mp->max_address - mp->fix_size)
6594 mp->max_address = max_mp->max_address - mp->fix_size;
6595 else
6596 mp->max_address = max_address;
6598 /* Unlink MP from its current position. Since max_mp is non-null,
6599 mp->prev must be non-null. */
6600 mp->prev->next = mp->next;
6601 if (mp->next != NULL)
6602 mp->next->prev = mp->prev;
6603 else
6604 minipool_vector_tail = mp->prev;
6606 /* Re-insert it before MAX_MP. */
6607 mp->next = max_mp;
6608 mp->prev = max_mp->prev;
6609 max_mp->prev = mp;
6611 if (mp->prev != NULL)
6612 mp->prev->next = mp;
6613 else
6614 minipool_vector_head = mp;
6617 /* Save the new entry. */
6618 max_mp = mp;
6620 /* Scan over the preceding entries and adjust their addresses as
6621 required. */
6622 while (mp->prev != NULL
6623 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6625 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6626 mp = mp->prev;
6629 return max_mp;
6632 /* Add a constant to the minipool for a forward reference. Returns the
6633 node added or NULL if the constant will not fit in this pool. */
6634 static Mnode *
6635 add_minipool_forward_ref (Mfix *fix)
6637 /* If set, max_mp is the first pool_entry that has a lower
6638 constraint than the one we are trying to add. */
6639 Mnode * max_mp = NULL;
6640 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6641 Mnode * mp;
6643 /* If this fix's address is greater than the address of the first
6644 entry, then we can't put the fix in this pool. We subtract the
6645 size of the current fix to ensure that if the table is fully
6646 packed we still have enough room to insert this value by shuffling
6647 the other fixes forwards. */
6648 if (minipool_vector_head &&
6649 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6650 return NULL;
6652 /* Scan the pool to see if a constant with the same value has
6653 already been added. While we are doing this, also note the
6654 location where we must insert the constant if it doesn't already
6655 exist. */
6656 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6658 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6659 && fix->mode == mp->mode
6660 && (GET_CODE (fix->value) != CODE_LABEL
6661 || (CODE_LABEL_NUMBER (fix->value)
6662 == CODE_LABEL_NUMBER (mp->value)))
6663 && rtx_equal_p (fix->value, mp->value))
6665 /* More than one fix references this entry. */
6666 mp->refcount++;
6667 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6670 /* Note the insertion point if necessary. */
6671 if (max_mp == NULL
6672 && mp->max_address > max_address)
6673 max_mp = mp;
6675 /* If we are inserting an 8-byte aligned quantity and
6676 we have not already found an insertion point, then
6677 make sure that all such 8-byte aligned quantities are
6678 placed at the start of the pool. */
6679 if (ARM_DOUBLEWORD_ALIGN
6680 && max_mp == NULL
6681 && fix->fix_size == 8
6682 && mp->fix_size != 8)
6684 max_mp = mp;
6685 max_address = mp->max_address;
6689 /* The value is not currently in the minipool, so we need to create
6690 a new entry for it. If MAX_MP is NULL, the entry will be put on
6691 the end of the list since the placement is less constrained than
6692 any existing entry. Otherwise, we insert the new fix before
6693 MAX_MP and, if necessary, adjust the constraints on the other
6694 entries. */
6695 mp = xmalloc (sizeof (* mp));
6696 mp->fix_size = fix->fix_size;
6697 mp->mode = fix->mode;
6698 mp->value = fix->value;
6699 mp->refcount = 1;
6700 /* Not yet required for a backwards ref. */
6701 mp->min_address = -65536;
6703 if (max_mp == NULL)
6705 mp->max_address = max_address;
6706 mp->next = NULL;
6707 mp->prev = minipool_vector_tail;
6709 if (mp->prev == NULL)
6711 minipool_vector_head = mp;
6712 minipool_vector_label = gen_label_rtx ();
6714 else
6715 mp->prev->next = mp;
6717 minipool_vector_tail = mp;
6719 else
6721 if (max_address > max_mp->max_address - mp->fix_size)
6722 mp->max_address = max_mp->max_address - mp->fix_size;
6723 else
6724 mp->max_address = max_address;
6726 mp->next = max_mp;
6727 mp->prev = max_mp->prev;
6728 max_mp->prev = mp;
6729 if (mp->prev != NULL)
6730 mp->prev->next = mp;
6731 else
6732 minipool_vector_head = mp;
6735 /* Save the new entry. */
6736 max_mp = mp;
6738 /* Scan over the preceding entries and adjust their addresses as
6739 required. */
6740 while (mp->prev != NULL
6741 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6743 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6744 mp = mp->prev;
6747 return max_mp;
6750 static Mnode *
6751 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6752 HOST_WIDE_INT min_address)
6754 HOST_WIDE_INT offset;
6756 /* This should never be true, and the code below assumes these are
6757 different. */
6758 if (mp == min_mp)
6759 abort ();
6761 if (min_mp == NULL)
6763 if (min_address > mp->min_address)
6764 mp->min_address = min_address;
6766 else
6768 /* We will adjust this below if it is too loose. */
6769 mp->min_address = min_address;
6771 /* Unlink MP from its current position. Since min_mp is non-null,
6772 mp->next must be non-null. */
6773 mp->next->prev = mp->prev;
6774 if (mp->prev != NULL)
6775 mp->prev->next = mp->next;
6776 else
6777 minipool_vector_head = mp->next;
6779 /* Reinsert it after MIN_MP. */
6780 mp->prev = min_mp;
6781 mp->next = min_mp->next;
6782 min_mp->next = mp;
6783 if (mp->next != NULL)
6784 mp->next->prev = mp;
6785 else
6786 minipool_vector_tail = mp;
6789 min_mp = mp;
6791 offset = 0;
6792 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6794 mp->offset = offset;
6795 if (mp->refcount > 0)
6796 offset += mp->fix_size;
6798 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6799 mp->next->min_address = mp->min_address + mp->fix_size;
6802 return min_mp;
6805 /* Add a constant to the minipool for a backward reference. Returns the
6806 node added or NULL if the constant will not fit in this pool.
6808 Note that the code for insertion for a backwards reference can be
6809 somewhat confusing because the calculated offsets for each fix do
6810 not take into account the size of the pool (which is still under
6811 construction). */
6812 static Mnode *
6813 add_minipool_backward_ref (Mfix *fix)
6815 /* If set, min_mp is the last pool_entry that has a lower constraint
6816 than the one we are trying to add. */
6817 Mnode *min_mp = NULL;
6818 /* This can be negative, since it is only a constraint. */
6819 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6820 Mnode *mp;
6822 /* If we can't reach the current pool from this insn, or if we can't
6823 insert this entry at the end of the pool without pushing other
6824 fixes out of range, then we don't try. This ensures that we
6825 can't fail later on. */
6826 if (min_address >= minipool_barrier->address
6827 || (minipool_vector_tail->min_address + fix->fix_size
6828 >= minipool_barrier->address))
6829 return NULL;
6831 /* Scan the pool to see if a constant with the same value has
6832 already been added. While we are doing this, also note the
6833 location where we must insert the constant if it doesn't already
6834 exist. */
6835 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6837 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6838 && fix->mode == mp->mode
6839 && (GET_CODE (fix->value) != CODE_LABEL
6840 || (CODE_LABEL_NUMBER (fix->value)
6841 == CODE_LABEL_NUMBER (mp->value)))
6842 && rtx_equal_p (fix->value, mp->value)
6843 /* Check that there is enough slack to move this entry to the
6844 end of the table (this is conservative). */
6845 && (mp->max_address
6846 > (minipool_barrier->address
6847 + minipool_vector_tail->offset
6848 + minipool_vector_tail->fix_size)))
6850 mp->refcount++;
6851 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6854 if (min_mp != NULL)
6855 mp->min_address += fix->fix_size;
6856 else
6858 /* Note the insertion point if necessary. */
6859 if (mp->min_address < min_address)
6861 /* For now, we do not allow the insertion of nodes requiring
6862 8-byte alignment anywhere but at the start of the pool. */
6863 if (ARM_DOUBLEWORD_ALIGN
6864 && fix->fix_size == 8 && mp->fix_size != 8)
6865 return NULL;
6866 else
6867 min_mp = mp;
6869 else if (mp->max_address
6870 < minipool_barrier->address + mp->offset + fix->fix_size)
6872 /* Inserting before this entry would push the fix beyond
6873 its maximum address (which can happen if we have
6874 re-located a forwards fix); force the new fix to come
6875 after it. */
6876 min_mp = mp;
6877 min_address = mp->min_address + fix->fix_size;
6879 /* If we are inserting an 8-byte aligned quantity and
6880 we have not already found an insertion point, then
6881 make sure that all such 8-byte aligned quantities are
6882 placed at the start of the pool. */
6883 else if (ARM_DOUBLEWORD_ALIGN
6884 && min_mp == NULL
6885 && fix->fix_size == 8
6886 && mp->fix_size < 8)
6888 min_mp = mp;
6889 min_address = mp->min_address + fix->fix_size;
6894 /* We need to create a new entry. */
6895 mp = xmalloc (sizeof (* mp));
6896 mp->fix_size = fix->fix_size;
6897 mp->mode = fix->mode;
6898 mp->value = fix->value;
6899 mp->refcount = 1;
6900 mp->max_address = minipool_barrier->address + 65536;
6902 mp->min_address = min_address;
6904 if (min_mp == NULL)
6906 mp->prev = NULL;
6907 mp->next = minipool_vector_head;
6909 if (mp->next == NULL)
6911 minipool_vector_tail = mp;
6912 minipool_vector_label = gen_label_rtx ();
6914 else
6915 mp->next->prev = mp;
6917 minipool_vector_head = mp;
6919 else
6921 mp->next = min_mp->next;
6922 mp->prev = min_mp;
6923 min_mp->next = mp;
6925 if (mp->next != NULL)
6926 mp->next->prev = mp;
6927 else
6928 minipool_vector_tail = mp;
6931 /* Save the new entry. */
6932 min_mp = mp;
6934 if (mp->prev)
6935 mp = mp->prev;
6936 else
6937 mp->offset = 0;
6939 /* Scan over the following entries and adjust their offsets. */
6940 while (mp->next != NULL)
6942 if (mp->next->min_address < mp->min_address + mp->fix_size)
6943 mp->next->min_address = mp->min_address + mp->fix_size;
6945 if (mp->refcount)
6946 mp->next->offset = mp->offset + mp->fix_size;
6947 else
6948 mp->next->offset = mp->offset;
6950 mp = mp->next;
6953 return min_mp;
6956 static void
6957 assign_minipool_offsets (Mfix *barrier)
6959 HOST_WIDE_INT offset = 0;
6960 Mnode *mp;
6962 minipool_barrier = barrier;
6964 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6966 mp->offset = offset;
6968 if (mp->refcount > 0)
6969 offset += mp->fix_size;
6973 /* Output the literal table. */
6974 static void
6975 dump_minipool (rtx scan)
6977 Mnode * mp;
6978 Mnode * nmp;
6979 int align64 = 0;
6981 if (ARM_DOUBLEWORD_ALIGN)
6982 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6983 if (mp->refcount > 0 && mp->fix_size == 8)
6985 align64 = 1;
6986 break;
6989 if (dump_file)
6990 fprintf (dump_file,
6991 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
6992 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
6994 scan = emit_label_after (gen_label_rtx (), scan);
6995 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
6996 scan = emit_label_after (minipool_vector_label, scan);
6998 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7000 if (mp->refcount > 0)
7002 if (dump_file)
7004 fprintf (dump_file,
7005 ";; Offset %u, min %ld, max %ld ",
7006 (unsigned) mp->offset, (unsigned long) mp->min_address,
7007 (unsigned long) mp->max_address);
7008 arm_print_value (dump_file, mp->value);
7009 fputc ('\n', dump_file);
7012 switch (mp->fix_size)
7014 #ifdef HAVE_consttable_1
7015 case 1:
7016 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7017 break;
7019 #endif
7020 #ifdef HAVE_consttable_2
7021 case 2:
7022 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7023 break;
7025 #endif
7026 #ifdef HAVE_consttable_4
7027 case 4:
7028 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7029 break;
7031 #endif
7032 #ifdef HAVE_consttable_8
7033 case 8:
7034 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7035 break;
7037 #endif
7038 default:
7039 abort ();
7040 break;
7044 nmp = mp->next;
7045 free (mp);
7048 minipool_vector_head = minipool_vector_tail = NULL;
7049 scan = emit_insn_after (gen_consttable_end (), scan);
7050 scan = emit_barrier_after (scan);
7053 /* Return the cost of forcibly inserting a barrier after INSN. */
7054 static int
7055 arm_barrier_cost (rtx insn)
7057 /* Basing the location of the pool on the loop depth is preferable,
7058 but at the moment, the basic block information seems to be
7059 corrupt by this stage of the compilation. */
7060 int base_cost = 50;
7061 rtx next = next_nonnote_insn (insn);
7063 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7064 base_cost -= 20;
7066 switch (GET_CODE (insn))
7068 case CODE_LABEL:
7069 /* It will always be better to place the table before the label, rather
7070 than after it. */
7071 return 50;
7073 case INSN:
7074 case CALL_INSN:
7075 return base_cost;
7077 case JUMP_INSN:
7078 return base_cost - 10;
7080 default:
7081 return base_cost + 10;
7085 /* Find the best place in the insn stream in the range
7086 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7087 Create the barrier by inserting a jump and add a new fix entry for
7088 it. */
7089 static Mfix *
7090 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7092 HOST_WIDE_INT count = 0;
7093 rtx barrier;
7094 rtx from = fix->insn;
7095 rtx selected = from;
7096 int selected_cost;
7097 HOST_WIDE_INT selected_address;
7098 Mfix * new_fix;
7099 HOST_WIDE_INT max_count = max_address - fix->address;
7100 rtx label = gen_label_rtx ();
7102 selected_cost = arm_barrier_cost (from);
7103 selected_address = fix->address;
7105 while (from && count < max_count)
7107 rtx tmp;
7108 int new_cost;
7110 /* This code shouldn't have been called if there was a natural barrier
7111 within range. */
7112 if (GET_CODE (from) == BARRIER)
7113 abort ();
7115 /* Count the length of this insn. */
7116 count += get_attr_length (from);
7118 /* If there is a jump table, add its length. */
7119 tmp = is_jump_table (from);
7120 if (tmp != NULL)
7122 count += get_jump_table_size (tmp);
7124 /* Jump tables aren't in a basic block, so base the cost on
7125 the dispatch insn. If we select this location, we will
7126 still put the pool after the table. */
7127 new_cost = arm_barrier_cost (from);
7129 if (count < max_count && new_cost <= selected_cost)
7131 selected = tmp;
7132 selected_cost = new_cost;
7133 selected_address = fix->address + count;
7136 /* Continue after the dispatch table. */
7137 from = NEXT_INSN (tmp);
7138 continue;
7141 new_cost = arm_barrier_cost (from);
7143 if (count < max_count && new_cost <= selected_cost)
7145 selected = from;
7146 selected_cost = new_cost;
7147 selected_address = fix->address + count;
7150 from = NEXT_INSN (from);
7153 /* Create a new JUMP_INSN that branches around a barrier. */
7154 from = emit_jump_insn_after (gen_jump (label), selected);
7155 JUMP_LABEL (from) = label;
7156 barrier = emit_barrier_after (from);
7157 emit_label_after (label, barrier);
7159 /* Create a minipool barrier entry for the new barrier. */
7160 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7161 new_fix->insn = barrier;
7162 new_fix->address = selected_address;
7163 new_fix->next = fix->next;
7164 fix->next = new_fix;
7166 return new_fix;
7169 /* Record that there is a natural barrier in the insn stream at
7170 ADDRESS. */
7171 static void
7172 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7174 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7176 fix->insn = insn;
7177 fix->address = address;
7179 fix->next = NULL;
7180 if (minipool_fix_head != NULL)
7181 minipool_fix_tail->next = fix;
7182 else
7183 minipool_fix_head = fix;
7185 minipool_fix_tail = fix;
7188 /* Record INSN, which will need fixing up to load a value from the
7189 minipool. ADDRESS is the offset of the insn from the start of the
7190 function; LOC is a pointer to the part of the insn which requires
7191 fixing; VALUE is the constant that must be loaded, which is of type
7192 MODE. */
7193 static void
7194 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7195 enum machine_mode mode, rtx value)
7197 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7199 #ifdef AOF_ASSEMBLER
7200 /* PIC symbol references need to be converted into offsets into the
7201 based area. */
7202 /* XXX This shouldn't be done here. */
7203 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7204 value = aof_pic_entry (value);
7205 #endif /* AOF_ASSEMBLER */
7207 fix->insn = insn;
7208 fix->address = address;
7209 fix->loc = loc;
7210 fix->mode = mode;
7211 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7212 fix->value = value;
7213 fix->forwards = get_attr_pool_range (insn);
7214 fix->backwards = get_attr_neg_pool_range (insn);
7215 fix->minipool = NULL;
7217 /* If an insn doesn't have a range defined for it, then it isn't
7218 expecting to be reworked by this code. Better to abort now than
7219 to generate duff assembly code. */
7220 if (fix->forwards == 0 && fix->backwards == 0)
7221 abort ();
7223 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7224 So there might be an empty word before the start of the pool.
7225 Hence we reduce the forward range by 4 to allow for this
7226 possibility. */
7227 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7228 fix->forwards -= 4;
7230 if (dump_file)
7232 fprintf (dump_file,
7233 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7234 GET_MODE_NAME (mode),
7235 INSN_UID (insn), (unsigned long) address,
7236 -1 * (long)fix->backwards, (long)fix->forwards);
7237 arm_print_value (dump_file, fix->value);
7238 fprintf (dump_file, "\n");
7241 /* Add it to the chain of fixes. */
7242 fix->next = NULL;
7244 if (minipool_fix_head != NULL)
7245 minipool_fix_tail->next = fix;
7246 else
7247 minipool_fix_head = fix;
7249 minipool_fix_tail = fix;
7252 /* Scan INSN and note any of its operands that need fixing.
7253 If DO_PUSHES is false we do not actually push any of the fixups
7254 needed. The function returns TRUE if any fixups were needed/pushed.
7255 This is used by arm_memory_load_p() which needs to know about loads
7256 of constants that will be converted into minipool loads. */
7257 static bool
7258 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7260 bool result = false;
7261 int opno;
7263 extract_insn (insn);
7265 if (!constrain_operands (1))
7266 fatal_insn_not_found (insn);
7268 if (recog_data.n_alternatives == 0)
7269 return false;
7271 /* Fill in recog_op_alt with information about the constraints of this insn. */
7272 preprocess_constraints ();
7274 for (opno = 0; opno < recog_data.n_operands; opno++)
7276 /* Things we need to fix can only occur in inputs. */
7277 if (recog_data.operand_type[opno] != OP_IN)
7278 continue;
7280 /* If this alternative is a memory reference, then any mention
7281 of constants in this alternative is really to fool reload
7282 into allowing us to accept one there. We need to fix them up
7283 now so that we output the right code. */
7284 if (recog_op_alt[opno][which_alternative].memory_ok)
7286 rtx op = recog_data.operand[opno];
7288 if (CONSTANT_P (op))
7290 if (do_pushes)
7291 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7292 recog_data.operand_mode[opno], op);
7293 result = true;
7295 else if (GET_CODE (op) == MEM
7296 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7297 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7299 if (do_pushes)
7301 rtx cop = avoid_constant_pool_reference (op);
7303 /* Casting the address of something to a mode narrower
7304 than a word can cause avoid_constant_pool_reference()
7305 to return the pool reference itself. That's no good to
7306 us here. Let's just hope that we can use the
7307 constant pool value directly. */
7308 if (op == cop)
7309 cop = get_pool_constant (XEXP (op, 0));
7311 push_minipool_fix (insn, address,
7312 recog_data.operand_loc[opno],
7313 recog_data.operand_mode[opno], cop);
7316 result = true;
7321 return result;
7324 /* GCC puts the pool in the wrong place for ARM, since we can only
7325 load addresses a limited distance around the pc. We do some
7326 special munging to move the constant pool values to the correct
7327 point in the code. */
7328 static void
7329 arm_reorg (void)
7331 rtx insn;
7332 HOST_WIDE_INT address = 0;
7333 Mfix * fix;
7335 minipool_fix_head = minipool_fix_tail = NULL;
7337 /* The first insn must always be a note, or the code below won't
7338 scan it properly. */
7339 insn = get_insns ();
7340 if (GET_CODE (insn) != NOTE)
7341 abort ();
7343 /* Scan all the insns and record the operands that will need fixing. */
7344 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7346 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7347 && (arm_cirrus_insn_p (insn)
7348 || GET_CODE (insn) == JUMP_INSN
7349 || arm_memory_load_p (insn)))
7350 cirrus_reorg (insn);
7352 if (GET_CODE (insn) == BARRIER)
7353 push_minipool_barrier (insn, address);
7354 else if (INSN_P (insn))
7356 rtx table;
7358 note_invalid_constants (insn, address, true);
7359 address += get_attr_length (insn);
7361 /* If the insn is a vector jump, add the size of the table
7362 and skip the table. */
7363 if ((table = is_jump_table (insn)) != NULL)
7365 address += get_jump_table_size (table);
7366 insn = table;
7371 fix = minipool_fix_head;
7373 /* Now scan the fixups and perform the required changes. */
7374 while (fix)
7376 Mfix * ftmp;
7377 Mfix * fdel;
7378 Mfix * last_added_fix;
7379 Mfix * last_barrier = NULL;
7380 Mfix * this_fix;
7382 /* Skip any further barriers before the next fix. */
7383 while (fix && GET_CODE (fix->insn) == BARRIER)
7384 fix = fix->next;
7386 /* No more fixes. */
7387 if (fix == NULL)
7388 break;
7390 last_added_fix = NULL;
7392 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7394 if (GET_CODE (ftmp->insn) == BARRIER)
7396 if (ftmp->address >= minipool_vector_head->max_address)
7397 break;
7399 last_barrier = ftmp;
7401 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7402 break;
7404 last_added_fix = ftmp; /* Keep track of the last fix added. */
7407 /* If we found a barrier, drop back to that; any fixes that we
7408 could have reached but come after the barrier will now go in
7409 the next mini-pool. */
7410 if (last_barrier != NULL)
7412 /* Reduce the refcount for those fixes that won't go into this
7413 pool after all. */
7414 for (fdel = last_barrier->next;
7415 fdel && fdel != ftmp;
7416 fdel = fdel->next)
7418 fdel->minipool->refcount--;
7419 fdel->minipool = NULL;
7422 ftmp = last_barrier;
7424 else
7426 /* ftmp is the first fix that we can't fit into this pool, and
7427 there are no natural barriers that we could use. Insert a
7428 new barrier in the code somewhere between the previous
7429 fix and this one, and arrange to jump around it. */
7430 HOST_WIDE_INT max_address;
7432 /* The last item on the list of fixes must be a barrier, so
7433 we can never run off the end of the list of fixes without
7434 last_barrier being set. */
7435 if (ftmp == NULL)
7436 abort ();
7438 max_address = minipool_vector_head->max_address;
7439 /* Check that there isn't another fix that is in range that
7440 we couldn't fit into this pool because the pool was
7441 already too large: we need to put the pool before such an
7442 instruction. */
7443 if (ftmp->address < max_address)
7444 max_address = ftmp->address;
7446 last_barrier = create_fix_barrier (last_added_fix, max_address);
7449 assign_minipool_offsets (last_barrier);
7451 while (ftmp)
7453 if (GET_CODE (ftmp->insn) != BARRIER
7454 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7455 == NULL))
7456 break;
7458 ftmp = ftmp->next;
7461 /* Scan over the fixes we have identified for this pool, fixing them
7462 up and adding the constants to the pool itself. */
7463 for (this_fix = fix; this_fix && ftmp != this_fix;
7464 this_fix = this_fix->next)
7465 if (GET_CODE (this_fix->insn) != BARRIER)
7467 rtx addr
7468 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7469 minipool_vector_label),
7470 this_fix->minipool->offset);
7471 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7474 dump_minipool (last_barrier->insn);
7475 fix = ftmp;
7478 /* From now on we must synthesize any constants that we can't handle
7479 directly. This can happen if the RTL gets split during final
7480 instruction generation. */
7481 after_arm_reorg = 1;
7483 /* Free the minipool memory. */
7484 obstack_free (&minipool_obstack, minipool_startobj);
7487 /* Routines to output assembly language. */
7489 /* If the rtx is one of the valid FPA immediate constants, return the
7490 corresponding string. In this way we can ensure that valid double
7491 constants are generated even when cross compiling. */
7492 const char *
7493 fp_immediate_constant (rtx x)
7495 REAL_VALUE_TYPE r;
7496 int i;
7498 if (!fp_consts_inited)
7499 init_fp_table ();
7501 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7502 for (i = 0; i < 8; i++)
7503 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7504 return strings_fp[i];
7506 abort ();
7509 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7510 static const char *
7511 fp_const_from_val (REAL_VALUE_TYPE *r)
7513 int i;
7515 if (!fp_consts_inited)
7516 init_fp_table ();
7518 for (i = 0; i < 8; i++)
7519 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7520 return strings_fp[i];
7522 abort ();
7525 /* Output the operands of a LDM/STM instruction to STREAM.
7526 MASK is the ARM register set mask of which only bits 0-15 are important.
7527 REG is the base register, either the frame pointer or the stack pointer;
7528 INSTR is the possibly suffixed load or store instruction. */
7529 static void
7530 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7532 int i;
7533 int not_first = FALSE;
7535 fputc ('\t', stream);
7536 asm_fprintf (stream, instr, reg);
7537 fputs (", {", stream);
7539 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7540 if (mask & (1 << i))
7542 if (not_first)
7543 fprintf (stream, ", ");
7545 asm_fprintf (stream, "%r", i);
7546 not_first = TRUE;
7549 fprintf (stream, "}\n");
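   /* Worked example (illustrative; the INSTR string is hypothetical):
      print_multi_reg (f, "stmfd\t%r!", SP_REGNUM, 0x4070) would emit
         stmfd   sp!, {r4, r5, r6, lr}
      since bits 4, 5, 6 and 14 of the mask are set.  */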
7553 /* Output a FLDMX instruction to STREAM.
7554 BASE is the register containing the address.
7555 REG and COUNT specify the register range.
7556 Extra registers may be added to avoid hardware bugs. */
7558 static void
7559 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7561 int i;
7563 /* Work around the ARM10 VFPr1 bug. */
7564 if (count == 2 && !arm_arch6)
7566 if (reg == 15)
7567 reg--;
7568 count++;
7571 fputc ('\t', stream);
7572 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7574 for (i = reg; i < reg + count; i++)
7576 if (i > reg)
7577 fputs (", ", stream);
7578 asm_fprintf (stream, "d%d", i);
7580 fputs ("}\n", stream);
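   /* Example (added for illustration): REG = 8 and COUNT = 3 emit
         fldmfdx  <base>!, {d8, d9, d10}
      and on pre-ARMv6 cores a two-register transfer is widened to three
      registers by the erratum workaround above.  */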
7585 /* Output the assembly for a store multiple. */
7587 const char *
7588 vfp_output_fstmx (rtx * operands)
7590 char pattern[100];
7591 int p;
7592 int base;
7593 int i;
7595 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7596 p = strlen (pattern);
7598 if (GET_CODE (operands[1]) != REG)
7599 abort ();
7601 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7602 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7604 p += sprintf (&pattern[p], ", d%d", base + i);
7606 strcpy (&pattern[p], "}");
7608 output_asm_insn (pattern, operands);
7609 return "";
7613 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
7614 number of bytes pushed. */
7616 static int
7617 vfp_emit_fstmx (int base_reg, int count)
7619 rtx par;
7620 rtx dwarf;
7621 rtx tmp, reg;
7622 int i;
7624 /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
7625 register pairs are stored by a store multiple insn. We avoid this
7626 by pushing an extra pair. */
7627 if (count == 2 && !arm_arch6)
7629 if (base_reg == LAST_VFP_REGNUM - 3)
7630 base_reg -= 2;
7631 count++;
7634 /* ??? The frame layout is implementation defined. We describe
7635 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7636 We really need some way of representing the whole block so that the
7637 unwinder can figure it out at runtime. */
7638 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7639 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7641 reg = gen_rtx_REG (DFmode, base_reg);
7642 base_reg += 2;
7644 XVECEXP (par, 0, 0)
7645 = gen_rtx_SET (VOIDmode,
7646 gen_rtx_MEM (BLKmode,
7647 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7648 gen_rtx_UNSPEC (BLKmode,
7649 gen_rtvec (1, reg),
7650 UNSPEC_PUSH_MULT));
7652 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7653 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7654 GEN_INT (-(count * 8 + 4))));
7655 RTX_FRAME_RELATED_P (tmp) = 1;
7656 XVECEXP (dwarf, 0, 0) = tmp;
7658 tmp = gen_rtx_SET (VOIDmode,
7659 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7660 reg);
7661 RTX_FRAME_RELATED_P (tmp) = 1;
7662 XVECEXP (dwarf, 0, 1) = tmp;
7664 for (i = 1; i < count; i++)
7666 reg = gen_rtx_REG (DFmode, base_reg);
7667 base_reg += 2;
7668 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7670 tmp = gen_rtx_SET (VOIDmode,
7671 gen_rtx_MEM (DFmode,
7672 gen_rtx_PLUS (SImode,
7673 stack_pointer_rtx,
7674 GEN_INT (i * 8))),
7675 reg);
7676 RTX_FRAME_RELATED_P (tmp) = 1;
7677 XVECEXP (dwarf, 0, i + 1) = tmp;
7680 par = emit_insn (par);
7681 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7682 REG_NOTES (par));
7683 RTX_FRAME_RELATED_P (par) = 1;
7685 return count * 8 + 4;
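   /* For example (illustrative): pushing three register pairs emits one
      FSTMX and moves sp down by 3 * 8 + 4 = 28 bytes -- 24 bytes of
      data plus the pad word of the standard format 1 layout described
      above.  */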
7689 /* Output a 'call' insn. */
7690 const char *
7691 output_call (rtx *operands)
7693 if (arm_arch5)
7694 abort (); /* Patterns should call blx <reg> directly. */
7696 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7697 if (REGNO (operands[0]) == LR_REGNUM)
7699 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7700 output_asm_insn ("mov%?\t%0, %|lr", operands);
7703 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7705 if (TARGET_INTERWORK || arm_arch4t)
7706 output_asm_insn ("bx%?\t%0", operands);
7707 else
7708 output_asm_insn ("mov%?\t%|pc, %0", operands);
7710 return "";
7713 /* Output a 'call' insn that is a reference in memory. */
7714 const char *
7715 output_call_mem (rtx *operands)
7717 if (TARGET_INTERWORK && !arm_arch5)
7719 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7720 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7721 output_asm_insn ("bx%?\t%|ip", operands);
7723 else if (regno_use_in (LR_REGNUM, operands[0]))
7725 /* LR is used in the memory address. We load the address in the
7726 first instruction. It's safe to use IP as the target of the
7727 load since the call will kill it anyway. */
7728 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7729 if (arm_arch5)
7730 output_asm_insn ("blx%?\t%|ip", operands);
7731 else
7733 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7734 if (arm_arch4t)
7735 output_asm_insn ("bx%?\t%|ip", operands);
7736 else
7737 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7740 else
7742 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7743 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7746 return "";
7750 /* Output a move from arm registers to an fpa register.
7751 OPERANDS[0] is an fpa register.
7752 OPERANDS[1] is the first register of an arm register triple. */
7753 const char *
7754 output_mov_long_double_fpa_from_arm (rtx *operands)
7756 int arm_reg0 = REGNO (operands[1]);
7757 rtx ops[3];
7759 if (arm_reg0 == IP_REGNUM)
7760 abort ();
7762 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7763 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7764 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7766 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7767 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7769 return "";
7772 /* Output a move from an fpa register to arm registers.
7773 OPERANDS[0] is the first register of an arm register triple.
7774 OPERANDS[1] is an fpa register. */
7775 const char *
7776 output_mov_long_double_arm_from_fpa (rtx *operands)
7778 int arm_reg0 = REGNO (operands[0]);
7779 rtx ops[3];
7781 if (arm_reg0 == IP_REGNUM)
7782 abort ();
7784 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7785 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7786 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7788 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7789 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7790 return "";
7793 /* Output a move from arm registers to arm registers of a long double.
7794 OPERANDS[0] is the destination.
7795 OPERANDS[1] is the source. */
7796 const char *
7797 output_mov_long_double_arm_from_arm (rtx *operands)
7799 /* We have to be careful here because the two might overlap. */
7800 int dest_start = REGNO (operands[0]);
7801 int src_start = REGNO (operands[1]);
7802 rtx ops[2];
7803 int i;
7805 if (dest_start < src_start)
7807 for (i = 0; i < 3; i++)
7809 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7810 ops[1] = gen_rtx_REG (SImode, src_start + i);
7811 output_asm_insn ("mov%?\t%0, %1", ops);
7814 else
7816 for (i = 2; i >= 0; i--)
7818 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7819 ops[1] = gen_rtx_REG (SImode, src_start + i);
7820 output_asm_insn ("mov%?\t%0, %1", ops);
7824 return "";
7828 /* Output a move from arm registers to an fpa register.
7829 OPERANDS[0] is an fpa register.
7830 OPERANDS[1] is the first register of an arm register pair. */
7831 const char *
7832 output_mov_double_fpa_from_arm (rtx *operands)
7834 int arm_reg0 = REGNO (operands[1]);
7835 rtx ops[2];
7837 if (arm_reg0 == IP_REGNUM)
7838 abort ();
7840 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7841 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7842 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
7843 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
7844 return "";
7847 /* Output a move from an fpa register to arm registers.
7848 OPERANDS[0] is the first register of an arm register pair.
7849 OPERANDS[1] is an fpa register. */
7850 const char *
7851 output_mov_double_arm_from_fpa (rtx *operands)
7853 int arm_reg0 = REGNO (operands[0]);
7854 rtx ops[2];
7856 if (arm_reg0 == IP_REGNUM)
7857 abort ();
7859 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7860 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7861 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
7862 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
7863 return "";
7866 /* Output a move between double words.
7867 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
7868 or MEM<-REG and all MEMs must be offsettable addresses. */
7869 const char *
7870 output_move_double (rtx *operands)
7872 enum rtx_code code0 = GET_CODE (operands[0]);
7873 enum rtx_code code1 = GET_CODE (operands[1]);
7874 rtx otherops[3];
7876 if (code0 == REG)
7878 int reg0 = REGNO (operands[0]);
7880 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
7882 if (code1 == REG)
7884 int reg1 = REGNO (operands[1]);
7885 if (reg1 == IP_REGNUM)
7886 abort ();
7888 /* Ensure the second source is not overwritten. */
7889 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
7890 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
7891 else
7892 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
7894 else if (code1 == CONST_VECTOR)
7896 HOST_WIDE_INT hint = 0;
7898 switch (GET_MODE (operands[1]))
7900 case V2SImode:
7901 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
7902 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
7903 break;
7905 case V4HImode:
7906 if (BYTES_BIG_ENDIAN)
7908 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7909 hint <<= 16;
7910 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7912 else
7914 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7915 hint <<= 16;
7916 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7919 otherops[1] = GEN_INT (hint);
7920 hint = 0;
7922 if (BYTES_BIG_ENDIAN)
7924 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7925 hint <<= 16;
7926 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7928 else
7930 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7931 hint <<= 16;
7932 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7935 operands[1] = GEN_INT (hint);
7936 break;
7938 case V8QImode:
7939 if (BYTES_BIG_ENDIAN)
7941 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
7942 hint <<= 8;
7943 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
7944 hint <<= 8;
7945 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
7946 hint <<= 8;
7947 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
7949 else
7951 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
7952 hint <<= 8;
7953 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
7954 hint <<= 8;
7955 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
7956 hint <<= 8;
7957 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
7960 otherops[1] = GEN_INT (hint);
7961 hint = 0;
7963 if (BYTES_BIG_ENDIAN)
7965 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7966 hint <<= 8;
7967 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7968 hint <<= 8;
7969 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7970 hint <<= 8;
7971 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7973 else
7975 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7976 hint <<= 8;
7977 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7978 hint <<= 8;
7979 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7980 hint <<= 8;
7981 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7984 operands[1] = GEN_INT (hint);
7985 break;
7987 default:
7988 abort ();
7990 output_mov_immediate (operands);
7991 output_mov_immediate (otherops);
7993 else if (code1 == CONST_DOUBLE)
7995 if (GET_MODE (operands[1]) == DFmode)
7997 REAL_VALUE_TYPE r;
7998 long l[2];
8000 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8001 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8002 otherops[1] = GEN_INT (l[1]);
8003 operands[1] = GEN_INT (l[0]);
8005 else if (GET_MODE (operands[1]) != VOIDmode)
8006 abort ();
8007 else if (WORDS_BIG_ENDIAN)
8009 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8010 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8012 else
8014 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8015 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8018 output_mov_immediate (operands);
8019 output_mov_immediate (otherops);
8021 else if (code1 == CONST_INT)
8023 #if HOST_BITS_PER_WIDE_INT > 32
8024 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8025 what the upper word is. */
8026 if (WORDS_BIG_ENDIAN)
8028 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8029 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8031 else
8033 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8034 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8036 #else
8037 /* Sign extend the intval into the high-order word. */
8038 if (WORDS_BIG_ENDIAN)
8040 otherops[1] = operands[1];
8041 operands[1] = (INTVAL (operands[1]) < 0
8042 ? constm1_rtx : const0_rtx);
8044 else
8045 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8046 #endif
8047 output_mov_immediate (otherops);
8048 output_mov_immediate (operands);
8050 else if (code1 == MEM)
8052 switch (GET_CODE (XEXP (operands[1], 0)))
8054 case REG:
8055 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8056 break;
8058 case PRE_INC:
8059 if (!TARGET_LDRD)
8060 abort (); /* Should never happen now. */
8061 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8062 break;
8064 case PRE_DEC:
8065 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8066 break;
8068 case POST_INC:
8069 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8070 break;
8072 case POST_DEC:
8073 if (!TARGET_LDRD)
8074 abort (); /* Should never happen now. */
8075 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8076 break;
8078 case PRE_MODIFY:
8079 case POST_MODIFY:
8080 otherops[0] = operands[0];
8081 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8082 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8084 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8086 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8088 /* Registers overlap so split out the increment. */
8089 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8090 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8092 else
8093 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8095 else
8097 /* We only allow constant increments, so this is safe. */
8098 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8100 break;
8102 case LABEL_REF:
8103 case CONST:
8104 output_asm_insn ("adr%?\t%0, %1", operands);
8105 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8106 break;
8108 default:
8109 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8110 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8112 otherops[0] = operands[0];
8113 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8114 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8116 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8118 if (GET_CODE (otherops[2]) == CONST_INT)
8120 switch ((int) INTVAL (otherops[2]))
8122 case -8:
8123 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8124 return "";
8125 case -4:
8126 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8127 return "";
8128 case 4:
8129 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8130 return "";
8133 if (TARGET_LDRD
8134 && (GET_CODE (otherops[2]) == REG
8135 || (GET_CODE (otherops[2]) == CONST_INT
8136 && INTVAL (otherops[2]) > -256
8137 && INTVAL (otherops[2]) < 256)))
8139 if (reg_overlap_mentioned_p (otherops[0],
8140 otherops[2]))
8142 /* Swap base and index registers over to
8143 avoid a conflict. */
8144 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8145 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8148 /* If both registers conflict, it will usually
8149 have been fixed by a splitter. */
8150 if (reg_overlap_mentioned_p (otherops[0],
8151 otherops[2]))
8153 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8154 output_asm_insn ("ldr%?d\t%0, [%1]",
8155 otherops);
8156 return "";
8158 else
8160 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8161 otherops);
8162 return "";
8165 if (GET_CODE (otherops[2]) == CONST_INT)
8167 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8168 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8169 else
8170 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8172 else
8173 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8175 else
8176 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8178 return "ldm%?ia\t%0, %M0";
8180 else
8182 otherops[1] = adjust_address (operands[1], SImode, 4);
8183 /* Take care of overlapping base/data reg. */
8184 if (reg_mentioned_p (operands[0], operands[1]))
8186 output_asm_insn ("ldr%?\t%0, %1", otherops);
8187 output_asm_insn ("ldr%?\t%0, %1", operands);
8189 else
8191 output_asm_insn ("ldr%?\t%0, %1", operands);
8192 output_asm_insn ("ldr%?\t%0, %1", otherops);
8197 else
8198 abort (); /* Constraints should prevent this. */
8200 else if (code0 == MEM && code1 == REG)
8202 if (REGNO (operands[1]) == IP_REGNUM)
8203 abort ();
8205 switch (GET_CODE (XEXP (operands[0], 0)))
8207 case REG:
8208 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8209 break;
8211 case PRE_INC:
8212 if (!TARGET_LDRD)
8213 abort (); /* Should never happen now. */
8214 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8215 break;
8217 case PRE_DEC:
8218 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8219 break;
8221 case POST_INC:
8222 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8223 break;
8225 case POST_DEC:
8226 if (!TARGET_LDRD)
8227 abort (); /* Should never happen now. */
8228 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8229 break;
8231 case PRE_MODIFY:
8232 case POST_MODIFY:
8233 otherops[0] = operands[1];
8234 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8235 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8237 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8238 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8239 else
8240 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8241 break;
8243 case PLUS:
8244 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8245 if (GET_CODE (otherops[2]) == CONST_INT)
8247 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8249 case -8:
8250 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8251 return "";
8253 case -4:
8254 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8255 return "";
8257 case 4:
8258 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8259 return "";
8262 if (TARGET_LDRD
8263 && (GET_CODE (otherops[2]) == REG
8264 || (GET_CODE (otherops[2]) == CONST_INT
8265 && INTVAL (otherops[2]) > -256
8266 && INTVAL (otherops[2]) < 256)))
8268 otherops[0] = operands[1];
8269 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8270 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8271 return "";
8273 /* Fall through */
8275 default:
8276 otherops[0] = adjust_address (operands[0], SImode, 4);
8277 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8278 output_asm_insn ("str%?\t%1, %0", operands);
8279 output_asm_insn ("str%?\t%1, %0", otherops);
8282 else
8283 /* Constraints should prevent this. */
8284 abort ();
8286 return "";
8290 /* Output an arbitrary MOV reg, #n.
8291 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8292 const char *
8293 output_mov_immediate (rtx *operands)
8295 HOST_WIDE_INT n = INTVAL (operands[1]);
8297 /* Try to use one MOV. */
8298 if (const_ok_for_arm (n))
8299 output_asm_insn ("mov%?\t%0, %1", operands);
8301 /* Try to use one MVN. */
8302 else if (const_ok_for_arm (~n))
8304 operands[1] = GEN_INT (~n);
8305 output_asm_insn ("mvn%?\t%0, %1", operands);
8307 else
8309 int n_ones = 0;
8310 int i;
8312 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8313 for (i = 0; i < 32; i++)
8314 if (n & 1 << i)
8315 n_ones++;
8317 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8318 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8319 else
8320 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8323 return "";
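/* Editor's note: an illustrative sketch (not part of the original
   sources) of the three cases above, assuming r0 as the destination:

       0x000000ff  ->  mov  r0, #255     (valid ARM immediate)
       0xffffff00  ->  mvn  r0, #255     (valid once inverted)
       0x12345678  ->  13 set bits <= 16, so a MOV/ORR sequence is
                       built by output_multi_immediate below.  */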
8326 /* Output an ADD r, s, #n where n may be too big for one instruction.
8327 If adding zero to one register, output nothing. */
8328 const char *
8329 output_add_immediate (rtx *operands)
8331 HOST_WIDE_INT n = INTVAL (operands[2]);
8333 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8335 if (n < 0)
8336 output_multi_immediate (operands,
8337 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8338 -n);
8339 else
8340 output_multi_immediate (operands,
8341 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8345 return "";
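/* Editor's worked example (illustrative, not from the original
   sources): for operands {r0, r1, 0x12345} the constant is not a
   valid ARM immediate, so output_multi_immediate splits it into
   three adds:

       add  r0, r1, #0x45
       add  r0, r0, #0x2300
       add  r0, r0, #0x10000

   0x45 + 0x2300 + 0x10000 == 0x12345.  A negative constant takes
   the SUB templates with -n instead.  */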
8348 /* Output a multiple immediate operation.
8349 OPERANDS is the vector of operands referred to in the output patterns.
8350 INSTR1 is the output pattern to use for the first constant.
8351 INSTR2 is the output pattern to use for subsequent constants.
8352 IMMED_OP is the index of the constant slot in OPERANDS.
8353 N is the constant value. */
8354 static const char *
8355 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8356 int immed_op, HOST_WIDE_INT n)
8358 #if HOST_BITS_PER_WIDE_INT > 32
8359 n &= 0xffffffff;
8360 #endif
8362 if (n == 0)
8364 /* Quick and easy output. */
8365 operands[immed_op] = const0_rtx;
8366 output_asm_insn (instr1, operands);
8368 else
8370 int i;
8371 const char * instr = instr1;
8373 /* Note that n is never zero here (which would give no output). */
8374 for (i = 0; i < 32; i += 2)
8376 if (n & (3 << i))
8378 operands[immed_op] = GEN_INT (n & (255 << i));
8379 output_asm_insn (instr, operands);
8380 instr = instr2;
8381 i += 6;
8386 return "";
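/* Editor's worked example (illustrative only): for n = 0x12345678 the
   loop above walks even bit positions and grabs an 8-bit chunk each
   time it finds a set bit pair, skipping 8 bits after each hit:

       mov  r0, #0x278           @ bits 2-9   (0x9e << 2)
       orr  r0, r0, #0x5400      @ bits 10-17 (0x15 << 10)
       orr  r0, r0, #0x2340000   @ bits 18-25 (0x8d << 18)
       orr  r0, r0, #0x10000000  @ bits 28-31 (0x01 << 28)

   Each chunk is an 8-bit value at an even rotation, hence a legal ARM
   immediate, and the four pieces sum back to 0x12345678.  */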
8389 /* Return the appropriate ARM instruction for the operation code.
8390 The returned result should not be overwritten. OP is the rtx of the
8391 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8392 was shifted. */
8393 const char *
8394 arithmetic_instr (rtx op, int shift_first_arg)
8396 switch (GET_CODE (op))
8398 case PLUS:
8399 return "add";
8401 case MINUS:
8402 return shift_first_arg ? "rsb" : "sub";
8404 case IOR:
8405 return "orr";
8407 case XOR:
8408 return "eor";
8410 case AND:
8411 return "and";
8413 default:
8414 abort ();
8418 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8419 for the operation code. The returned result should not be overwritten.
8420 OP is the rtx code of the shift.
8421 On exit, *AMOUNTP will be -1 if the shift is by a register, otherwise it
8422 will be the constant shift amount.  */
8423 static const char *
8424 shift_op (rtx op, HOST_WIDE_INT *amountp)
8426 const char * mnem;
8427 enum rtx_code code = GET_CODE (op);
8429 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8430 *amountp = -1;
8431 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8432 *amountp = INTVAL (XEXP (op, 1));
8433 else
8434 abort ();
8436 switch (code)
8438 case ASHIFT:
8439 mnem = "asl";
8440 break;
8442 case ASHIFTRT:
8443 mnem = "asr";
8444 break;
8446 case LSHIFTRT:
8447 mnem = "lsr";
8448 break;
8450 case ROTATE:
8451 if (*amountp == -1)
8452 abort ();
8453 *amountp = 32 - *amountp;
8455 /* Fall through. */
8457 case ROTATERT:
8458 mnem = "ror";
8459 break;
8461 case MULT:
8462 /* We never have to worry about the amount being other than a
8463 power of 2, since this case can never be reloaded from a reg. */
8464 if (*amountp != -1)
8465 *amountp = int_log2 (*amountp);
8466 else
8467 abort ();
8468 return "asl";
8470 default:
8471 abort ();
8474 if (*amountp != -1)
8476 /* This is not 100% correct, but follows from the desire to merge
8477 multiplication by a power of 2 with the recognizer for a
8478 shift. >=32 is not a valid shift for "asl", so we must try and
8479 output a shift that produces the correct arithmetical result.
8480 Using lsr #32 is identical except for the fact that the carry bit
8481 is not set correctly if we set the flags; but we never use the
8482 carry bit from such an operation, so we can ignore that. */
8483 if (code == ROTATERT)
8484 /* Rotate is just modulo 32. */
8485 *amountp &= 31;
8486 else if (*amountp != (*amountp & 31))
8488 if (code == ASHIFT)
8489 mnem = "lsr";
8490 *amountp = 32;
8493 /* Shifts of 0 are no-ops. */
8494 if (*amountp == 0)
8495 return NULL;
8498 return mnem;
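/* Editor's illustration (not from the original sources) of the
   mappings performed above:

       (ashift x (const_int 3))    ->  "asl", *amountp == 3
       (rotate x (const_int 8))    ->  "ror", *amountp == 24  (32 - 8)
       (mult x (const_int 16))     ->  "asl", *amountp == 4   (log2)
       (ashift x (const_int 40))   ->  "lsr", *amountp == 32  (see above)
       (lshiftrt x (reg))          ->  "lsr", *amountp == -1  */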
8501 /* Obtain the shift count from a POWER of two.  */
8503 static HOST_WIDE_INT
8504 int_log2 (HOST_WIDE_INT power)
8506 HOST_WIDE_INT shift = 0;
8508 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8510 if (shift > 31)
8511 abort ();
8512 shift++;
8515 return shift;
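/* Editor's note (illustrative): the loop above finds the lowest set
   bit, so int_log2 (64) == 6; a value with no bit set in positions
   0-31 would run off the end and abort.  */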
8518 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8519 /bin/as is horribly restrictive. */
8520 #define MAX_ASCII_LEN 51
8522 void
8523 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8525 int i;
8526 int len_so_far = 0;
8528 fputs ("\t.ascii\t\"", stream);
8530 for (i = 0; i < len; i++)
8532 int c = p[i];
8534 if (len_so_far >= MAX_ASCII_LEN)
8536 fputs ("\"\n\t.ascii\t\"", stream);
8537 len_so_far = 0;
8540 switch (c)
8542 case TARGET_TAB:
8543 fputs ("\\t", stream);
8544 len_so_far += 2;
8545 break;
8547 case TARGET_FF:
8548 fputs ("\\f", stream);
8549 len_so_far += 2;
8550 break;
8552 case TARGET_BS:
8553 fputs ("\\b", stream);
8554 len_so_far += 2;
8555 break;
8557 case TARGET_CR:
8558 fputs ("\\r", stream);
8559 len_so_far += 2;
8560 break;
8562 case TARGET_NEWLINE:
8563 fputs ("\\n", stream);
8564 c = p [i + 1];
8565 if ((c >= ' ' && c <= '~')
8566 || c == TARGET_TAB)
8567 /* This is a good place for a line break. */
8568 len_so_far = MAX_ASCII_LEN;
8569 else
8570 len_so_far += 2;
8571 break;
8573 case '\"':
8574 case '\\':
8575 putc ('\\', stream);
8576 len_so_far++;
8577 /* Drop through. */
8579 default:
8580 if (c >= ' ' && c <= '~')
8582 putc (c, stream);
8583 len_so_far++;
8585 else
8587 fprintf (stream, "\\%03o", c);
8588 len_so_far += 4;
8590 break;
8594 fputs ("\"\n", stream);
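/* Editor's example (illustrative, not from the original sources):
   output_ascii_pseudo_op (stream, "hi\t\"x\"\n", 7) emits

       .ascii "hi\t\"x\"\n"

   and a long string is chopped into multiple .ascii directives once
   len_so_far passes MAX_ASCII_LEN, with newlines preferred as the
   split points.  */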
8597 /* Compute the register save mask for registers 0 through 12
8598 inclusive. This code is used by arm_compute_save_reg_mask. */
8599 static unsigned long
8600 arm_compute_save_reg0_reg12_mask (void)
8602 unsigned long func_type = arm_current_func_type ();
8603 unsigned int save_reg_mask = 0;
8604 unsigned int reg;
8606 if (IS_INTERRUPT (func_type))
8608 unsigned int max_reg;
8609 /* Interrupt functions must not corrupt any registers,
8610 even call clobbered ones. If this is a leaf function
8611 we can just examine the registers used by the RTL, but
8612 otherwise we have to assume that whatever function is
8613 called might clobber anything, and so we have to save
8614 all the call-clobbered registers as well. */
8615 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8616 /* FIQ handlers have registers r8 - r12 banked, so
8617 we only need to check r0 - r7.  Normal ISRs only
8618 bank r14 and r15, so we must check up to r12.
8619 r13 is the stack pointer which is always preserved,
8620 so we do not need to consider it here. */
8621 max_reg = 7;
8622 else
8623 max_reg = 12;
8625 for (reg = 0; reg <= max_reg; reg++)
8626 if (regs_ever_live[reg]
8627 || (! current_function_is_leaf && call_used_regs [reg]))
8628 save_reg_mask |= (1 << reg);
8630 else
8632 /* In the normal case we only need to save those registers
8633 which are call saved and which are used by this function. */
8634 for (reg = 0; reg <= 10; reg++)
8635 if (regs_ever_live[reg] && ! call_used_regs [reg])
8636 save_reg_mask |= (1 << reg);
8638 /* Handle the frame pointer as a special case. */
8639 if (! TARGET_APCS_FRAME
8640 && ! frame_pointer_needed
8641 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8642 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8643 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8645 /* If we aren't loading the PIC register,
8646 don't stack it even though it may be live. */
8647 if (flag_pic
8648 && ! TARGET_SINGLE_PIC_BASE
8649 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
8650 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8653 /* Save registers so the exception handler can modify them. */
8654 if (current_function_calls_eh_return)
8656 unsigned int i;
8658 for (i = 0; ; i++)
8660 reg = EH_RETURN_DATA_REGNO (i);
8661 if (reg == INVALID_REGNUM)
8662 break;
8663 save_reg_mask |= 1 << reg;
8667 return save_reg_mask;
8670 /* Compute a bit mask of which registers need to be
8671 saved on the stack for the current function. */
8673 static unsigned long
8674 arm_compute_save_reg_mask (void)
8676 unsigned int save_reg_mask = 0;
8677 unsigned long func_type = arm_current_func_type ();
8679 if (IS_NAKED (func_type))
8680 /* This should never really happen. */
8681 return 0;
8683 /* If we are creating a stack frame, then we must save the frame pointer,
8684 IP (which will hold the old stack pointer), LR and the PC. */
8685 if (frame_pointer_needed)
8686 save_reg_mask |=
8687 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8688 | (1 << IP_REGNUM)
8689 | (1 << LR_REGNUM)
8690 | (1 << PC_REGNUM);
8692 /* Volatile functions do not return, so there
8693 is no need to save any other registers. */
8694 if (IS_VOLATILE (func_type))
8695 return save_reg_mask;
8697 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8699 /* Decide if we need to save the link register.
8700 Interrupt routines have their own banked link register,
8701 so they never need to save it.
8702 Otherwise if we do not use the link register we do not need to save
8703 it. If we are pushing other registers onto the stack however, we
8704 can save an instruction in the epilogue by pushing the link register
8705 now and then popping it back into the PC. This incurs extra memory
8706 accesses though, so we only do it when optimizing for size, and only
8707 if we know that we will not need a fancy return sequence. */
8708 if (regs_ever_live [LR_REGNUM]
8709 || (save_reg_mask
8710 && optimize_size
8711 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8712 && !current_function_calls_eh_return))
8713 save_reg_mask |= 1 << LR_REGNUM;
8715 if (cfun->machine->lr_save_eliminated)
8716 save_reg_mask &= ~ (1 << LR_REGNUM);
8718 if (TARGET_REALLY_IWMMXT
8719 && ((bit_count (save_reg_mask)
8720 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8722 unsigned int reg;
8724 /* The total number of registers that are going to be pushed
8725 onto the stack is odd. We need to ensure that the stack
8726 is 64-bit aligned before we start to save iWMMXt registers,
8727 and also before we start to create locals. (A local variable
8728 might be a double or long long which we will load/store using
8729 an iWMMXt instruction). Therefore we need to push another
8730 ARM register, so that the stack will be 64-bit aligned. We
8731 try to avoid using the arg registers (r0 - r3) as they might be
8732 used to pass values in a tail call. */
8733 for (reg = 4; reg <= 12; reg++)
8734 if ((save_reg_mask & (1 << reg)) == 0)
8735 break;
8737 if (reg <= 12)
8738 save_reg_mask |= (1 << reg);
8739 else
8741 cfun->machine->sibcall_blocked = 1;
8742 save_reg_mask |= (1 << 3);
8746 return save_reg_mask;
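/* Editor's worked example (a sketch, not from the original sources):
   for a normal function that needs a frame pointer and uses r4 and r5,
   the mask built above is

       (1<<4)|(1<<5) | (1<<11)|(1<<12)|(1<<14)|(1<<15)  ==  0xd830

   i.e. {r4, r5, fp, ip, lr, pc}.  On iWMMXt targets an extra register
   may then be added to keep the push doubleword aligned.  */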
8750 /* Compute a bit mask of which registers need to be
8751 saved on the stack for the current function. */
8752 static unsigned long
8753 thumb_compute_save_reg_mask (void)
8755 unsigned long mask;
8756 int reg;
8758 mask = 0;
8759 for (reg = 0; reg < 12; reg ++)
8761 if (regs_ever_live[reg] && !call_used_regs[reg])
8762 mask |= 1 << reg;
8765 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8766 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8767 if (TARGET_SINGLE_PIC_BASE)
8768 mask &= ~(1 << arm_pic_register);
8769 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8770 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8771 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8773 /* lr will also be pushed if any lo regs are pushed. */
8774 if (mask & 0xff || thumb_force_lr_save ())
8775 mask |= (1 << LR_REGNUM);
8777 /* Make sure we have a low work register if we need one. */
8778 if (((mask & 0xff) == 0 && regs_ever_live[LAST_ARG_REGNUM])
8779 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8780 mask |= 1 << LAST_LO_REGNUM;
8782 return mask;
8786 /* Return the number of bytes required to save VFP registers. */
8787 static int
8788 arm_get_vfp_saved_size (void)
8790 unsigned int regno;
8791 int count;
8792 int saved;
8794 saved = 0;
8795 /* Space for saved VFP registers. */
8796 if (TARGET_HARD_FLOAT && TARGET_VFP)
8798 count = 0;
8799 for (regno = FIRST_VFP_REGNUM;
8800 regno < LAST_VFP_REGNUM;
8801 regno += 2)
8803 if ((!regs_ever_live[regno] || call_used_regs[regno])
8804 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8806 if (count > 0)
8808 /* Workaround ARM10 VFPr1 bug. */
8809 if (count == 2 && !arm_arch6)
8810 count++;
8811 saved += count * 8 + 4;
8813 count = 0;
8815 else
8816 count++;
8818 if (count > 0)
8820 if (count == 2 && !arm_arch6)
8821 count++;
8822 saved += count * 8 + 4;
8825 return saved;
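/* Editor's illustration (assumptions flagged): if d8-d10 are the only
   call-saved VFP registers live, the loop above sees one run of three
   doubles, so saved = 3 * 8 + 4 = 28 bytes (8 per double plus 4 for
   the FSTMX format word).  On pre-ARMv6 cores a run of exactly two
   would be padded to three to work around the ARM10 VFPr1 erratum.  */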
8829 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8830 everything bar the final return instruction. */
8831 const char *
8832 output_return_instruction (rtx operand, int really_return, int reverse)
8834 char conditional[10];
8835 char instr[100];
8836 int reg;
8837 unsigned long live_regs_mask;
8838 unsigned long func_type;
8839 arm_stack_offsets *offsets;
8841 func_type = arm_current_func_type ();
8843 if (IS_NAKED (func_type))
8844 return "";
8846 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8848 /* If this function was declared non-returning, and we have
8849 found a tail call, then we have to trust that the called
8850 function won't return. */
8851 if (really_return)
8853 rtx ops[2];
8855 /* Otherwise, trap an attempted return by aborting. */
8856 ops[0] = operand;
8857 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8858 : "abort");
8859 assemble_external_libcall (ops[1]);
8860 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8863 return "";
8866 if (current_function_calls_alloca && !really_return)
8867 abort ();
8869 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8871 return_used_this_function = 1;
8873 live_regs_mask = arm_compute_save_reg_mask ();
8875 if (live_regs_mask)
8877 const char * return_reg;
8879 /* If we do not have any special requirements for function exit
8880 (e.g. interworking, or ISR) then we can load the return address
8881 directly into the PC. Otherwise we must load it into LR. */
8882 if (really_return
8883 && ! TARGET_INTERWORK)
8884 return_reg = reg_names[PC_REGNUM];
8885 else
8886 return_reg = reg_names[LR_REGNUM];
8888 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8890 /* There are three possible reasons for the IP register
8891 being saved. 1) a stack frame was created, in which case
8892 IP contains the old stack pointer, or 2) an ISR routine
8893 corrupted it, or 3) it was saved to align the stack on
8894 iWMMXt. In case 1, restore IP into SP, otherwise just
8895 restore IP. */
8896 if (frame_pointer_needed)
8898 live_regs_mask &= ~ (1 << IP_REGNUM);
8899 live_regs_mask |= (1 << SP_REGNUM);
8901 else
8903 if (! IS_INTERRUPT (func_type)
8904 && ! TARGET_REALLY_IWMMXT)
8905 abort ();
8909 /* On some ARM architectures it is faster to use LDR rather than
8910 LDM to load a single register. On other architectures, the
8911 cost is the same. In 26 bit mode, or for exception handlers,
8912 we have to use LDM to load the PC so that the CPSR is also
8913 restored. */
8914 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8916 if (live_regs_mask == (unsigned int)(1 << reg))
8917 break;
8919 if (reg <= LAST_ARM_REGNUM
8920 && (reg != LR_REGNUM
8921 || ! really_return
8922 || ! IS_INTERRUPT (func_type)))
8924 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
8925 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
8927 else
8929 char *p;
8930 int first = 1;
8932 /* Generate the load multiple instruction to restore the
8933 registers. Note we can get here, even if
8934 frame_pointer_needed is true, but only if sp already
8935 points to the base of the saved core registers. */
8936 if (live_regs_mask & (1 << SP_REGNUM))
8938 unsigned HOST_WIDE_INT stack_adjust;
8940 offsets = arm_get_frame_offsets ();
8941 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
8942 if (stack_adjust != 0 && stack_adjust != 4)
8943 abort ();
8945 if (stack_adjust && arm_arch5)
8946 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
8947 else
8949 /* If we can't use ldmib (SA110 bug), then try to pop r3
8950 instead. */
8951 if (stack_adjust)
8952 live_regs_mask |= 1 << 3;
8953 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
8956 else
8957 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
8959 p = instr + strlen (instr);
8961 for (reg = 0; reg <= SP_REGNUM; reg++)
8962 if (live_regs_mask & (1 << reg))
8964 int l = strlen (reg_names[reg]);
8966 if (first)
8967 first = 0;
8968 else
8970 memcpy (p, ", ", 2);
8971 p += 2;
8974 memcpy (p, "%|", 2);
8975 memcpy (p + 2, reg_names[reg], l);
8976 p += l + 2;
8979 if (live_regs_mask & (1 << LR_REGNUM))
8981 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
8982 /* If returning from an interrupt, restore the CPSR. */
8983 if (IS_INTERRUPT (func_type))
8984 strcat (p, "^");
8986 else
8987 strcpy (p, "}");
8990 output_asm_insn (instr, & operand);
8992 /* See if we need to generate an extra instruction to
8993 perform the actual function return. */
8994 if (really_return
8995 && func_type != ARM_FT_INTERWORKED
8996 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8998 /* The return has already been handled
8999 by loading the LR into the PC. */
9000 really_return = 0;
9004 if (really_return)
9006 switch ((int) ARM_FUNC_TYPE (func_type))
9008 case ARM_FT_ISR:
9009 case ARM_FT_FIQ:
9010 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9011 break;
9013 case ARM_FT_INTERWORKED:
9014 sprintf (instr, "bx%s\t%%|lr", conditional);
9015 break;
9017 case ARM_FT_EXCEPTION:
9018 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9019 break;
9021 default:
9022 /* Use bx if it's available. */
9023 if (arm_arch5 || arm_arch4t)
9024 sprintf (instr, "bx%s\t%%|lr", conditional);
9025 else
9026 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9027 break;
9030 output_asm_insn (instr, & operand);
9033 return "";
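/* Editor's sketch (illustrative, not from the original sources) of
   typical sequences emitted above:

       leaf, nothing saved, ARMv5:   bx    lr
       {r4, r5, lr} saved:           ldmfd sp!, {r4, r5, pc}
       FIQ/ISR, nothing saved:       subs  pc, lr, #4  */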
9036 /* Write the function name into the code section, directly preceding
9037 the function prologue.
9039 Code will be output similar to this:
9040 t0
9041 .ascii "arm_poke_function_name", 0
9042 .align
9043 t1
9044 .word 0xff000000 + (t1 - t0)
9045 arm_poke_function_name
9046 mov ip, sp
9047 stmfd sp!, {fp, ip, lr, pc}
9048 sub fp, ip, #4
9050 When performing a stack backtrace, code can inspect the value
9051 of 'pc' stored at 'fp' + 0. If the trace function then looks
9052 at location pc - 12 and the top 8 bits are set, then we know
9053 that there is a function name embedded immediately preceding this
9054 location, whose length is ((pc[-3]) & ~0xff000000).
9056 We assume that pc is declared as a pointer to an unsigned long.
9058 It is of no benefit to output the function name if we are assembling
9059 a leaf function. These function types will not contain a stack
9060 backtrace structure, therefore it is not possible to determine the
9061 function name. */
9062 void
9063 arm_poke_function_name (FILE *stream, const char *name)
9065 unsigned long alignlength;
9066 unsigned long length;
9067 rtx x;
9069 length = strlen (name) + 1;
9070 alignlength = ROUND_UP_WORD (length);
9072 ASM_OUTPUT_ASCII (stream, name, length);
9073 ASM_OUTPUT_ALIGN (stream, 2);
9074 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9075 assemble_aligned_integer (UNITS_PER_WORD, x);
9078 /* Place some comments into the assembler stream
9079 describing the current function. */
9080 static void
9081 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9083 unsigned long func_type;
9085 if (!TARGET_ARM)
9087 thumb_output_function_prologue (f, frame_size);
9088 return;
9091 /* Sanity check. */
9092 if (arm_ccfsm_state || arm_target_insn)
9093 abort ();
9095 func_type = arm_current_func_type ();
9097 switch ((int) ARM_FUNC_TYPE (func_type))
9099 default:
9100 case ARM_FT_NORMAL:
9101 break;
9102 case ARM_FT_INTERWORKED:
9103 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9104 break;
9105 case ARM_FT_ISR:
9106 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9107 break;
9108 case ARM_FT_FIQ:
9109 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9110 break;
9111 case ARM_FT_EXCEPTION:
9112 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9113 break;
9116 if (IS_NAKED (func_type))
9117 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9119 if (IS_VOLATILE (func_type))
9120 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9122 if (IS_NESTED (func_type))
9123 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9125 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9126 current_function_args_size,
9127 current_function_pretend_args_size, frame_size);
9129 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9130 frame_pointer_needed,
9131 cfun->machine->uses_anonymous_args);
9133 if (cfun->machine->lr_save_eliminated)
9134 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9136 if (current_function_calls_eh_return)
9137 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9139 #ifdef AOF_ASSEMBLER
9140 if (flag_pic)
9141 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9142 #endif
9144 return_used_this_function = 0;
9147 const char *
9148 arm_output_epilogue (rtx sibling)
9150 int reg;
9151 unsigned long saved_regs_mask;
9152 unsigned long func_type;
9153 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9154 frame that is $fp + 4 for a non-variadic function. */
9155 int floats_offset = 0;
9156 rtx operands[3];
9157 FILE * f = asm_out_file;
9158 unsigned int lrm_count = 0;
9159 int really_return = (sibling == NULL);
9160 int start_reg;
9161 arm_stack_offsets *offsets;
9163 /* If we have already generated the return instruction
9164 then it is futile to generate anything else. */
9165 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9166 return "";
9168 func_type = arm_current_func_type ();
9170 if (IS_NAKED (func_type))
9171 /* Naked functions don't have epilogues. */
9172 return "";
9174 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9176 rtx op;
9178 /* A volatile function should never return. Call abort. */
9179 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9180 assemble_external_libcall (op);
9181 output_asm_insn ("bl\t%a0", &op);
9183 return "";
9186 if (current_function_calls_eh_return
9187 && ! really_return)
9188 /* If we are throwing an exception, then we really must
9189 be doing a return, so we can't tail-call. */
9190 abort ();
9192 offsets = arm_get_frame_offsets ();
9193 saved_regs_mask = arm_compute_save_reg_mask ();
9195 if (TARGET_IWMMXT)
9196 lrm_count = bit_count (saved_regs_mask);
9198 floats_offset = offsets->saved_args;
9199 /* Compute how far away the floats will be. */
9200 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9201 if (saved_regs_mask & (1 << reg))
9202 floats_offset += 4;
9204 if (frame_pointer_needed)
9206 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9207 int vfp_offset = offsets->frame;
9209 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9211 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9212 if (regs_ever_live[reg] && !call_used_regs[reg])
9214 floats_offset += 12;
9215 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9216 reg, FP_REGNUM, floats_offset - vfp_offset);
9219 else
9221 start_reg = LAST_FPA_REGNUM;
9223 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9225 if (regs_ever_live[reg] && !call_used_regs[reg])
9227 floats_offset += 12;
9229 /* We can't unstack more than four registers at once. */
9230 if (start_reg - reg == 3)
9232 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9233 reg, FP_REGNUM, floats_offset - vfp_offset);
9234 start_reg = reg - 1;
9237 else
9239 if (reg != start_reg)
9240 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9241 reg + 1, start_reg - reg,
9242 FP_REGNUM, floats_offset - vfp_offset);
9243 start_reg = reg - 1;
9247 /* Just in case the last register checked also needs unstacking. */
9248 if (reg != start_reg)
9249 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9250 reg + 1, start_reg - reg,
9251 FP_REGNUM, floats_offset - vfp_offset);
9254 if (TARGET_HARD_FLOAT && TARGET_VFP)
9256 int saved_size;
9258 /* The fldmx insn does not have base+offset addressing modes,
9259 so we use IP to hold the address. */
9260 saved_size = arm_get_vfp_saved_size ();
9262 if (saved_size > 0)
9264 floats_offset += saved_size;
9265 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9266 FP_REGNUM, floats_offset - vfp_offset);
9268 start_reg = FIRST_VFP_REGNUM;
9269 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9271 if ((!regs_ever_live[reg] || call_used_regs[reg])
9272 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9274 if (start_reg != reg)
9275 arm_output_fldmx (f, IP_REGNUM,
9276 (start_reg - FIRST_VFP_REGNUM) / 2,
9277 (reg - start_reg) / 2);
9278 start_reg = reg + 2;
9281 if (start_reg != reg)
9282 arm_output_fldmx (f, IP_REGNUM,
9283 (start_reg - FIRST_VFP_REGNUM) / 2,
9284 (reg - start_reg) / 2);
9287 if (TARGET_IWMMXT)
9289 /* The frame pointer is guaranteed to be non-double-word aligned.
9290 This is because it is set to (old_stack_pointer - 4) and the
9291 old_stack_pointer was double word aligned. Thus the offset to
9292 the iWMMXt registers to be loaded must also be non-double-word
9293 sized, so that the resultant address *is* double-word aligned.
9294 We can ignore floats_offset since that was already included in
9295 the live_regs_mask. */
9296 lrm_count += (lrm_count % 2 ? 2 : 1);
9298 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9299 if (regs_ever_live[reg] && !call_used_regs[reg])
9301 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9302 reg, FP_REGNUM, lrm_count * 4);
9303 lrm_count += 2;
9307 /* saved_regs_mask should contain the IP, which at the time of stack
9308 frame generation actually contains the old stack pointer. So a
9309 quick way to unwind the stack is just pop the IP register directly
9310 into the stack pointer. */
9311 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9312 abort ();
9313 saved_regs_mask &= ~ (1 << IP_REGNUM);
9314 saved_regs_mask |= (1 << SP_REGNUM);
9316 /* There are two registers left in saved_regs_mask - LR and PC. We
9317 only need to restore the LR register (the return address), but to
9318 save time we can load it directly into the PC, unless we need a
9319 special function exit sequence, or we are not really returning. */
9320 if (really_return
9321 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9322 && !current_function_calls_eh_return)
9323 /* Delete the LR from the register mask, so that the LR on
9324 the stack is loaded into the PC in the register mask. */
9325 saved_regs_mask &= ~ (1 << LR_REGNUM);
9326 else
9327 saved_regs_mask &= ~ (1 << PC_REGNUM);
9329 /* We must use SP as the base register, because SP is one of the
9330 registers being restored. If an interrupt or page fault
9331 happens in the ldm instruction, the SP might or might not
9332 have been restored. That would be bad, as then SP will no
9333 longer indicate the safe area of stack, and we can get stack
9334 corruption. Using SP as the base register means that it will
9335 be reset correctly to the original value, should an interrupt
9336 occur. If the stack pointer already points at the right
9337 place, then omit the subtraction. */
9338 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9339 || current_function_calls_alloca)
9340 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9341 4 * bit_count (saved_regs_mask));
9342 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9344 if (IS_INTERRUPT (func_type))
9345 /* Interrupt handlers will have pushed the
9346 IP onto the stack, so restore it now. */
9347 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9349 else
9351 /* Restore stack pointer if necessary. */
9352 if (offsets->outgoing_args != offsets->saved_regs)
9354 operands[0] = operands[1] = stack_pointer_rtx;
9355 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9356 output_add_immediate (operands);
9359 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9361 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9362 if (regs_ever_live[reg] && !call_used_regs[reg])
9363 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9364 reg, SP_REGNUM);
9366 else
9368 start_reg = FIRST_FPA_REGNUM;
9370 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9372 if (regs_ever_live[reg] && !call_used_regs[reg])
9374 if (reg - start_reg == 3)
9376 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9377 start_reg, SP_REGNUM);
9378 start_reg = reg + 1;
9381 else
9383 if (reg != start_reg)
9384 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9385 start_reg, reg - start_reg,
9386 SP_REGNUM);
9388 start_reg = reg + 1;
9392 /* Just in case the last register checked also needs unstacking. */
9393 if (reg != start_reg)
9394 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9395 start_reg, reg - start_reg, SP_REGNUM);
9398 if (TARGET_HARD_FLOAT && TARGET_VFP)
9400 start_reg = FIRST_VFP_REGNUM;
9401 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9403 if ((!regs_ever_live[reg] || call_used_regs[reg])
9404 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9406 if (start_reg != reg)
9407 arm_output_fldmx (f, SP_REGNUM,
9408 (start_reg - FIRST_VFP_REGNUM) / 2,
9409 (reg - start_reg) / 2);
9410 start_reg = reg + 2;
9413 if (start_reg != reg)
9414 arm_output_fldmx (f, SP_REGNUM,
9415 (start_reg - FIRST_VFP_REGNUM) / 2,
9416 (reg - start_reg) / 2);
9418 if (TARGET_IWMMXT)
9419 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9420 if (regs_ever_live[reg] && !call_used_regs[reg])
9421 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9423 /* If we can, restore the LR into the PC. */
9424 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9425 && really_return
9426 && current_function_pretend_args_size == 0
9427 && saved_regs_mask & (1 << LR_REGNUM)
9428 && !current_function_calls_eh_return)
9430 saved_regs_mask &= ~ (1 << LR_REGNUM);
9431 saved_regs_mask |= (1 << PC_REGNUM);
9434 /* Load the registers off the stack. If we only have one register
9435 to load use the LDR instruction - it is faster. */
9436 if (saved_regs_mask == (1 << LR_REGNUM))
9438 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9440 else if (saved_regs_mask)
9442 if (saved_regs_mask & (1 << SP_REGNUM))
9443 /* Note - write back to the stack register is not enabled
9444 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9445 in the list of registers and if we add writeback the
9446 instruction becomes UNPREDICTABLE. */
9447 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9448 else
9449 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9452 if (current_function_pretend_args_size)
9454 /* Unwind the pre-pushed regs. */
9455 operands[0] = operands[1] = stack_pointer_rtx;
9456 operands[2] = GEN_INT (current_function_pretend_args_size);
9457 output_add_immediate (operands);
9461 /* We may have already restored PC directly from the stack. */
9462 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9463 return "";
9465 /* Stack adjustment for exception handler. */
9466 if (current_function_calls_eh_return)
9467 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9468 ARM_EH_STACKADJ_REGNUM);
9470 /* Generate the return instruction. */
9471 switch ((int) ARM_FUNC_TYPE (func_type))
9473 case ARM_FT_ISR:
9474 case ARM_FT_FIQ:
9475 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9476 break;
9478 case ARM_FT_EXCEPTION:
9479 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9480 break;
9482 case ARM_FT_INTERWORKED:
9483 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9484 break;
9486 default:
9487 if (arm_arch5 || arm_arch4t)
9488 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9489 else
9490 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9491 break;
9494 return "";
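/* Editor's sketch (hedged, not from the original sources): for a
   typical APCS frame function whose prologue pushed {fp, ip, lr, pc},
   the frame-pointer branch above would emit something like

       sub   sp, fp, #12
       ldmfd sp, {fp, sp, pc}

   where loading SP and PC from the stack both unwinds and returns in
   one instruction.  */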
9497 static void
9498 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9499 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9501 arm_stack_offsets *offsets;
9503 if (TARGET_THUMB)
9505 /* ??? Probably not safe to set this here, since it assumes that a
9506 function will be emitted as assembly immediately after we generate
9507 RTL for it. This does not happen for inline functions. */
9508 return_used_this_function = 0;
9510 else
9512 /* We need to take into account any stack-frame rounding. */
9513 offsets = arm_get_frame_offsets ();
9515 if (use_return_insn (FALSE, NULL)
9516 && return_used_this_function
9517 && offsets->saved_regs != offsets->outgoing_args
9518 && !frame_pointer_needed)
9519 abort ();
9521 /* Reset the ARM-specific per-function variables. */
9522 after_arm_reorg = 0;
9526 /* Generate and emit an insn that we will recognize as a push_multi.
9527 Unfortunately, since this insn does not reflect the actual
9528 semantics of the operation very well, we need to annotate the insn for the benefit
9529 of DWARF2 frame unwind information. */
9530 static rtx
9531 emit_multi_reg_push (int mask)
9533 int num_regs = 0;
9534 int num_dwarf_regs;
9535 int i, j;
9536 rtx par;
9537 rtx dwarf;
9538 int dwarf_par_index;
9539 rtx tmp, reg;
9541 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9542 if (mask & (1 << i))
9543 num_regs++;
9545 if (num_regs == 0 || num_regs > 16)
9546 abort ();
9548 /* We don't record the PC in the dwarf frame information. */
9549 num_dwarf_regs = num_regs;
9550 if (mask & (1 << PC_REGNUM))
9551 num_dwarf_regs--;
9553 /* For the body of the insn we are going to generate an UNSPEC in
9554 parallel with several USEs. This allows the insn to be recognized
9555 by the push_multi pattern in the arm.md file. The insn looks
9556 something like this:
9558 (parallel [
9559 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9560 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9561 (use (reg:SI 11 fp))
9562 (use (reg:SI 12 ip))
9563 (use (reg:SI 14 lr))
9564 (use (reg:SI 15 pc))
9567 For the frame note however, we try to be more explicit and actually
9568 show each register being stored into the stack frame, plus a (single)
9569 decrement of the stack pointer. We do it this way in order to be
9570 friendly to the stack unwinding code, which only wants to see a single
9571 stack decrement per instruction. The RTL we generate for the note looks
9572 something like this:
9574 (sequence [
9575 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9576 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9577 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9578 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9579 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9582 This sequence is used both by the code to support stack unwinding for
9583 exception handlers and the code to generate dwarf2 frame debugging. */
9585 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9586 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9587 dwarf_par_index = 1;
9589 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9591 if (mask & (1 << i))
9593 reg = gen_rtx_REG (SImode, i);
9595 XVECEXP (par, 0, 0)
9596 = gen_rtx_SET (VOIDmode,
9597 gen_rtx_MEM (BLKmode,
9598 gen_rtx_PRE_DEC (BLKmode,
9599 stack_pointer_rtx)),
9600 gen_rtx_UNSPEC (BLKmode,
9601 gen_rtvec (1, reg),
9602 UNSPEC_PUSH_MULT));
9604 if (i != PC_REGNUM)
9606 tmp = gen_rtx_SET (VOIDmode,
9607 gen_rtx_MEM (SImode, stack_pointer_rtx),
9608 reg);
9609 RTX_FRAME_RELATED_P (tmp) = 1;
9610 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9611 dwarf_par_index++;
9614 break;
9618 for (j = 1, i++; j < num_regs; i++)
9620 if (mask & (1 << i))
9622 reg = gen_rtx_REG (SImode, i);
9624 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9626 if (i != PC_REGNUM)
9628 tmp = gen_rtx_SET (VOIDmode,
9629 gen_rtx_MEM (SImode,
9630 plus_constant (stack_pointer_rtx,
9631 4 * j)),
9632 reg);
9633 RTX_FRAME_RELATED_P (tmp) = 1;
9634 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9637 j++;
9641 par = emit_insn (par);
9643 tmp = gen_rtx_SET (SImode,
9644 stack_pointer_rtx,
9645 gen_rtx_PLUS (SImode,
9646 stack_pointer_rtx,
9647 GEN_INT (-4 * num_regs)));
9648 RTX_FRAME_RELATED_P (tmp) = 1;
9649 XVECEXP (dwarf, 0, 0) = tmp;
9651 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9652 REG_NOTES (par));
9653 return par;
9656 static rtx
9657 emit_sfm (int base_reg, int count)
9659 rtx par;
9660 rtx dwarf;
9661 rtx tmp, reg;
9662 int i;
9664 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9665 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9667 reg = gen_rtx_REG (XFmode, base_reg++);
9669 XVECEXP (par, 0, 0)
9670 = gen_rtx_SET (VOIDmode,
9671 gen_rtx_MEM (BLKmode,
9672 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9673 gen_rtx_UNSPEC (BLKmode,
9674 gen_rtvec (1, reg),
9675 UNSPEC_PUSH_MULT));
9676 tmp = gen_rtx_SET (VOIDmode,
9677 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9678 RTX_FRAME_RELATED_P (tmp) = 1;
9679 XVECEXP (dwarf, 0, 1) = tmp;
9681 for (i = 1; i < count; i++)
9683 reg = gen_rtx_REG (XFmode, base_reg++);
9684 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9686 tmp = gen_rtx_SET (VOIDmode,
9687 gen_rtx_MEM (XFmode,
9688 plus_constant (stack_pointer_rtx,
9689 i * 12)),
9690 reg);
9691 RTX_FRAME_RELATED_P (tmp) = 1;
9692 XVECEXP (dwarf, 0, i + 1) = tmp;
9695 tmp = gen_rtx_SET (VOIDmode,
9696 stack_pointer_rtx,
9697 gen_rtx_PLUS (SImode,
9698 stack_pointer_rtx,
9699 GEN_INT (-12 * count)));
9700 RTX_FRAME_RELATED_P (tmp) = 1;
9701 XVECEXP (dwarf, 0, 0) = tmp;
9703 par = emit_insn (par);
9704 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9705 REG_NOTES (par));
9706 return par;
9710 /* Return true if the current function needs to save/restore LR. */
9712 static bool
9713 thumb_force_lr_save (void)
9715 return !cfun->machine->lr_save_eliminated
9716 && (!leaf_function_p ()
9717 || thumb_far_jump_used_p ()
9718 || regs_ever_live [LR_REGNUM]);
9722 /* Compute the distance from register FROM to register TO.
9723 These can be the arg pointer (26), the soft frame pointer (25),
9724 the stack pointer (13) or the hard frame pointer (11).
9725 In thumb mode r7 is used as the soft frame pointer, if needed.
9726 Typical stack layout looks like this:
9728 old stack pointer -> |    |
9729 ----
9730 |    | \
9731 |    |   saved arguments for
9732 |    |   vararg functions
9733 |    | /
9734 --
9735 hard FP & arg pointer -> |    | \
9736 |    |   stack
9737 |    |   frame
9738 |    | /
9739 --
9740 |    | \
9741 |    |   call saved
9742 |    |   registers
9743 soft frame pointer -> |    | /
9744 --
9745 |    | \
9746 |    |   local
9747 |    |   variables
9748 |    | /
9749 --
9750 |    | \
9751 |    |   outgoing
9752 |    |   arguments
9753 current stack pointer -> |    | /
9754 --
9756 For a given function some or all of these stack components
9757 may not be needed, giving rise to the possibility of
9758 eliminating some of the registers.
9760 The values returned by this function must reflect the behavior
9761 of arm_expand_prologue() and arm_compute_save_reg_mask().
9763 The sign of the number returned reflects the direction of stack
9764 growth, so the values are positive for all eliminations except
9765 from the soft frame pointer to the hard frame pointer.
9767 SFP may point just inside the local variables block to ensure correct
9768 alignment. */
9771 /* Calculate stack offsets. These are used to calculate register elimination
9772 offsets and in prologue/epilogue code. */
9774 static arm_stack_offsets *
9775 arm_get_frame_offsets (void)
9777 struct arm_stack_offsets *offsets;
9778 unsigned long func_type;
9779 int leaf;
9780 int saved;
9781 HOST_WIDE_INT frame_size;
9783 offsets = &cfun->machine->stack_offsets;
9785 /* We need to know if we are a leaf function. Unfortunately, it
9786 is possible to be called after start_sequence has been called,
9787 which causes get_insns to return the insns for the sequence,
9788 not the function, which will cause leaf_function_p to return
9789 the incorrect result.
9791 However, we only need to know about leaf functions once reload has completed, and the
9792 frame size cannot be changed after that time, so we can safely
9793 use the cached value. */
9795 if (reload_completed)
9796 return offsets;
9798 /* Initially this is the size of the local variables.  It will be translated
9799 into an offset once we have determined the size of preceding data. */
9800 frame_size = ROUND_UP_WORD (get_frame_size ());
9802 leaf = leaf_function_p ();
9804 /* Space for variadic functions. */
9805 offsets->saved_args = current_function_pretend_args_size;
9807 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9809 if (TARGET_ARM)
9811 unsigned int regno;
9813 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9815 /* We know that SP will be doubleword aligned on entry, and we must
9816 preserve that condition at any subroutine call. We also require the
9817 soft frame pointer to be doubleword aligned. */
9819 if (TARGET_REALLY_IWMMXT)
9821 /* Check for the call-saved iWMMXt registers. */
9822 for (regno = FIRST_IWMMXT_REGNUM;
9823 regno <= LAST_IWMMXT_REGNUM;
9824 regno++)
9825 if (regs_ever_live [regno] && ! call_used_regs [regno])
9826 saved += 8;
9829 func_type = arm_current_func_type ();
9830 if (! IS_VOLATILE (func_type))
9832 /* Space for saved FPA registers. */
9833 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9834 if (regs_ever_live[regno] && ! call_used_regs[regno])
9835 saved += 12;
9837 /* Space for saved VFP registers. */
9838 if (TARGET_HARD_FLOAT && TARGET_VFP)
9839 saved += arm_get_vfp_saved_size ();
9842 else /* TARGET_THUMB */
9844 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
9845 if (TARGET_BACKTRACE)
9846 saved += 16;
9849 /* Saved registers include the stack frame. */
9850 offsets->saved_regs = offsets->saved_args + saved;
9851 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
9852 /* A leaf function does not need any stack alignment if it has nothing
9853 on the stack. */
9854 if (leaf && frame_size == 0)
9856 offsets->outgoing_args = offsets->soft_frame;
9857 return offsets;
9860 /* Ensure SFP has the correct alignment. */
9861 if (ARM_DOUBLEWORD_ALIGN
9862 && (offsets->soft_frame & 7))
9863 offsets->soft_frame += 4;
9865 offsets->outgoing_args = offsets->soft_frame + frame_size
9866 + current_function_outgoing_args_size;
9868 if (ARM_DOUBLEWORD_ALIGN)
9870 /* Ensure SP remains doubleword aligned. */
9871 if (offsets->outgoing_args & 7)
9872 offsets->outgoing_args += 4;
9873 if (offsets->outgoing_args & 7)
9874 abort ();
9877 return offsets;
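/* Editor's worked example (assumptions flagged inline): an ARM
   non-leaf function, no pretend args, saving {r4, lr} (8 bytes), with
   16 bytes of locals and no outgoing arguments, and assuming
   CALLER_INTERWORKING_SLOT_SIZE == 0, ends up with

       saved_args    = 0
       frame         = 0     (no frame pointer)
       saved_regs    = 8
       soft_frame    = 8     (already doubleword aligned)
       outgoing_args = 24    (8 + 16, still 8-byte aligned)  */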
9881 /* Calculate the relative offsets for the different stack pointers. Positive
9882 offsets are in the direction of stack growth. */
9884 HOST_WIDE_INT
9885 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9887 arm_stack_offsets *offsets;
9889 offsets = arm_get_frame_offsets ();
9891 /* OK, now we have enough information to compute the distances.
9892 There must be an entry in these switch tables for each pair
9893 of registers in ELIMINABLE_REGS, even if some of the entries
9894 seem to be redundant or useless. */
9895 switch (from)
9897 case ARG_POINTER_REGNUM:
9898 switch (to)
9900 case THUMB_HARD_FRAME_POINTER_REGNUM:
9901 return 0;
9903 case FRAME_POINTER_REGNUM:
9904 /* This is the reverse of the soft frame pointer
9905 to hard frame pointer elimination below. */
9906 return offsets->soft_frame - offsets->saved_args;
9908 case ARM_HARD_FRAME_POINTER_REGNUM:
9909 /* If there is no stack frame then the hard
9910 frame pointer and the arg pointer coincide. */
9911 if (offsets->frame == offsets->saved_regs)
9912 return 0;
9913 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
9914 return (frame_pointer_needed
9915 && cfun->static_chain_decl != NULL
9916 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
9918 case STACK_POINTER_REGNUM:
9919 /* If nothing has been pushed on the stack at all
9920 then this will return -4. This *is* correct! */
9921 return offsets->outgoing_args - (offsets->saved_args + 4);
9923 default:
9924 abort ();
9926 break;
9928 case FRAME_POINTER_REGNUM:
9929 switch (to)
9931 case THUMB_HARD_FRAME_POINTER_REGNUM:
9932 return 0;
9934 case ARM_HARD_FRAME_POINTER_REGNUM:
9935 /* The hard frame pointer points to the top entry in the
9936 stack frame. The soft frame pointer to the bottom entry
9937 in the stack frame. If there is no stack frame at all,
9938 then they are identical. */
9940 return offsets->frame - offsets->soft_frame;
9942 case STACK_POINTER_REGNUM:
9943 return offsets->outgoing_args - offsets->soft_frame;
9945 default:
9946 abort ();
9948 break;
9950 default:
9951 /* You cannot eliminate from the stack pointer.
9952 In theory you could eliminate from the hard frame
9953 pointer to the stack pointer, but this will never
9954 happen, since if a stack frame is not needed the
9955 hard frame pointer will never be used. */
9956 abort ();
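/* Editor's note (illustrative, using the worked example above): with
   outgoing_args == 24, saved_args == 0 and soft_frame == 8,

       ARG_POINTER   -> STACK_POINTER  gives 24 - (0 + 4) == 20
       FRAME_POINTER -> STACK_POINTER  gives 24 - 8       == 16.  */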
9961 /* Generate the prologue instructions for entry into an ARM function. */
9962 void
9963 arm_expand_prologue (void)
9965 int reg;
9966 rtx amount;
9967 rtx insn;
9968 rtx ip_rtx;
9969 unsigned long live_regs_mask;
9970 unsigned long func_type;
9971 int fp_offset = 0;
9972 int saved_pretend_args = 0;
9973 int saved_regs = 0;
9974 unsigned HOST_WIDE_INT args_to_push;
9975 arm_stack_offsets *offsets;
9977 func_type = arm_current_func_type ();
9979 /* Naked functions don't have prologues. */
9980 if (IS_NAKED (func_type))
9981 return;
9983 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
9984 args_to_push = current_function_pretend_args_size;
9986 /* Compute which registers we will have to save onto the stack. */
9987 live_regs_mask = arm_compute_save_reg_mask ();
9989 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
9991 if (frame_pointer_needed)
9993 if (IS_INTERRUPT (func_type))
9995 /* Interrupt functions must not corrupt any registers.
9996 Creating a frame pointer, however, corrupts the IP
9997 register, so we must push it first. */
9998 insn = emit_multi_reg_push (1 << IP_REGNUM);
10000 /* Do not set RTX_FRAME_RELATED_P on this insn.
10001 The dwarf stack unwinding code only wants to see one
10002 stack decrement per function, and this is not it. If
10003 this instruction is labeled as being part of the frame
10004 creation sequence then dwarf2out_frame_debug_expr will
10005 abort when it encounters the assignment of IP to FP
10006 later on, since the use of SP here establishes SP as
10007 the CFA register and not IP.
10009 Anyway this instruction is not really part of the stack
10010 frame creation although it is part of the prologue. */
10012 else if (IS_NESTED (func_type))
10014 /* The static chain register is the same as the IP register
10015 used as a scratch register during stack frame creation.
10016 To get around this, we need to find somewhere to store IP
10017 whilst the frame is being created. We try the following
10018 places in order:
10020 1. The last argument register.
10021 2. A slot on the stack above the frame. (This only
10022 works if the function is not a varargs function).
10023 3. Register r3, after pushing the argument registers
10024 onto the stack.
10026 Note - we only need to tell the dwarf2 backend about the SP
10027 adjustment in the second variant; the static chain register
10028 doesn't need to be unwound, as it doesn't contain a value
10029 inherited from the caller. */
10031 if (regs_ever_live[3] == 0)
10033 insn = gen_rtx_REG (SImode, 3);
10034 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10035 insn = emit_insn (insn);
10037 else if (args_to_push == 0)
10039 rtx dwarf;
10040 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10041 insn = gen_rtx_MEM (SImode, insn);
10042 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10043 insn = emit_insn (insn);
10045 fp_offset = 4;
10047 /* Just tell the dwarf backend that we adjusted SP. */
10048 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10049 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10050 GEN_INT (-fp_offset)));
10051 RTX_FRAME_RELATED_P (insn) = 1;
10052 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10053 dwarf, REG_NOTES (insn));
10055 else
10057 /* Store the args on the stack. */
10058 if (cfun->machine->uses_anonymous_args)
10059 insn = emit_multi_reg_push
10060 ((0xf0 >> (args_to_push / 4)) & 0xf);
10061 else
10062 insn = emit_insn
10063 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10064 GEN_INT (- args_to_push)));
10066 RTX_FRAME_RELATED_P (insn) = 1;
10068 saved_pretend_args = 1;
10069 fp_offset = args_to_push;
10070 args_to_push = 0;
10072 /* Now reuse r3 to preserve IP. */
10073 insn = gen_rtx_REG (SImode, 3);
10074 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10075 (void) emit_insn (insn);
10079 if (fp_offset)
10081 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10082 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10084 else
10085 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10087 insn = emit_insn (insn);
10088 RTX_FRAME_RELATED_P (insn) = 1;
10091 if (args_to_push)
10093 /* Push the argument registers, or reserve space for them. */
10094 if (cfun->machine->uses_anonymous_args)
10095 insn = emit_multi_reg_push
10096 ((0xf0 >> (args_to_push / 4)) & 0xf);
10097 else
10098 insn = emit_insn
10099 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10100 GEN_INT (- args_to_push)));
10101 RTX_FRAME_RELATED_P (insn) = 1;
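/* Editor's worked example (illustrative): with 8 bytes of pretend
   args, (0xf0 >> (8 / 4)) & 0xf == 0xc, i.e. {r2, r3}, so the two
   anonymous argument registers are pushed just below the frame.  */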
10104 /* If this is an interrupt service routine, and the link register
10105 is going to be pushed, and we are not creating a stack frame,
10106 (which would involve an extra push of IP and a pop in the epilogue)
10107 subtracting four from LR now will mean that the function return
10108 can be done with a single instruction. */
10109 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10110 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10111 && ! frame_pointer_needed)
10112 emit_insn (gen_rtx_SET (SImode,
10113 gen_rtx_REG (SImode, LR_REGNUM),
10114 gen_rtx_PLUS (SImode,
10115 gen_rtx_REG (SImode, LR_REGNUM),
10116 GEN_INT (-4))));
10118 if (live_regs_mask)
10120 insn = emit_multi_reg_push (live_regs_mask);
10121 saved_regs += bit_count (live_regs_mask) * 4;
10122 RTX_FRAME_RELATED_P (insn) = 1;
10125 if (TARGET_IWMMXT)
10126 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10127 if (regs_ever_live[reg] && ! call_used_regs [reg])
10129 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10130 insn = gen_rtx_MEM (V2SImode, insn);
10131 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10132 gen_rtx_REG (V2SImode, reg)));
10133 RTX_FRAME_RELATED_P (insn) = 1;
10134 saved_regs += 8;
10137 if (! IS_VOLATILE (func_type))
10139 int start_reg;
10141 /* Save any floating point call-saved registers used by this
10142 function. */
10143 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10145 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10146 if (regs_ever_live[reg] && !call_used_regs[reg])
10148 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10149 insn = gen_rtx_MEM (XFmode, insn);
10150 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10151 gen_rtx_REG (XFmode, reg)));
10152 RTX_FRAME_RELATED_P (insn) = 1;
10153 saved_regs += 12;
10156 else
10158 start_reg = LAST_FPA_REGNUM;
10160 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10162 if (regs_ever_live[reg] && !call_used_regs[reg])
10164 if (start_reg - reg == 3)
10166 insn = emit_sfm (reg, 4);
10167 RTX_FRAME_RELATED_P (insn) = 1;
10168 saved_regs += 48;
10169 start_reg = reg - 1;
10172 else
10174 if (start_reg != reg)
10176 insn = emit_sfm (reg + 1, start_reg - reg);
10177 RTX_FRAME_RELATED_P (insn) = 1;
10178 saved_regs += (start_reg - reg) * 12;
10180 start_reg = reg - 1;
10184 if (start_reg != reg)
10186 insn = emit_sfm (reg + 1, start_reg - reg);
10187 saved_regs += (start_reg - reg) * 12;
10188 RTX_FRAME_RELATED_P (insn) = 1;
10191 if (TARGET_HARD_FLOAT && TARGET_VFP)
10193 start_reg = FIRST_VFP_REGNUM;
10195 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10197 if ((!regs_ever_live[reg] || call_used_regs[reg])
10198 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10200 if (start_reg != reg)
10201 saved_regs += vfp_emit_fstmx (start_reg,
10202 (reg - start_reg) / 2);
10203 start_reg = reg + 2;
10206 if (start_reg != reg)
10207 saved_regs += vfp_emit_fstmx (start_reg,
10208 (reg - start_reg) / 2);
10212 if (frame_pointer_needed)
10214 /* Create the new frame pointer. */
10215 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10216 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10217 RTX_FRAME_RELATED_P (insn) = 1;
10219 if (IS_NESTED (func_type))
10221 /* Recover the static chain register. */
10222 if (regs_ever_live [3] == 0
10223 || saved_pretend_args)
10224 insn = gen_rtx_REG (SImode, 3);
10225 else /* if (current_function_pretend_args_size == 0) */
10227 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10228 GEN_INT (4));
10229 insn = gen_rtx_MEM (SImode, insn);
10232 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10233 /* Add a USE to stop propagate_one_insn() from barfing. */
10234 emit_insn (gen_prologue_use (ip_rtx));
10238 offsets = arm_get_frame_offsets ();
10239 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10241 /* This add can produce multiple insns for a large constant, so we
10242 need to get tricky. */
10243 rtx last = get_last_insn ();
10245 amount = GEN_INT (offsets->saved_args + saved_regs
10246 - offsets->outgoing_args);
10248 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10249 amount));
10250 do
10251 {
10252 last = last ? NEXT_INSN (last) : get_insns ();
10253 RTX_FRAME_RELATED_P (last) = 1;
10254 }
10255 while (last != insn);
10257 /* If the frame pointer is needed, emit a special barrier that
10258 will prevent the scheduler from moving stores to the frame
10259 before the stack adjustment. */
10260 if (frame_pointer_needed)
10261 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10262 hard_frame_pointer_rtx));
10266 if (flag_pic)
10267 arm_load_pic_register (INVALID_REGNUM);
10269 /* If we are profiling, make sure no instructions are scheduled before
10270 the call to mcount. Similarly if the user has requested no
10271 scheduling in the prolog. */
10272 if (current_function_profile || TARGET_NO_SCHED_PRO)
10273 emit_insn (gen_blockage ());
10275 /* If the link register is being kept alive, with the return address in it,
10276 then make sure that it does not get reused by the ce2 pass. */
10277 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10279 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10280 cfun->machine->lr_save_eliminated = 1;
10284 /* If CODE is 'd', then X is a condition operand and the instruction
10285 should only be executed if the condition is true.
10286 If CODE is 'D', then X is a condition operand and the instruction
10287 should only be executed if the condition is false: however, if the mode
10288 of the comparison is CCFPEmode, then always execute the instruction -- we
10289 do this because in these circumstances !GE does not necessarily imply LT;
10290 in these cases the instruction pattern will take care to make sure that
10291 an instruction containing %d will follow, thereby undoing the effects of
10292 doing this instruction unconditionally.
10293 If CODE is 'N' then X is a floating point operand that must be negated
10294 before output.
10295 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10296 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
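/* A few illustrative examples of the codes handled below: with
   X = (const_int 10), code 'B' prints the bitwise inverse, -11;
   with X = (reg:DI 4), code 'M' prints the multi-register range
   "{r4-r5}"; and code 'd' applied to an (eq ...) comparison prints
   "eq", so a template fragment such as "mov%d3\t%0, %1" would emit
   "moveq r0, r1" (register numbers here are illustrative).  */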
10297 void
10298 arm_print_operand (FILE *stream, rtx x, int code)
10300 switch (code)
10302 case '@':
10303 fputs (ASM_COMMENT_START, stream);
10304 return;
10306 case '_':
10307 fputs (user_label_prefix, stream);
10308 return;
10310 case '|':
10311 fputs (REGISTER_PREFIX, stream);
10312 return;
10314 case '?':
10315 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10317 if (TARGET_THUMB)
10319 output_operand_lossage ("predicated Thumb instruction");
10320 break;
10322 if (current_insn_predicate != NULL)
10324 output_operand_lossage
10325 ("predicated instruction in conditional sequence");
10326 break;
10329 fputs (arm_condition_codes[arm_current_cc], stream);
10331 else if (current_insn_predicate)
10333 enum arm_cond_code code;
10335 if (TARGET_THUMB)
10337 output_operand_lossage ("predicated Thumb instruction");
10338 break;
10341 code = get_arm_condition_code (current_insn_predicate);
10342 fputs (arm_condition_codes[code], stream);
10344 return;
10346 case 'N':
10348 REAL_VALUE_TYPE r;
10349 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10350 r = REAL_VALUE_NEGATE (r);
10351 fprintf (stream, "%s", fp_const_from_val (&r));
10353 return;
10355 case 'B':
10356 if (GET_CODE (x) == CONST_INT)
10358 HOST_WIDE_INT val;
10359 val = ARM_SIGN_EXTEND (~INTVAL (x));
10360 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10362 else
10364 putc ('~', stream);
10365 output_addr_const (stream, x);
10367 return;
10369 case 'i':
10370 fprintf (stream, "%s", arithmetic_instr (x, 1));
10371 return;
10373 /* Truncate Cirrus shift counts. */
10374 case 's':
10375 if (GET_CODE (x) == CONST_INT)
10377 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10378 return;
10380 arm_print_operand (stream, x, 0);
10381 return;
10383 case 'I':
10384 fprintf (stream, "%s", arithmetic_instr (x, 0));
10385 return;
10387 case 'S':
10389 HOST_WIDE_INT val;
10390 const char * shift = shift_op (x, &val);
10392 if (shift)
10394 fprintf (stream, ", %s ", shift);
10395 if (val == -1)
10396 arm_print_operand (stream, XEXP (x, 1), 0);
10397 else
10398 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10401 return;
10403 /* An explanation of the 'Q', 'R' and 'H' register operands:
10405 In a pair of registers containing a DI or DF value the 'Q'
10406 operand returns the register number of the register containing
10407 the least significant part of the value. The 'R' operand returns
10408 the register number of the register containing the most
10409 significant part of the value.
10411 The 'H' operand returns the higher of the two register numbers.
10412 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10413 same as the 'Q' operand, since the most significant part of the
10414 value is held in the lower number register. The reverse is true
10415 on systems where WORDS_BIG_ENDIAN is false.
10417 The purpose of these operands is to distinguish between cases
10418 where the endian-ness of the values is important (for example
10419 when they are added together), and cases where the endian-ness
10420 is irrelevant, but the order of register operations is important.
10421 For example when loading a value from memory into a register
10422 pair, the endian-ness does not matter. Provided that the value
10423 from the lower memory address is put into the lower numbered
10424 register, and the value from the higher address is put into the
10425 higher numbered register, the load will work regardless of whether
10426 the value being loaded is big-wordian or little-wordian. The
10427 order of the two register loads can matter however, if the address
10428 of the memory location is actually held in one of the registers
10429 being overwritten by the load. */
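/* Concretely: for (reg:DI 0) on a target where WORDS_BIG_ENDIAN is
   false, 'Q' prints r0 (the least significant word), 'R' prints r1
   (the most significant word), and 'H' prints r1, the higher-numbered
   register of the pair, whatever the endianness.  */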
10430 case 'Q':
10431 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10433 output_operand_lossage ("invalid operand for code '%c'", code);
10434 return;
10437 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10438 return;
10440 case 'R':
10441 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10443 output_operand_lossage ("invalid operand for code '%c'", code);
10444 return;
10447 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10448 return;
10450 case 'H':
10451 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10453 output_operand_lossage ("invalid operand for code '%c'", code);
10454 return;
10457 asm_fprintf (stream, "%r", REGNO (x) + 1);
10458 return;
10460 case 'm':
10461 asm_fprintf (stream, "%r",
10462 GET_CODE (XEXP (x, 0)) == REG
10463 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10464 return;
10466 case 'M':
10467 asm_fprintf (stream, "{%r-%r}",
10468 REGNO (x),
10469 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10470 return;
10472 case 'd':
10473 /* CONST_TRUE_RTX means always -- that's the default. */
10474 if (x == const_true_rtx)
10475 return;
10477 if (!COMPARISON_P (x))
10479 output_operand_lossage ("invalid operand for code '%c'", code);
10480 return;
10483 fputs (arm_condition_codes[get_arm_condition_code (x)],
10484 stream);
10485 return;
10487 case 'D':
10488 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10489 want to do that. */
10490 if (x == const_true_rtx)
10492 output_operand_lossage ("instruction never executed");
10493 return;
10495 if (!COMPARISON_P (x))
10497 output_operand_lossage ("invalid operand for code '%c'", code);
10498 return;
10501 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10502 (get_arm_condition_code (x))],
10503 stream);
10504 return;
10506 /* Cirrus registers can be accessed in a variety of ways:
10507 single floating point (f)
10508 double floating point (d)
10509 32-bit integer (fx)
10510 64-bit integer (dx). */
10511 case 'W': /* Cirrus register in F mode. */
10512 case 'X': /* Cirrus register in D mode. */
10513 case 'Y': /* Cirrus register in FX mode. */
10514 case 'Z': /* Cirrus register in DX mode. */
10515 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10516 abort ();
10518 fprintf (stream, "mv%s%s",
10519 code == 'W' ? "f"
10520 : code == 'X' ? "d"
10521 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10523 return;
10525 /* Print a Cirrus register in the mode specified by the register's mode. */
10526 case 'V':
10528 int mode = GET_MODE (x);
10530 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10532 output_operand_lossage ("invalid operand for code '%c'", code);
10533 return;
10536 fprintf (stream, "mv%s%s",
10537 mode == DFmode ? "d"
10538 : mode == SImode ? "fx"
10539 : mode == DImode ? "dx"
10540 : "f", reg_names[REGNO (x)] + 2);
10542 return;
10545 case 'U':
10546 if (GET_CODE (x) != REG
10547 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10548 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10549 /* Bad value for wCG register number. */
10551 output_operand_lossage ("invalid operand for code '%c'", code);
10552 return;
10555 else
10556 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10557 return;
10559 /* Print an iWMMXt control register name. */
10560 case 'w':
10561 if (GET_CODE (x) != CONST_INT
10562 || INTVAL (x) < 0
10563 || INTVAL (x) >= 16)
10564 /* Bad value for wC register number. */
10566 output_operand_lossage ("invalid operand for code '%c'", code);
10567 return;
10570 else
10572 static const char * wc_reg_names [16] =
10574 "wCID", "wCon", "wCSSF", "wCASF",
10575 "wC4", "wC5", "wC6", "wC7",
10576 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10577 "wC12", "wC13", "wC14", "wC15"
10580 fprintf (stream, "%s", wc_reg_names [INTVAL (x)]);
10582 return;
10584 /* Print a VFP double precision register name. */
10585 case 'P':
10587 int mode = GET_MODE (x);
10588 int num;
10590 if (mode != DImode && mode != DFmode)
10592 output_operand_lossage ("invalid operand for code '%c'", code);
10593 return;
10596 if (GET_CODE (x) != REG
10597 || !IS_VFP_REGNUM (REGNO (x)))
10599 output_operand_lossage ("invalid operand for code '%c'", code);
10600 return;
10603 num = REGNO (x) - FIRST_VFP_REGNUM;
10604 if (num & 1)
10606 output_operand_lossage ("invalid operand for code '%c'", code);
10607 return;
10610 fprintf (stream, "d%d", num >> 1);
10612 return;
10614 default:
10615 if (x == 0)
10617 output_operand_lossage ("missing operand");
10618 return;
10621 if (GET_CODE (x) == REG)
10622 asm_fprintf (stream, "%r", REGNO (x));
10623 else if (GET_CODE (x) == MEM)
10625 output_memory_reference_mode = GET_MODE (x);
10626 output_address (XEXP (x, 0));
10628 else if (GET_CODE (x) == CONST_DOUBLE)
10629 fprintf (stream, "#%s", fp_immediate_constant (x));
10630 else if (GET_CODE (x) == NEG)
10631 abort (); /* This should never happen now. */
10632 else
10634 fputc ('#', stream);
10635 output_addr_const (stream, x);
10640 #ifndef AOF_ASSEMBLER
10641 /* Target hook for assembling integer objects. The ARM version needs to
10642 handle word-sized values specially. */
10643 static bool
10644 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10646 if (size == UNITS_PER_WORD && aligned_p)
10648 fputs ("\t.word\t", asm_out_file);
10649 output_addr_const (asm_out_file, x);
10651 /* Mark symbols as position independent. We only do this in the
10652 .text segment, not in the .data segment. */
10653 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10654 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10656 if (GET_CODE (x) == SYMBOL_REF
10657 && (CONSTANT_POOL_ADDRESS_P (x)
10658 || SYMBOL_REF_LOCAL_P (x)))
10659 fputs ("(GOTOFF)", asm_out_file);
10660 else if (GET_CODE (x) == LABEL_REF)
10661 fputs ("(GOTOFF)", asm_out_file);
10662 else
10663 fputs ("(GOT)", asm_out_file);
10665 fputc ('\n', asm_out_file);
10666 return true;
10669 if (arm_vector_mode_supported_p (GET_MODE (x)))
10671 int i, units;
10673 if (GET_CODE (x) != CONST_VECTOR)
10674 abort ();
10676 units = CONST_VECTOR_NUNITS (x);
10678 switch (GET_MODE (x))
10680 case V2SImode: size = 4; break;
10681 case V4HImode: size = 2; break;
10682 case V8QImode: size = 1; break;
10683 default:
10684 abort ();
10687 for (i = 0; i < units; i++)
10689 rtx elt;
10691 elt = CONST_VECTOR_ELT (x, i);
10692 assemble_integer
10693 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10696 return true;
10699 return default_assemble_integer (x, size, aligned_p);
10701 #endif
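/* To illustrate the PIC marking performed by arm_assemble_integer:
   when emitting the constant table for PIC code, a reference to a
   label, a constant-pool entry or a local symbol comes out as, e.g.,

	.word	.LC0(GOTOFF)

   whereas a reference to a global symbol comes out as

	.word	foo(GOT)

   (the symbol names are illustrative).  */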
10703 /* A finite state machine takes care of noticing whether or not instructions
10704 can be conditionally executed, and thus decreases execution time and code
10705 size by deleting branch instructions. The fsm is controlled by
10706 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10708 /* The states of the fsm controlling condition codes are:
10709 0: normal, do nothing special
10710 1: make ASM_OUTPUT_OPCODE not output this instruction
10711 2: make ASM_OUTPUT_OPCODE not output this instruction
10712 3: make instructions conditional
10713 4: make instructions conditional
10715 State transitions (state->state by whom under condition):
10716 0 -> 1 final_prescan_insn if the `target' is a label
10717 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10718 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10719 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10720 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10721 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10722 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10723 (the target insn is arm_target_insn).
10725 If the jump clobbers the conditions then we use states 2 and 4.
10727 A similar thing can be done with conditional return insns.
10729 XXX In case the `target' is an unconditional branch, this conditionalising
10730 of the instructions always reduces code size, but not always execution
10731 time. But then, I want to reduce the code size to somewhere near what
10732 /bin/cc produces. */
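/* For example (registers and labels illustrative), the fsm allows

	cmp	r0, #0
	bne	.L1
	add	r1, r1, #1
   .L1:

   to be emitted instead as

	cmp	r0, #0
	addeq	r1, r1, #1

   removing the branch and, if the label had no other uses, the label
   as well.  */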
10734 /* Returns the index of the ARM condition code string in
10735 `arm_condition_codes'. COMPARISON should be an rtx like
10736 `(eq (...) (...))'. */
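/* For instance, (eq (reg:CC CC_REGNUM) (const_int 0)) yields ARM_EQ,
   while in CC_SWPmode -- where the comparison operands were swapped --
   a GT comparison yields ARM_LT.  */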
10737 static enum arm_cond_code
10738 get_arm_condition_code (rtx comparison)
10740 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10741 int code;
10742 enum rtx_code comp_code = GET_CODE (comparison);
10744 if (GET_MODE_CLASS (mode) != MODE_CC)
10745 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10746 XEXP (comparison, 1));
10748 switch (mode)
10750 case CC_DNEmode: code = ARM_NE; goto dominance;
10751 case CC_DEQmode: code = ARM_EQ; goto dominance;
10752 case CC_DGEmode: code = ARM_GE; goto dominance;
10753 case CC_DGTmode: code = ARM_GT; goto dominance;
10754 case CC_DLEmode: code = ARM_LE; goto dominance;
10755 case CC_DLTmode: code = ARM_LT; goto dominance;
10756 case CC_DGEUmode: code = ARM_CS; goto dominance;
10757 case CC_DGTUmode: code = ARM_HI; goto dominance;
10758 case CC_DLEUmode: code = ARM_LS; goto dominance;
10759 case CC_DLTUmode: code = ARM_CC;
10761 dominance:
10762 if (comp_code != EQ && comp_code != NE)
10763 abort ();
10765 if (comp_code == EQ)
10766 return ARM_INVERSE_CONDITION_CODE (code);
10767 return code;
10769 case CC_NOOVmode:
10770 switch (comp_code)
10772 case NE: return ARM_NE;
10773 case EQ: return ARM_EQ;
10774 case GE: return ARM_PL;
10775 case LT: return ARM_MI;
10776 default: abort ();
10779 case CC_Zmode:
10780 switch (comp_code)
10782 case NE: return ARM_NE;
10783 case EQ: return ARM_EQ;
10784 default: abort ();
10787 case CC_Nmode:
10788 switch (comp_code)
10790 case NE: return ARM_MI;
10791 case EQ: return ARM_PL;
10792 default: abort ();
10795 case CCFPEmode:
10796 case CCFPmode:
10797 /* These encodings assume that AC=1 in the FPA system control
10798 byte. This allows us to handle all cases except UNEQ and
10799 LTGT. */
10800 switch (comp_code)
10802 case GE: return ARM_GE;
10803 case GT: return ARM_GT;
10804 case LE: return ARM_LS;
10805 case LT: return ARM_MI;
10806 case NE: return ARM_NE;
10807 case EQ: return ARM_EQ;
10808 case ORDERED: return ARM_VC;
10809 case UNORDERED: return ARM_VS;
10810 case UNLT: return ARM_LT;
10811 case UNLE: return ARM_LE;
10812 case UNGT: return ARM_HI;
10813 case UNGE: return ARM_PL;
10814 /* UNEQ and LTGT do not have a representation. */
10815 case UNEQ: /* Fall through. */
10816 case LTGT: /* Fall through. */
10817 default: abort ();
10820 case CC_SWPmode:
10821 switch (comp_code)
10823 case NE: return ARM_NE;
10824 case EQ: return ARM_EQ;
10825 case GE: return ARM_LE;
10826 case GT: return ARM_LT;
10827 case LE: return ARM_GE;
10828 case LT: return ARM_GT;
10829 case GEU: return ARM_LS;
10830 case GTU: return ARM_CC;
10831 case LEU: return ARM_CS;
10832 case LTU: return ARM_HI;
10833 default: abort ();
10836 case CC_Cmode:
10837 switch (comp_code)
10839 case LTU: return ARM_CS;
10840 case GEU: return ARM_CC;
10841 default: abort ();
10844 case CCmode:
10845 switch (comp_code)
10847 case NE: return ARM_NE;
10848 case EQ: return ARM_EQ;
10849 case GE: return ARM_GE;
10850 case GT: return ARM_GT;
10851 case LE: return ARM_LE;
10852 case LT: return ARM_LT;
10853 case GEU: return ARM_CS;
10854 case GTU: return ARM_HI;
10855 case LEU: return ARM_LS;
10856 case LTU: return ARM_CC;
10857 default: abort ();
10860 default: abort ();
10863 abort ();
10866 void
10867 arm_final_prescan_insn (rtx insn)
10869 /* BODY will hold the body of INSN. */
10870 rtx body = PATTERN (insn);
10872 /* This will be 1 if trying to repeat the trick, and things need to be
10873 reversed if it appears to fail. */
10874 int reverse = 0;
10876 /* JUMP_CLOBBERS will be nonzero if the conditions are clobbered when
10877 a branch is taken, even if the rtl suggests otherwise. It also
10878 means that we have to grub around within the jump expression to find
10879 out what the conditions are when the jump isn't taken. */
10880 int jump_clobbers = 0;
10882 /* If we start with a return insn, we only succeed if we find another one. */
10883 int seeking_return = 0;
10885 /* START_INSN will hold the insn from where we start looking. This is the
10886 first insn after the following code_label if REVERSE is true. */
10887 rtx start_insn = insn;
10889 /* If in state 4, check if the target branch is reached, in order to
10890 change back to state 0. */
10891 if (arm_ccfsm_state == 4)
10893 if (insn == arm_target_insn)
10895 arm_target_insn = NULL;
10896 arm_ccfsm_state = 0;
10898 return;
10901 /* If in state 3, it is possible to repeat the trick, if this insn is an
10902 unconditional branch to a label, and immediately following this branch
10903 is the previous target label which is only used once, and the label this
10904 branch jumps to is not too far off. */
10905 if (arm_ccfsm_state == 3)
10907 if (simplejump_p (insn))
10909 start_insn = next_nonnote_insn (start_insn);
10910 if (GET_CODE (start_insn) == BARRIER)
10912 /* XXX Isn't this always a barrier? */
10913 start_insn = next_nonnote_insn (start_insn);
10915 if (GET_CODE (start_insn) == CODE_LABEL
10916 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10917 && LABEL_NUSES (start_insn) == 1)
10918 reverse = TRUE;
10919 else
10920 return;
10922 else if (GET_CODE (body) == RETURN)
10924 start_insn = next_nonnote_insn (start_insn);
10925 if (GET_CODE (start_insn) == BARRIER)
10926 start_insn = next_nonnote_insn (start_insn);
10927 if (GET_CODE (start_insn) == CODE_LABEL
10928 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10929 && LABEL_NUSES (start_insn) == 1)
10931 reverse = TRUE;
10932 seeking_return = 1;
10934 else
10935 return;
10937 else
10938 return;
10941 if (arm_ccfsm_state != 0 && !reverse)
10942 abort ();
10943 if (GET_CODE (insn) != JUMP_INSN)
10944 return;
10946 /* This jump might be paralleled with a clobber of the condition codes;
10947 the jump should always come first. */
10948 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
10949 body = XVECEXP (body, 0, 0);
10951 if (reverse
10952 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
10953 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
10955 int insns_skipped;
10956 int fail = FALSE, succeed = FALSE;
10957 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
10958 int then_not_else = TRUE;
10959 rtx this_insn = start_insn, label = 0;
10961 /* If the jump cannot be done with one instruction, we cannot
10962 conditionally execute the instruction in the inverse case. */
10963 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
10965 jump_clobbers = 1;
10966 return;
10969 /* Register the insn jumped to. */
10970 if (reverse)
10972 if (!seeking_return)
10973 label = XEXP (SET_SRC (body), 0);
10975 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
10976 label = XEXP (XEXP (SET_SRC (body), 1), 0);
10977 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
10979 label = XEXP (XEXP (SET_SRC (body), 2), 0);
10980 then_not_else = FALSE;
10982 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
10983 seeking_return = 1;
10984 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
10986 seeking_return = 1;
10987 then_not_else = FALSE;
10989 else
10990 abort ();
10992 /* See how many insns this branch skips, and what kind of insns. If all
10993 insns are okay, and the label or unconditional branch to the same
10994 label is not too far away, succeed. */
10995 for (insns_skipped = 0;
10996 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
10998 rtx scanbody;
11000 this_insn = next_nonnote_insn (this_insn);
11001 if (!this_insn)
11002 break;
11004 switch (GET_CODE (this_insn))
11006 case CODE_LABEL:
11007 /* Succeed if it is the target label, otherwise fail since
11008 control falls in from somewhere else. */
11009 if (this_insn == label)
11011 if (jump_clobbers)
11013 arm_ccfsm_state = 2;
11014 this_insn = next_nonnote_insn (this_insn);
11016 else
11017 arm_ccfsm_state = 1;
11018 succeed = TRUE;
11020 else
11021 fail = TRUE;
11022 break;
11024 case BARRIER:
11025 /* Succeed if the following insn is the target label.
11026 Otherwise fail.
11027 If return insns are used then the last insn in a function
11028 will be a barrier. */
11029 this_insn = next_nonnote_insn (this_insn);
11030 if (this_insn && this_insn == label)
11032 if (jump_clobbers)
11034 arm_ccfsm_state = 2;
11035 this_insn = next_nonnote_insn (this_insn);
11037 else
11038 arm_ccfsm_state = 1;
11039 succeed = TRUE;
11041 else
11042 fail = TRUE;
11043 break;
11045 case CALL_INSN:
11046 /* The AAPCS says that conditional calls should not be
11047 used since they make interworking inefficient (the
11048 linker can't transform BL<cond> into BLX). That's
11049 only a problem if the machine has BLX. */
11050 if (arm_arch5)
11052 fail = TRUE;
11053 break;
11056 /* Succeed if the following insn is the target label, or
11057 if the following two insns are a barrier and the
11058 target label. */
11059 this_insn = next_nonnote_insn (this_insn);
11060 if (this_insn && GET_CODE (this_insn) == BARRIER)
11061 this_insn = next_nonnote_insn (this_insn);
11063 if (this_insn && this_insn == label
11064 && insns_skipped < max_insns_skipped)
11066 if (jump_clobbers)
11068 arm_ccfsm_state = 2;
11069 this_insn = next_nonnote_insn (this_insn);
11071 else
11072 arm_ccfsm_state = 1;
11073 succeed = TRUE;
11075 else
11076 fail = TRUE;
11077 break;
11079 case JUMP_INSN:
11080 /* If this is an unconditional branch to the same label, succeed.
11081 If it is to another label, do nothing. If it is conditional,
11082 fail. */
11083 /* XXX Probably, the tests for SET and the PC are
11084 unnecessary. */
11086 scanbody = PATTERN (this_insn);
11087 if (GET_CODE (scanbody) == SET
11088 && GET_CODE (SET_DEST (scanbody)) == PC)
11090 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11091 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11093 arm_ccfsm_state = 2;
11094 succeed = TRUE;
11096 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11097 fail = TRUE;
11099 /* Fail if a conditional return is undesirable (e.g. on a
11100 StrongARM), but still allow this if optimizing for size. */
11101 else if (GET_CODE (scanbody) == RETURN
11102 && !use_return_insn (TRUE, NULL)
11103 && !optimize_size)
11104 fail = TRUE;
11105 else if (GET_CODE (scanbody) == RETURN
11106 && seeking_return)
11108 arm_ccfsm_state = 2;
11109 succeed = TRUE;
11111 else if (GET_CODE (scanbody) == PARALLEL)
11113 switch (get_attr_conds (this_insn))
11115 case CONDS_NOCOND:
11116 break;
11117 default:
11118 fail = TRUE;
11119 break;
11122 else
11123 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11125 break;
11127 case INSN:
11128 /* Instructions using or affecting the condition codes make it
11129 fail. */
11130 scanbody = PATTERN (this_insn);
11131 if (!(GET_CODE (scanbody) == SET
11132 || GET_CODE (scanbody) == PARALLEL)
11133 || get_attr_conds (this_insn) != CONDS_NOCOND)
11134 fail = TRUE;
11136 /* A conditional Cirrus instruction must be followed by
11137 a non-Cirrus instruction. However, since we
11138 conditionalize instructions in this function, and
11139 since by the time we get here shorten_branches () has
11140 already been called and we can no longer insert
11141 instructions (nops), we simply refuse to
11142 conditionalize Cirrus instructions, to be safe.
11143 if (GET_CODE (scanbody) != USE
11144 && GET_CODE (scanbody) != CLOBBER
11145 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11146 fail = TRUE;
11147 break;
11149 default:
11150 break;
11153 if (succeed)
11155 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11156 arm_target_label = CODE_LABEL_NUMBER (label);
11157 else if (seeking_return || arm_ccfsm_state == 2)
11159 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11161 this_insn = next_nonnote_insn (this_insn);
11162 if (this_insn && (GET_CODE (this_insn) == BARRIER
11163 || GET_CODE (this_insn) == CODE_LABEL))
11164 abort ();
11166 if (!this_insn)
11168 /* Oh, dear! We ran off the end... give up. */
11169 recog (PATTERN (insn), insn, NULL);
11170 arm_ccfsm_state = 0;
11171 arm_target_insn = NULL;
11172 return;
11174 arm_target_insn = this_insn;
11176 else
11177 abort ();
11178 if (jump_clobbers)
11180 if (reverse)
11181 abort ();
11182 arm_current_cc =
11183 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11184 0), 0), 1));
11185 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11186 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11187 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11188 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11190 else
11192 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11193 what it was. */
11194 if (!reverse)
11195 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11196 0));
11199 if (reverse || then_not_else)
11200 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11203 /* Restore recog_data (getting the attributes of other insns can
11204 destroy this array, but final.c assumes that it remains intact
11205 across this call; since the insn has been recognized already we
11206 call recog directly). */
11207 recog (PATTERN (insn), insn, NULL);
11211 /* Returns true if REGNO is a valid register
11212 for holding a quantity of type MODE. */
11213 int
11214 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11216 if (GET_MODE_CLASS (mode) == MODE_CC)
11217 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11219 if (TARGET_THUMB)
11220 /* For the Thumb we only allow values bigger than SImode in
11221 registers 0 - 6, so that there is always a second low
11222 register available to hold the upper part of the value.
11223 We probably ought to ensure that the register is the
11224 start of an even-numbered register pair. */
11225 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11227 if (IS_CIRRUS_REGNUM (regno))
11228 /* We have outlawed SI values in Cirrus registers because they
11229 reside in the lower 32 bits, but SF values reside in the
11230 upper 32 bits. This causes gcc all sorts of grief. We can't
11231 even split the registers into pairs because Cirrus SI values
11232 get sign-extended to 64 bits -- aldyh. */
11233 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11235 if (IS_VFP_REGNUM (regno))
11237 if (mode == SFmode || mode == SImode)
11238 return TRUE;
11240 /* DFmode values are only valid in even register pairs. */
11241 if (mode == DFmode)
11242 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11243 return FALSE;
11246 if (IS_IWMMXT_GR_REGNUM (regno))
11247 return mode == SImode;
11249 if (IS_IWMMXT_REGNUM (regno))
11250 return VALID_IWMMXT_REG_MODE (mode);
11252 /* We allow any value to be stored in the general registers.
11253 Restrict doubleword quantities to even register pairs so that we can
11254 use ldrd. */
11255 if (regno <= LAST_ARM_REGNUM)
11256 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11258 if ( regno == FRAME_POINTER_REGNUM
11259 || regno == ARG_POINTER_REGNUM)
11260 /* We only allow integers in the fake hard registers. */
11261 return GET_MODE_CLASS (mode) == MODE_INT;
11263 /* The only registers left are the FPA registers
11264 which we only allow to hold FP values. */
11265 return GET_MODE_CLASS (mode) == MODE_FLOAT
11266 && regno >= FIRST_FPA_REGNUM
11267 && regno <= LAST_FPA_REGNUM;
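/* Some illustrative consequences of the rules above: SImode is valid
   in any core register; DFmode is accepted in a VFP register pair only
   when the first register number is even; and with TARGET_LDRD a
   DImode value must start in an even-numbered core register so that
   ldrd/strd can be used.  */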
11270 int
11271 arm_regno_class (int regno)
11273 if (TARGET_THUMB)
11275 if (regno == STACK_POINTER_REGNUM)
11276 return STACK_REG;
11277 if (regno == CC_REGNUM)
11278 return CC_REG;
11279 if (regno < 8)
11280 return LO_REGS;
11281 return HI_REGS;
11284 if ( regno <= LAST_ARM_REGNUM
11285 || regno == FRAME_POINTER_REGNUM
11286 || regno == ARG_POINTER_REGNUM)
11287 return GENERAL_REGS;
11289 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11290 return NO_REGS;
11292 if (IS_CIRRUS_REGNUM (regno))
11293 return CIRRUS_REGS;
11295 if (IS_VFP_REGNUM (regno))
11296 return VFP_REGS;
11298 if (IS_IWMMXT_REGNUM (regno))
11299 return IWMMXT_REGS;
11301 if (IS_IWMMXT_GR_REGNUM (regno))
11302 return IWMMXT_GR_REGS;
11304 return FPA_REGS;
11307 /* Handle a special case when computing the offset
11308 of an argument from the frame pointer. */
11309 int
11310 arm_debugger_arg_offset (int value, rtx addr)
11312 rtx insn;
11314 /* We are only interested if dbxout_parms() failed to compute the offset. */
11315 if (value != 0)
11316 return 0;
11318 /* We can only cope with the case where the address is held in a register. */
11319 if (GET_CODE (addr) != REG)
11320 return 0;
11322 /* If we are using the frame pointer to point at the argument, then
11323 an offset of 0 is correct. */
11324 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11325 return 0;
11327 /* If we are using the stack pointer to point at the
11328 argument, then an offset of 0 is correct. */
11329 if ((TARGET_THUMB || !frame_pointer_needed)
11330 && REGNO (addr) == SP_REGNUM)
11331 return 0;
11333 /* Oh dear. The argument is pointed to by a register rather
11334 than being held in a register, or being stored at a known
11335 offset from the frame pointer. Since GDB only understands
11336 those two kinds of argument we must translate the address
11337 held in the register into an offset from the frame pointer.
11338 We do this by searching through the insns for the function
11339 looking to see where this register gets its value. If the
11340 register is initialized from the frame pointer plus an offset
11341 then we are in luck and we can continue, otherwise we give up.
11343 This code is exercised by producing debugging information
11344 for a function with arguments like this:
11346 double func (double a, double b, int c, double d) {return d;}
11348 Without this code the stab for parameter 'd' will be set to
11349 an offset of 0 from the frame pointer, rather than 8. */
11351 /* The if() statement says:
11353 If the insn is a normal instruction
11354 and if the insn is setting the value in a register
11355 and if the register being set is the register holding the address of the argument
11356 and if the address is computed by an addition
11357 that involves adding to a register
11358 which is the frame pointer
11359 a constant integer
11361 then... */
11363 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11365 if ( GET_CODE (insn) == INSN
11366 && GET_CODE (PATTERN (insn)) == SET
11367 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11368 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11369 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11370 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11371 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11374 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11376 break;
11380 if (value == 0)
11382 debug_rtx (addr);
11383 warning ("unable to compute real location of stacked parameter");
11384 value = 8; /* XXX magic hack */
11387 return value;
11390 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11391 do \
11393 if ((MASK) & insn_flags) \
11394 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11395 BUILT_IN_MD, NULL, NULL_TREE); \
11397 while (0)
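/* Illustratively, a call such as

	def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero",
		      di_ftype_void, ARM_BUILTIN_WZERO);

   (made below) registers the builtin with the language front end only
   when the selected architecture's insn_flags include FL_IWMMXT.  */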
11399 struct builtin_description
11401 const unsigned int mask;
11402 const enum insn_code icode;
11403 const char * const name;
11404 const enum arm_builtins code;
11405 const enum rtx_code comparison;
11406 const unsigned int flag;
11409 static const struct builtin_description bdesc_2arg[] =
11411 #define IWMMXT_BUILTIN(code, string, builtin) \
11412 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11413 ARM_BUILTIN_##builtin, 0, 0 },
11415 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11416 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11417 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11418 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11419 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11420 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11421 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11422 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11423 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11424 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11425 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11426 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11427 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11428 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11429 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11430 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11431 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11432 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11433 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11434 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11435 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11436 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11437 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11438 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11439 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11440 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11441 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11442 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11443 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11444 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11445 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11446 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11447 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11448 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11449 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11450 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11451 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11452 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11453 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11454 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11455 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11456 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11457 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11458 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11459 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11460 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11461 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11462 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11463 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11464 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11465 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11466 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11467 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11468 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11469 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11470 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11471 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11472 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11474 #define IWMMXT_BUILTIN2(code, builtin) \
11475 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11477 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11478 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11479 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11480 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11481 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11482 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11483 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11484 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11485 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11486 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11487 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11488 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11489 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11490 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11491 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11492 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11493 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11494 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11495 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11496 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11497 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11498 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11499 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11500 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11501 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11502 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11503 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11504 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11505 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11506 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11507 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11508 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11511 static const struct builtin_description bdesc_1arg[] =
11513 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11514 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11515 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11516 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11517 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11518 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11519 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11520 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11521 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11522 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11523 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11524 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11525 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11526 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11527 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11528 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11529 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11530 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11533 /* Set up all the iWMMXt builtins. This is
11534 not called if TARGET_IWMMXT is zero. */
11536 static void
11537 arm_init_iwmmxt_builtins (void)
11539 const struct builtin_description * d;
11540 size_t i;
11541 tree endlink = void_list_node;
11543 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11544 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11545 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11547 tree int_ftype_int
11548 = build_function_type (integer_type_node,
11549 tree_cons (NULL_TREE, integer_type_node, endlink));
11550 tree v8qi_ftype_v8qi_v8qi_int
11551 = build_function_type (V8QI_type_node,
11552 tree_cons (NULL_TREE, V8QI_type_node,
11553 tree_cons (NULL_TREE, V8QI_type_node,
11554 tree_cons (NULL_TREE,
11555 integer_type_node,
11556 endlink))));
11557 tree v4hi_ftype_v4hi_int
11558 = build_function_type (V4HI_type_node,
11559 tree_cons (NULL_TREE, V4HI_type_node,
11560 tree_cons (NULL_TREE, integer_type_node,
11561 endlink)));
11562 tree v2si_ftype_v2si_int
11563 = build_function_type (V2SI_type_node,
11564 tree_cons (NULL_TREE, V2SI_type_node,
11565 tree_cons (NULL_TREE, integer_type_node,
11566 endlink)));
11567 tree v2si_ftype_di_di
11568 = build_function_type (V2SI_type_node,
11569 tree_cons (NULL_TREE, long_long_integer_type_node,
11570 tree_cons (NULL_TREE, long_long_integer_type_node,
11571 endlink)));
11572 tree di_ftype_di_int
11573 = build_function_type (long_long_integer_type_node,
11574 tree_cons (NULL_TREE, long_long_integer_type_node,
11575 tree_cons (NULL_TREE, integer_type_node,
11576 endlink)));
11577 tree di_ftype_di_int_int
11578 = build_function_type (long_long_integer_type_node,
11579 tree_cons (NULL_TREE, long_long_integer_type_node,
11580 tree_cons (NULL_TREE, integer_type_node,
11581 tree_cons (NULL_TREE,
11582 integer_type_node,
11583 endlink))));
11584 tree int_ftype_v8qi
11585 = build_function_type (integer_type_node,
11586 tree_cons (NULL_TREE, V8QI_type_node,
11587 endlink));
11588 tree int_ftype_v4hi
11589 = build_function_type (integer_type_node,
11590 tree_cons (NULL_TREE, V4HI_type_node,
11591 endlink));
11592 tree int_ftype_v2si
11593 = build_function_type (integer_type_node,
11594 tree_cons (NULL_TREE, V2SI_type_node,
11595 endlink));
11596 tree int_ftype_v8qi_int
11597 = build_function_type (integer_type_node,
11598 tree_cons (NULL_TREE, V8QI_type_node,
11599 tree_cons (NULL_TREE, integer_type_node,
11600 endlink)));
11601 tree int_ftype_v4hi_int
11602 = build_function_type (integer_type_node,
11603 tree_cons (NULL_TREE, V4HI_type_node,
11604 tree_cons (NULL_TREE, integer_type_node,
11605 endlink)));
11606 tree int_ftype_v2si_int
11607 = build_function_type (integer_type_node,
11608 tree_cons (NULL_TREE, V2SI_type_node,
11609 tree_cons (NULL_TREE, integer_type_node,
11610 endlink)));
11611 tree v8qi_ftype_v8qi_int_int
11612 = build_function_type (V8QI_type_node,
11613 tree_cons (NULL_TREE, V8QI_type_node,
11614 tree_cons (NULL_TREE, integer_type_node,
11615 tree_cons (NULL_TREE,
11616 integer_type_node,
11617 endlink))));
11618 tree v4hi_ftype_v4hi_int_int
11619 = build_function_type (V4HI_type_node,
11620 tree_cons (NULL_TREE, V4HI_type_node,
11621 tree_cons (NULL_TREE, integer_type_node,
11622 tree_cons (NULL_TREE,
11623 integer_type_node,
11624 endlink))));
11625 tree v2si_ftype_v2si_int_int
11626 = build_function_type (V2SI_type_node,
11627 tree_cons (NULL_TREE, V2SI_type_node,
11628 tree_cons (NULL_TREE, integer_type_node,
11629 tree_cons (NULL_TREE,
11630 integer_type_node,
11631 endlink))));
11632 /* Miscellaneous. */
11633 tree v8qi_ftype_v4hi_v4hi
11634 = build_function_type (V8QI_type_node,
11635 tree_cons (NULL_TREE, V4HI_type_node,
11636 tree_cons (NULL_TREE, V4HI_type_node,
11637 endlink)));
11638 tree v4hi_ftype_v2si_v2si
11639 = build_function_type (V4HI_type_node,
11640 tree_cons (NULL_TREE, V2SI_type_node,
11641 tree_cons (NULL_TREE, V2SI_type_node,
11642 endlink)));
11643 tree v2si_ftype_v4hi_v4hi
11644 = build_function_type (V2SI_type_node,
11645 tree_cons (NULL_TREE, V4HI_type_node,
11646 tree_cons (NULL_TREE, V4HI_type_node,
11647 endlink)));
11648 tree v2si_ftype_v8qi_v8qi
11649 = build_function_type (V2SI_type_node,
11650 tree_cons (NULL_TREE, V8QI_type_node,
11651 tree_cons (NULL_TREE, V8QI_type_node,
11652 endlink)));
11653 tree v4hi_ftype_v4hi_di
11654 = build_function_type (V4HI_type_node,
11655 tree_cons (NULL_TREE, V4HI_type_node,
11656 tree_cons (NULL_TREE,
11657 long_long_integer_type_node,
11658 endlink)));
11659 tree v2si_ftype_v2si_di
11660 = build_function_type (V2SI_type_node,
11661 tree_cons (NULL_TREE, V2SI_type_node,
11662 tree_cons (NULL_TREE,
11663 long_long_integer_type_node,
11664 endlink)));
11665 tree void_ftype_int_int
11666 = build_function_type (void_type_node,
11667 tree_cons (NULL_TREE, integer_type_node,
11668 tree_cons (NULL_TREE, integer_type_node,
11669 endlink)));
11670 tree di_ftype_void
11671 = build_function_type (long_long_unsigned_type_node, endlink);
11672 tree di_ftype_v8qi
11673 = build_function_type (long_long_integer_type_node,
11674 tree_cons (NULL_TREE, V8QI_type_node,
11675 endlink));
11676 tree di_ftype_v4hi
11677 = build_function_type (long_long_integer_type_node,
11678 tree_cons (NULL_TREE, V4HI_type_node,
11679 endlink));
11680 tree di_ftype_v2si
11681 = build_function_type (long_long_integer_type_node,
11682 tree_cons (NULL_TREE, V2SI_type_node,
11683 endlink));
11684 tree v2si_ftype_v4hi
11685 = build_function_type (V2SI_type_node,
11686 tree_cons (NULL_TREE, V4HI_type_node,
11687 endlink));
11688 tree v4hi_ftype_v8qi
11689 = build_function_type (V4HI_type_node,
11690 tree_cons (NULL_TREE, V8QI_type_node,
11691 endlink));
11693 tree di_ftype_di_v4hi_v4hi
11694 = build_function_type (long_long_unsigned_type_node,
11695 tree_cons (NULL_TREE,
11696 long_long_unsigned_type_node,
11697 tree_cons (NULL_TREE, V4HI_type_node,
11698 tree_cons (NULL_TREE,
11699 V4HI_type_node,
11700 endlink))));
11702 tree di_ftype_v4hi_v4hi
11703 = build_function_type (long_long_unsigned_type_node,
11704 tree_cons (NULL_TREE, V4HI_type_node,
11705 tree_cons (NULL_TREE, V4HI_type_node,
11706 endlink)));
11708 /* Normal vector binops. */
11709 tree v8qi_ftype_v8qi_v8qi
11710 = build_function_type (V8QI_type_node,
11711 tree_cons (NULL_TREE, V8QI_type_node,
11712 tree_cons (NULL_TREE, V8QI_type_node,
11713 endlink)));
11714 tree v4hi_ftype_v4hi_v4hi
11715 = build_function_type (V4HI_type_node,
11716 tree_cons (NULL_TREE, V4HI_type_node,
11717 tree_cons (NULL_TREE, V4HI_type_node,
11718 endlink)));
11719 tree v2si_ftype_v2si_v2si
11720 = build_function_type (V2SI_type_node,
11721 tree_cons (NULL_TREE, V2SI_type_node,
11722 tree_cons (NULL_TREE, V2SI_type_node,
11723 endlink)));
11724 tree di_ftype_di_di
11725 = build_function_type (long_long_unsigned_type_node,
11726 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11727 tree_cons (NULL_TREE,
11728 long_long_unsigned_type_node,
11729 endlink)));
11731 /* Add all builtins that are more or less simple operations on two
11732 operands. */
11733 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11735 /* Use one of the operands; the target can have a different mode for
11736 mask-generating compares. */
11737 enum machine_mode mode;
11738 tree type;
11740 if (d->name == 0)
11741 continue;
11743 mode = insn_data[d->icode].operand[1].mode;
11745 switch (mode)
11747 case V8QImode:
11748 type = v8qi_ftype_v8qi_v8qi;
11749 break;
11750 case V4HImode:
11751 type = v4hi_ftype_v4hi_v4hi;
11752 break;
11753 case V2SImode:
11754 type = v2si_ftype_v2si_v2si;
11755 break;
11756 case DImode:
11757 type = di_ftype_di_di;
11758 break;
11760 default:
11761 abort ();
11764 def_mbuiltin (d->mask, d->name, type, d->code);
11767 /* Add the remaining MMX insns with somewhat more complicated types. */
11768 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11769 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11770 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11772 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11773 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11774 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11775 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11776 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11777 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11779 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11780 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11781 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11782 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11783 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11784 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11786 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11787 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11788 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11789 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11790 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11791 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11793 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11794 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11795 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11796 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11797 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11798 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11800 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11802 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11803 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11804 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11805 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11807 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11808 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11809 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11810 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11811 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11812 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11813 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11814 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11815 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11817 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11818 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11819 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11821 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11822 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11823 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11825 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11826 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11827 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11828 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11829 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11830 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11832 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11833 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11834 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11835 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11836 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11837 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11838 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11839 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11840 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11841 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11842 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11843 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11845 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11846 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11847 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11848 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11850 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11851 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11852 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11853 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11854 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11855 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11856 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
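/* Illustrative usage only, not from the original source: once the
   builtins above are registered, code compiled for an iWMMXt target
   can call them directly.  For example, __builtin_arm_tmovmskb
   matches the int_ftype_v8qi signature declared above:

       typedef signed char v8qi __attribute__ ((vector_size (8)));

       int byte_mask (v8qi x) { return __builtin_arm_tmovmskb (x); }  */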
11859 static void
11860 arm_init_builtins (void)
11862 if (TARGET_REALLY_IWMMXT)
11863 arm_init_iwmmxt_builtins ();
11866 /* Errors in the source file can cause expand_expr to return const0_rtx
11867 where we expect a vector. To avoid crashing, use one of the vector
11868 clear instructions. */
11870 static rtx
11871 safe_vector_operand (rtx x, enum machine_mode mode)
11873 if (x != const0_rtx)
11874 return x;
11875 x = gen_reg_rtx (mode);
11877 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11878 : gen_rtx_SUBREG (DImode, x, 0)));
11879 return x;
11882 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11884 static rtx
11885 arm_expand_binop_builtin (enum insn_code icode,
11886 tree arglist, rtx target)
11888 rtx pat;
11889 tree arg0 = TREE_VALUE (arglist);
11890 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11891 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11892 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11893 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11894 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11895 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11897 if (VECTOR_MODE_P (mode0))
11898 op0 = safe_vector_operand (op0, mode0);
11899 if (VECTOR_MODE_P (mode1))
11900 op1 = safe_vector_operand (op1, mode1);
11902 if (! target
11903 || GET_MODE (target) != tmode
11904 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11905 target = gen_reg_rtx (tmode);
11907 /* In case the insn wants input operands in modes different from
11908 the result, abort. */
11909 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
11910 abort ();
11912 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11913 op0 = copy_to_mode_reg (mode0, op0);
11914 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11915 op1 = copy_to_mode_reg (mode1, op1);
11917 pat = GEN_FCN (icode) (target, op0, op1);
11918 if (! pat)
11919 return 0;
11920 emit_insn (pat);
11921 return target;
11924 /* Subroutine of arm_expand_builtin to take care of unop insns. */
11926 static rtx
11927 arm_expand_unop_builtin (enum insn_code icode,
11928 tree arglist, rtx target, int do_load)
11930 rtx pat;
11931 tree arg0 = TREE_VALUE (arglist);
11932 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11933 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11934 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11936 if (! target
11937 || GET_MODE (target) != tmode
11938 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11939 target = gen_reg_rtx (tmode);
11940 if (do_load)
11941 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11942 else
11944 if (VECTOR_MODE_P (mode0))
11945 op0 = safe_vector_operand (op0, mode0);
11947 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11948 op0 = copy_to_mode_reg (mode0, op0);
11951 pat = GEN_FCN (icode) (target, op0);
11952 if (! pat)
11953 return 0;
11954 emit_insn (pat);
11955 return target;
11958 /* Expand an expression EXP that calls a built-in function,
11959 with result going to TARGET if that's convenient
11960 (and in mode MODE if that's convenient).
11961 SUBTARGET may be used as the target for computing one of EXP's operands.
11962 IGNORE is nonzero if the value is to be ignored. */
11964 static rtx
11965 arm_expand_builtin (tree exp,
11966 rtx target,
11967 rtx subtarget ATTRIBUTE_UNUSED,
11968 enum machine_mode mode ATTRIBUTE_UNUSED,
11969 int ignore ATTRIBUTE_UNUSED)
11971 const struct builtin_description * d;
11972 enum insn_code icode;
11973 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
11974 tree arglist = TREE_OPERAND (exp, 1);
11975 tree arg0;
11976 tree arg1;
11977 tree arg2;
11978 rtx op0;
11979 rtx op1;
11980 rtx op2;
11981 rtx pat;
11982 int fcode = DECL_FUNCTION_CODE (fndecl);
11983 size_t i;
11984 enum machine_mode tmode;
11985 enum machine_mode mode0;
11986 enum machine_mode mode1;
11987 enum machine_mode mode2;
11989 switch (fcode)
11991 case ARM_BUILTIN_TEXTRMSB:
11992 case ARM_BUILTIN_TEXTRMUB:
11993 case ARM_BUILTIN_TEXTRMSH:
11994 case ARM_BUILTIN_TEXTRMUH:
11995 case ARM_BUILTIN_TEXTRMSW:
11996 case ARM_BUILTIN_TEXTRMUW:
11997 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
11998 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
11999 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12000 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12001 : CODE_FOR_iwmmxt_textrmw);
12003 arg0 = TREE_VALUE (arglist);
12004 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12005 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12006 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12007 tmode = insn_data[icode].operand[0].mode;
12008 mode0 = insn_data[icode].operand[1].mode;
12009 mode1 = insn_data[icode].operand[2].mode;
12011 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12012 op0 = copy_to_mode_reg (mode0, op0);
12013 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12015 /* @@@ better error message */
12016 error ("selector must be an immediate");
12017 return gen_reg_rtx (tmode);
12019 if (target == 0
12020 || GET_MODE (target) != tmode
12021 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12022 target = gen_reg_rtx (tmode);
12023 pat = GEN_FCN (icode) (target, op0, op1);
12024 if (! pat)
12025 return 0;
12026 emit_insn (pat);
12027 return target;
12029 case ARM_BUILTIN_TINSRB:
12030 case ARM_BUILTIN_TINSRH:
12031 case ARM_BUILTIN_TINSRW:
12032 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12033 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12034 : CODE_FOR_iwmmxt_tinsrw);
12035 arg0 = TREE_VALUE (arglist);
12036 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12037 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12038 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12039 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12040 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12041 tmode = insn_data[icode].operand[0].mode;
12042 mode0 = insn_data[icode].operand[1].mode;
12043 mode1 = insn_data[icode].operand[2].mode;
12044 mode2 = insn_data[icode].operand[3].mode;
12046 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12047 op0 = copy_to_mode_reg (mode0, op0);
12048 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12049 op1 = copy_to_mode_reg (mode1, op1);
12050 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12052 /* @@@ better error message */
12053 error ("selector must be an immediate");
12054 return const0_rtx;
12056 if (target == 0
12057 || GET_MODE (target) != tmode
12058 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12059 target = gen_reg_rtx (tmode);
12060 pat = GEN_FCN (icode) (target, op0, op1, op2);
12061 if (! pat)
12062 return 0;
12063 emit_insn (pat);
12064 return target;
12066 case ARM_BUILTIN_SETWCX:
12067 arg0 = TREE_VALUE (arglist);
12068 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12069 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12070 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12071 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12072 return 0;
12074 case ARM_BUILTIN_GETWCX:
12075 arg0 = TREE_VALUE (arglist);
12076 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12077 target = gen_reg_rtx (SImode);
12078 emit_insn (gen_iwmmxt_tmrc (target, op0));
12079 return target;
12081 case ARM_BUILTIN_WSHUFH:
12082 icode = CODE_FOR_iwmmxt_wshufh;
12083 arg0 = TREE_VALUE (arglist);
12084 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12085 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12086 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12087 tmode = insn_data[icode].operand[0].mode;
12088 mode1 = insn_data[icode].operand[1].mode;
12089 mode2 = insn_data[icode].operand[2].mode;
12091 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12092 op0 = copy_to_mode_reg (mode1, op0);
12093 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12095 /* @@@ better error message */
12096 error ("mask must be an immediate");
12097 return const0_rtx;
12099 if (target == 0
12100 || GET_MODE (target) != tmode
12101 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12102 target = gen_reg_rtx (tmode);
12103 pat = GEN_FCN (icode) (target, op0, op1);
12104 if (! pat)
12105 return 0;
12106 emit_insn (pat);
12107 return target;
12109 case ARM_BUILTIN_WSADB:
12110 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12111 case ARM_BUILTIN_WSADH:
12112 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12113 case ARM_BUILTIN_WSADBZ:
12114 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12115 case ARM_BUILTIN_WSADHZ:
12116 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12118 /* Several three-argument builtins. */
12119 case ARM_BUILTIN_WMACS:
12120 case ARM_BUILTIN_WMACU:
12121 case ARM_BUILTIN_WALIGN:
12122 case ARM_BUILTIN_TMIA:
12123 case ARM_BUILTIN_TMIAPH:
12124 case ARM_BUILTIN_TMIATT:
12125 case ARM_BUILTIN_TMIATB:
12126 case ARM_BUILTIN_TMIABT:
12127 case ARM_BUILTIN_TMIABB:
12128 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12129 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12130 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12131 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12132 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12133 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12134 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12135 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12136 : CODE_FOR_iwmmxt_walign);
12137 arg0 = TREE_VALUE (arglist);
12138 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12139 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12140 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12141 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12142 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12143 tmode = insn_data[icode].operand[0].mode;
12144 mode0 = insn_data[icode].operand[1].mode;
12145 mode1 = insn_data[icode].operand[2].mode;
12146 mode2 = insn_data[icode].operand[3].mode;
12148 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12149 op0 = copy_to_mode_reg (mode0, op0);
12150 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12151 op1 = copy_to_mode_reg (mode1, op1);
12152 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12153 op2 = copy_to_mode_reg (mode2, op2);
12154 if (target == 0
12155 || GET_MODE (target) != tmode
12156 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12157 target = gen_reg_rtx (tmode);
12158 pat = GEN_FCN (icode) (target, op0, op1, op2);
12159 if (! pat)
12160 return 0;
12161 emit_insn (pat);
12162 return target;
12164 case ARM_BUILTIN_WZERO:
12165 target = gen_reg_rtx (DImode);
12166 emit_insn (gen_iwmmxt_clrdi (target));
12167 return target;
12169 default:
12170 break;
12173 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12174 if (d->code == (const enum arm_builtins) fcode)
12175 return arm_expand_binop_builtin (d->icode, arglist, target);
12177 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12178 if (d->code == (const enum arm_builtins) fcode)
12179 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12181 /* @@@ Should really do something sensible here. */
12182 return NULL_RTX;
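/* Illustrative trace, assumed rather than taken from this file: a call
   such as __builtin_arm_wpackhss (a, b) matches none of the special
   cases in the switch above, so it falls through to the bdesc_2arg
   scan and is expanded by arm_expand_binop_builtin, which copies A
   and B into registers accepted by the insn predicates and emits the
   iwmmxt_wpackhss pattern.  */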
12185 /* Recursively search through all of the blocks in a function
12186 checking to see if any of the variables created in that
12187 function match the RTX called 'orig'. If they do then
12188 replace them with the RTX called 'new'. */
12189 static void
12190 replace_symbols_in_block (tree block, rtx orig, rtx new)
12192 for (; block; block = BLOCK_CHAIN (block))
12194 tree sym;
12196 if (!TREE_USED (block))
12197 continue;
12199 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12201 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12202 || DECL_IGNORED_P (sym)
12203 || TREE_CODE (sym) != VAR_DECL
12204 || DECL_EXTERNAL (sym)
12205 || !rtx_equal_p (DECL_RTL (sym), orig)
12207 continue;
12209 SET_DECL_RTL (sym, new);
12212 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12216 /* Return the number (counting from 0) of
12217 the least significant set bit in MASK. */
12219 inline static int
12220 number_of_first_bit_set (int mask)
12222 int bit;
12224 for (bit = 0;
12225 (mask & (1 << bit)) == 0;
12226 ++bit)
12227 continue;
12229 return bit;
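/* Illustrative only: for MASK = 0x28 (bits 3 and 5 set) the loop above
   returns 3.  For nonzero MASK this is equivalent to a hypothetical
   count-trailing-zeros one-liner, assuming __builtin_ctz is available:

       int first_bit (int mask) { return __builtin_ctz (mask); }  */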
12232 /* Generate code to return from a thumb function.
12233 If 'reg_containing_return_addr' is -1, then the return address is
12234 actually on the stack, at the stack pointer. */
12235 static void
12236 thumb_exit (FILE *f, int reg_containing_return_addr)
12238 unsigned regs_available_for_popping;
12239 unsigned regs_to_pop;
12240 int pops_needed;
12241 unsigned available;
12242 unsigned required;
12243 int mode;
12244 int size;
12245 int restore_a4 = FALSE;
12247 /* Compute the registers we need to pop. */
12248 regs_to_pop = 0;
12249 pops_needed = 0;
12251 if (reg_containing_return_addr == -1)
12253 regs_to_pop |= 1 << LR_REGNUM;
12254 ++pops_needed;
12257 if (TARGET_BACKTRACE)
12259 /* Restore the (ARM) frame pointer and stack pointer. */
12260 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12261 pops_needed += 2;
12264 /* If there is nothing to pop then just emit the BX instruction and
12265 return. */
12266 if (pops_needed == 0)
12268 if (current_function_calls_eh_return)
12269 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12271 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12272 return;
12274 /* Otherwise if we are not supporting interworking and we have not created
12275 a backtrace structure and the function was not entered in ARM mode then
12276 just pop the return address straight into the PC. */
12277 else if (!TARGET_INTERWORK
12278 && !TARGET_BACKTRACE
12279 && !is_called_in_ARM_mode (current_function_decl)
12280 && !current_function_calls_eh_return)
12282 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12283 return;
12286 /* Find out how many of the (return) argument registers we can corrupt. */
12287 regs_available_for_popping = 0;
12289 /* If returning via __builtin_eh_return, the bottom three registers
12290 all contain information needed for the return. */
12291 if (current_function_calls_eh_return)
12292 size = 12;
12293 else
12295 /* We can deduce the registers used from the function's
12296 return value. This is more reliable than examining
12297 regs_ever_live[] because that will be set if the register is
12298 ever used in the function, not just if the register is used
12299 to hold a return value. */
12301 if (current_function_return_rtx != 0)
12302 mode = GET_MODE (current_function_return_rtx);
12303 else
12304 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12306 size = GET_MODE_SIZE (mode);
12308 if (size == 0)
12310 /* In a void function we can use any argument register.
12311 In a function that returns a structure on the stack
12312 we can use the second and third argument registers. */
12313 if (mode == VOIDmode)
12314 regs_available_for_popping =
12315 (1 << ARG_REGISTER (1))
12316 | (1 << ARG_REGISTER (2))
12317 | (1 << ARG_REGISTER (3));
12318 else
12319 regs_available_for_popping =
12320 (1 << ARG_REGISTER (2))
12321 | (1 << ARG_REGISTER (3));
12323 else if (size <= 4)
12324 regs_available_for_popping =
12325 (1 << ARG_REGISTER (2))
12326 | (1 << ARG_REGISTER (3));
12327 else if (size <= 8)
12328 regs_available_for_popping =
12329 (1 << ARG_REGISTER (3));
12332 /* Match registers to be popped with registers into which we pop them. */
12333 for (available = regs_available_for_popping,
12334 required = regs_to_pop;
12335 required != 0 && available != 0;
12336 available &= ~(available & - available),
12337 required &= ~(required & - required))
12338 -- pops_needed;
12340 /* If we have any popping registers left over, remove them. */
12341 if (available > 0)
12342 regs_available_for_popping &= ~available;
12344 /* Otherwise if we need another popping register we can use
12345 the fourth argument register. */
12346 else if (pops_needed)
12348 /* If we have not found any free argument registers and
12349 reg a4 contains the return address, we must move it. */
12350 if (regs_available_for_popping == 0
12351 && reg_containing_return_addr == LAST_ARG_REGNUM)
12353 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12354 reg_containing_return_addr = LR_REGNUM;
12356 else if (size > 12)
12358 /* Register a4 is being used to hold part of the return value,
12359 but we have dire need of a free, low register. */
12360 restore_a4 = TRUE;
12362 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12365 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12367 /* The fourth argument register is available. */
12368 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12370 --pops_needed;
12374 /* Pop as many registers as we can. */
12375 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12376 regs_available_for_popping);
12378 /* Process the registers we popped. */
12379 if (reg_containing_return_addr == -1)
12381 /* The return address was popped into the lowest numbered register. */
12382 regs_to_pop &= ~(1 << LR_REGNUM);
12384 reg_containing_return_addr =
12385 number_of_first_bit_set (regs_available_for_popping);
12387 /* Remove this register from the mask of available registers, so that
12388 the return address will not be corrupted by further pops. */
12389 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12392 /* If we popped other registers then handle them here. */
12393 if (regs_available_for_popping)
12395 int frame_pointer;
12397 /* Work out which register currently contains the frame pointer. */
12398 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12400 /* Move it into the correct place. */
12401 asm_fprintf (f, "\tmov\t%r, %r\n",
12402 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12404 /* (Temporarily) remove it from the mask of popped registers. */
12405 regs_available_for_popping &= ~(1 << frame_pointer);
12406 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12408 if (regs_available_for_popping)
12410 int stack_pointer;
12412 /* We popped the stack pointer as well,
12413 find the register that contains it. */
12414 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12416 /* Move it into the stack register. */
12417 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12419 /* At this point we have popped all necessary registers, so
12420 do not worry about restoring regs_available_for_popping
12421 to its correct value:
12423 assert (pops_needed == 0)
12424 assert (regs_available_for_popping == (1 << frame_pointer))
12425 assert (regs_to_pop == (1 << STACK_POINTER)) */
12427 else
12429 /* Since we have just moved the popped value into the frame
12430 pointer, the popping register is available for reuse, and
12431 we know that we still have the stack pointer left to pop. */
12432 regs_available_for_popping |= (1 << frame_pointer);
12436 /* If we still have registers left on the stack, but we no longer have
12437 any registers into which we can pop them, then we must move the return
12438 address into the link register and make available the register that
12439 contained it. */
12440 if (regs_available_for_popping == 0 && pops_needed > 0)
12442 regs_available_for_popping |= 1 << reg_containing_return_addr;
12444 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12445 reg_containing_return_addr);
12447 reg_containing_return_addr = LR_REGNUM;
12450 /* If we have registers left on the stack then pop some more.
12451 We know that at most we will want to pop FP and SP. */
12452 if (pops_needed > 0)
12454 int popped_into;
12455 int move_to;
12457 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12458 regs_available_for_popping);
12460 /* We have popped either FP or SP.
12461 Move whichever one it is into the correct register. */
12462 popped_into = number_of_first_bit_set (regs_available_for_popping);
12463 move_to = number_of_first_bit_set (regs_to_pop);
12465 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12467 regs_to_pop &= ~(1 << move_to);
12469 --pops_needed;
12472 /* If we still have not popped everything then we must have only
12473 had one register available to us and we are now popping the SP. */
12474 if (pops_needed > 0)
12476 int popped_into;
12478 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12479 regs_available_for_popping);
12481 popped_into = number_of_first_bit_set (regs_available_for_popping);
12483 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12485 /* assert (regs_to_pop == (1 << STACK_POINTER))
12486 assert (pops_needed == 1) */
12490 /* If necessary restore the a4 register. */
12491 if (restore_a4)
12493 if (reg_containing_return_addr != LR_REGNUM)
12495 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12496 reg_containing_return_addr = LR_REGNUM;
12499 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12502 if (current_function_calls_eh_return)
12503 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12505 /* Return to caller. */
12506 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
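/* Illustrative output, assumed for a simple case: for a void function
   compiled with interworking enabled, where the return address is on
   the stack, the code above typically emits

       pop {r1}
       bx  r1

   whereas without interworking or a backtrace structure the early-exit
   path above emits a single "pop {pc}".  */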
12509 /* Emit code to push or pop registers to or from the stack. F is the
12510 assembly file. MASK is the registers to push or pop. PUSH is
12511 nonzero if we should push, and zero if we should pop. For debugging
12512 output, if pushing, adjust CFA_OFFSET by the amount of space added
12513 to the stack. REAL_REGS should have the same number of bits set as
12514 MASK, and will be used instead (in the same order) to describe which
12515 registers were saved - this is used to mark the save slots when we
12516 push high registers after moving them to low registers. */
12517 static void
12518 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12520 int regno;
12521 int lo_mask = mask & 0xFF;
12522 int pushed_words = 0;
12524 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12526 /* Special case. Do not generate a POP PC statement here; do it in
12527 thumb_exit(). */
12528 thumb_exit (f, -1);
12529 return;
12532 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12534 /* Look at the low registers first. */
12535 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12537 if (lo_mask & 1)
12539 asm_fprintf (f, "%r", regno);
12541 if ((lo_mask & ~1) != 0)
12542 fprintf (f, ", ");
12544 pushed_words++;
12548 if (push && (mask & (1 << LR_REGNUM)))
12550 /* Catch pushing the LR. */
12551 if (mask & 0xFF)
12552 fprintf (f, ", ");
12554 asm_fprintf (f, "%r", LR_REGNUM);
12556 pushed_words++;
12558 else if (!push && (mask & (1 << PC_REGNUM)))
12560 /* Catch popping the PC. */
12561 if (TARGET_INTERWORK || TARGET_BACKTRACE
12562 || current_function_calls_eh_return)
12564 /* The PC is never popped directly; instead
12565 it is popped into r3 and then BX is used. */
12566 fprintf (f, "}\n");
12568 thumb_exit (f, -1);
12570 return;
12572 else
12574 if (mask & 0xFF)
12575 fprintf (f, ", ");
12577 asm_fprintf (f, "%r", PC_REGNUM);
12581 fprintf (f, "}\n");
12583 if (push && pushed_words && dwarf2out_do_frame ())
12585 char *l = dwarf2out_cfi_label ();
12586 int pushed_mask = real_regs;
12588 *cfa_offset += pushed_words * 4;
12589 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12591 pushed_words = 0;
12592 pushed_mask = real_regs;
12593 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12595 if (pushed_mask & 1)
12596 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
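/* Illustrative only: thumb_pushpop (f, 0x400f, 1, &offset, 0x400f),
   i.e. a push of r0-r3 plus LR (bit 14), emits

       push {r0, r1, r2, r3, lr}

   and, when dwarf2 frame info is wanted, advances *CFA_OFFSET by the
   20 bytes (5 words) pushed.  */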
12601 void
12602 thumb_final_prescan_insn (rtx insn)
12604 if (flag_print_asm_name)
12605 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12606 INSN_ADDRESSES (INSN_UID (insn)));
12609 int
12610 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12612 unsigned HOST_WIDE_INT mask = 0xff;
12613 int i;
12615 if (val == 0) /* XXX */
12616 return 0;
12618 for (i = 0; i < 25; i++)
12619 if ((val & (mask << i)) == val)
12620 return 1;
12622 return 0;
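/* Illustrative only: thumb_shiftable_const accepts any 8-bit constant
   shifted left, e.g. 0xFF0 (0xFF << 4) and 0xFF000000 (0xFF << 24)
   return 1, while 0x101 spans a 9-bit window and returns 0.  */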
12625 /* Returns nonzero if the current function contains,
12626 or might contain, a far jump. */
12627 static int
12628 thumb_far_jump_used_p (void)
12630 rtx insn;
12632 /* This test is only important for leaf functions. */
12633 /* assert (!leaf_function_p ()); */
12635 /* If we have already decided that far jumps may be used,
12636 do not bother checking again, and always return true even if
12637 it turns out that they are not being used. Once we have made
12638 the decision that far jumps are present (and that hence the link
12639 register will be pushed onto the stack) we cannot go back on it. */
12640 if (cfun->machine->far_jump_used)
12641 return 1;
12643 /* If this function is not being called from the prologue/epilogue
12644 generation code then it must be being called from the
12645 INITIAL_ELIMINATION_OFFSET macro. */
12646 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12648 /* In this case we know that we are being asked about the elimination
12649 of the arg pointer register. If that register is not being used,
12650 then there are no arguments on the stack, and we do not have to
12651 worry that a far jump might force the prologue to push the link
12652 register, changing the stack offsets. In this case we can just
12653 return false, since the presence of far jumps in the function will
12654 not affect stack offsets.
12656 If the arg pointer is live (or if it was live, but has now been
12657 eliminated and so set to dead) then we do have to test to see if
12658 the function might contain a far jump. This test can lead to some
12659 false positives, since before reload is completed the length of
12660 branch instructions is not known, so gcc defaults to returning their
12661 longest length, which in turn sets the far jump attribute to true.
12663 A false positive will not result in bad code being generated, but it
12664 will result in a needless push and pop of the link register. We
12665 hope that this does not occur too often.
12667 If we need doubleword stack alignment this could affect the other
12668 elimination offsets so we can't risk getting it wrong. */
12669 if (regs_ever_live [ARG_POINTER_REGNUM])
12670 cfun->machine->arg_pointer_live = 1;
12671 else if (!cfun->machine->arg_pointer_live)
12672 return 0;
12675 /* Check to see if the function contains a branch
12676 insn with the far jump attribute set. */
12677 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12679 if (GET_CODE (insn) == JUMP_INSN
12680 /* Ignore tablejump patterns. */
12681 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12682 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12683 && get_attr_far_jump (insn) == FAR_JUMP_YES
12686 /* Record the fact that we have decided that
12687 the function does use far jumps. */
12688 cfun->machine->far_jump_used = 1;
12689 return 1;
12693 return 0;
12696 /* Return nonzero if FUNC must be entered in ARM mode. */
12697 int
12698 is_called_in_ARM_mode (tree func)
12700 if (TREE_CODE (func) != FUNCTION_DECL)
12701 abort ();
12703 /* Ignore the problem about functions whose address is taken. */
12704 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12705 return TRUE;
12707 #ifdef ARM_PE
12708 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12709 #else
12710 return FALSE;
12711 #endif
12714 /* The bits which aren't usefully expanded as rtl. */
12715 const char *
12716 thumb_unexpanded_epilogue (void)
12718 int regno;
12719 int live_regs_mask = 0;
12720 int high_regs_pushed = 0;
12721 int had_to_push_lr;
12722 int size;
12723 int mode;
12725 if (return_used_this_function)
12726 return "";
12728 if (IS_NAKED (arm_current_func_type ()))
12729 return "";
12731 live_regs_mask = thumb_compute_save_reg_mask ();
12732 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12734 /* We can deduce the registers used from the function's return value.
12735 This is more reliable than examining regs_ever_live[] because that
12736 will be set if the register is ever used in the function, not just if
12737 the register is used to hold a return value. */
12739 if (current_function_return_rtx != 0)
12740 mode = GET_MODE (current_function_return_rtx);
12741 else
12742 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12744 size = GET_MODE_SIZE (mode);
12746 /* The prologue may have pushed some high registers to use as
12747 work registers, e.g. the testsuite file:
12748 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12749 compiles to produce:
12750 push {r4, r5, r6, r7, lr}
12751 mov r7, r9
12752 mov r6, r8
12753 push {r6, r7}
12754 as part of the prologue. We have to undo that pushing here. */
12756 if (high_regs_pushed)
12758 int mask = live_regs_mask & 0xff;
12759 int next_hi_reg;
12761 /* The available low registers depend on the size of the value we are
12762 returning. */
12763 if (size <= 12)
12764 mask |= 1 << 3;
12765 if (size <= 8)
12766 mask |= 1 << 2;
12768 if (mask == 0)
12769 /* Oh dear! We have no low registers into which we can pop
12770 high registers! */
12771 internal_error
12772 ("no low registers available for popping high registers");
12774 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12775 if (live_regs_mask & (1 << next_hi_reg))
12776 break;
12778 while (high_regs_pushed)
12780 /* Find lo register(s) into which the high register(s) can
12781 be popped. */
12782 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12784 if (mask & (1 << regno))
12785 high_regs_pushed--;
12786 if (high_regs_pushed == 0)
12787 break;
12790 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12792 /* Pop the values into the low register(s). */
12793 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12795 /* Move the value(s) into the high registers. */
12796 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12798 if (mask & (1 << regno))
12800 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12801 regno);
12803 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12804 if (live_regs_mask & (1 << next_hi_reg))
12805 break;
12809 live_regs_mask &= ~0x0f00;
12812 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12813 live_regs_mask &= 0xff;
12815 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12817 /* Pop the return address into the PC. */
12818 if (had_to_push_lr)
12819 live_regs_mask |= 1 << PC_REGNUM;
12821 /* Either no argument registers were pushed or a backtrace
12822 structure was created which includes an adjusted stack
12823 pointer, so just pop everything. */
12824 if (live_regs_mask)
12825 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12826 live_regs_mask);
12828 /* We have either just popped the return address into the
12829 PC or it was kept in LR for the entire function. */
12830 if (!had_to_push_lr)
12831 thumb_exit (asm_out_file, LR_REGNUM);
12833 else
12835 /* Pop everything but the return address. */
12836 if (live_regs_mask)
12837 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12838 live_regs_mask);
12840 if (had_to_push_lr)
12842 if (size > 12)
12844 /* We have no free low regs, so save one. */
12845 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
12846 LAST_ARG_REGNUM);
12849 /* Get the return address into a temporary register. */
12850 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12851 1 << LAST_ARG_REGNUM);
12853 if (size > 12)
12855 /* Move the return address to lr. */
12856 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
12857 LAST_ARG_REGNUM);
12858 /* Restore the low register. */
12859 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
12860 IP_REGNUM);
12861 regno = LR_REGNUM;
12863 else
12864 regno = LAST_ARG_REGNUM;
12866 else
12867 regno = LR_REGNUM;
12869 /* Remove the argument registers that were pushed onto the stack. */
12870 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12871 SP_REGNUM, SP_REGNUM,
12872 current_function_pretend_args_size);
12874 thumb_exit (asm_out_file, regno);
12877 return "";
12880 /* Functions to save and restore machine-specific function data. */
12881 static struct machine_function *
12882 arm_init_machine_status (void)
12884 struct machine_function *machine;
12885 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12887 #if ARM_FT_UNKNOWN != 0
12888 machine->func_type = ARM_FT_UNKNOWN;
12889 #endif
12890 return machine;
12893 /* Return an RTX indicating where the return address to the
12894 calling function can be found. */
12895 rtx
12896 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12898 if (count != 0)
12899 return NULL_RTX;
12901 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
12904 /* Do anything needed before RTL is emitted for each function. */
12905 void
12906 arm_init_expanders (void)
12908 /* Arrange to initialize and mark the machine per-function status. */
12909 init_machine_status = arm_init_machine_status;
12911 /* This is to stop the combine pass optimizing away the alignment
12912 adjustment of va_arg. */
12913 /* ??? It is claimed that this should not be necessary. */
12914 if (cfun)
12915 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
12919 /* Like arm_compute_initial_elimination_offset. Simpler because
12920 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
12922 HOST_WIDE_INT
12923 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
12925 arm_stack_offsets *offsets;
12927 offsets = arm_get_frame_offsets ();
12929 switch (from)
12931 case ARG_POINTER_REGNUM:
12932 switch (to)
12934 case STACK_POINTER_REGNUM:
12935 return offsets->outgoing_args - offsets->saved_args;
12937 case FRAME_POINTER_REGNUM:
12938 return offsets->soft_frame - offsets->saved_args;
12940 case THUMB_HARD_FRAME_POINTER_REGNUM:
12941 case ARM_HARD_FRAME_POINTER_REGNUM:
12942 return offsets->saved_regs - offsets->saved_args;
12944 default:
12945 abort ();
12947 break;
12949 case FRAME_POINTER_REGNUM:
12950 switch (to)
12952 case STACK_POINTER_REGNUM:
12953 return offsets->outgoing_args - offsets->soft_frame;
12955 case THUMB_HARD_FRAME_POINTER_REGNUM:
12956 case ARM_HARD_FRAME_POINTER_REGNUM:
12957 return offsets->saved_regs - offsets->soft_frame;
12959 default:
12960 abort ();
12962 break;
12964 default:
12965 abort ();
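/* Illustrative only, with made-up numbers: if offsets->saved_args = 0,
   saved_regs = 16, soft_frame = 16 and outgoing_args = 32 for some
   function, then eliminating ARG_POINTER into STACK_POINTER yields 32,
   ARG_POINTER into FRAME_POINTER yields 16, and FRAME_POINTER into
   STACK_POINTER yields 16.  */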
12970 /* Generate the rest of a function's prologue. */
12971 void
12972 thumb_expand_prologue (void)
12974 rtx insn, dwarf;
12976 HOST_WIDE_INT amount;
12977 arm_stack_offsets *offsets;
12978 unsigned long func_type;
12979 int regno;
12980 unsigned long live_regs_mask;
12982 func_type = arm_current_func_type ();
12984 /* Naked functions don't have prologues. */
12985 if (IS_NAKED (func_type))
12986 return;
12988 if (IS_INTERRUPT (func_type))
12990 error ("interrupt Service Routines cannot be coded in Thumb mode");
12991 return;
12994 live_regs_mask = thumb_compute_save_reg_mask ();
12995 /* Load the pic register before setting the frame pointer, so we can use r7
12996 as a temporary work register. */
12997 if (flag_pic)
12998 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13000 offsets = arm_get_frame_offsets ();
13002 if (frame_pointer_needed)
13004 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13005 stack_pointer_rtx));
13006 RTX_FRAME_RELATED_P (insn) = 1;
13008 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13009 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13010 stack_pointer_rtx);
13012 amount = offsets->outgoing_args - offsets->saved_regs;
13013 if (amount)
13015 if (amount < 512)
13017 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13018 GEN_INT (- amount)));
13019 RTX_FRAME_RELATED_P (insn) = 1;
13021 else
13023 rtx reg;
13025 /* The stack decrement is too big for an immediate value in a single
13026 insn. In theory we could issue multiple subtracts, but after
13027 three of them it becomes more space efficient to place the full
13028 value in the constant pool and load it into a register. (Also the
13029 ARM debugger really likes to see only one stack decrement per
13030 function). So instead we look for a scratch register into which
13031 we can load the decrement, and then we subtract this from the
13032 stack pointer. Unfortunately on the Thumb the only available
13033 scratch registers are the argument registers, and we cannot use
13034 these as they may hold arguments to the function. Instead we
13035 attempt to locate a call preserved register which is used by this
13036 function. If we can find one, then we know that it will have
13037 been pushed at the start of the prologue and so we can corrupt
13038 it now. */
13039 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13040 if (live_regs_mask & (1 << regno)
13041 && !(frame_pointer_needed
13042 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13043 break;
13045 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13047 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13049 /* Choose an arbitrary, non-argument low register. */
13050 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13052 /* Save it by copying it into a high, scratch register. */
13053 emit_insn (gen_movsi (spare, reg));
13054 /* Add a USE to stop propagate_one_insn() from barfing. */
13055 emit_insn (gen_prologue_use (spare));
13057 /* Decrement the stack. */
13058 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13059 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13060 stack_pointer_rtx, reg));
13061 RTX_FRAME_RELATED_P (insn) = 1;
13062 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13063 plus_constant (stack_pointer_rtx,
13064 -amount));
13065 RTX_FRAME_RELATED_P (dwarf) = 1;
13066 REG_NOTES (insn)
13067 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13068 REG_NOTES (insn));
13070 /* Restore the low register's original value. */
13071 emit_insn (gen_movsi (reg, spare));
13073 /* Emit a USE of the restored scratch register, so that flow
13074 analysis will not consider the restore redundant. The
13075 register won't be used again in this function and isn't
13076 restored by the epilogue. */
13077 emit_insn (gen_prologue_use (reg));
13079 else
13081 reg = gen_rtx_REG (SImode, regno);
13083 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13085 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13086 stack_pointer_rtx, reg));
13087 RTX_FRAME_RELATED_P (insn) = 1;
13088 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13089 plus_constant (stack_pointer_rtx,
13090 -amount));
13091 RTX_FRAME_RELATED_P (dwarf) = 1;
13092 REG_NOTES (insn)
13093 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13094 REG_NOTES (insn));
13097 /* If the frame pointer is needed, emit a special barrier that
13098 will prevent the scheduler from moving stores to the frame
13099 before the stack adjustment. */
13100 if (frame_pointer_needed)
13101 emit_insn (gen_stack_tie (stack_pointer_rtx,
13102 hard_frame_pointer_rtx));
13105 if (current_function_profile || TARGET_NO_SCHED_PRO)
13106 emit_insn (gen_blockage ());
13108 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13109 if (live_regs_mask & 0xff)
13110 cfun->machine->lr_save_eliminated = 0;
13112 /* If the link register is being kept alive, with the return address in it,
13113 then make sure that it does not get reused by the ce2 pass. */
13114 if (cfun->machine->lr_save_eliminated)
13115 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13119 void
13120 thumb_expand_epilogue (void)
13122 HOST_WIDE_INT amount;
13123 arm_stack_offsets *offsets;
13124 int regno;
13126 /* Naked functions don't have epilogues. */
13127 if (IS_NAKED (arm_current_func_type ()))
13128 return;
13130 offsets = arm_get_frame_offsets ();
13131 amount = offsets->outgoing_args - offsets->saved_regs;
13133 if (frame_pointer_needed)
13134 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13135 else if (amount)
13137 if (amount < 512)
13138 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13139 GEN_INT (amount)));
13140 else
13142 /* r3 is always free in the epilogue. */
13143 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13145 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13146 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13150 /* Emit a USE (stack_pointer_rtx), so that
13151 the stack adjustment will not be deleted. */
13152 emit_insn (gen_prologue_use (stack_pointer_rtx));
13154 if (current_function_profile || TARGET_NO_SCHED_PRO)
13155 emit_insn (gen_blockage ());
13157 /* Emit a clobber for each insn that will be restored in the epilogue,
13158 so that flow2 will get register lifetimes correct. */
13159 for (regno = 0; regno < 13; regno++)
13160 if (regs_ever_live[regno] && !call_used_regs[regno])
13161 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13163 if (! regs_ever_live[LR_REGNUM])
13164 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13167 static void
13168 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13170 int live_regs_mask = 0;
13171 int l_mask;
13172 int high_regs_pushed = 0;
13173 int cfa_offset = 0;
13174 int regno;
13176 if (IS_NAKED (arm_current_func_type ()))
13177 return;
13179 if (is_called_in_ARM_mode (current_function_decl))
13181 const char * name;
13183 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13184 abort ();
13185 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13186 abort ();
13187 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13189 /* Generate code sequence to switch us into Thumb mode. */
13190 /* The .code 32 directive has already been emitted by
13191 ASM_DECLARE_FUNCTION_NAME. */
13192 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13193 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13195 /* Generate a label, so that the debugger will notice the
13196 change in instruction sets. This label is also used by
13197 the assembler to bypass the ARM code when this function
13198 is called from a Thumb encoded function elsewhere in the
13199 same file. Hence the definition of STUB_NAME here must
13200 agree with the definition in gas/config/tc-arm.c. */
13202 #define STUB_NAME ".real_start_of"
13204 fprintf (f, "\t.code\t16\n");
13205 #ifdef ARM_PE
13206 if (arm_dllexport_name_p (name))
13207 name = arm_strip_name_encoding (name);
13208 #endif
13209 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13210 fprintf (f, "\t.thumb_func\n");
13211 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13214 if (current_function_pretend_args_size)
13216 if (cfun->machine->uses_anonymous_args)
13218 int num_pushes;
13220 fprintf (f, "\tpush\t{");
13222 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13224 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13225 regno <= LAST_ARG_REGNUM;
13226 regno++)
13227 asm_fprintf (f, "%r%s", regno,
13228 regno == LAST_ARG_REGNUM ? "" : ", ");
13230 fprintf (f, "}\n");
13232 else
13233 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13234 SP_REGNUM, SP_REGNUM,
13235 current_function_pretend_args_size);
13237 /* We don't need to record the stores for unwinding (would it
13238 help the debugger any if we did?), but record the change in
13239 the stack pointer. */
13240 if (dwarf2out_do_frame ())
13242 char *l = dwarf2out_cfi_label ();
13243 cfa_offset = cfa_offset + current_function_pretend_args_size;
13244 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13248 live_regs_mask = thumb_compute_save_reg_mask ();
13249 /* Just low regs and lr. */
13250 l_mask = live_regs_mask & 0x40ff;
13252 if (TARGET_BACKTRACE)
13254 int offset;
13255 int work_register;
13257 /* We have been asked to create a stack backtrace structure.
13258 The code looks like this:
13260 0 .align 2
13261 0 func:
13262 0 sub SP, #16 Reserve space for 4 registers.
13263 2 push {R7} Push low registers.
13264 4 add R7, SP, #20 Get the stack pointer before the push.
13265 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13266 8 mov R7, PC Get hold of the start of this code plus 12.
13267 10 str R7, [SP, #16] Store it.
13268 12 mov R7, FP Get hold of the current frame pointer.
13269 14 str R7, [SP, #4] Store it.
13270 16 mov R7, LR Get hold of the current return address.
13271 18 str R7, [SP, #12] Store it.
13272 20 add R7, SP, #16 Point at the start of the backtrace structure.
13273 22 mov FP, R7 Put this value into the frame pointer. */
13275 work_register = thumb_find_work_register (live_regs_mask);
13277 asm_fprintf
13278 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13279 SP_REGNUM, SP_REGNUM);
13281 if (dwarf2out_do_frame ())
13283 char *l = dwarf2out_cfi_label ();
13284 cfa_offset = cfa_offset + 16;
13285 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13288 if (l_mask)
13290 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13291 offset = bit_count (l_mask);
13293 else
13294 offset = 0;
13296 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13297 offset + 16 + current_function_pretend_args_size);
13299 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13300 offset + 4);
13302 /* Make sure that the instruction fetching the PC is in the right place
13303 to calculate "start of backtrace creation code + 12". */
13304 if (l_mask)
13306 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13307 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13308 offset + 12);
13309 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13310 ARM_HARD_FRAME_POINTER_REGNUM);
13311 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13312 offset);
13314 else
13316 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13317 ARM_HARD_FRAME_POINTER_REGNUM);
13318 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13319 offset);
13320 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13321 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13322 offset + 12);
13325 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13326 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13327 offset + 8);
13328 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13329 offset + 12);
13330 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13331 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13333 else if (l_mask)
13334 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13336 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13338 if (high_regs_pushed)
13340 int pushable_regs = 0;
13341 int next_hi_reg;
13343 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13344 if (live_regs_mask & (1 << next_hi_reg))
13345 break;
13347 pushable_regs = l_mask & 0xff;
13349 if (pushable_regs == 0)
13350 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13352 while (high_regs_pushed > 0)
13354 int real_regs_mask = 0;
13356 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13358 if (pushable_regs & (1 << regno))
13360 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13362 high_regs_pushed--;
13363 real_regs_mask |= (1 << next_hi_reg);
13365 if (high_regs_pushed)
13367 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13368 next_hi_reg--)
13369 if (live_regs_mask & (1 << next_hi_reg))
13370 break;
13372 else
13374 pushable_regs &= ~((1 << regno) - 1);
13375 break;
13380 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13385 /* Handle the case of a double word load into a low register from
13386 a computed memory address. The computed address may involve a
13387 register which is overwritten by the load. */
13388 const char *
13389 thumb_load_double_from_address (rtx *operands)
13391 rtx addr;
13392 rtx base;
13393 rtx offset;
13394 rtx arg1;
13395 rtx arg2;
13397 if (GET_CODE (operands[0]) != REG)
13398 abort ();
13400 if (GET_CODE (operands[1]) != MEM)
13401 abort ();
13403 /* Get the memory address. */
13404 addr = XEXP (operands[1], 0);
13406 /* Work out how the memory address is computed. */
13407 switch (GET_CODE (addr))
13409 case REG:
13410 operands[2] = gen_rtx_MEM (SImode,
13411 plus_constant (XEXP (operands[1], 0), 4));
13413 if (REGNO (operands[0]) == REGNO (addr))
13415 output_asm_insn ("ldr\t%H0, %2", operands);
13416 output_asm_insn ("ldr\t%0, %1", operands);
13418 else
13420 output_asm_insn ("ldr\t%0, %1", operands);
13421 output_asm_insn ("ldr\t%H0, %2", operands);
13423 break;
13425 case CONST:
13426 /* Compute <address> + 4 for the high order load. */
13427 operands[2] = gen_rtx_MEM (SImode,
13428 plus_constant (XEXP (operands[1], 0), 4));
13430 output_asm_insn ("ldr\t%0, %1", operands);
13431 output_asm_insn ("ldr\t%H0, %2", operands);
13432 break;
13434 case PLUS:
13435 arg1 = XEXP (addr, 0);
13436 arg2 = XEXP (addr, 1);
13438 if (CONSTANT_P (arg1))
13439 base = arg2, offset = arg1;
13440 else
13441 base = arg1, offset = arg2;
13443 if (GET_CODE (base) != REG)
13444 abort ();
13446 /* Catch the case of <address> = <reg> + <reg> */
13447 if (GET_CODE (offset) == REG)
13449 int reg_offset = REGNO (offset);
13450 int reg_base = REGNO (base);
13451 int reg_dest = REGNO (operands[0]);
13453 /* Add the base and offset registers together into the
13454 higher destination register. */
13455 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13456 reg_dest + 1, reg_base, reg_offset);
13458 /* Load the lower destination register from the address in
13459 the higher destination register. */
13460 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13461 reg_dest, reg_dest + 1);
13463 /* Load the higher destination register from its own address
13464 plus 4. */
13465 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13466 reg_dest + 1, reg_dest + 1);
13468 else
13470 /* Compute <address> + 4 for the high order load. */
13471 operands[2] = gen_rtx_MEM (SImode,
13472 plus_constant (XEXP (operands[1], 0), 4));
13474 /* If the computed address is held in the low order register
13475 then load the high order register first, otherwise always
13476 load the low order register first. */
13477 if (REGNO (operands[0]) == REGNO (base))
13479 output_asm_insn ("ldr\t%H0, %2", operands);
13480 output_asm_insn ("ldr\t%0, %1", operands);
13482 else
13484 output_asm_insn ("ldr\t%0, %1", operands);
13485 output_asm_insn ("ldr\t%H0, %2", operands);
13488 break;
13490 case LABEL_REF:
13491 /* With no registers to worry about we can just load the value
13492 directly. */
13493 operands[2] = gen_rtx_MEM (SImode,
13494 plus_constant (XEXP (operands[1], 0), 4));
13496 output_asm_insn ("ldr\t%H0, %2", operands);
13497 output_asm_insn ("ldr\t%0, %1", operands);
13498 break;
13500 default:
13501 abort ();
13502 break;
13505 return "";
13508 const char *
13509 thumb_output_move_mem_multiple (int n, rtx *operands)
13511 rtx tmp;
13513 switch (n)
13515 case 2:
13516 if (REGNO (operands[4]) > REGNO (operands[5]))
13518 tmp = operands[4];
13519 operands[4] = operands[5];
13520 operands[5] = tmp;
13522 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13523 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13524 break;
13526 case 3:
13527 if (REGNO (operands[4]) > REGNO (operands[5]))
13529 tmp = operands[4];
13530 operands[4] = operands[5];
13531 operands[5] = tmp;
13533 if (REGNO (operands[5]) > REGNO (operands[6]))
13535 tmp = operands[5];
13536 operands[5] = operands[6];
13537 operands[6] = tmp;
13539 if (REGNO (operands[4]) > REGNO (operands[5]))
13541 tmp = operands[4];
13542 operands[4] = operands[5];
13543 operands[5] = tmp;
13546 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13547 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13548 break;
13550 default:
13551 abort ();
13554 return "";
13557 /* Routines for generating rtl. */
13558 void
13559 thumb_expand_movmemqi (rtx *operands)
13561 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13562 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13563 HOST_WIDE_INT len = INTVAL (operands[2]);
13564 HOST_WIDE_INT offset = 0;
13566 while (len >= 12)
13568 emit_insn (gen_movmem12b (out, in, out, in));
13569 len -= 12;
13572 if (len >= 8)
13574 emit_insn (gen_movmem8b (out, in, out, in));
13575 len -= 8;
13578 if (len >= 4)
13580 rtx reg = gen_reg_rtx (SImode);
13581 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13582 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13583 len -= 4;
13584 offset += 4;
13587 if (len >= 2)
13589 rtx reg = gen_reg_rtx (HImode);
13590 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13591 plus_constant (in, offset))));
13592 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13593 reg));
13594 len -= 2;
13595 offset += 2;
13598 if (len)
13600 rtx reg = gen_reg_rtx (QImode);
13601 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13602 plus_constant (in, offset))));
13603 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13604 reg));
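/* Illustrative only: a 27-byte copy is decomposed by the loop above
   into two 12-byte block moves, then one halfword and one final byte:
   12 + 12 + 2 + 1 = 27.  The 12- and 8-byte block moves update IN and
   OUT themselves, so OFFSET only tracks the trailing word, halfword
   and byte copies.  */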
13608 void
13609 thumb_reload_out_hi (rtx *operands)
13611 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13614 /* Handle reading a half-word from memory during reload. */
13615 void
13616 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13618 abort ();
13621 /* Return the length of a function name prefix
13622 that starts with the character 'c'. */
13623 static int
13624 arm_get_strip_length (int c)
13626 switch (c)
13628 ARM_NAME_ENCODING_LENGTHS
13629 default: return 0;
13633 /* Return a pointer to a function's name with any
13634 and all prefix encodings stripped from it. */
13635 const char *
13636 arm_strip_name_encoding (const char *name)
13638 int skip;
13640 while ((skip = arm_get_strip_length (* name)))
13641 name += skip;
13643 return name;
13646 /* If there is a '*' anywhere in the name's prefix, then
13647 emit the stripped name verbatim, otherwise prepend an
13648 underscore if leading underscores are being used. */
13649 void
13650 arm_asm_output_labelref (FILE *stream, const char *name)
13652 int skip;
13653 int verbatim = 0;
13655 while ((skip = arm_get_strip_length (* name)))
13657 verbatim |= (*name == '*');
13658 name += skip;
13661 if (verbatim)
13662 fputs (name, stream);
13663 else
13664 asm_fprintf (stream, "%U%s", name);
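/* Illustrative only, assuming ARM_NAME_ENCODING_LENGTHS treats '*' as
   a one-character prefix as the ARM ports normally do: a decl named
   "*foo" is emitted verbatim as "foo", while a plain "bar" is printed
   via %U and so picks up any user-label prefix, e.g. "_bar".  */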
13667 rtx aof_pic_label;
13669 #ifdef AOF_ASSEMBLER
13670 /* Special functions only needed when producing AOF syntax assembler. */
13672 struct pic_chain
13674 struct pic_chain * next;
13675 const char * symname;
13678 static struct pic_chain * aof_pic_chain = NULL;
13680 rtx
13681 aof_pic_entry (rtx x)
13683 struct pic_chain ** chainp;
13684 int offset;
13686 if (aof_pic_label == NULL_RTX)
13688 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13691 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13692 offset += 4, chainp = &(*chainp)->next)
13693 if ((*chainp)->symname == XSTR (x, 0))
13694 return plus_constant (aof_pic_label, offset);
13696 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13697 (*chainp)->next = NULL;
13698 (*chainp)->symname = XSTR (x, 0);
13699 return plus_constant (aof_pic_label, offset);
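/* Illustrative only: successive distinct symbols get consecutive
   slots, so the first aof_pic_entry call returns x$adcons + 0, the
   second x$adcons + 4, and a repeated symbol returns its existing
   offset instead of growing the chain.  */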
13702 void
13703 aof_dump_pic_table (FILE *f)
13705 struct pic_chain * chain;
13707 if (aof_pic_chain == NULL)
13708 return;
13710 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13711 PIC_OFFSET_TABLE_REGNUM,
13712 PIC_OFFSET_TABLE_REGNUM);
13713 fputs ("|x$adcons|\n", f);
13715 for (chain = aof_pic_chain; chain; chain = chain->next)
13717 fputs ("\tDCD\t", f);
13718 assemble_name (f, chain->symname);
13719 fputs ("\n", f);
13723 int arm_text_section_count = 1;
13725 char *
13726 aof_text_section (void)
13728 static char buf[100];
13729 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13730 arm_text_section_count++);
13731 if (flag_pic)
13732 strcat (buf, ", PIC, REENTRANT");
13733 return buf;
13736 static int arm_data_section_count = 1;
13738 char *
13739 aof_data_section (void)
13741 static char buf[100];
13742 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13743 return buf;
13746 /* The AOF assembler is religiously strict about declarations of
13747 imported and exported symbols, so that it is impossible to declare
13748 a function as imported near the beginning of the file, and then to
13749 export it later on. It is, however, possible to delay the decision
13750 until all the functions in the file have been compiled. To get
13751 around this, we maintain a list of the imports and exports, and
13752 delete from it any that are subsequently defined. At the end of
13753 compilation we spit the remainder of the list out before the END
13754 directive. */
13756 struct import
13758 struct import * next;
13759 const char * name;
13762 static struct import * imports_list = NULL;
13764 void
13765 aof_add_import (const char *name)
13767 struct import * new;
13769 for (new = imports_list; new; new = new->next)
13770 if (new->name == name)
13771 return;
13773 new = (struct import *) xmalloc (sizeof (struct import));
13774 new->next = imports_list;
13775 imports_list = new;
13776 new->name = name;
13779 void
13780 aof_delete_import (const char *name)
13782 struct import ** old;
13784 for (old = &imports_list; *old; old = & (*old)->next)
13786 if ((*old)->name == name)
13788 *old = (*old)->next;
13789 return;
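/* For example, a module that first calls an external "bar" and later
   defines it would have "bar" added to imports_list at the call site
   and removed again via aof_delete_import when the definition is
   seen, so no IMPORT directive is emitted for it at the end of
   compilation.  */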
13794 int arm_main_function = 0;
13796 static void
13797 aof_dump_imports (FILE *f)
13799 /* The AOF assembler needs this to cause the startup code to be extracted
13800 from the library. Bringing in __main causes the whole thing to work
13801 automagically. */
13802 if (arm_main_function)
13804 text_section ();
13805 fputs ("\tIMPORT __main\n", f);
13806 fputs ("\tDCD __main\n", f);
13809 /* Now dump the remaining imports. */
13810 while (imports_list)
13812 fprintf (f, "\tIMPORT\t");
13813 assemble_name (f, imports_list->name);
13814 fputc ('\n', f);
13815 imports_list = imports_list->next;
13819 static void
13820 aof_globalize_label (FILE *stream, const char *name)
13822 default_globalize_label (stream, name);
13823 if (! strcmp (name, "main"))
13824 arm_main_function = 1;
13827 static void
13828 aof_file_start (void)
13830 fputs ("__r0\tRN\t0\n", asm_out_file);
13831 fputs ("__a1\tRN\t0\n", asm_out_file);
13832 fputs ("__a2\tRN\t1\n", asm_out_file);
13833 fputs ("__a3\tRN\t2\n", asm_out_file);
13834 fputs ("__a4\tRN\t3\n", asm_out_file);
13835 fputs ("__v1\tRN\t4\n", asm_out_file);
13836 fputs ("__v2\tRN\t5\n", asm_out_file);
13837 fputs ("__v3\tRN\t6\n", asm_out_file);
13838 fputs ("__v4\tRN\t7\n", asm_out_file);
13839 fputs ("__v5\tRN\t8\n", asm_out_file);
13840 fputs ("__v6\tRN\t9\n", asm_out_file);
13841 fputs ("__sl\tRN\t10\n", asm_out_file);
13842 fputs ("__fp\tRN\t11\n", asm_out_file);
13843 fputs ("__ip\tRN\t12\n", asm_out_file);
13844 fputs ("__sp\tRN\t13\n", asm_out_file);
13845 fputs ("__lr\tRN\t14\n", asm_out_file);
13846 fputs ("__pc\tRN\t15\n", asm_out_file);
13847 fputs ("__f0\tFN\t0\n", asm_out_file);
13848 fputs ("__f1\tFN\t1\n", asm_out_file);
13849 fputs ("__f2\tFN\t2\n", asm_out_file);
13850 fputs ("__f3\tFN\t3\n", asm_out_file);
13851 fputs ("__f4\tFN\t4\n", asm_out_file);
13852 fputs ("__f5\tFN\t5\n", asm_out_file);
13853 fputs ("__f6\tFN\t6\n", asm_out_file);
13854 fputs ("__f7\tFN\t7\n", asm_out_file);
13855 text_section ();
13858 static void
13859 aof_file_end (void)
13861 if (flag_pic)
13862 aof_dump_pic_table (asm_out_file);
13863 aof_dump_imports (asm_out_file);
13864 fputs ("\tEND\n", asm_out_file);
13866 #endif /* AOF_ASSEMBLER */
13868 #ifndef ARM_PE
13869 /* Symbols in the text segment can be accessed without indirecting via the
13870 constant pool; it may take an extra binary operation, but this is still
13871 faster than indirecting via memory. Don't do this when not optimizing,
13872 since we won't be calculating all of the offsets necessary to do this
13873 simplification. */
13875 static void
13876 arm_encode_section_info (tree decl, rtx rtl, int first)
13878 /* This doesn't work with AOF syntax, since the string table may be in
13879 a different AREA. */
13880 #ifndef AOF_ASSEMBLER
13881 if (optimize > 0 && TREE_CONSTANT (decl))
13882 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
13883 #endif
13885 /* If we are referencing a function that is weak then encode a long call
13886 flag in the function name, otherwise if the function is static or
13887 known to be defined in this file then encode a short call flag. */
13888 if (first && DECL_P (decl))
13890 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
13891 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
13892 else if (! TREE_PUBLIC (decl))
13893 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
13896 #endif /* !ARM_PE */
13898 static void
13899 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
13901 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
13902 && !strcmp (prefix, "L"))
13904 arm_ccfsm_state = 0;
13905 arm_target_insn = NULL;
13907 default_internal_label (stream, prefix, labelno);
13910 /* Output code to add DELTA to the first argument, and then jump
13911 to FUNCTION. Used for C++ multiple inheritance. */
13912 static void
13913 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
13914 HOST_WIDE_INT delta,
13915 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
13916 tree function)
13918 static int thunk_label = 0;
13919 char label[256];
13920 int mi_delta = delta;
13921 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
13922 int shift = 0;
13923 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
13924 ? 1 : 0);
13925 if (mi_delta < 0)
13926 mi_delta = - mi_delta;
13927 if (TARGET_THUMB)
13929 int labelno = thunk_label++;
13930 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
13931 fputs ("\tldr\tr12, ", file);
13932 assemble_name (file, label);
13933 fputc ('\n', file);
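/* Emit the delta in pieces that each fit an ARM data-processing
   immediate: up to eight contiguous bits starting at an even bit
   position, matching the "8-bit value rotated by an even amount"
   immediate encoding.  */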
13935 while (mi_delta != 0)
13937 if ((mi_delta & (3 << shift)) == 0)
13938 shift += 2;
13939 else
13941 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
13942 mi_op, this_regno, this_regno,
13943 mi_delta & (0xff << shift));
13944 mi_delta &= ~(0xff << shift);
13945 shift += 8;
13948 if (TARGET_THUMB)
13950 fprintf (file, "\tbx\tr12\n");
13951 ASM_OUTPUT_ALIGN (file, 2);
13952 assemble_name (file, label);
13953 fputs (":\n", file);
13954 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
13956 else
13958 fputs ("\tb\t", file);
13959 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
13960 if (NEED_PLT_RELOC)
13961 fputs ("(PLT)", file);
13962 fputc ('\n', file);
13967 arm_emit_vector_const (FILE *file, rtx x)
13969 int i;
13970 const char * pattern;
13972 if (GET_CODE (x) != CONST_VECTOR)
13973 abort ();
13975 switch (GET_MODE (x))
13977 case V2SImode: pattern = "%08x"; break;
13978 case V4HImode: pattern = "%04x"; break;
13979 case V8QImode: pattern = "%02x"; break;
13980 default: abort ();
13983 fprintf (file, "0x");
13984 for (i = CONST_VECTOR_NUNITS (x); i--;)
13986 rtx element;
13988 element = CONST_VECTOR_ELT (x, i);
13989 fprintf (file, pattern, INTVAL (element));
13992 return 1;
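/* Example: a V4HImode constant with elements {1, 2, 3, 4} (element 0
   first) is printed most-significant element first as
   "0x0004000300020001".  */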
13995 const char *
13996 arm_output_load_gr (rtx *operands)
13998 rtx reg;
13999 rtx offset;
14000 rtx wcgr;
14001 rtx sum;
14003 if (GET_CODE (operands [1]) != MEM
14004 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14005 || GET_CODE (reg = XEXP (sum, 0)) != REG
14006 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14007 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14008 return "wldrw%?\t%0, %1";
14010 /* Fix up an out-of-range load of a GR register. */
14011 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14012 wcgr = operands[0];
14013 operands[0] = reg;
14014 output_asm_insn ("ldr%?\t%0, %1", operands);
14016 operands[0] = wcgr;
14017 operands[1] = reg;
14018 output_asm_insn ("tmcr%?\t%0, %1", operands);
14019 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14021 return "";
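/* For an out-of-range offset this expands to a four-instruction
   sequence that reuses the base register as a scratch, e.g. (with
   base register rB and destination wCGR register wcgr):

     str   rB, [sp, #-4]!    @ save the base register
     ldr   rB, [rB, #offset] @ load the value through the base
     tmcr  wcgr, rB          @ transfer it to the wCGR register
     ldr   rB, [sp], #4      @ restore the base register  */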
14024 static rtx
14025 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14026 int incoming ATTRIBUTE_UNUSED)
14028 #if 0
14029 /* FIXME: The ARM backend has special code to handle structure
14030 returns, and will reserve its own hidden first argument. So
14031 if this macro is enabled a *second* hidden argument will be
14032 reserved, which will break binary compatibility with old
14033 toolchains and also thunk handling. One day this should be
14034 fixed. */
14035 return 0;
14036 #else
14037 /* Register in which address to store a structure value
14038 is passed to a function. */
14039 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14040 #endif
14043 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14045 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14046 named arg and all anonymous args onto the stack.
14047 XXX I know the prologue shouldn't be pushing registers, but it is faster
14048 that way. */
14050 static void
14051 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14052 enum machine_mode mode ATTRIBUTE_UNUSED,
14053 tree type ATTRIBUTE_UNUSED,
14054 int *pretend_size,
14055 int second_time ATTRIBUTE_UNUSED)
14057 cfun->machine->uses_anonymous_args = 1;
14058 if (cum->nregs < NUM_ARG_REGS)
14059 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
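/* For instance, if the named arguments consume three of the four
   argument registers, *pretend_size becomes one word and the
   prologue stores the remaining register so the anonymous arguments
   are contiguous on the stack.  */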
14062 /* Return nonzero if the CONSUMER instruction (a store) does not need
14063 PRODUCER's value to calculate the address. */
14066 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14068 rtx value = PATTERN (producer);
14069 rtx addr = PATTERN (consumer);
14071 if (GET_CODE (value) == COND_EXEC)
14072 value = COND_EXEC_CODE (value);
14073 if (GET_CODE (value) == PARALLEL)
14074 value = XVECEXP (value, 0, 0);
14075 value = XEXP (value, 0);
14076 if (GET_CODE (addr) == COND_EXEC)
14077 addr = COND_EXEC_CODE (addr);
14078 if (GET_CODE (addr) == PARALLEL)
14079 addr = XVECEXP (addr, 0, 0);
14080 addr = XEXP (addr, 0);
14082 return !reg_overlap_mentioned_p (value, addr);
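/* Example: after a producer "add r0, r1, r2", the consumer
   "str r3, [r4, #8]" computes its address without r0, so the result
   is nonzero; "str r3, [r0, #8]" uses r0 in the address and yields
   zero.  */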
14085 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14086 have an early register shift value or amount dependency on the
14087 result of PRODUCER. */
14090 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14092 rtx value = PATTERN (producer);
14093 rtx op = PATTERN (consumer);
14094 rtx early_op;
14096 if (GET_CODE (value) == COND_EXEC)
14097 value = COND_EXEC_CODE (value);
14098 if (GET_CODE (value) == PARALLEL)
14099 value = XVECEXP (value, 0, 0);
14100 value = XEXP (value, 0);
14101 if (GET_CODE (op) == COND_EXEC)
14102 op = COND_EXEC_CODE (op);
14103 if (GET_CODE (op) == PARALLEL)
14104 op = XVECEXP (op, 0, 0);
14105 op = XEXP (op, 1);
14107 early_op = XEXP (op, 0);
14108 /* This is either an actual independent shift, or a shift applied to
14109 the first operand of another operation. We want the whole shift
14110 operation. */
14111 if (GET_CODE (early_op) == REG)
14112 early_op = op;
14114 return !reg_overlap_mentioned_p (value, early_op);
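/* Example: for a consumer whose source is
   (plus (ashift (reg r2) (const_int 4)) (reg r1)), the early operand
   is the whole shift, so a producer setting r2 yields zero while one
   setting only r1 yields nonzero.  */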
14117 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14118 have an early register shift value dependency on the result of
14119 PRODUCER. */
14122 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14124 rtx value = PATTERN (producer);
14125 rtx op = PATTERN (consumer);
14126 rtx early_op;
14128 if (GET_CODE (value) == COND_EXEC)
14129 value = COND_EXEC_CODE (value);
14130 if (GET_CODE (value) == PARALLEL)
14131 value = XVECEXP (value, 0, 0);
14132 value = XEXP (value, 0);
14133 if (GET_CODE (op) == COND_EXEC)
14134 op = COND_EXEC_CODE (op);
14135 if (GET_CODE (op) == PARALLEL)
14136 op = XVECEXP (op, 0, 0);
14137 op = XEXP (op, 1);
14139 early_op = XEXP (op, 0);
14141 /* This is either an actual independent shift, or a shift applied to
14142 the first operand of another operation. We want the value being
14143 shifted, in either case. */
14144 if (GET_CODE (early_op) != REG)
14145 early_op = XEXP (early_op, 0);
14147 return !reg_overlap_mentioned_p (value, early_op);
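/* Example: for a source (plus (ashift (reg r2) (reg r3)) (reg r1)),
   only r2, the value being shifted, is checked: a producer setting
   r3 (the shift amount) yields nonzero here, unlike
   arm_no_early_alu_shift_dep above.  */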
14150 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14151 have an early register mult dependency on the result of
14152 PRODUCER. */
14155 arm_no_early_mul_dep (rtx producer, rtx consumer)
14157 rtx value = PATTERN (producer);
14158 rtx op = PATTERN (consumer);
14160 if (GET_CODE (value) == COND_EXEC)
14161 value = COND_EXEC_CODE (value);
14162 if (GET_CODE (value) == PARALLEL)
14163 value = XVECEXP (value, 0, 0);
14164 value = XEXP (value, 0);
14165 if (GET_CODE (op) == COND_EXEC)
14166 op = COND_EXEC_CODE (op);
14167 if (GET_CODE (op) == PARALLEL)
14168 op = XVECEXP (op, 0, 0);
14169 op = XEXP (op, 1);
14171 return (GET_CODE (op) == PLUS
14172 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
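/* Example: for a multiply-accumulate whose source is
   (plus (mult (reg r1) (reg r2)) (reg r3)), a producer setting only
   the accumulator r3 yields nonzero, while one setting r1 or r2
   yields zero.  A plain multiply (no PLUS) always yields zero.  */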
14176 /* We can't rely on the caller doing the proper promotion when
14177 using APCS or ATPCS. */
14179 static bool
14180 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14182 return !TARGET_AAPCS_BASED;
14186 /* AAPCS based ABIs use short enums by default. */
14188 static bool
14189 arm_default_short_enums (void)
14191 return TARGET_AAPCS_BASED;
14195 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14197 static bool
14198 arm_align_anon_bitfield (void)
14200 return TARGET_AAPCS_BASED;
14204 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14206 static tree
14207 arm_cxx_guard_type (void)
14209 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14213 /* The EABI says to test the least significant bit of a guard variable. */
14215 static bool
14216 arm_cxx_guard_mask_bit (void)
14218 return TARGET_AAPCS_BASED;
14222 /* The EABI specifies that all array cookies are 8 bytes long. */
14224 static tree
14225 arm_get_cookie_size (tree type)
14227 tree size;
14229 if (!TARGET_AAPCS_BASED)
14230 return default_cxx_get_cookie_size (type);
14232 size = build_int_cst (sizetype, 8);
14233 return size;
14237 /* The EABI says that array cookies should also contain the element size. */
14239 static bool
14240 arm_cookie_has_size (void)
14242 return TARGET_AAPCS_BASED;
14246 /* The EABI says constructors and destructors should return a pointer to
14247 the object constructed/destroyed. */
14249 static bool
14250 arm_cxx_cdtor_returns_this (void)
14252 return TARGET_AAPCS_BASED;
14255 /* The EABI says that an inline function may never be the key
14256 method. */
14258 static bool
14259 arm_cxx_key_method_may_be_inline (void)
14261 return !TARGET_AAPCS_BASED;
14264 /* The EABI says that the virtual table, etc., for a class must be
14265 exported if it has a key method. The EABI does not specify the
14266 behavior if there is no key method, but there is no harm in
14267 exporting the class data in that case too. */
14269 static bool
14270 arm_cxx_export_class_data (void)
14272 return TARGET_AAPCS_BASED;
14275 void
14276 arm_set_return_address (rtx source, rtx scratch)
14278 arm_stack_offsets *offsets;
14279 HOST_WIDE_INT delta;
14280 rtx addr;
14281 unsigned long saved_regs;
14283 saved_regs = arm_compute_save_reg_mask ();
14285 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14286 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14287 else
14289 if (frame_pointer_needed)
14290 addr = plus_constant (hard_frame_pointer_rtx, -4);
14291 else
14293 /* LR will be the first saved register. */
14294 offsets = arm_get_frame_offsets ();
14295 delta = offsets->outgoing_args - (offsets->frame + 4);
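/* An ARM word load/store immediate offset must fit in 12 bits, so
   deltas of 4096 or more are split: the bits above the low 12 are
   added into SCRATCH first, leaving an in-range remainder.  */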
14298 if (delta >= 4096)
14300 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14301 GEN_INT (delta & ~4095)));
14302 addr = scratch;
14303 delta &= 4095;
14305 else
14306 addr = stack_pointer_rtx;
14308 addr = plus_constant (addr, delta);
14310 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14315 void
14316 thumb_set_return_address (rtx source, rtx scratch)
14318 arm_stack_offsets *offsets;
14319 HOST_WIDE_INT delta;
14320 int reg;
14321 rtx addr;
14322 unsigned long mask;
14324 emit_insn (gen_rtx_USE (VOIDmode, source));
14326 mask = thumb_compute_save_reg_mask ();
14327 if (mask & (1 << LR_REGNUM))
14329 offsets = arm_get_frame_offsets ();
14331 /* Find the saved regs. */
14332 if (frame_pointer_needed)
14334 delta = offsets->soft_frame - offsets->saved_args;
14335 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14337 else
14339 delta = offsets->outgoing_args - offsets->saved_args;
14340 reg = SP_REGNUM;
14342 /* Allow for the stack frame. */
14343 if (TARGET_BACKTRACE)
14344 delta -= 16;
14345 /* The link register is always the first saved register. */
14346 delta -= 4;
14348 /* Construct the address. */
14349 addr = gen_rtx_REG (SImode, reg);
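/* Thumb load/store offsets are limited: a low-register base allows a
   5-bit word offset (0-124 bytes) and an SP base an 8-bit word
   offset (0-1020 bytes), so larger deltas are built in SCRATCH
   instead.  */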
14350 if ((reg != SP_REGNUM && delta >= 128)
14351 || delta >= 1024)
14353 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14354 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14355 addr = scratch;
14357 else
14358 addr = plus_constant (addr, delta);
14360 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14362 else
14363 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14366 /* Implements target hook vector_mode_supported_p. */
14367 bool
14368 arm_vector_mode_supported_p (enum machine_mode mode)
14370 if ((mode == V2SImode)
14371 || (mode == V4HImode)
14372 || (mode == V8QImode))
14373 return true;
14375 return false;
14378 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14379 ARM insns and therefore guarantee that the shift count is modulo 256.
14380 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14381 guarantee no particular behavior for out-of-range counts. */
14383 static unsigned HOST_WIDE_INT
14384 arm_shift_truncation_mask (enum machine_mode mode)
14386 return mode == SImode ? 255 : 0;
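/* Example: with the SImode mask of 255 a variable shift by 260
   behaves like a shift by 4 (260 & 255 == 4); the zero returned for
   other modes promises nothing about out-of-range counts.  */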