1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
75 static rtx emit_sfm (int, int);
76 #ifndef AOF_ASSEMBLER
77 static bool arm_assemble_integer (rtx, unsigned int, int);
78 #endif
79 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
80 static arm_cc get_arm_condition_code (rtx);
81 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
82 static rtx is_jump_table (rtx);
83 static const char *output_multi_immediate (rtx *, const char *, const char *,
84 int, HOST_WIDE_INT);
85 static const char *shift_op (rtx, HOST_WIDE_INT *);
86 static struct machine_function *arm_init_machine_status (void);
87 static void thumb_exit (FILE *, int);
88 static rtx is_jump_table (rtx);
89 static HOST_WIDE_INT get_jump_table_size (rtx);
90 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
91 static Mnode *add_minipool_forward_ref (Mfix *);
92 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
93 static Mnode *add_minipool_backward_ref (Mfix *);
94 static void assign_minipool_offsets (Mfix *);
95 static void arm_print_value (FILE *, rtx);
96 static void dump_minipool (rtx);
97 static int arm_barrier_cost (rtx);
98 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
99 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
100 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
101 rtx);
102 static void arm_reorg (void);
103 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
104 static int current_file_function_operand (rtx);
105 static unsigned long arm_compute_save_reg0_reg12_mask (void);
106 static unsigned long arm_compute_save_reg_mask (void);
107 static unsigned long arm_isr_value (tree);
108 static unsigned long arm_compute_func_type (void);
109 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
110 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
111 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
112 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
113 #endif
114 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
115 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
116 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
117 static int arm_comp_type_attributes (tree, tree);
118 static void arm_set_default_type_attributes (tree);
119 static int arm_adjust_cost (rtx, rtx, rtx, int);
120 static int count_insns_for_constant (HOST_WIDE_INT, int);
121 static int arm_get_strip_length (int);
122 static bool arm_function_ok_for_sibcall (tree, tree);
123 static void arm_internal_label (FILE *, const char *, unsigned long);
124 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
125 tree);
126 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
127 static bool arm_size_rtx_costs (rtx, int, int, int *);
128 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
129 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
130 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
131 static bool arm_9e_rtx_costs (rtx, int, int, int *);
132 static int arm_address_cost (rtx);
133 static bool arm_memory_load_p (rtx);
134 static bool arm_cirrus_insn_p (rtx);
135 static void cirrus_reorg (rtx);
136 static void arm_init_builtins (void);
137 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
138 static void arm_init_iwmmxt_builtins (void);
139 static rtx safe_vector_operand (rtx, enum machine_mode);
140 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
141 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
142 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
143 static void emit_constant_insn (rtx cond, rtx pattern);
144 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
145 tree, bool);
147 #ifndef ARM_PE
148 static void arm_encode_section_info (tree, rtx, int);
149 #endif
151 static void arm_file_end (void);
153 #ifdef AOF_ASSEMBLER
154 static void aof_globalize_label (FILE *, const char *);
155 static void aof_dump_imports (FILE *);
156 static void aof_dump_pic_table (FILE *);
157 static void aof_file_start (void);
158 static void aof_file_end (void);
159 #endif
160 static rtx arm_struct_value_rtx (tree, int);
161 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
162 tree, int *, int);
163 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
164 enum machine_mode, tree, bool);
165 static bool arm_promote_prototypes (tree);
166 static bool arm_default_short_enums (void);
167 static bool arm_align_anon_bitfield (void);
168 static bool arm_return_in_msb (tree);
169 static bool arm_must_pass_in_stack (enum machine_mode, tree);
171 static tree arm_cxx_guard_type (void);
172 static bool arm_cxx_guard_mask_bit (void);
173 static tree arm_get_cookie_size (tree);
174 static bool arm_cookie_has_size (void);
175 static bool arm_cxx_cdtor_returns_this (void);
176 static bool arm_cxx_key_method_may_be_inline (void);
177 static void arm_cxx_determine_class_data_visibility (tree);
178 static bool arm_cxx_class_data_always_comdat (void);
179 static bool arm_cxx_use_aeabi_atexit (void);
180 static void arm_init_libfuncs (void);
181 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
183 /* Initialize the GCC target structure. */
184 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
185 #undef TARGET_MERGE_DECL_ATTRIBUTES
186 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
187 #endif
189 #undef TARGET_ATTRIBUTE_TABLE
190 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
192 #undef TARGET_ASM_FILE_END
193 #define TARGET_ASM_FILE_END arm_file_end
195 #ifdef AOF_ASSEMBLER
196 #undef TARGET_ASM_BYTE_OP
197 #define TARGET_ASM_BYTE_OP "\tDCB\t"
198 #undef TARGET_ASM_ALIGNED_HI_OP
199 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
200 #undef TARGET_ASM_ALIGNED_SI_OP
201 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
202 #undef TARGET_ASM_GLOBALIZE_LABEL
203 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
204 #undef TARGET_ASM_FILE_START
205 #define TARGET_ASM_FILE_START aof_file_start
206 #undef TARGET_ASM_FILE_END
207 #define TARGET_ASM_FILE_END aof_file_end
208 #else
209 #undef TARGET_ASM_ALIGNED_SI_OP
210 #define TARGET_ASM_ALIGNED_SI_OP NULL
211 #undef TARGET_ASM_INTEGER
212 #define TARGET_ASM_INTEGER arm_assemble_integer
213 #endif
215 #undef TARGET_ASM_FUNCTION_PROLOGUE
216 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
218 #undef TARGET_ASM_FUNCTION_EPILOGUE
219 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
221 #undef TARGET_COMP_TYPE_ATTRIBUTES
222 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
224 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
225 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
227 #undef TARGET_SCHED_ADJUST_COST
228 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
230 #undef TARGET_ENCODE_SECTION_INFO
231 #ifdef ARM_PE
232 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
233 #else
234 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
235 #endif
237 #undef TARGET_STRIP_NAME_ENCODING
238 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
240 #undef TARGET_ASM_INTERNAL_LABEL
241 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
243 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
244 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
246 #undef TARGET_ASM_OUTPUT_MI_THUNK
247 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
248 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
249 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
251 /* This will be overridden in arm_override_options. */
252 #undef TARGET_RTX_COSTS
253 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
254 #undef TARGET_ADDRESS_COST
255 #define TARGET_ADDRESS_COST arm_address_cost
257 #undef TARGET_SHIFT_TRUNCATION_MASK
258 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
259 #undef TARGET_VECTOR_MODE_SUPPORTED_P
260 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
262 #undef TARGET_MACHINE_DEPENDENT_REORG
263 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
265 #undef TARGET_INIT_BUILTINS
266 #define TARGET_INIT_BUILTINS arm_init_builtins
267 #undef TARGET_EXPAND_BUILTIN
268 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
270 #undef TARGET_INIT_LIBFUNCS
271 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
273 #undef TARGET_PROMOTE_FUNCTION_ARGS
274 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
275 #undef TARGET_PROMOTE_FUNCTION_RETURN
276 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
277 #undef TARGET_PROMOTE_PROTOTYPES
278 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
279 #undef TARGET_PASS_BY_REFERENCE
280 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
281 #undef TARGET_ARG_PARTIAL_BYTES
282 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
284 #undef TARGET_STRUCT_VALUE_RTX
285 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
287 #undef TARGET_SETUP_INCOMING_VARARGS
288 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
290 #undef TARGET_DEFAULT_SHORT_ENUMS
291 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
293 #undef TARGET_ALIGN_ANON_BITFIELD
294 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
296 #undef TARGET_CXX_GUARD_TYPE
297 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
299 #undef TARGET_CXX_GUARD_MASK_BIT
300 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
302 #undef TARGET_CXX_GET_COOKIE_SIZE
303 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
305 #undef TARGET_CXX_COOKIE_HAS_SIZE
306 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
308 #undef TARGET_CXX_CDTOR_RETURNS_THIS
309 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
311 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
312 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
314 #undef TARGET_CXX_USE_AEABI_ATEXIT
315 #define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit
317 #undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
318 #define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
319 arm_cxx_determine_class_data_visibility
321 #undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
322 #define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat
324 #undef TARGET_RETURN_IN_MSB
325 #define TARGET_RETURN_IN_MSB arm_return_in_msb
327 #undef TARGET_MUST_PASS_IN_STACK
328 #define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
330 struct gcc_target targetm = TARGET_INITIALIZER;
332 /* Obstack for minipool constant handling. */
333 static struct obstack minipool_obstack;
334 static char * minipool_startobj;
336 /* The maximum number of insns skipped which
337 will be conditionalised if possible. */
338 static int max_insns_skipped = 5;
340 extern FILE * asm_out_file;
342 /* True if we are currently building a constant table. */
343 int making_const_table;
345 /* Define the information needed to generate branch insns. This is
346 stored from the compare operation. */
347 rtx arm_compare_op0, arm_compare_op1;
349 /* The processor for which instructions should be scheduled. */
350 enum processor_type arm_tune = arm_none;
352 /* Which floating point model to use. */
353 enum arm_fp_model arm_fp_model;
355 /* Which floating point hardware is available. */
356 enum fputype arm_fpu_arch;
358 /* Which floating point hardware to schedule for. */
359 enum fputype arm_fpu_tune;
361 /* Whether to use floating point hardware. */
362 enum float_abi_type arm_float_abi;
364 /* Which ABI to use. */
365 enum arm_abi_type arm_abi;
367 /* Set by the -mfpu=... option. */
368 const char * target_fpu_name = NULL;
370 /* Set by the -mfpe=... option. */
371 const char * target_fpe_name = NULL;
373 /* Set by the -mfloat-abi=... option. */
374 const char * target_float_abi_name = NULL;
376 /* Set by the legacy -mhard-float and -msoft-float options. */
377 const char * target_float_switch = NULL;
379 /* Set by the -mabi=... option. */
380 const char * target_abi_name = NULL;
382 /* Used to parse -mstructure_size_boundary command line option. */
383 const char * structure_size_string = NULL;
384 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
386 /* Used for Thumb call_via trampolines. */
387 rtx thumb_call_via_label[14];
388 static int thumb_call_reg_needed;
390 /* Bit values used to identify processor capabilities. */
391 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
392 #define FL_ARCH3M (1 << 1) /* Extended multiply */
393 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
394 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
395 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
396 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
397 #define FL_THUMB (1 << 6) /* Thumb aware */
398 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
399 #define FL_STRONG (1 << 8) /* StrongARM */
400 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
401 #define FL_XSCALE (1 << 10) /* XScale */
402 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
403 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
404 media instructions. */
405 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
406 #define FL_WBUF (1 << 14) /* Schedule for write buffer ops.
407 Note: ARM6 & 7 derivatives only. */
409 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
411 #define FL_FOR_ARCH2 0
412 #define FL_FOR_ARCH3 FL_MODE32
413 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
414 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
415 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
416 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
417 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
418 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
419 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
420 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
421 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
422 #define FL_FOR_ARCH6J FL_FOR_ARCH6
423 #define FL_FOR_ARCH6K FL_FOR_ARCH6
424 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
425 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6
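/* For illustration, the FL_FOR_ARCH* macros above are cumulative: each
   architecture level adds its new capability bits to those of its
   predecessor.  Expanding one of them by hand:

     FL_FOR_ARCH5TE
       = FL_FOR_ARCH5E | FL_THUMB
       = FL_FOR_ARCH5 | FL_ARCH5E | FL_THUMB
       = FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB

   so the insn_flags mask for an ARMv5TE part contains every bit tested
   by the arm_arch* variables that arm_override_options sets up below.  */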
427 /* The bits in this mask specify which
428 instructions we are allowed to generate. */
429 static unsigned long insn_flags = 0;
431 /* The bits in this mask specify which instruction scheduling options should
432 be used. */
433 static unsigned long tune_flags = 0;
435 /* The following are used in the arm.md file as equivalents to bits
436 in the above two flag variables. */
438 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
439 int arm_arch3m = 0;
441 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
442 int arm_arch4 = 0;
444 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
445 int arm_arch4t = 0;
447 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
448 int arm_arch5 = 0;
450 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
451 int arm_arch5e = 0;
453 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
454 int arm_arch6 = 0;
456 /* Nonzero if this chip can benefit from load scheduling. */
457 int arm_ld_sched = 0;
459 /* Nonzero if this chip is a StrongARM. */
460 int arm_tune_strongarm = 0;
462 /* Nonzero if this chip is a Cirrus variant. */
463 int arm_arch_cirrus = 0;
465 /* Nonzero if this chip supports Intel Wireless MMX technology. */
466 int arm_arch_iwmmxt = 0;
468 /* Nonzero if this chip is an XScale. */
469 int arm_arch_xscale = 0;
471 /* Nonzero if tuning for XScale */
472 int arm_tune_xscale = 0;
474 /* Nonzero if we want to tune for stores that access the write-buffer.
475 This typically means an ARM6 or ARM7 with MMU or MPU. */
476 int arm_tune_wbuf = 0;
478 /* Nonzero if generating Thumb instructions. */
479 int thumb_code = 0;
481 /* Nonzero if we should define __THUMB_INTERWORK__ in the
482 preprocessor.
483 XXX This is a bit of a hack; it's intended to help work around
484 problems in GLD, which doesn't understand that armv5t code is
485 interworking clean. */
486 int arm_cpp_interwork = 0;
488 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
489 must report the mode of the memory reference from PRINT_OPERAND to
490 PRINT_OPERAND_ADDRESS. */
491 enum machine_mode output_memory_reference_mode;
493 /* The register number to be used for the PIC offset register. */
494 const char * arm_pic_register_string = NULL;
495 int arm_pic_register = INVALID_REGNUM;
497 /* Set to 1 when a return insn is output; this means that the epilogue
498 is not needed. */
499 int return_used_this_function;
501 /* Set to 1 after arm_reorg has started. Reset at the start of
502 the next function. */
503 static int after_arm_reorg = 0;
505 /* The maximum number of insns to be used when loading a constant. */
506 static int arm_constant_limit = 3;
508 /* For an explanation of these variables, see final_prescan_insn below. */
509 int arm_ccfsm_state;
510 enum arm_cond_code arm_current_cc;
511 rtx arm_target_insn;
512 int arm_target_label;
514 /* The condition codes of the ARM, and the inverse function. */
515 static const char * const arm_condition_codes[] =
517 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
518 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
521 #define streq(string1, string2) (strcmp (string1, string2) == 0)
523 /* Initialization code. */
525 struct processors
527 const char *const name;
528 enum processor_type core;
529 const char *arch;
530 const unsigned long flags;
531 bool (* rtx_costs) (rtx, int, int, int *);
534 /* Not all of these give usefully different compilation alternatives,
535 but there is no simple way of generalizing them. */
536 static const struct processors all_cores[] =
538 /* ARM Cores */
539 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
540 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
541 #include "arm-cores.def"
542 #undef ARM_CORE
543 {NULL, arm_none, NULL, 0, NULL}
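/* To sketch how the ARM_CORE X-macro above expands (the entry shown is
   hypothetical, not copied from arm-cores.def): an entry such as

     ARM_CORE("arm7tdmi", arm7tdmi, 4T, FL_CO_PROC, slowmul)

   would produce

     {"arm7tdmi", arm_none, "4T", FL_CO_PROC | FL_FOR_ARCH4T,
      arm_slowmul_rtx_costs},

   i.e. the architecture token selects the matching FL_FOR_ARCH* mask
   and the cost-model name selects one of the arm_*_rtx_costs routines
   declared earlier in this file.  */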
546 static const struct processors all_architectures[] =
548 /* ARM Architectures */
549 /* We don't specify rtx_costs here as it will be figured out
550 from the core. */
552 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
553 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
554 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
555 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
556 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
557 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
558 implementations that support it, so we will leave it out for now. */
559 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
560 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
561 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
562 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
563 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
564 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
565 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
566 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
567 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
568 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
569 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
570 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
571 {NULL, arm_none, NULL, 0 , NULL}
574 /* This is a magic structure. The 'string' field is magically filled in
575 with a pointer to the value specified by the user on the command line
576 assuming that the user has specified such a value. */
578 struct arm_cpu_select arm_select[] =
580 /* string name processors */
581 { NULL, "-mcpu=", all_cores },
582 { NULL, "-march=", all_architectures },
583 { NULL, "-mtune=", all_cores }
587 /* The name of the preprocessor macro to define for this architecture. */
589 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
591 struct fpu_desc
593 const char * name;
594 enum fputype fpu;
598 /* Available values for -mfpu=. */
600 static const struct fpu_desc all_fpus[] =
602 {"fpa", FPUTYPE_FPA},
603 {"fpe2", FPUTYPE_FPA_EMU2},
604 {"fpe3", FPUTYPE_FPA_EMU2},
605 {"maverick", FPUTYPE_MAVERICK},
606 {"vfp", FPUTYPE_VFP}
610 /* Floating point models used by the different hardware.
611 See fputype in arm.h. */
613 static const enum fputype fp_model_for_fpu[] =
615 /* No FP hardware. */
616 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
617 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
618 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
619 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
620 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
621 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
625 struct float_abi
627 const char * name;
628 enum float_abi_type abi_type;
632 /* Available values for -mfloat-abi=. */
634 static const struct float_abi all_float_abis[] =
636 {"soft", ARM_FLOAT_ABI_SOFT},
637 {"softfp", ARM_FLOAT_ABI_SOFTFP},
638 {"hard", ARM_FLOAT_ABI_HARD}
642 struct abi_name
644 const char *name;
645 enum arm_abi_type abi_type;
649 /* Available values for -mabi=. */
651 static const struct abi_name arm_all_abis[] =
653 {"apcs-gnu", ARM_ABI_APCS},
654 {"atpcs", ARM_ABI_ATPCS},
655 {"aapcs", ARM_ABI_AAPCS},
656 {"iwmmxt", ARM_ABI_IWMMXT}
659 /* Return the number of bits set in VALUE. */
660 static unsigned
661 bit_count (unsigned long value)
663 unsigned long count = 0;
665 while (value)
667 count++;
668 value &= value - 1; /* Clear the least-significant set bit. */
671 return count;
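/* A quick illustration of the loop above: "value &= value - 1" clears
   the lowest set bit on each iteration, so the loop runs once per set
   bit rather than once per bit position.  For value = 0x28 (binary
   101000):

     0x28 & 0x27 == 0x20, count = 1
     0x20 & 0x1f == 0x00, count = 2

   and bit_count returns 2.  */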
674 /* Set up library functions unique to ARM. */
676 static void
677 arm_init_libfuncs (void)
679 /* There are no special library functions unless we are using the
680 ARM BPABI. */
681 if (!TARGET_BPABI)
682 return;
684 /* The functions below are described in Section 4 of the "Run-Time
685 ABI for the ARM architecture", Version 1.0. */
687 /* Double-precision floating-point arithmetic. Table 2. */
688 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
689 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
690 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
691 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
692 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
694 /* Double-precision comparisons. Table 3. */
695 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
696 set_optab_libfunc (ne_optab, DFmode, NULL);
697 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
698 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
699 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
700 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
701 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
703 /* Single-precision floating-point arithmetic. Table 4. */
704 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
705 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
706 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
707 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
708 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
710 /* Single-precision comparisons. Table 5. */
711 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
712 set_optab_libfunc (ne_optab, SFmode, NULL);
713 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
714 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
715 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
716 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
717 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
719 /* Floating-point to integer conversions. Table 6. */
720 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
721 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
722 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
723 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
724 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
725 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
726 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
727 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
729 /* Conversions between floating types. Table 7. */
730 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
731 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
733 /* Integer to floating-point conversions. Table 8. */
734 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
735 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
736 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
737 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
738 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
739 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
740 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
741 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
743 /* Long long. Table 9. */
744 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
745 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
746 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
747 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
748 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
749 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
750 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
751 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
753 /* Integer (32/32->32) division. \S 4.3.1. */
754 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
755 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
757 /* The divmod functions are designed so that they can be used for
758 plain division, even though they return both the quotient and the
759 remainder. The quotient is returned in the usual location (i.e.,
760 r0 for SImode, {r0, r1} for DImode), just as would be expected
761 for an ordinary division routine. Because the AAPCS calling
762 conventions specify that all of { r0, r1, r2, r3 } are
763 call-clobbered registers, there is no need to tell the compiler
764 explicitly that those registers are clobbered by these
765 routines. */
766 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
767 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
768 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
769 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
771 /* We don't have mod libcalls. Fortunately gcc knows how to use the
772 divmod libcalls instead. */
773 set_optab_libfunc (smod_optab, DImode, NULL);
774 set_optab_libfunc (umod_optab, DImode, NULL);
775 set_optab_libfunc (smod_optab, SImode, NULL);
776 set_optab_libfunc (umod_optab, SImode, NULL);
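/* A minimal illustration of the divmod remapping above (it only takes
   effect when TARGET_BPABI is set, because of the early return at the
   top of this function): a plain 32-bit division such as

     int quot (int a, int b) { return a / b; }

   is emitted as a call to __aeabi_idivmod.  The quotient comes back in
   r0, exactly where a dedicated division routine would put it, and the
   unused remainder in r1 is simply ignored; that is why sdiv_optab can
   share the divmod entry point.  */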
779 /* Fix up any incompatible options that the user has specified.
780 This has now turned into a maze. */
781 void
782 arm_override_options (void)
784 unsigned i;
786 /* Set up the flags based on the cpu/architecture selected by the user. */
787 for (i = ARRAY_SIZE (arm_select); i--;)
789 struct arm_cpu_select * ptr = arm_select + i;
791 if (ptr->string != NULL && ptr->string[0] != '\0')
793 const struct processors * sel;
795 for (sel = ptr->processors; sel->name != NULL; sel++)
796 if (streq (ptr->string, sel->name))
798 /* Set the architecture define. */
799 if (i != 2)
800 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
802 /* Determine the processor core for which we should
803 tune code-generation. */
804 if (/* -mcpu= is a sensible default. */
805 i == 0
806 /* If -march= is used, and -mcpu= has not been used,
807 assume that we should tune for a representative
808 CPU from that architecture. */
809 || i == 1
810 /* -mtune= overrides -mcpu= and -march=. */
811 || i == 2)
812 arm_tune = (enum processor_type) (sel - ptr->processors);
814 if (i != 2)
816 /* If we have been given an architecture and a processor
817 make sure that they are compatible. We only generate
818 a warning though, and we prefer the CPU over the
819 architecture. */
820 if (insn_flags != 0 && (insn_flags ^ sel->flags))
821 warning (0, "switch -mcpu=%s conflicts with -march= switch",
822 ptr->string);
824 insn_flags = sel->flags;
827 break;
830 if (sel->name == NULL)
831 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
835 /* If the user did not specify a processor, choose one for them. */
836 if (insn_flags == 0)
838 const struct processors * sel;
839 unsigned int sought;
840 enum processor_type cpu;
842 cpu = TARGET_CPU_DEFAULT;
843 if (cpu == arm_none)
845 #ifdef SUBTARGET_CPU_DEFAULT
846 /* Use the subtarget default CPU if none was specified by
847 configure. */
848 cpu = SUBTARGET_CPU_DEFAULT;
849 #endif
850 /* Default to ARM6. */
851 if (cpu == arm_none)
852 cpu = arm6;
854 sel = &all_cores[cpu];
856 insn_flags = sel->flags;
858 /* Now check to see if the user has specified some command line
859 switches that require certain abilities from the cpu. */
860 sought = 0;
862 if (TARGET_INTERWORK || TARGET_THUMB)
864 sought |= (FL_THUMB | FL_MODE32);
866 /* There are no ARM processors that support both APCS-26 and
867 interworking. Therefore we force FL_MODE26 to be removed
868 from insn_flags here (if it was set), so that the search
869 below will always be able to find a compatible processor. */
870 insn_flags &= ~FL_MODE26;
873 if (sought != 0 && ((sought & insn_flags) != sought))
875 /* Try to locate a CPU type that supports all of the abilities
876 of the default CPU, plus the extra abilities requested by
877 the user. */
878 for (sel = all_cores; sel->name != NULL; sel++)
879 if ((sel->flags & sought) == (sought | insn_flags))
880 break;
882 if (sel->name == NULL)
884 unsigned current_bit_count = 0;
885 const struct processors * best_fit = NULL;
887 /* Ideally we would like to issue an error message here
888 saying that it was not possible to find a CPU compatible
889 with the default CPU, but which also supports the command
890 line options specified by the programmer, and so they
891 ought to use the -mcpu=<name> command line option to
892 override the default CPU type.
894 If we cannot find a cpu that has both the
895 characteristics of the default cpu and the given
896 command line options we scan the array again looking
897 for a best match. */
898 for (sel = all_cores; sel->name != NULL; sel++)
899 if ((sel->flags & sought) == sought)
901 unsigned count;
903 count = bit_count (sel->flags & insn_flags);
905 if (count >= current_bit_count)
907 best_fit = sel;
908 current_bit_count = count;
912 if (best_fit == NULL)
913 abort ();
914 else
915 sel = best_fit;
918 insn_flags = sel->flags;
920 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
921 if (arm_tune == arm_none)
922 arm_tune = (enum processor_type) (sel - all_cores);
925 /* The processor for which we should tune should now have been
926 chosen. */
927 if (arm_tune == arm_none)
928 abort ();
930 tune_flags = all_cores[(int)arm_tune].flags;
931 if (optimize_size)
932 targetm.rtx_costs = arm_size_rtx_costs;
933 else
934 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
936 /* Make sure that the processor choice does not conflict with any of the
937 other command line choices. */
938 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
940 warning (0, "target CPU does not support interworking" );
941 target_flags &= ~ARM_FLAG_INTERWORK;
944 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
946 warning (0, "target CPU does not support THUMB instructions");
947 target_flags &= ~ARM_FLAG_THUMB;
950 if (TARGET_APCS_FRAME && TARGET_THUMB)
952 /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
953 target_flags &= ~ARM_FLAG_APCS_FRAME;
956 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
957 from here where no function is being compiled currently. */
958 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
959 && TARGET_ARM)
960 warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
962 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
963 warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
965 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
966 warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");
968 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
970 warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
971 target_flags |= ARM_FLAG_APCS_FRAME;
974 if (TARGET_POKE_FUNCTION_NAME)
975 target_flags |= ARM_FLAG_APCS_FRAME;
977 if (TARGET_APCS_REENT && flag_pic)
978 error ("-fpic and -mapcs-reent are incompatible");
980 if (TARGET_APCS_REENT)
981 warning (0, "APCS reentrant code not supported. Ignored");
983 /* If this target is normally configured to use APCS frames, warn if they
984 are turned off and debugging is turned on. */
985 if (TARGET_ARM
986 && write_symbols != NO_DEBUG
987 && !TARGET_APCS_FRAME
988 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
989 warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
991 /* If stack checking is disabled, we can use r10 as the PIC register,
992 which keeps r9 available. */
993 if (flag_pic)
994 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
996 if (TARGET_APCS_FLOAT)
997 warning (0, "passing floating point arguments in fp regs not yet supported");
999 /* Initialize boolean versions of the flags, for use in the arm.md file. */
1000 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
1001 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
1002 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
1003 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
1004 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
1005 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
1006 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
1007 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
1009 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
1010 arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
1011 thumb_code = (TARGET_ARM == 0);
1012 arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
1013 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
1014 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
1016 /* V5 code we generate is completely interworking capable, so we turn off
1017 TARGET_INTERWORK here to avoid many tests later on. */
1019 /* XXX However, we must pass the right pre-processor defines to CPP
1020 or GLD can get confused. This is a hack. */
1021 if (TARGET_INTERWORK)
1022 arm_cpp_interwork = 1;
1024 if (arm_arch5)
1025 target_flags &= ~ARM_FLAG_INTERWORK;
1027 if (target_abi_name)
1029 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1031 if (streq (arm_all_abis[i].name, target_abi_name))
1033 arm_abi = arm_all_abis[i].abi_type;
1034 break;
1037 if (i == ARRAY_SIZE (arm_all_abis))
1038 error ("invalid ABI option: -mabi=%s", target_abi_name);
1040 else
1041 arm_abi = ARM_DEFAULT_ABI;
1043 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1044 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1046 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1047 error ("iwmmxt abi requires an iwmmxt capable cpu");
1049 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1050 if (target_fpu_name == NULL && target_fpe_name != NULL)
1052 if (streq (target_fpe_name, "2"))
1053 target_fpu_name = "fpe2";
1054 else if (streq (target_fpe_name, "3"))
1055 target_fpu_name = "fpe3";
1056 else
1057 error ("invalid floating point emulation option: -mfpe=%s",
1058 target_fpe_name);
1060 if (target_fpu_name != NULL)
1062 /* The user specified a FPU. */
1063 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1065 if (streq (all_fpus[i].name, target_fpu_name))
1067 arm_fpu_arch = all_fpus[i].fpu;
1068 arm_fpu_tune = arm_fpu_arch;
1069 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1070 break;
1073 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1074 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1076 else
1078 #ifdef FPUTYPE_DEFAULT
1079 /* Use the default if it is specified for this platform. */
1080 arm_fpu_arch = FPUTYPE_DEFAULT;
1081 arm_fpu_tune = FPUTYPE_DEFAULT;
1082 #else
1083 /* Pick one based on CPU type. */
1084 /* ??? Some targets assume FPA is the default.
1085 if ((insn_flags & FL_VFP) != 0)
1086 arm_fpu_arch = FPUTYPE_VFP;
1087 else
1088 */
1089 if (arm_arch_cirrus)
1090 arm_fpu_arch = FPUTYPE_MAVERICK;
1091 else
1092 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1093 #endif
1094 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1095 arm_fpu_tune = FPUTYPE_FPA;
1096 else
1097 arm_fpu_tune = arm_fpu_arch;
1098 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1099 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1100 abort ();
1103 if (target_float_abi_name != NULL)
1105 /* The user specified a FP ABI. */
1106 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1108 if (streq (all_float_abis[i].name, target_float_abi_name))
1110 arm_float_abi = all_float_abis[i].abi_type;
1111 break;
1114 if (i == ARRAY_SIZE (all_float_abis))
1115 error ("invalid floating point abi: -mfloat-abi=%s",
1116 target_float_abi_name);
1118 else if (target_float_switch)
1120 /* This is a bit of a hack to avoid needing target flags for these. */
1121 if (target_float_switch[0] == 'h')
1122 arm_float_abi = ARM_FLOAT_ABI_HARD;
1123 else
1124 arm_float_abi = ARM_FLOAT_ABI_SOFT;
1126 else
1127 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1129 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1130 sorry ("-mfloat-abi=hard and VFP");
1132 /* If soft-float is specified then don't use FPU. */
1133 if (TARGET_SOFT_FLOAT)
1134 arm_fpu_arch = FPUTYPE_NONE;
1136 /* For arm2/3 there is no need to do any scheduling if there is only
1137 a floating point emulator, or we are doing software floating-point. */
1138 if ((TARGET_SOFT_FLOAT
1139 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1140 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1141 && (tune_flags & FL_MODE32) == 0)
1142 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1144 /* Override the default structure alignment for AAPCS ABI. */
1145 if (arm_abi == ARM_ABI_AAPCS)
1146 arm_structure_size_boundary = 8;
1148 if (structure_size_string != NULL)
1150 int size = strtol (structure_size_string, NULL, 0);
1152 if (size == 8 || size == 32
1153 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1154 arm_structure_size_boundary = size;
1155 else
1156 warning (0, "structure size boundary can only be set to %s",
1157 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1160 if (arm_pic_register_string != NULL)
1162 int pic_register = decode_reg_name (arm_pic_register_string);
1164 if (!flag_pic)
1165 warning (0, "-mpic-register= is useless without -fpic");
1167 /* Prevent the user from choosing an obviously stupid PIC register. */
1168 else if (pic_register < 0 || call_used_regs[pic_register]
1169 || pic_register == HARD_FRAME_POINTER_REGNUM
1170 || pic_register == STACK_POINTER_REGNUM
1171 || pic_register >= PC_REGNUM)
1172 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1173 else
1174 arm_pic_register = pic_register;
1177 if (TARGET_THUMB && flag_schedule_insns)
1179 /* Don't warn since it's on by default in -O2. */
1180 flag_schedule_insns = 0;
1183 if (optimize_size)
1185 /* There's some dispute as to whether this should be 1 or 2. However,
1186 experiments seem to show that in pathological cases a setting of
1187 1 degrades less severely than a setting of 2. This could change if
1188 other parts of the compiler change their behavior. */
1189 arm_constant_limit = 1;
1191 /* If optimizing for size, bump the number of instructions that we
1192 are prepared to conditionally execute (even on a StrongARM). */
1193 max_insns_skipped = 6;
1195 else
1197 /* For processors with load scheduling, it never costs more than
1198 2 cycles to load a constant, and the load scheduler may well
1199 reduce that to 1. */
1200 if (arm_ld_sched)
1201 arm_constant_limit = 1;
1203 /* On XScale the longer latency of a load makes it more difficult
1204 to achieve a good schedule, so it's faster to synthesize
1205 constants that can be done in two insns. */
1206 if (arm_tune_xscale)
1207 arm_constant_limit = 2;
1209 /* StrongARM has early execution of branches, so a sequence
1210 that is worth skipping is shorter. */
1211 if (arm_tune_strongarm)
1212 max_insns_skipped = 3;
1215 /* Register global variables with the garbage collector. */
1216 arm_add_gc_roots ();
1219 static void
1220 arm_add_gc_roots (void)
1222 gcc_obstack_init(&minipool_obstack);
1223 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1226 /* A table of known ARM exception types.
1227 For use with the interrupt function attribute. */
1229 typedef struct
1231 const char *const arg;
1232 const unsigned long return_value;
1234 isr_attribute_arg;
1236 static const isr_attribute_arg isr_attribute_args [] =
1238 { "IRQ", ARM_FT_ISR },
1239 { "irq", ARM_FT_ISR },
1240 { "FIQ", ARM_FT_FIQ },
1241 { "fiq", ARM_FT_FIQ },
1242 { "ABORT", ARM_FT_ISR },
1243 { "abort", ARM_FT_ISR },
1244 { "ABORT", ARM_FT_ISR },
1245 { "abort", ARM_FT_ISR },
1246 { "UNDEF", ARM_FT_EXCEPTION },
1247 { "undef", ARM_FT_EXCEPTION },
1248 { "SWI", ARM_FT_EXCEPTION },
1249 { "swi", ARM_FT_EXCEPTION },
1250 { NULL, ARM_FT_NORMAL }
1253 /* Returns the (interrupt) function type of the current
1254 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1256 static unsigned long
1257 arm_isr_value (tree argument)
1259 const isr_attribute_arg * ptr;
1260 const char * arg;
1262 /* No argument - default to IRQ. */
1263 if (argument == NULL_TREE)
1264 return ARM_FT_ISR;
1266 /* Get the value of the argument. */
1267 if (TREE_VALUE (argument) == NULL_TREE
1268 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1269 return ARM_FT_UNKNOWN;
1271 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1273 /* Check it against the list of known arguments. */
1274 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1275 if (streq (arg, ptr->arg))
1276 return ptr->return_value;
1278 /* An unrecognized interrupt type. */
1279 return ARM_FT_UNKNOWN;
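/* For example (an illustrative use of the table above), a handler
   declared as

     void fiq_handler (void) __attribute__ ((interrupt ("FIQ")));

   is mapped by arm_isr_value to ARM_FT_FIQ, while a plain
   __attribute__ ((interrupt)) with no argument defaults to ARM_FT_ISR,
   i.e. an ordinary IRQ handler.  */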
1282 /* Computes the type of the current function. */
1284 static unsigned long
1285 arm_compute_func_type (void)
1287 unsigned long type = ARM_FT_UNKNOWN;
1288 tree a;
1289 tree attr;
1291 if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1292 abort ();
1294 /* Decide if the current function is volatile. Such functions
1295 never return, and many memory cycles can be saved by not storing
1296 register values that will never be needed again. This optimization
1297 was added to speed up context switching in a kernel application. */
1298 if (optimize > 0
1299 && TREE_NOTHROW (current_function_decl)
1300 && TREE_THIS_VOLATILE (current_function_decl))
1301 type |= ARM_FT_VOLATILE;
1303 if (cfun->static_chain_decl != NULL)
1304 type |= ARM_FT_NESTED;
1306 attr = DECL_ATTRIBUTES (current_function_decl);
1308 a = lookup_attribute ("naked", attr);
1309 if (a != NULL_TREE)
1310 type |= ARM_FT_NAKED;
1312 a = lookup_attribute ("isr", attr);
1313 if (a == NULL_TREE)
1314 a = lookup_attribute ("interrupt", attr);
1316 if (a == NULL_TREE)
1317 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1318 else
1319 type |= arm_isr_value (TREE_VALUE (a));
1321 return type;
1324 /* Returns the type of the current function. */
1326 unsigned long
1327 arm_current_func_type (void)
1329 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1330 cfun->machine->func_type = arm_compute_func_type ();
1332 return cfun->machine->func_type;
1335 /* Return 1 if it is possible to return using a single instruction.
1336 If SIBLING is non-null, this is a test for a return before a sibling
1337 call. SIBLING is the call insn, so we can examine its register usage. */
1340 use_return_insn (int iscond, rtx sibling)
1342 int regno;
1343 unsigned int func_type;
1344 unsigned long saved_int_regs;
1345 unsigned HOST_WIDE_INT stack_adjust;
1346 arm_stack_offsets *offsets;
1348 /* Never use a return instruction before reload has run. */
1349 if (!reload_completed)
1350 return 0;
1352 func_type = arm_current_func_type ();
1354 /* Naked functions and volatile functions need special
1355 consideration. */
1356 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1357 return 0;
1359 /* So do interrupt functions that use the frame pointer. */
1360 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1361 return 0;
1363 offsets = arm_get_frame_offsets ();
1364 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1366 /* As do variadic functions. */
1367 if (current_function_pretend_args_size
1368 || cfun->machine->uses_anonymous_args
1369 /* Or if the function calls __builtin_eh_return () */
1370 || current_function_calls_eh_return
1371 /* Or if the function calls alloca */
1372 || current_function_calls_alloca
1373 /* Or if there is a stack adjustment. However, if the stack pointer
1374 is saved on the stack, we can use a pre-incrementing stack load. */
1375 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1376 return 0;
1378 saved_int_regs = arm_compute_save_reg_mask ();
1380 /* Unfortunately, the insn
1382 ldmib sp, {..., sp, ...}
1384 triggers a bug on most SA-110 based devices, such that the stack
1385 pointer won't be correctly restored if the instruction takes a
1386 page fault. We work around this problem by popping r3 along with
1387 the other registers, since that is never slower than executing
1388 another instruction.
1390 We test for !arm_arch5 here, because code for any architecture
1391 less than this could potentially be run on one of the buggy
1392 chips. */
1393 if (stack_adjust == 4 && !arm_arch5)
1395 /* Validate that r3 is a call-clobbered register (always true in
1396 the default abi) ... */
1397 if (!call_used_regs[3])
1398 return 0;
1400 /* ... that it isn't being used for a return value (always true
1401 until we implement return-in-regs), or for a tail-call
1402 argument ... */
1403 if (sibling)
1405 if (GET_CODE (sibling) != CALL_INSN)
1406 abort ();
1408 if (find_regno_fusage (sibling, USE, 3))
1409 return 0;
1412 /* ... and that there are no call-saved registers in r0-r2
1413 (always true in the default ABI). */
1414 if (saved_int_regs & 0x7)
1415 return 0;
1418 /* Can't be done if interworking with Thumb, and any registers have been
1419 stacked. */
1420 if (TARGET_INTERWORK && saved_int_regs != 0)
1421 return 0;
1423 /* On StrongARM, conditional returns are expensive if they aren't
1424 taken and multiple registers have been stacked. */
1425 if (iscond && arm_tune_strongarm)
1427 /* Conditional return when just the LR is stored is a simple
1428 conditional-load instruction, that's not expensive. */
1429 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1430 return 0;
1432 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1433 return 0;
1436 /* If there are saved registers but the LR isn't saved, then we need
1437 two instructions for the return. */
1438 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1439 return 0;
1441 /* Can't be done if any of the FPA regs are pushed,
1442 since this also requires an insn. */
1443 if (TARGET_HARD_FLOAT && TARGET_FPA)
1444 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1445 if (regs_ever_live[regno] && !call_used_regs[regno])
1446 return 0;
1448 /* Likewise VFP regs. */
1449 if (TARGET_HARD_FLOAT && TARGET_VFP)
1450 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1451 if (regs_ever_live[regno] && !call_used_regs[regno])
1452 return 0;
1454 if (TARGET_REALLY_IWMMXT)
1455 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1456 if (regs_ever_live[regno] && ! call_used_regs [regno])
1457 return 0;
1459 return 1;
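/* Putting the checks above together, a rough sketch of the common
   cases (not exhaustive): a leaf function that saves no registers and
   needs no stack adjustment can return with a single "mov pc, lr",
   and a function whose saved registers include LR can fold the return
   into one "ldmfd sp!, {..., pc}".  Anything that needs extra work
   first (variadic arguments, alloca, __builtin_eh_return, FPA, VFP or
   iWMMXt register restores, or the SA-110 case above when r3 cannot be
   reused) makes use_return_insn return 0 so that a full epilogue is
   emitted instead.  */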
1462 /* Return TRUE if int I is a valid immediate ARM constant. */
1465 const_ok_for_arm (HOST_WIDE_INT i)
1467 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
1469 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1470 be all zero, or all one. */
1471 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1472 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1473 != ((~(unsigned HOST_WIDE_INT) 0)
1474 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1475 return FALSE;
1477 /* Fast return for 0 and powers of 2 */
1478 if ((i & (i - 1)) == 0)
1479 return TRUE;
1483 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
1484 return TRUE;
1485 mask =
1486 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
1487 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
1489 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
1491 return FALSE;
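/* Some illustrative inputs for const_ok_for_arm.  The loop above tests
   whether every set bit of I fits inside an 8-bit window at an even
   rotation, which is exactly what an ARM data-processing immediate can
   encode:

     const_ok_for_arm (0x000000ff)  returns TRUE  (8-bit value, no rotation)
     const_ok_for_arm (0xff000000)  returns TRUE  (0xff rotated right by 8)
     const_ok_for_arm (0x00000101)  returns FALSE (set bits span 9 positions)

   Constants that fail this test have to be synthesized in several
   instructions (see arm_gen_constant) or loaded from a minipool.  */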
1494 /* Return true if I is a valid constant for the operation CODE. */
1495 static int
1496 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1498 if (const_ok_for_arm (i))
1499 return 1;
1501 switch (code)
1503 case PLUS:
1504 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1506 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1507 case XOR:
1508 case IOR:
1509 return 0;
1511 case AND:
1512 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1514 default:
1515 abort ();
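/* A short illustration of const_ok_for_op: even when the constant
   itself is not a valid immediate, the operation may still take one
   instruction after flipping it.  Adding -256 (0xffffff00, not
   encodable) is accepted for PLUS because const_ok_for_arm (256) holds
   and the addition can be emitted as a subtract; likewise an AND with
   0xffffff00 is accepted because the inverted constant 0xff fits a BIC
   instruction.  */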
1519 /* Emit a sequence of insns to handle a large constant.
1520 CODE is the code of the operation required, it can be any of SET, PLUS,
1521 IOR, AND, XOR, MINUS;
1522 MODE is the mode in which the operation is being performed;
1523 VAL is the integer to operate on;
1524 SOURCE is the other operand (a register, or a null-pointer for SET);
1525 SUBTARGETS means it is safe to create scratch registers if that will
1526 either produce a simpler sequence, or we will want to cse the values.
1527 Return value is the number of insns emitted. */
1530 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1531 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1533 rtx cond;
1535 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1536 cond = COND_EXEC_TEST (PATTERN (insn));
1537 else
1538 cond = NULL_RTX;
1540 if (subtargets || code == SET
1541 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1542 && REGNO (target) != REGNO (source)))
1544 /* After arm_reorg has been called, we can't fix up expensive
1545 constants by pushing them into memory so we must synthesize
1546 them in-line, regardless of the cost. This is only likely to
1547 be more costly on chips that have load delay slots and we are
1548 compiling without running the scheduler (so no splitting
1549 occurred before the final instruction emission).
1551 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1552 */
1553 if (!after_arm_reorg
1554 && !cond
1555 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1556 1, 0)
1557 > arm_constant_limit + (code != SET)))
1559 if (code == SET)
1561 /* Currently SET is the only monadic value for CODE; all
1562 the rest are dyadic. */
1563 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1564 return 1;
1566 else
1568 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1570 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1571 /* For MINUS, the value is subtracted from, since we never
1572 have subtraction of a constant. */
1573 if (code == MINUS)
1574 emit_insn (gen_rtx_SET (VOIDmode, target,
1575 gen_rtx_MINUS (mode, temp, source)));
1576 else
1577 emit_insn (gen_rtx_SET (VOIDmode, target,
1578 gen_rtx_fmt_ee (code, mode, source, temp)));
1579 return 2;
1584 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1588 static int
1589 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1591 HOST_WIDE_INT temp1;
1592 int num_insns = 0;
1595 int end;
1597 if (i <= 0)
1598 i += 32;
1599 if (remainder & (3 << (i - 2)))
1601 end = i - 8;
1602 if (end < 0)
1603 end += 32;
1604 temp1 = remainder & ((0x0ff << end)
1605 | ((i < end) ? (0xff >> (32 - end)) : 0));
1606 remainder &= ~temp1;
1607 num_insns++;
1608 i -= 6;
1610 i -= 2;
1611 } while (remainder);
1612 return num_insns;
1615 /* Emit an instruction with the indicated PATTERN. If COND is
1616 non-NULL, conditionalize the execution of the instruction on COND
1617 being true. */
1619 static void
1620 emit_constant_insn (rtx cond, rtx pattern)
1622 if (cond)
1623 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1624 emit_insn (pattern);
1627 /* As above, but extra parameter GENERATE which, if clear, suppresses
1628 RTL generation. */
1630 static int
1631 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1632 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1633 int generate)
1635 int can_invert = 0;
1636 int can_negate = 0;
1637 int can_negate_initial = 0;
1638 int can_shift = 0;
1639 int i;
1640 int num_bits_set = 0;
1641 int set_sign_bit_copies = 0;
1642 int clear_sign_bit_copies = 0;
1643 int clear_zero_bit_copies = 0;
1644 int set_zero_bit_copies = 0;
1645 int insns = 0;
1646 unsigned HOST_WIDE_INT temp1, temp2;
1647 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1649 /* Find out which operations are safe for a given CODE. Also do a quick
1650 check for degenerate cases; these can occur when DImode operations
1651 are split. */
1652 switch (code)
1654 case SET:
1655 can_invert = 1;
1656 can_shift = 1;
1657 can_negate = 1;
1658 break;
1660 case PLUS:
1661 can_negate = 1;
1662 can_negate_initial = 1;
1663 break;
1665 case IOR:
1666 if (remainder == 0xffffffff)
1668 if (generate)
1669 emit_constant_insn (cond,
1670 gen_rtx_SET (VOIDmode, target,
1671 GEN_INT (ARM_SIGN_EXTEND (val))));
1672 return 1;
1674 if (remainder == 0)
1676 if (reload_completed && rtx_equal_p (target, source))
1677 return 0;
1678 if (generate)
1679 emit_constant_insn (cond,
1680 gen_rtx_SET (VOIDmode, target, source));
1681 return 1;
1683 break;
1685 case AND:
1686 if (remainder == 0)
1688 if (generate)
1689 emit_constant_insn (cond,
1690 gen_rtx_SET (VOIDmode, target, const0_rtx));
1691 return 1;
1693 if (remainder == 0xffffffff)
1695 if (reload_completed && rtx_equal_p (target, source))
1696 return 0;
1697 if (generate)
1698 emit_constant_insn (cond,
1699 gen_rtx_SET (VOIDmode, target, source));
1700 return 1;
1702 can_invert = 1;
1703 break;
1705 case XOR:
1706 if (remainder == 0)
1708 if (reload_completed && rtx_equal_p (target, source))
1709 return 0;
1710 if (generate)
1711 emit_constant_insn (cond,
1712 gen_rtx_SET (VOIDmode, target, source));
1713 return 1;
1715 if (remainder == 0xffffffff)
1717 if (generate)
1718 emit_constant_insn (cond,
1719 gen_rtx_SET (VOIDmode, target,
1720 gen_rtx_NOT (mode, source)));
1721 return 1;
1724 /* The code below does not yet know how to handle this case. */
1725 abort ();
1727 case MINUS:
1728 /* We treat MINUS as (val - source), since (source - val) is always
1729 passed as (source + (-val)). */
1730 if (remainder == 0)
1732 if (generate)
1733 emit_constant_insn (cond,
1734 gen_rtx_SET (VOIDmode, target,
1735 gen_rtx_NEG (mode, source)));
1736 return 1;
1738 if (const_ok_for_arm (val))
1740 if (generate)
1741 emit_constant_insn (cond,
1742 gen_rtx_SET (VOIDmode, target,
1743 gen_rtx_MINUS (mode, GEN_INT (val),
1744 source)));
1745 return 1;
1747 can_negate = 1;
1749 break;
1751 default:
1752 abort ();
1755 /* If we can do it in one insn, get out quickly. */
1756 if (const_ok_for_arm (val)
1757 || (can_negate_initial && const_ok_for_arm (-val))
1758 || (can_invert && const_ok_for_arm (~val)))
1760 if (generate)
1761 emit_constant_insn (cond,
1762 gen_rtx_SET (VOIDmode, target,
1763 (source
1764 ? gen_rtx_fmt_ee (code, mode, source,
1765 GEN_INT (val))
1766 : GEN_INT (val))));
1767 return 1;
1770 /* Calculate a few attributes that may be useful for specific
1771 optimizations. */
1772 for (i = 31; i >= 0; i--)
1774 if ((remainder & (1 << i)) == 0)
1775 clear_sign_bit_copies++;
1776 else
1777 break;
1780 for (i = 31; i >= 0; i--)
1782 if ((remainder & (1 << i)) != 0)
1783 set_sign_bit_copies++;
1784 else
1785 break;
1788 for (i = 0; i <= 31; i++)
1790 if ((remainder & (1 << i)) == 0)
1791 clear_zero_bit_copies++;
1792 else
1793 break;
1796 for (i = 0; i <= 31; i++)
1798 if ((remainder & (1 << i)) != 0)
1799 set_zero_bit_copies++;
1800 else
1801 break;
1804 switch (code)
1806 case SET:
1807 /* See if we can do this by sign-extending a constant that is known
1808 to be negative. This is a good way of doing it, since the shift
1809 may well merge into a subsequent insn. */
1810 if (set_sign_bit_copies > 1)
1812 if (const_ok_for_arm
1813 (temp1 = ARM_SIGN_EXTEND (remainder
1814 << (set_sign_bit_copies - 1))))
1816 if (generate)
1818 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1819 emit_constant_insn (cond,
1820 gen_rtx_SET (VOIDmode, new_src,
1821 GEN_INT (temp1)));
1822 emit_constant_insn (cond,
1823 gen_ashrsi3 (target, new_src,
1824 GEN_INT (set_sign_bit_copies - 1)));
1826 return 2;
1828 /* For an inverted constant, we will need to set the low bits;
1829 these will be shifted out of harm's way. */
1830 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1831 if (const_ok_for_arm (~temp1))
1833 if (generate)
1835 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1836 emit_constant_insn (cond,
1837 gen_rtx_SET (VOIDmode, new_src,
1838 GEN_INT (temp1)));
1839 emit_constant_insn (cond,
1840 gen_ashrsi3 (target, new_src,
1841 GEN_INT (set_sign_bit_copies - 1)));
1843 return 2;
1847 /* See if we can generate this by setting the bottom (or the top)
1848 16 bits, and then shifting these into the other half of the
1849 word. We only look for the simplest cases; to do more would cost
1850 too much. Be careful, however, not to generate this when the
1851 alternative would take fewer insns. */
1852 if (val & 0xffff0000)
1854 temp1 = remainder & 0xffff0000;
1855 temp2 = remainder & 0x0000ffff;
1857 /* Overlaps outside this range are best done using other methods. */
1858 for (i = 9; i < 24; i++)
1860 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1861 && !const_ok_for_arm (temp2))
1863 rtx new_src = (subtargets
1864 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1865 : target);
1866 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1867 source, subtargets, generate);
1868 source = new_src;
1869 if (generate)
1870 emit_constant_insn
1871 (cond,
1872 gen_rtx_SET
1873 (VOIDmode, target,
1874 gen_rtx_IOR (mode,
1875 gen_rtx_ASHIFT (mode, source,
1876 GEN_INT (i)),
1877 source)));
1878 return insns + 1;
1882 /* Don't duplicate cases already considered. */
1883 for (i = 17; i < 24; i++)
1885 if (((temp1 | (temp1 >> i)) == remainder)
1886 && !const_ok_for_arm (temp1))
1888 rtx new_src = (subtargets
1889 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1890 : target);
1891 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1892 source, subtargets, generate);
1893 source = new_src;
1894 if (generate)
1895 emit_constant_insn
1896 (cond,
1897 gen_rtx_SET (VOIDmode, target,
1898 gen_rtx_IOR
1899 (mode,
1900 gen_rtx_LSHIFTRT (mode, source,
1901 GEN_INT (i)),
1902 source)));
1903 return insns + 1;
1907 break;
1909 case IOR:
1910 case XOR:
1911 /* If we have IOR or XOR, and the constant can be loaded in a
1912 single instruction, and we can find a temporary to put it in,
1913 then this can be done in two instructions instead of 3-4. */
1914 if (subtargets
1915 /* TARGET can't be NULL if SUBTARGETS is 0 */
1916 || (reload_completed && !reg_mentioned_p (target, source)))
1918 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1920 if (generate)
1922 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1924 emit_constant_insn (cond,
1925 gen_rtx_SET (VOIDmode, sub,
1926 GEN_INT (val)));
1927 emit_constant_insn (cond,
1928 gen_rtx_SET (VOIDmode, target,
1929 gen_rtx_fmt_ee (code, mode,
1930 source, sub)));
1932 return 2;
1936 if (code == XOR)
1937 break;
1939 if (set_sign_bit_copies > 8
1940 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1942 if (generate)
1944 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1945 rtx shift = GEN_INT (set_sign_bit_copies);
1947 emit_constant_insn
1948 (cond,
1949 gen_rtx_SET (VOIDmode, sub,
1950 gen_rtx_NOT (mode,
1951 gen_rtx_ASHIFT (mode,
1952 source,
1953 shift))));
1954 emit_constant_insn
1955 (cond,
1956 gen_rtx_SET (VOIDmode, target,
1957 gen_rtx_NOT (mode,
1958 gen_rtx_LSHIFTRT (mode, sub,
1959 shift))));
1961 return 2;
1964 if (set_zero_bit_copies > 8
1965 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1967 if (generate)
1969 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1970 rtx shift = GEN_INT (set_zero_bit_copies);
1972 emit_constant_insn
1973 (cond,
1974 gen_rtx_SET (VOIDmode, sub,
1975 gen_rtx_NOT (mode,
1976 gen_rtx_LSHIFTRT (mode,
1977 source,
1978 shift))));
1979 emit_constant_insn
1980 (cond,
1981 gen_rtx_SET (VOIDmode, target,
1982 gen_rtx_NOT (mode,
1983 gen_rtx_ASHIFT (mode, sub,
1984 shift))));
1986 return 2;
1989 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1991 if (generate)
1993 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1994 emit_constant_insn (cond,
1995 gen_rtx_SET (VOIDmode, sub,
1996 gen_rtx_NOT (mode, source)));
1997 source = sub;
1998 if (subtargets)
1999 sub = gen_reg_rtx (mode);
2000 emit_constant_insn (cond,
2001 gen_rtx_SET (VOIDmode, sub,
2002 gen_rtx_AND (mode, source,
2003 GEN_INT (temp1))));
2004 emit_constant_insn (cond,
2005 gen_rtx_SET (VOIDmode, target,
2006 gen_rtx_NOT (mode, sub)));
2008 return 3;
2010 break;
2012 case AND:
2013 /* See if two shifts will do 2 or more insn's worth of work. */
2014 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2016 HOST_WIDE_INT shift_mask = ((0xffffffff
2017 << (32 - clear_sign_bit_copies))
2018 & 0xffffffff);
2020 if ((remainder | shift_mask) != 0xffffffff)
2022 if (generate)
2024 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2025 insns = arm_gen_constant (AND, mode, cond,
2026 remainder | shift_mask,
2027 new_src, source, subtargets, 1);
2028 source = new_src;
2030 else
2032 rtx targ = subtargets ? NULL_RTX : target;
2033 insns = arm_gen_constant (AND, mode, cond,
2034 remainder | shift_mask,
2035 targ, source, subtargets, 0);
2039 if (generate)
2041 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2042 rtx shift = GEN_INT (clear_sign_bit_copies);
2044 emit_insn (gen_ashlsi3 (new_src, source, shift));
2045 emit_insn (gen_lshrsi3 (target, new_src, shift));
2048 return insns + 2;
2051 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2053 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2055 if ((remainder | shift_mask) != 0xffffffff)
2057 if (generate)
2059 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2061 insns = arm_gen_constant (AND, mode, cond,
2062 remainder | shift_mask,
2063 new_src, source, subtargets, 1);
2064 source = new_src;
2066 else
2068 rtx targ = subtargets ? NULL_RTX : target;
2070 insns = arm_gen_constant (AND, mode, cond,
2071 remainder | shift_mask,
2072 targ, source, subtargets, 0);
2076 if (generate)
2078 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2079 rtx shift = GEN_INT (clear_zero_bit_copies);
2081 emit_insn (gen_lshrsi3 (new_src, source, shift));
2082 emit_insn (gen_ashlsi3 (target, new_src, shift));
2085 return insns + 2;
2088 break;
2090 default:
2091 break;
2094 for (i = 0; i < 32; i++)
2095 if (remainder & (1 << i))
2096 num_bits_set++;
2098 if (code == AND || (can_invert && num_bits_set > 16))
2099 remainder = (~remainder) & 0xffffffff;
2100 else if (code == PLUS && num_bits_set > 16)
2101 remainder = (-remainder) & 0xffffffff;
2102 else
2104 can_invert = 0;
2105 can_negate = 0;
2108 /* Now try to find a way of doing the job in either two or three
2109 instructions.
2110 We start by looking for the largest block of zeros that is aligned on
2111 a 2-bit boundary; we then fill up the temps, wrapping around to the
2112 top of the word when we drop off the bottom.
2113 In the worst case this code should produce no more than four insns. */
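/* As an illustration, a SET of 0x12345678 comes out as something like

        mov     rN, #0x12000000
        add     rN, rN, #0x00340000
        add     rN, rN, #0x00005600
        add     rN, rN, #0x00000078

   (the exact chunking depends on where the largest aligned run of
   zeros falls), which is the four-insn worst case mentioned above.  */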
2115 int best_start = 0;
2116 int best_consecutive_zeros = 0;
2118 for (i = 0; i < 32; i += 2)
2120 int consecutive_zeros = 0;
2122 if (!(remainder & (3 << i)))
2124 while ((i < 32) && !(remainder & (3 << i)))
2126 consecutive_zeros += 2;
2127 i += 2;
2129 if (consecutive_zeros > best_consecutive_zeros)
2131 best_consecutive_zeros = consecutive_zeros;
2132 best_start = i - consecutive_zeros;
2134 i -= 2;
2138 /* So long as it won't require any more insns to do so, it's
2139 desirable to emit a small constant (in bits 0...9) in the last
2140 insn. This way there is more chance that it can be combined with
2141 a later addressing insn to form a pre-indexed load or store
2142 operation. Consider:
2144 *((volatile int *)0xe0000100) = 1;
2145 *((volatile int *)0xe0000110) = 2;
2147 We want this to wind up as:
2149 mov rA, #0xe0000000
2150 mov rB, #1
2151 str rB, [rA, #0x100]
2152 mov rB, #2
2153 str rB, [rA, #0x110]
2155 rather than having to synthesize both large constants from scratch.
2157 Therefore, we calculate how many insns would be required to emit
2158 the constant starting from `best_start', and also starting from
2159 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2160 yield a shorter sequence, we may as well use zero. */
2161 if (best_start != 0
2162 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2163 && (count_insns_for_constant (remainder, 0) <=
2164 count_insns_for_constant (remainder, best_start)))
2165 best_start = 0;
2167 /* Now start emitting the insns. */
2168 i = best_start;
2171 int end;
2173 if (i <= 0)
2174 i += 32;
2175 if (remainder & (3 << (i - 2)))
2177 end = i - 8;
2178 if (end < 0)
2179 end += 32;
2180 temp1 = remainder & ((0x0ff << end)
2181 | ((i < end) ? (0xff >> (32 - end)) : 0));
2182 remainder &= ~temp1;
2184 if (generate)
2186 rtx new_src, temp1_rtx;
2188 if (code == SET || code == MINUS)
2190 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2191 if (can_invert && code != MINUS)
2192 temp1 = ~temp1;
2194 else
2196 if (remainder && subtargets)
2197 new_src = gen_reg_rtx (mode);
2198 else
2199 new_src = target;
2200 if (can_invert)
2201 temp1 = ~temp1;
2202 else if (can_negate)
2203 temp1 = -temp1;
2206 temp1 = trunc_int_for_mode (temp1, mode);
2207 temp1_rtx = GEN_INT (temp1);
2209 if (code == SET)
2211 else if (code == MINUS)
2212 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2213 else
2214 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2216 emit_constant_insn (cond,
2217 gen_rtx_SET (VOIDmode, new_src,
2218 temp1_rtx));
2219 source = new_src;
2222 if (code == SET)
2224 can_invert = 0;
2225 code = PLUS;
2227 else if (code == MINUS)
2228 code = PLUS;
2230 insns++;
2231 i -= 6;
2233 i -= 2;
2235 while (remainder);
2238 return insns;
2241 /* Canonicalize a comparison so that we are more likely to recognize it.
2242 This can be done for a few constant compares, where we can make the
2243 immediate value easier to load. */
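/* For instance, 0x3ff is not a valid ARM immediate (it needs ten
   consecutive set bits), but (x > 0x3ff) is equivalent to (x >= 0x400)
   and 0x400 loads in a single insn; the GT/LE and GTU/LEU cases below
   nudge the constant in exactly this way.  */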
2245 enum rtx_code
2246 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2248 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2250 switch (code)
2252 case EQ:
2253 case NE:
2254 return code;
2256 case GT:
2257 case LE:
2258 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2259 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2261 *op1 = GEN_INT (i + 1);
2262 return code == GT ? GE : LT;
2264 break;
2266 case GE:
2267 case LT:
2268 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2269 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2271 *op1 = GEN_INT (i - 1);
2272 return code == GE ? GT : LE;
2274 break;
2276 case GTU:
2277 case LEU:
2278 if (i != ~((unsigned HOST_WIDE_INT) 0)
2279 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2281 *op1 = GEN_INT (i + 1);
2282 return code == GTU ? GEU : LTU;
2284 break;
2286 case GEU:
2287 case LTU:
2288 if (i != 0
2289 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2291 *op1 = GEN_INT (i - 1);
2292 return code == GEU ? GTU : LEU;
2294 break;
2296 default:
2297 abort ();
2300 return code;
2304 /* Define how to find the value returned by a function. */
2307 arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2309 enum machine_mode mode;
2310 int unsignedp ATTRIBUTE_UNUSED;
2311 rtx r ATTRIBUTE_UNUSED;
2313 mode = TYPE_MODE (type);
2314 /* Promote integer types. */
2315 if (INTEGRAL_TYPE_P (type))
2316 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2318 /* Promote small structs returned in a register to full-word size
2319 for big-endian AAPCS. */
2320 if (arm_return_in_msb (type))
2322 HOST_WIDE_INT size = int_size_in_bytes (type);
2323 if (size % UNITS_PER_WORD != 0)
2325 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2326 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2330 return LIBCALL_VALUE(mode);
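/* For example, a 3-byte structure returned on a big-endian AAPCS
   target: int_size_in_bytes gives 3, which is rounded up to a full
   word above, so the value comes back as an SImode quantity with the
   data held in the most significant bytes of the return register.  */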
2333 /* Determine the amount of memory needed to store the possible return
2334 registers of an untyped call. */
2336 arm_apply_result_size (void)
2338 int size = 16;
2340 if (TARGET_ARM)
2342 if (TARGET_HARD_FLOAT_ABI)
2344 if (TARGET_FPA)
2345 size += 12;
2346 if (TARGET_MAVERICK)
2347 size += 8;
2349 if (TARGET_IWMMXT_ABI)
2350 size += 8;
2353 return size;
2356 /* Decide whether a type should be returned in memory (true)
2357 or in a register (false). This is called by the macro
2358 RETURN_IN_MEMORY. */
2360 arm_return_in_memory (tree type)
2362 HOST_WIDE_INT size;
2364 if (!AGGREGATE_TYPE_P (type) &&
2365 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2366 /* All simple types are returned in registers.
2367 For AAPCS, complex types are treated the same as aggregates. */
2368 return 0;
2370 size = int_size_in_bytes (type);
2372 if (arm_abi != ARM_ABI_APCS)
2374 /* ATPCS and later return aggregate types in memory only if they are
2375 larger than a word (or are variable size). */
2376 return (size < 0 || size > UNITS_PER_WORD);
2379 /* For the arm-wince targets we choose to be compatible with Microsoft's
2380 ARM and Thumb compilers, which always return aggregates in memory. */
2381 #ifndef ARM_WINCE
2382 /* All structures/unions bigger than one word are returned in memory.
2383 Also catch the case where int_size_in_bytes returns -1. In this case
2384 the aggregate is either huge or of variable size, and in either case
2385 we will want to return it via memory and not in a register. */
2386 if (size < 0 || size > UNITS_PER_WORD)
2387 return 1;
2389 if (TREE_CODE (type) == RECORD_TYPE)
2391 tree field;
2393 /* For a struct the APCS says that we only return in a register
2394 if the type is 'integer like' and every addressable element
2395 has an offset of zero. For practical purposes this means
2396 that the structure can have at most one non bit-field element
2397 and that this element must be the first one in the structure. */
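/* For instance, under these APCS rules a type such as
   struct { int x; } or struct { int x : 16; int y : 16; } can come
   back in a register, whereas struct { short a; short b; } (a second
   addressable field) or struct { float f; } (a float member) must be
   returned in memory.  */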
2399 /* Find the first field, ignoring non FIELD_DECL things which will
2400 have been created by C++. */
2401 for (field = TYPE_FIELDS (type);
2402 field && TREE_CODE (field) != FIELD_DECL;
2403 field = TREE_CHAIN (field))
2404 continue;
2406 if (field == NULL)
2407 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2409 /* Check that the first field is valid for returning in a register. */
2411 /* ... Floats are not allowed */
2412 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2413 return 1;
2415 /* ... Aggregates that are not themselves valid for returning in
2416 a register are not allowed. */
2417 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2418 return 1;
2420 /* Now check the remaining fields, if any. Only bitfields are allowed,
2421 since they are not addressable. */
2422 for (field = TREE_CHAIN (field);
2423 field;
2424 field = TREE_CHAIN (field))
2426 if (TREE_CODE (field) != FIELD_DECL)
2427 continue;
2429 if (!DECL_BIT_FIELD_TYPE (field))
2430 return 1;
2433 return 0;
2436 if (TREE_CODE (type) == UNION_TYPE)
2438 tree field;
2440 /* Unions can be returned in registers if every element is
2441 integral, or can be returned in an integer register. */
2442 for (field = TYPE_FIELDS (type);
2443 field;
2444 field = TREE_CHAIN (field))
2446 if (TREE_CODE (field) != FIELD_DECL)
2447 continue;
2449 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2450 return 1;
2452 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2453 return 1;
2456 return 0;
2458 #endif /* not ARM_WINCE */
2460 /* Return all other types in memory. */
2461 return 1;
2464 /* Indicate whether or not words of a double are in big-endian order. */
2467 arm_float_words_big_endian (void)
2469 if (TARGET_MAVERICK)
2470 return 0;
2472 /* For FPA, float words are always big-endian. For VFP, floats words
2473 follow the memory system mode. */
2475 if (TARGET_FPA)
2477 return 1;
2480 if (TARGET_VFP)
2481 return (TARGET_BIG_END ? 1 : 0);
2483 return 1;
2486 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2487 for a call to a function whose data type is FNTYPE.
2488 For a library call, FNTYPE is NULL. */
2489 void
2490 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2491 rtx libname ATTRIBUTE_UNUSED,
2492 tree fndecl ATTRIBUTE_UNUSED)
2494 /* On the ARM, the offset starts at 0. */
2495 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2496 pcum->iwmmxt_nregs = 0;
2497 pcum->can_split = true;
2499 pcum->call_cookie = CALL_NORMAL;
2501 if (TARGET_LONG_CALLS)
2502 pcum->call_cookie = CALL_LONG;
2504 /* Check for long call/short call attributes. The attributes
2505 override any command line option. */
2506 if (fntype)
2508 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2509 pcum->call_cookie = CALL_SHORT;
2510 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2511 pcum->call_cookie = CALL_LONG;
2514 /* Varargs vectors are treated the same as long long.
2515 named_count avoids having to change the way ARM handles 'named'. */
2516 pcum->named_count = 0;
2517 pcum->nargs = 0;
2519 if (TARGET_REALLY_IWMMXT && fntype)
2521 tree fn_arg;
2523 for (fn_arg = TYPE_ARG_TYPES (fntype);
2524 fn_arg;
2525 fn_arg = TREE_CHAIN (fn_arg))
2526 pcum->named_count += 1;
2528 if (! pcum->named_count)
2529 pcum->named_count = INT_MAX;
2534 /* Return true if mode/type need doubleword alignment. */
2535 bool
2536 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2538 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2539 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
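/* For example, on an AAPCS target (where ARM_DOUBLEWORD_ALIGN holds
   and PARM_BOUNDARY is 32) a long long or double argument, or any
   type declared with __attribute__ ((aligned (8))), answers true here
   and is steered into an even/odd register pair by arm_function_arg
   below.  */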
2543 /* Determine where to put an argument to a function.
2544 Value is zero to push the argument on the stack,
2545 or a hard register in which to store the argument.
2547 MODE is the argument's machine mode.
2548 TYPE is the data type of the argument (as a tree).
2549 This is null for libcalls where that information may
2550 not be available.
2551 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2552 the preceding args and about the function being called.
2553 NAMED is nonzero if this argument is a named parameter
2554 (otherwise it is an extra parameter matching an ellipsis). */
2557 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2558 tree type, int named)
2560 int nregs;
2562 /* Varargs vectors are treated the same as long long.
2563 named_count avoids having to change the way ARM handles 'named'. */
2564 if (TARGET_IWMMXT_ABI
2565 && arm_vector_mode_supported_p (mode)
2566 && pcum->named_count > pcum->nargs + 1)
2568 if (pcum->iwmmxt_nregs <= 9)
2569 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2570 else
2572 pcum->can_split = false;
2573 return NULL_RTX;
2577 /* Put doubleword aligned quantities in even register pairs. */
2578 if (pcum->nregs & 1
2579 && ARM_DOUBLEWORD_ALIGN
2580 && arm_needs_doubleword_align (mode, type))
2581 pcum->nregs++;
2583 if (mode == VOIDmode)
2584 /* Compute operand 2 of the call insn. */
2585 return GEN_INT (pcum->call_cookie);
2587 /* Only allow splitting an arg between regs and memory if all preceding
2588 args were allocated to regs. For args passed by reference we only count
2589 the reference pointer. */
2590 if (pcum->can_split)
2591 nregs = 1;
2592 else
2593 nregs = ARM_NUM_REGS2 (mode, type);
2595 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2596 return NULL_RTX;
2598 return gen_rtx_REG (mode, pcum->nregs);
2601 static int
2602 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2603 tree type, bool named ATTRIBUTE_UNUSED)
2605 int nregs = pcum->nregs;
2607 if (arm_vector_mode_supported_p (mode))
2608 return 0;
2610 if (NUM_ARG_REGS > nregs
2611 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2612 && pcum->can_split)
2613 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2615 return 0;
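/* For example, with r0 and r1 already used (pcum->nregs == 2), a
   16-byte structure that may still be split gets 8 bytes in r2/r3 and
   the remainder on the stack, so this returns 8.  */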
2618 /* Variable sized types are passed by reference. This is a GCC
2619 extension to the ARM ABI. */
2621 static bool
2622 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2623 enum machine_mode mode ATTRIBUTE_UNUSED,
2624 tree type, bool named ATTRIBUTE_UNUSED)
2626 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2629 /* Encode the current state of the #pragma [no_]long_calls. */
2630 typedef enum
2632 OFF, /* No #pragma [no_]long_calls is in effect. */
2633 LONG, /* #pragma long_calls is in effect. */
2634 SHORT /* #pragma no_long_calls is in effect. */
2635 } arm_pragma_enum;
2637 static arm_pragma_enum arm_pragma_long_calls = OFF;
2639 void
2640 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2642 arm_pragma_long_calls = LONG;
2645 void
2646 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2648 arm_pragma_long_calls = SHORT;
2651 void
2652 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2654 arm_pragma_long_calls = OFF;
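/* The corresponding user-level pragmas work roughly like this
   (illustrative):

       #pragma long_calls
       void far_func (void);      -- treated as a long call
       #pragma no_long_calls
       void near_func (void);     -- treated as a short call
       #pragma long_calls_off
       void plain_func (void);    -- back to the command-line default
*/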
2657 /* Table of machine attributes. */
2658 const struct attribute_spec arm_attribute_table[] =
2660 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2661 /* Function calls made to this symbol must be done indirectly, because
2662 it may lie outside of the 26 bit addressing range of a normal function
2663 call. */
2664 { "long_call", 0, 0, false, true, true, NULL },
2665 /* Whereas these functions are always known to reside within the 26 bit
2666 addressing range. */
2667 { "short_call", 0, 0, false, true, true, NULL },
2668 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2669 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2670 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2671 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2672 #ifdef ARM_PE
2673 /* ARM/PE has three new attributes:
2674 interfacearm - ?
2675 dllexport - for exporting a function/variable that will live in a dll
2676 dllimport - for importing a function/variable from a dll
2678 Microsoft allows multiple declspecs in one __declspec, separating
2679 them with spaces. We do NOT support this. Instead, use __declspec
2680 multiple times. */
2682 { "dllimport", 0, 0, true, false, false, NULL },
2683 { "dllexport", 0, 0, true, false, false, NULL },
2684 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2685 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2686 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2687 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2688 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2689 #endif
2690 { NULL, 0, 0, false, false, false, NULL }
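/* Typical source-level uses of the attributes above (illustrative):

       void uart_isr (void) __attribute__ ((interrupt ("IRQ")));
       extern int far_helper (int) __attribute__ ((long_call));
       void monitor (void) __attribute__ ((naked));
*/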
2693 /* Handle an attribute requiring a FUNCTION_DECL;
2694 arguments as in struct attribute_spec.handler. */
2695 static tree
2696 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2697 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2699 if (TREE_CODE (*node) != FUNCTION_DECL)
2701 warning (0, "%qs attribute only applies to functions",
2702 IDENTIFIER_POINTER (name));
2703 *no_add_attrs = true;
2706 return NULL_TREE;
2709 /* Handle an "interrupt" or "isr" attribute;
2710 arguments as in struct attribute_spec.handler. */
2711 static tree
2712 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2713 bool *no_add_attrs)
2715 if (DECL_P (*node))
2717 if (TREE_CODE (*node) != FUNCTION_DECL)
2719 warning (0, "%qs attribute only applies to functions",
2720 IDENTIFIER_POINTER (name));
2721 *no_add_attrs = true;
2723 /* FIXME: the argument if any is checked for type attributes;
2724 should it be checked for decl ones? */
2726 else
2728 if (TREE_CODE (*node) == FUNCTION_TYPE
2729 || TREE_CODE (*node) == METHOD_TYPE)
2731 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2733 warning (0, "%qs attribute ignored", IDENTIFIER_POINTER (name));
2734 *no_add_attrs = true;
2737 else if (TREE_CODE (*node) == POINTER_TYPE
2738 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2739 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2740 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2742 *node = build_variant_type_copy (*node);
2743 TREE_TYPE (*node) = build_type_attribute_variant
2744 (TREE_TYPE (*node),
2745 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2746 *no_add_attrs = true;
2748 else
2750 /* Possibly pass this attribute on from the type to a decl. */
2751 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2752 | (int) ATTR_FLAG_FUNCTION_NEXT
2753 | (int) ATTR_FLAG_ARRAY_NEXT))
2755 *no_add_attrs = true;
2756 return tree_cons (name, args, NULL_TREE);
2758 else
2760 warning (0, "%qs attribute ignored", IDENTIFIER_POINTER (name));
2765 return NULL_TREE;
2768 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2769 /* Handle the "notshared" attribute. This attribute is another way of
2770 requesting hidden visibility. ARM's compiler supports
2771 "__declspec(notshared)"; we support the same thing via an
2772 attribute. */
2774 static tree
2775 arm_handle_notshared_attribute (tree *node,
2776 tree name ATTRIBUTE_UNUSED,
2777 tree args ATTRIBUTE_UNUSED,
2778 int flags ATTRIBUTE_UNUSED,
2779 bool *no_add_attrs)
2781 tree decl = TYPE_NAME (*node);
2783 if (decl)
2785 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2786 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2787 *no_add_attrs = false;
2789 return NULL_TREE;
2791 #endif
2793 /* Return 0 if the attributes for two types are incompatible, 1 if they
2794 are compatible, and 2 if they are nearly compatible (which causes a
2795 warning to be generated). */
2796 static int
2797 arm_comp_type_attributes (tree type1, tree type2)
2799 int l1, l2, s1, s2;
2801 /* Check for mismatch of non-default calling convention. */
2802 if (TREE_CODE (type1) != FUNCTION_TYPE)
2803 return 1;
2805 /* Check for mismatched call attributes. */
2806 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2807 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2808 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2809 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2811 /* Only bother to check if an attribute is defined. */
2812 if (l1 | l2 | s1 | s2)
2814 /* If one type has an attribute, the other must have the same attribute. */
2815 if ((l1 != l2) || (s1 != s2))
2816 return 0;
2818 /* Disallow mixed attributes. */
2819 if ((l1 & s2) || (l2 & s1))
2820 return 0;
2823 /* Check for mismatched ISR attribute. */
2824 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2825 if (! l1)
2826 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2827 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2828 if (! l2)
2829 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2830 if (l1 != l2)
2831 return 0;
2833 return 1;
2836 /* Encode long_call or short_call attribute by prefixing
2837 symbol name in DECL with a special character FLAG. */
2838 void
2839 arm_encode_call_attribute (tree decl, int flag)
2841 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2842 int len = strlen (str);
2843 char * newstr;
2845 /* Do not allow weak functions to be treated as short call. */
2846 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2847 return;
2849 newstr = alloca (len + 2);
2850 newstr[0] = flag;
2851 strcpy (newstr + 1, str);
2853 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2854 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2857 /* Assigns default attributes to newly defined type. This is used to
2858 set short_call/long_call attributes for function types of
2859 functions defined inside corresponding #pragma scopes. */
2860 static void
2861 arm_set_default_type_attributes (tree type)
2863 /* Add __attribute__ ((long_call)) to all functions, when
2864 inside #pragma long_calls or __attribute__ ((short_call)),
2865 when inside #pragma no_long_calls. */
2866 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2868 tree type_attr_list, attr_name;
2869 type_attr_list = TYPE_ATTRIBUTES (type);
2871 if (arm_pragma_long_calls == LONG)
2872 attr_name = get_identifier ("long_call");
2873 else if (arm_pragma_long_calls == SHORT)
2874 attr_name = get_identifier ("short_call");
2875 else
2876 return;
2878 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2879 TYPE_ATTRIBUTES (type) = type_attr_list;
2883 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2884 defined within the current compilation unit. If this cannot be
2885 determined, then 0 is returned. */
2886 static int
2887 current_file_function_operand (rtx sym_ref)
2889 /* This is a bit of a fib. A function will have a short call flag
2890 applied to its name if it has the short call attribute, or it has
2891 already been defined within the current compilation unit. */
2892 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2893 return 1;
2895 /* The current function is always defined within the current compilation
2896 unit. If it is a weak definition, however, then this may not be the real
2897 definition of the function, and so we have to say no. */
2898 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2899 && !DECL_WEAK (current_function_decl))
2900 return 1;
2902 /* We cannot make the determination - default to returning 0. */
2903 return 0;
2906 /* Return nonzero if a 32 bit "long_call" should be generated for
2907 this call. We generate a long_call if the function:
2909 a. has an __attribute__ ((long_call))
2910 or b. is within the scope of a #pragma long_calls
2911 or c. the -mlong-calls command line switch has been specified
2912 and either:
2913 1. -ffunction-sections is in effect
2914 or 2. the current function has __attribute__ ((section))
2915 or 3. the target function has __attribute__ ((section))
2917 However we do not generate a long call if the function:
2919 d. has an __attribute__ ((short_call))
2920 or e. is inside the scope of a #pragma no_long_calls
2921 or f. is defined within the current compilation unit.
2923 This function will be called by C fragments contained in the machine
2924 description file. SYM_REF and CALL_COOKIE correspond to the matched
2925 rtl operands. CALL_SYMBOL is used to distinguish between
2926 two different callers of the function. It is set to 1 in the
2927 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2928 and "call_value" patterns. This is because of the difference in the
2929 SYM_REFs passed by these patterns. */
2931 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2933 if (!call_symbol)
2935 if (GET_CODE (sym_ref) != MEM)
2936 return 0;
2938 sym_ref = XEXP (sym_ref, 0);
2941 if (GET_CODE (sym_ref) != SYMBOL_REF)
2942 return 0;
2944 if (call_cookie & CALL_SHORT)
2945 return 0;
2947 if (TARGET_LONG_CALLS)
2949 if (flag_function_sections
2950 || DECL_SECTION_NAME (current_function_decl))
2951 /* c.3 is handled by the definition of the
2952 ARM_DECLARE_FUNCTION_SIZE macro. */
2953 return 1;
2956 if (current_file_function_operand (sym_ref))
2957 return 0;
2959 return (call_cookie & CALL_LONG)
2960 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2961 || TARGET_LONG_CALLS;
2964 /* Return nonzero if it is ok to make a tail-call to DECL. */
2965 static bool
2966 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2968 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2970 if (cfun->machine->sibcall_blocked)
2971 return false;
2973 /* Never tailcall something for which we have no decl, or if we
2974 are in Thumb mode. */
2975 if (decl == NULL || TARGET_THUMB)
2976 return false;
2978 /* Get the calling method. */
2979 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2980 call_type = CALL_SHORT;
2981 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2982 call_type = CALL_LONG;
2984 /* Cannot tail-call to long calls, since these are out of range of
2985 a branch instruction. However, if not compiling PIC, we know
2986 we can reach the symbol if it is in this compilation unit. */
2987 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2988 return false;
2990 /* If we are interworking and the function is not declared static
2991 then we can't tail-call it unless we know that it exists in this
2992 compilation unit (since it might be a Thumb routine). */
2993 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2994 return false;
2996 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2997 if (IS_INTERRUPT (arm_current_func_type ()))
2998 return false;
3000 /* Everything else is ok. */
3001 return true;
3005 /* Addressing mode support functions. */
3007 /* Return nonzero if X is a legitimate immediate operand when compiling
3008 for PIC. */
3010 legitimate_pic_operand_p (rtx x)
3012 if (CONSTANT_P (x)
3013 && flag_pic
3014 && (GET_CODE (x) == SYMBOL_REF
3015 || (GET_CODE (x) == CONST
3016 && GET_CODE (XEXP (x, 0)) == PLUS
3017 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
3018 return 0;
3020 return 1;
3024 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3026 if (GET_CODE (orig) == SYMBOL_REF
3027 || GET_CODE (orig) == LABEL_REF)
3029 #ifndef AOF_ASSEMBLER
3030 rtx pic_ref, address;
3031 #endif
3032 rtx insn;
3033 int subregs = 0;
3035 if (reg == 0)
3037 if (no_new_pseudos)
3038 abort ();
3039 else
3040 reg = gen_reg_rtx (Pmode);
3042 subregs = 1;
3045 #ifdef AOF_ASSEMBLER
3046 /* The AOF assembler can generate relocations for these directly, and
3047 understands that the PIC register has to be added into the offset. */
3048 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3049 #else
3050 if (subregs)
3051 address = gen_reg_rtx (Pmode);
3052 else
3053 address = reg;
3055 if (TARGET_ARM)
3056 emit_insn (gen_pic_load_addr_arm (address, orig));
3057 else
3058 emit_insn (gen_pic_load_addr_thumb (address, orig));
3060 if ((GET_CODE (orig) == LABEL_REF
3061 || (GET_CODE (orig) == SYMBOL_REF &&
3062 SYMBOL_REF_LOCAL_P (orig)))
3063 && NEED_GOT_RELOC)
3064 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3065 else
3067 pic_ref = gen_const_mem (Pmode,
3068 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3069 address));
3072 insn = emit_move_insn (reg, pic_ref);
3073 #endif
3074 current_function_uses_pic_offset_table = 1;
3075 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3076 by loop. */
3077 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3078 REG_NOTES (insn));
3079 return reg;
3081 else if (GET_CODE (orig) == CONST)
3083 rtx base, offset;
3085 if (GET_CODE (XEXP (orig, 0)) == PLUS
3086 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3087 return orig;
3089 if (reg == 0)
3091 if (no_new_pseudos)
3092 abort ();
3093 else
3094 reg = gen_reg_rtx (Pmode);
3097 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3099 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3100 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3101 base == reg ? 0 : reg);
3103 else
3104 abort ();
3106 if (GET_CODE (offset) == CONST_INT)
3108 /* The base register doesn't really matter, we only want to
3109 test the index for the appropriate mode. */
3110 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3112 if (!no_new_pseudos)
3113 offset = force_reg (Pmode, offset);
3114 else
3115 abort ();
3118 if (GET_CODE (offset) == CONST_INT)
3119 return plus_constant (base, INTVAL (offset));
3122 if (GET_MODE_SIZE (mode) > 4
3123 && (GET_MODE_CLASS (mode) == MODE_INT
3124 || TARGET_SOFT_FLOAT))
3126 emit_insn (gen_addsi3 (reg, base, offset));
3127 return reg;
3130 return gen_rtx_PLUS (Pmode, base, offset);
3133 return orig;
3137 /* Find a spare low register to use during the prolog of a function. */
3139 static int
3140 thumb_find_work_register (unsigned long pushed_regs_mask)
3142 int reg;
3144 /* Check the argument registers first as these are call-used. The
3145 register allocation order means that sometimes r3 might be used
3146 but earlier argument registers might not, so check them all. */
3147 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3148 if (!regs_ever_live[reg])
3149 return reg;
3151 /* Before going on to check the call-saved registers we can try a couple
3152 more ways of deducing that r3 is available. The first is when we are
3153 pushing anonymous arguments onto the stack and we have less than 4
3154 registers worth of fixed arguments(*). In this case r3 will be part of
3155 the variable argument list and so we can be sure that it will be
3156 pushed right at the start of the function. Hence it will be available
3157 for the rest of the prologue.
3158 (*): i.e. current_function_pretend_args_size is greater than 0. */
3159 if (cfun->machine->uses_anonymous_args
3160 && current_function_pretend_args_size > 0)
3161 return LAST_ARG_REGNUM;
3163 /* The other case is when we have fixed arguments but less than 4 registers
3164 worth. In this case r3 might be used in the body of the function, but
3165 it is not being used to convey an argument into the function. In theory
3166 we could just check current_function_args_size to see how many bytes are
3167 being passed in argument registers, but it seems that it is unreliable.
3168 Sometimes it will have the value 0 when in fact arguments are being
3169 passed. (See testcase execute/20021111-1.c for an example). So we also
3170 check the args_info.nregs field as well. The problem with this field is
3171 that it makes no allowances for arguments that are passed to the
3172 function but which are not used. Hence we could miss an opportunity
3173 when a function has an unused argument in r3. But it is better to be
3174 safe than to be sorry. */
3175 if (! cfun->machine->uses_anonymous_args
3176 && current_function_args_size >= 0
3177 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3178 && cfun->args_info.nregs < 4)
3179 return LAST_ARG_REGNUM;
3181 /* Otherwise look for a call-saved register that is going to be pushed. */
3182 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3183 if (pushed_regs_mask & (1 << reg))
3184 return reg;
3186 /* Something went wrong - thumb_compute_save_reg_mask()
3187 should have arranged for a suitable register to be pushed. */
3188 abort ();
3192 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3193 low register. */
3195 void
3196 arm_load_pic_register (unsigned int scratch)
3198 #ifndef AOF_ASSEMBLER
3199 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3200 rtx global_offset_table;
3202 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3203 return;
3205 if (!flag_pic)
3206 abort ();
3208 l1 = gen_label_rtx ();
3210 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3211 /* On the ARM the PC register contains 'dot + 8' at the time of the
3212 addition, on the Thumb it is 'dot + 4'. */
3213 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3214 if (GOT_PCREL)
3215 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3216 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3217 else
3218 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3220 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3222 if (TARGET_ARM)
3224 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3225 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3227 else
3229 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3231 /* We will have pushed the pic register, so should always be
3232 able to find a work register. */
3233 pic_tmp = gen_rtx_REG (SImode, scratch);
3234 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3235 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3237 else
3238 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3239 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3242 /* Need to emit this whether or not we obey regdecls,
3243 since setjmp/longjmp can cause life info to screw up. */
3244 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3245 #endif /* AOF_ASSEMBLER */
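/* In ARM mode the sequence emitted above looks roughly like this
   (label names are illustrative):

       ldr     sl, .LPIC_lit
   .LPIC0:
       add     sl, pc, sl
       ...
   .LPIC_lit:
       .word   _GLOBAL_OFFSET_TABLE_ - (.LPIC0 + 8)

   where sl stands for whatever register holds the PIC base; the "+ 8"
   matches the pc read-ahead noted above, and Thumb uses "+ 4".  */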
3249 /* Return nonzero if X is valid as an ARM state addressing register. */
3250 static int
3251 arm_address_register_rtx_p (rtx x, int strict_p)
3253 int regno;
3255 if (GET_CODE (x) != REG)
3256 return 0;
3258 regno = REGNO (x);
3260 if (strict_p)
3261 return ARM_REGNO_OK_FOR_BASE_P (regno);
3263 return (regno <= LAST_ARM_REGNUM
3264 || regno >= FIRST_PSEUDO_REGISTER
3265 || regno == FRAME_POINTER_REGNUM
3266 || regno == ARG_POINTER_REGNUM);
3269 /* Return nonzero if X is a valid ARM state address operand. */
3271 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3272 int strict_p)
3274 bool use_ldrd;
3275 enum rtx_code code = GET_CODE (x);
3277 if (arm_address_register_rtx_p (x, strict_p))
3278 return 1;
3280 use_ldrd = (TARGET_LDRD
3281 && (mode == DImode
3282 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3284 if (code == POST_INC || code == PRE_DEC
3285 || ((code == PRE_INC || code == POST_DEC)
3286 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3287 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3289 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3290 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3291 && GET_CODE (XEXP (x, 1)) == PLUS
3292 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3294 rtx addend = XEXP (XEXP (x, 1), 1);
3296 /* Don't allow ldrd post increment by register because it's hard
3297 to fixup invalid register choices. */
3298 if (use_ldrd
3299 && GET_CODE (x) == POST_MODIFY
3300 && GET_CODE (addend) == REG)
3301 return 0;
3303 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3304 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3307 /* After reload constants split into minipools will have addresses
3308 from a LABEL_REF. */
3309 else if (reload_completed
3310 && (code == LABEL_REF
3311 || (code == CONST
3312 && GET_CODE (XEXP (x, 0)) == PLUS
3313 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3314 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3315 return 1;
3317 else if (mode == TImode)
3318 return 0;
3320 else if (code == PLUS)
3322 rtx xop0 = XEXP (x, 0);
3323 rtx xop1 = XEXP (x, 1);
3325 return ((arm_address_register_rtx_p (xop0, strict_p)
3326 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3327 || (arm_address_register_rtx_p (xop1, strict_p)
3328 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3331 #if 0
3332 /* Reload currently can't handle MINUS, so disable this for now */
3333 else if (GET_CODE (x) == MINUS)
3335 rtx xop0 = XEXP (x, 0);
3336 rtx xop1 = XEXP (x, 1);
3338 return (arm_address_register_rtx_p (xop0, strict_p)
3339 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3341 #endif
3343 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3344 && code == SYMBOL_REF
3345 && CONSTANT_POOL_ADDRESS_P (x)
3346 && ! (flag_pic
3347 && symbol_mentioned_p (get_pool_constant (x))))
3348 return 1;
3350 return 0;
3353 /* Return nonzero if INDEX is valid for an address index operand in
3354 ARM state. */
3355 static int
3356 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3357 int strict_p)
3359 HOST_WIDE_INT range;
3360 enum rtx_code code = GET_CODE (index);
3362 /* Standard coprocessor addressing modes. */
3363 if (TARGET_HARD_FLOAT
3364 && (TARGET_FPA || TARGET_MAVERICK)
3365 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3366 || (TARGET_MAVERICK && mode == DImode)))
3367 return (code == CONST_INT && INTVAL (index) < 1024
3368 && INTVAL (index) > -1024
3369 && (INTVAL (index) & 3) == 0);
3371 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3372 return (code == CONST_INT
3373 && INTVAL (index) < 1024
3374 && INTVAL (index) > -1024
3375 && (INTVAL (index) & 3) == 0);
3377 if (arm_address_register_rtx_p (index, strict_p)
3378 && (GET_MODE_SIZE (mode) <= 4))
3379 return 1;
3381 if (mode == DImode || mode == DFmode)
3383 if (code == CONST_INT)
3385 HOST_WIDE_INT val = INTVAL (index);
3387 if (TARGET_LDRD)
3388 return val > -256 && val < 256;
3389 else
3390 return val > -4096 && val < 4092;
3393 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3396 if (GET_MODE_SIZE (mode) <= 4
3397 && ! (arm_arch4
3398 && (mode == HImode
3399 || (mode == QImode && outer == SIGN_EXTEND))))
3401 if (code == MULT)
3403 rtx xiop0 = XEXP (index, 0);
3404 rtx xiop1 = XEXP (index, 1);
3406 return ((arm_address_register_rtx_p (xiop0, strict_p)
3407 && power_of_two_operand (xiop1, SImode))
3408 || (arm_address_register_rtx_p (xiop1, strict_p)
3409 && power_of_two_operand (xiop0, SImode)));
3411 else if (code == LSHIFTRT || code == ASHIFTRT
3412 || code == ASHIFT || code == ROTATERT)
3414 rtx op = XEXP (index, 1);
3416 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3417 && GET_CODE (op) == CONST_INT
3418 && INTVAL (op) > 0
3419 && INTVAL (op) <= 31);
3423 /* For ARM v4 we may be doing a sign-extend operation during the
3424 load. */
3425 if (arm_arch4)
3427 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3428 range = 256;
3429 else
3430 range = 4096;
3432 else
3433 range = (mode == HImode) ? 4095 : 4096;
3435 return (code == CONST_INT
3436 && INTVAL (index) < range
3437 && INTVAL (index) > -range);
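/* Illustrative index forms accepted by the checks above:

       ldr   r0, [r1, r2]           -- register index
       ldr   r0, [r1, r2, lsl #2]   -- index scaled by a power of two
       ldr   r0, [r1, #4092]        -- SImode, inside the 4096 range
       ldrh  r0, [r1, #254]         -- HImode on ARMv4, inside the 256 range
       ldrd  r0, [r1, #-248]        -- DImode with LDRD, inside -256..255

   The permitted range and scaling depend on the mode and on the target
   checks above.  */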
3440 /* Return nonzero if X is valid as a Thumb state base register. */
3441 static int
3442 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3444 int regno;
3446 if (GET_CODE (x) != REG)
3447 return 0;
3449 regno = REGNO (x);
3451 if (strict_p)
3452 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3454 return (regno <= LAST_LO_REGNUM
3455 || regno > LAST_VIRTUAL_REGISTER
3456 || regno == FRAME_POINTER_REGNUM
3457 || (GET_MODE_SIZE (mode) >= 4
3458 && (regno == STACK_POINTER_REGNUM
3459 || regno >= FIRST_PSEUDO_REGISTER
3460 || x == hard_frame_pointer_rtx
3461 || x == arg_pointer_rtx)));
3464 /* Return nonzero if x is a legitimate index register. This is the case
3465 for any base register that can access a QImode object. */
3466 inline static int
3467 thumb_index_register_rtx_p (rtx x, int strict_p)
3469 return thumb_base_register_rtx_p (x, QImode, strict_p);
3472 /* Return nonzero if x is a legitimate Thumb-state address.
3474 The AP may be eliminated to either the SP or the FP, so we use the
3475 least common denominator, e.g. SImode, and offsets from 0 to 64.
3477 ??? Verify whether the above is the right approach.
3479 ??? Also, the FP may be eliminated to the SP, so perhaps that
3480 needs special handling also.
3482 ??? Look at how the mips16 port solves this problem. It probably uses
3483 better ways to solve some of these problems.
3485 Although it is not incorrect, we don't accept QImode and HImode
3486 addresses based on the frame pointer or arg pointer until the
3487 reload pass starts. This is so that eliminating such addresses
3488 into stack based ones won't produce impossible code. */
3490 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3492 /* ??? Not clear if this is right. Experiment. */
3493 if (GET_MODE_SIZE (mode) < 4
3494 && !(reload_in_progress || reload_completed)
3495 && (reg_mentioned_p (frame_pointer_rtx, x)
3496 || reg_mentioned_p (arg_pointer_rtx, x)
3497 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3498 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3499 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3500 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3501 return 0;
3503 /* Accept any base register. SP only in SImode or larger. */
3504 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3505 return 1;
3507 /* This is PC relative data before arm_reorg runs. */
3508 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3509 && GET_CODE (x) == SYMBOL_REF
3510 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3511 return 1;
3513 /* This is PC relative data after arm_reorg runs. */
3514 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3515 && (GET_CODE (x) == LABEL_REF
3516 || (GET_CODE (x) == CONST
3517 && GET_CODE (XEXP (x, 0)) == PLUS
3518 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3519 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3520 return 1;
3522 /* Post-inc indexing only supported for SImode and larger. */
3523 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3524 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3525 return 1;
3527 else if (GET_CODE (x) == PLUS)
3529 /* REG+REG address can be any two index registers. */
3530 /* We disallow FRAME+REG addressing since we know that FRAME
3531 will be replaced with STACK, and SP relative addressing only
3532 permits SP+OFFSET. */
3533 if (GET_MODE_SIZE (mode) <= 4
3534 && XEXP (x, 0) != frame_pointer_rtx
3535 && XEXP (x, 1) != frame_pointer_rtx
3536 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3537 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3538 return 1;
3540 /* REG+const has 5-7 bit offset for non-SP registers. */
3541 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3542 || XEXP (x, 0) == arg_pointer_rtx)
3543 && GET_CODE (XEXP (x, 1)) == CONST_INT
3544 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3545 return 1;
3547 /* REG+const has 10 bit offset for SP, but only SImode and
3548 larger is supported. */
3549 /* ??? Should probably check for DI/DFmode overflow here
3550 just like GO_IF_LEGITIMATE_OFFSET does. */
3551 else if (GET_CODE (XEXP (x, 0)) == REG
3552 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3553 && GET_MODE_SIZE (mode) >= 4
3554 && GET_CODE (XEXP (x, 1)) == CONST_INT
3555 && INTVAL (XEXP (x, 1)) >= 0
3556 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3557 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3558 return 1;
3560 else if (GET_CODE (XEXP (x, 0)) == REG
3561 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3562 && GET_MODE_SIZE (mode) >= 4
3563 && GET_CODE (XEXP (x, 1)) == CONST_INT
3564 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3565 return 1;
3568 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3569 && GET_MODE_SIZE (mode) == 4
3570 && GET_CODE (x) == SYMBOL_REF
3571 && CONSTANT_POOL_ADDRESS_P (x)
3572 && !(flag_pic
3573 && symbol_mentioned_p (get_pool_constant (x))))
3574 return 1;
3576 return 0;
3579 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3580 instruction of mode MODE. */
3582 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3584 switch (GET_MODE_SIZE (mode))
3586 case 1:
3587 return val >= 0 && val < 32;
3589 case 2:
3590 return val >= 0 && val < 64 && (val & 1) == 0;
3592 default:
3593 return (val >= 0
3594 && (val + GET_MODE_SIZE (mode)) <= 128
3595 && (val & 3) == 0);
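/* In other words: byte accesses accept offsets 0..31, halfword
   accesses 0..62 in steps of 2, and word accesses 0..124 in steps
   of 4, matching the 5-bit scaled immediate fields in the Thumb
   load/store encodings.  */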
3599 /* Try machine-dependent ways of modifying an illegitimate address
3600 to be legitimate. If we find one, return the new, valid address. */
3602 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3604 if (GET_CODE (x) == PLUS)
3606 rtx xop0 = XEXP (x, 0);
3607 rtx xop1 = XEXP (x, 1);
3609 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3610 xop0 = force_reg (SImode, xop0);
3612 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3613 xop1 = force_reg (SImode, xop1);
3615 if (ARM_BASE_REGISTER_RTX_P (xop0)
3616 && GET_CODE (xop1) == CONST_INT)
3618 HOST_WIDE_INT n, low_n;
3619 rtx base_reg, val;
3620 n = INTVAL (xop1);
3622 /* VFP addressing modes actually allow greater offsets, but for
3623 now we just stick with the lowest common denominator. */
3624 if (mode == DImode
3625 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3627 low_n = n & 0x0f;
3628 n &= ~0x0f;
3629 if (low_n > 4)
3631 n += 16;
3632 low_n -= 16;
3635 else
3637 low_n = ((mode) == TImode ? 0
3638 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3639 n -= low_n;
3642 base_reg = gen_reg_rtx (SImode);
3643 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3644 GEN_INT (n)), NULL_RTX);
3645 emit_move_insn (base_reg, val);
3646 x = (low_n == 0 ? base_reg
3647 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3649 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3650 x = gen_rtx_PLUS (SImode, xop0, xop1);
3653 /* XXX We don't allow MINUS any more -- see comment in
3654 arm_legitimate_address_p (). */
3655 else if (GET_CODE (x) == MINUS)
3657 rtx xop0 = XEXP (x, 0);
3658 rtx xop1 = XEXP (x, 1);
3660 if (CONSTANT_P (xop0))
3661 xop0 = force_reg (SImode, xop0);
3663 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3664 xop1 = force_reg (SImode, xop1);
3666 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3667 x = gen_rtx_MINUS (SImode, xop0, xop1);
3670 if (flag_pic)
3672 /* We need to find and carefully transform any SYMBOL and LABEL
3673 references; so go back to the original address expression. */
3674 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3676 if (new_x != orig_x)
3677 x = new_x;
3680 return x;
3684 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3685 to be legitimate. If we find one, return the new, valid address. */
3687 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3689 if (GET_CODE (x) == PLUS
3690 && GET_CODE (XEXP (x, 1)) == CONST_INT
3691 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3692 || INTVAL (XEXP (x, 1)) < 0))
3694 rtx xop0 = XEXP (x, 0);
3695 rtx xop1 = XEXP (x, 1);
3696 HOST_WIDE_INT offset = INTVAL (xop1);
3698 /* Try to fold the offset into a biasing of the base register and
3699 then offsetting that. Don't do this when optimizing for space
3700 since it can cause too many CSEs. */
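/* Worked example (illustrative): an SImode access at offset 300 is
   split below into delta = 300 - (256 - 4) = 48, so the base is
   biased by 252 and the access itself uses a #48 offset; both
   constants are directly encodable in Thumb.  */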
3701 if (optimize_size && offset >= 0
3702 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3704 HOST_WIDE_INT delta;
3706 if (offset >= 256)
3707 delta = offset - (256 - GET_MODE_SIZE (mode));
3708 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3709 delta = 31 * GET_MODE_SIZE (mode);
3710 else
3711 delta = offset & (~31 * GET_MODE_SIZE (mode));
3713 xop0 = force_operand (plus_constant (xop0, offset - delta),
3714 NULL_RTX);
3715 x = plus_constant (xop0, delta);
3717 else if (offset < 0 && offset > -256)
3718 /* Small negative offsets are best done with a subtract before the
3719 dereference, forcing these into a register normally takes two
3720 instructions. */
3721 x = force_operand (x, NULL_RTX);
3722 else
3724 /* For the remaining cases, force the constant into a register. */
3725 xop1 = force_reg (SImode, xop1);
3726 x = gen_rtx_PLUS (SImode, xop0, xop1);
3729 else if (GET_CODE (x) == PLUS
3730 && s_register_operand (XEXP (x, 1), SImode)
3731 && !s_register_operand (XEXP (x, 0), SImode))
3733 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3735 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3738 if (flag_pic)
3740 /* We need to find and carefully transform any SYMBOL and LABEL
3741 references; so go back to the original address expression. */
3742 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3744 if (new_x != orig_x)
3745 x = new_x;
3748 return x;
3753 #define REG_OR_SUBREG_REG(X) \
3754 (GET_CODE (X) == REG \
3755 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3757 #define REG_OR_SUBREG_RTX(X) \
3758 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3760 #ifndef COSTS_N_INSNS
3761 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3762 #endif
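/* Estimate the cost of computing rtx X when compiling for Thumb.  CODE is
   GET_CODE (X) and OUTER is the code of the expression containing X, which
   lets constants that are free as immediates of the outer operation be
   costed at zero.  */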
3763 static inline int
3764 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3766 enum machine_mode mode = GET_MODE (x);
3768 switch (code)
3770 case ASHIFT:
3771 case ASHIFTRT:
3772 case LSHIFTRT:
3773 case ROTATERT:
3774 case PLUS:
3775 case MINUS:
3776 case COMPARE:
3777 case NEG:
3778 case NOT:
3779 return COSTS_N_INSNS (1);
3781 case MULT:
3782 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3784 int cycles = 0;
3785 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3787 while (i)
3789 i >>= 2;
3790 cycles++;
3792 return COSTS_N_INSNS (2) + cycles;
3794 return COSTS_N_INSNS (1) + 16;
3796 case SET:
3797 return (COSTS_N_INSNS (1)
3798 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3799 + (GET_CODE (SET_DEST (x)) == MEM)));
3801 case CONST_INT:
3802 if (outer == SET)
3804 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3805 return 0;
3806 if (thumb_shiftable_const (INTVAL (x)))
3807 return COSTS_N_INSNS (2);
3808 return COSTS_N_INSNS (3);
3810 else if ((outer == PLUS || outer == COMPARE)
3811 && INTVAL (x) < 256 && INTVAL (x) > -256)
3812 return 0;
3813 else if (outer == AND
3814 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3815 return COSTS_N_INSNS (1);
3816 else if (outer == ASHIFT || outer == ASHIFTRT
3817 || outer == LSHIFTRT)
3818 return 0;
3819 return COSTS_N_INSNS (2);
3821 case CONST:
3822 case CONST_DOUBLE:
3823 case LABEL_REF:
3824 case SYMBOL_REF:
3825 return COSTS_N_INSNS (3);
3827 case UDIV:
3828 case UMOD:
3829 case DIV:
3830 case MOD:
3831 return 100;
3833 case TRUNCATE:
3834 return 99;
3836 case AND:
3837 case XOR:
3838 case IOR:
3839 /* XXX guess. */
3840 return 8;
3842 case MEM:
3843 /* XXX another guess. */
3844 /* Memory costs quite a lot for the first word, but subsequent words
3845 load at the equivalent of a single insn each. */
3846 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3847 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3848 ? 4 : 0));
3850 case IF_THEN_ELSE:
3851 /* XXX a guess. */
3852 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3853 return 14;
3854 return 2;
3856 case ZERO_EXTEND:
3857 /* XXX still guessing. */
3858 switch (GET_MODE (XEXP (x, 0)))
3860 case QImode:
3861 return (1 + (mode == DImode ? 4 : 0)
3862 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3864 case HImode:
3865 return (4 + (mode == DImode ? 4 : 0)
3866 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3868 case SImode:
3869 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3871 default:
3872 return 99;
3875 default:
3876 return 99;
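/* A worked example of the MULT costing above (illustrative only):
   multiplying by the constant 100 (seven significant bits) takes four
   iterations of the 2-bits-per-step loop, so the cost is
   COSTS_N_INSNS (2) + 4, whereas a multiply by a non-constant falls back
   to the flat COSTS_N_INSNS (1) + 16.  The loop appears to model a
   multiplier that retires two bits of the multiplier value per cycle.  */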
3881 /* Worker routine for arm_rtx_costs. */
3882 static inline int
3883 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3885 enum machine_mode mode = GET_MODE (x);
3886 enum rtx_code subcode;
3887 int extra_cost;
3889 switch (code)
3891 case MEM:
3892 /* Memory costs quite a lot for the first word, but subsequent words
3893 load at the equivalent of a single insn each. */
3894 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3895 + (GET_CODE (x) == SYMBOL_REF
3896 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3898 case DIV:
3899 case MOD:
3900 case UDIV:
3901 case UMOD:
3902 return optimize_size ? COSTS_N_INSNS (2) : 100;
3904 case ROTATE:
3905 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3906 return 4;
3907 /* Fall through */
3908 case ROTATERT:
3909 if (mode != SImode)
3910 return 8;
3911 /* Fall through */
3912 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3913 if (mode == DImode)
3914 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3915 + ((GET_CODE (XEXP (x, 0)) == REG
3916 || (GET_CODE (XEXP (x, 0)) == SUBREG
3917 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3918 ? 0 : 8));
3919 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3920 || (GET_CODE (XEXP (x, 0)) == SUBREG
3921 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3922 ? 0 : 4)
3923 + ((GET_CODE (XEXP (x, 1)) == REG
3924 || (GET_CODE (XEXP (x, 1)) == SUBREG
3925 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3926 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3927 ? 0 : 4));
3929 case MINUS:
3930 if (mode == DImode)
3931 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3932 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3933 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3934 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3935 ? 0 : 8));
3937 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3938 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3939 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3940 && arm_const_double_rtx (XEXP (x, 1))))
3941 ? 0 : 8)
3942 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3943 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3944 && arm_const_double_rtx (XEXP (x, 0))))
3945 ? 0 : 8));
3947 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3948 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3949 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3950 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3951 || subcode == ASHIFTRT || subcode == LSHIFTRT
3952 || subcode == ROTATE || subcode == ROTATERT
3953 || (subcode == MULT
3954 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3955 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3956 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3957 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3958 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3959 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3960 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3961 return 1;
3962 /* Fall through */
3964 case PLUS:
3965 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3966 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3967 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3968 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3969 && arm_const_double_rtx (XEXP (x, 1))))
3970 ? 0 : 8));
3972 /* Fall through */
3973 case AND: case XOR: case IOR:
3974 extra_cost = 0;
3976 /* Normally the frame registers will be split into reg+const during
3977 reload, so it is a bad idea to combine them with other instructions,
3978 since then they might not be moved outside of loops. As a compromise
3979 we allow integration with ops that have a constant as their second
3980 operand. */
3981 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3982 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3983 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3984 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3985 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3986 extra_cost = 4;
3988 if (mode == DImode)
3989 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3990 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3991 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3992 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3993 ? 0 : 8));
3995 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3996 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3997 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3998 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3999 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4000 ? 0 : 4));
4002 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4003 return (1 + extra_cost
4004 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4005 || subcode == LSHIFTRT || subcode == ASHIFTRT
4006 || subcode == ROTATE || subcode == ROTATERT
4007 || (subcode == MULT
4008 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4009 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4010 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4011 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4012 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4013 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4014 ? 0 : 4));
4016 return 8;
4018 case MULT:
4019 /* This should have been handled by the CPU specific routines. */
4020 abort ();
4022 case TRUNCATE:
4023 if (arm_arch3m && mode == SImode
4024 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4025 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4026 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4027 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4028 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4029 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4030 return 8;
4031 return 99;
4033 case NEG:
4034 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4035 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4036 /* Fall through */
4037 case NOT:
4038 if (mode == DImode)
4039 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4041 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4043 case IF_THEN_ELSE:
4044 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4045 return 14;
4046 return 2;
4048 case COMPARE:
4049 return 1;
4051 case ABS:
4052 return 4 + (mode == DImode ? 4 : 0);
4054 case SIGN_EXTEND:
4055 if (GET_MODE (XEXP (x, 0)) == QImode)
4056 return (4 + (mode == DImode ? 4 : 0)
4057 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4058 /* Fall through */
4059 case ZERO_EXTEND:
4060 switch (GET_MODE (XEXP (x, 0)))
4062 case QImode:
4063 return (1 + (mode == DImode ? 4 : 0)
4064 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4066 case HImode:
4067 return (4 + (mode == DImode ? 4 : 0)
4068 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4070 case SImode:
4071 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4073 case V8QImode:
4074 case V4HImode:
4075 case V2SImode:
4076 case V4QImode:
4077 case V2HImode:
4078 return 1;
4080 default:
4081 break;
4083 abort ();
4085 case CONST_INT:
4086 if (const_ok_for_arm (INTVAL (x)))
4087 return outer == SET ? 2 : -1;
4088 else if (outer == AND
4089 && const_ok_for_arm (~INTVAL (x)))
4090 return -1;
4091 else if ((outer == COMPARE
4092 || outer == PLUS || outer == MINUS)
4093 && const_ok_for_arm (-INTVAL (x)))
4094 return -1;
4095 else
4096 return 5;
4098 case CONST:
4099 case LABEL_REF:
4100 case SYMBOL_REF:
4101 return 6;
4103 case CONST_DOUBLE:
4104 if (arm_const_double_rtx (x))
4105 return outer == SET ? 2 : -1;
4106 else if ((outer == COMPARE || outer == PLUS)
4107 && neg_const_double_rtx_ok_for_fpa (x))
4108 return -1;
4109 return 7;
4111 default:
4112 return 99;
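/* Background for the CONST_INT cases above: an ARM data-processing
   immediate is an 8-bit value rotated right by an even amount, so, for
   example, 0x104 (0x41 rotated right by 30) is free as an operand while
   0x101 cannot be encoded and must be synthesised or loaded.  That is why
   const_ok_for_arm is also tried on the bitwise complement (an AND can
   become a BIC) and on the negated value (a CMP can become a CMN, an ADD
   a SUB).  */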
4116 /* RTX costs when optimizing for size. */
4117 static bool
4118 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4120 enum machine_mode mode = GET_MODE (x);
4122 if (TARGET_THUMB)
4124 /* XXX TBD. For now, use the standard costs. */
4125 *total = thumb_rtx_costs (x, code, outer_code);
4126 return true;
4129 switch (code)
4131 case MEM:
4132 /* A memory access costs 1 insn if the mode is small, or the address is
4133 a single register; otherwise it costs one insn per word. */
4134 if (REG_P (XEXP (x, 0)))
4135 *total = COSTS_N_INSNS (1);
4136 else
4137 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4138 return true;
4140 case DIV:
4141 case MOD:
4142 case UDIV:
4143 case UMOD:
4144 /* Needs a libcall, so it costs about this. */
4145 *total = COSTS_N_INSNS (2);
4146 return false;
4148 case ROTATE:
4149 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4151 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4152 return true;
4154 /* Fall through */
4155 case ROTATERT:
4156 case ASHIFT:
4157 case LSHIFTRT:
4158 case ASHIFTRT:
4159 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4161 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4162 return true;
4164 else if (mode == SImode)
4166 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4167 /* Slightly disparage register shifts, but not by much. */
4168 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4169 *total += 1 + rtx_cost (XEXP (x, 1), code);
4170 return true;
4173 /* Needs a libcall. */
4174 *total = COSTS_N_INSNS (2);
4175 return false;
4177 case MINUS:
4178 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4180 *total = COSTS_N_INSNS (1);
4181 return false;
4184 if (mode == SImode)
4186 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4187 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4189 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4190 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4191 || subcode1 == ROTATE || subcode1 == ROTATERT
4192 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4193 || subcode1 == ASHIFTRT)
4195 /* It's just the cost of the two operands. */
4196 *total = 0;
4197 return false;
4200 *total = COSTS_N_INSNS (1);
4201 return false;
4204 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4205 return false;
4207 case PLUS:
4208 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4210 *total = COSTS_N_INSNS (1);
4211 return false;
4214 /* Fall through */
4215 case AND: case XOR: case IOR:
4216 if (mode == SImode)
4218 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4220 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4221 || subcode == LSHIFTRT || subcode == ASHIFTRT
4222 || (code == AND && subcode == NOT))
4224 /* It's just the cost of the two operands. */
4225 *total = 0;
4226 return false;
4230 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4231 return false;
4233 case MULT:
4234 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4235 return false;
4237 case NEG:
4238 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4239 *total = COSTS_N_INSNS (1);
4240 /* Fall through */
4241 case NOT:
4242 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4244 return false;
4246 case IF_THEN_ELSE:
4247 *total = 0;
4248 return false;
4250 case COMPARE:
4251 if (cc_register (XEXP (x, 0), VOIDmode))
4252 * total = 0;
4253 else
4254 *total = COSTS_N_INSNS (1);
4255 return false;
4257 case ABS:
4258 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4259 *total = COSTS_N_INSNS (1);
4260 else
4261 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4262 return false;
4264 case SIGN_EXTEND:
4265 *total = 0;
4266 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4268 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4269 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4271 if (mode == DImode)
4272 *total += COSTS_N_INSNS (1);
4273 return false;
4275 case ZERO_EXTEND:
4276 *total = 0;
4277 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4279 switch (GET_MODE (XEXP (x, 0)))
4281 case QImode:
4282 *total += COSTS_N_INSNS (1);
4283 break;
4285 case HImode:
4286 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4288 case SImode:
4289 break;
4291 default:
4292 *total += COSTS_N_INSNS (2);
4296 if (mode == DImode)
4297 *total += COSTS_N_INSNS (1);
4299 return false;
4301 case CONST_INT:
4302 if (const_ok_for_arm (INTVAL (x)))
4303 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4304 else if (const_ok_for_arm (~INTVAL (x)))
4305 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4306 else if (const_ok_for_arm (-INTVAL (x)))
4308 if (outer_code == COMPARE || outer_code == PLUS
4309 || outer_code == MINUS)
4310 *total = 0;
4311 else
4312 *total = COSTS_N_INSNS (1);
4314 else
4315 *total = COSTS_N_INSNS (2);
4316 return true;
4318 case CONST:
4319 case LABEL_REF:
4320 case SYMBOL_REF:
4321 *total = COSTS_N_INSNS (2);
4322 return true;
4324 case CONST_DOUBLE:
4325 *total = COSTS_N_INSNS (4);
4326 return true;
4328 default:
4329 if (mode != VOIDmode)
4330 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4331 else
4332 *total = COSTS_N_INSNS (4); /* Who knows? */
4333 return false;
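/* A note on the SIGN_EXTEND/ZERO_EXTEND size costs above: ARMv4 added the
   ldrh/ldrsh/ldrsb forms, so an extending load adds no extra instruction
   when arm_arch4 is set and the operand is a MEM.  For register operands,
   ARMv6's sxtb/uxtb/sxth/uxth do the job in one instruction, while older
   cores need an AND (zero-extending a byte) or a pair of shifts, which is
   what the "arm_arch6 ? 1 : 2" terms are modelling.  */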
4337 /* RTX costs for cores with a slow MUL implementation. */
4339 static bool
4340 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4342 enum machine_mode mode = GET_MODE (x);
4344 if (TARGET_THUMB)
4346 *total = thumb_rtx_costs (x, code, outer_code);
4347 return true;
4350 switch (code)
4352 case MULT:
4353 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4354 || mode == DImode)
4356 *total = 30;
4357 return true;
4360 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4362 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4363 & (unsigned HOST_WIDE_INT) 0xffffffff);
4364 int cost, const_ok = const_ok_for_arm (i);
4365 int j, booth_unit_size;
4367 /* Tune as appropriate. */
4368 cost = const_ok ? 4 : 8;
4369 booth_unit_size = 2;
4370 for (j = 0; i && j < 32; j += booth_unit_size)
4372 i >>= booth_unit_size;
4373 cost += 2;
4376 *total = cost;
4377 return true;
4380 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4381 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4382 return true;
4384 default:
4385 *total = arm_rtx_costs_1 (x, code, outer_code);
4386 return true;
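/* Illustration of the Booth-style constant-multiply costing (hypothetical
   values): with booth_unit_size == 2, multiplying by 85 (0x55, which is
   const_ok_for_arm) needs four loop iterations, giving a cost of
   4 + 4 * 2 = 12; the fast-multiply variant below uses booth_unit_size == 8
   and reaches zero after a single iteration, for a cost of 4 + 2 = 6.  */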
4391 /* RTX cost for cores with a fast multiply unit (M variants). */
4393 static bool
4394 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4396 enum machine_mode mode = GET_MODE (x);
4398 if (TARGET_THUMB)
4400 *total = thumb_rtx_costs (x, code, outer_code);
4401 return true;
4404 switch (code)
4406 case MULT:
4407 /* There is no point basing this on the tuning, since it is always the
4408 fast variant if it exists at all. */
4409 if (mode == DImode
4410 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4411 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4412 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4414 *total = 8;
4415 return true;
4419 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4420 || mode == DImode)
4422 *total = 30;
4423 return true;
4426 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4428 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4429 & (unsigned HOST_WIDE_INT) 0xffffffff);
4430 int cost, const_ok = const_ok_for_arm (i);
4431 int j, booth_unit_size;
4433 /* Tune as appropriate. */
4434 cost = const_ok ? 4 : 8;
4435 booth_unit_size = 8;
4436 for (j = 0; i && j < 32; j += booth_unit_size)
4438 i >>= booth_unit_size;
4439 cost += 2;
4442 *total = cost;
4443 return true;
4446 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4447 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4448 return true;
4450 default:
4451 *total = arm_rtx_costs_1 (x, code, outer_code);
4452 return true;
4457 /* RTX cost for XScale CPUs. */
4459 static bool
4460 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4462 enum machine_mode mode = GET_MODE (x);
4464 if (TARGET_THUMB)
4466 *total = thumb_rtx_costs (x, code, outer_code);
4467 return true;
4470 switch (code)
4472 case MULT:
4473 /* There is no point basing this on the tuning, since it is always the
4474 fast variant if it exists at all. */
4475 if (mode == DImode
4476 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4477 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4478 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4480 *total = 8;
4481 return true;
4485 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4486 || mode == DImode)
4488 *total = 30;
4489 return true;
4492 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4494 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4495 & (unsigned HOST_WIDE_INT) 0xffffffff);
4496 int cost, const_ok = const_ok_for_arm (i);
4497 unsigned HOST_WIDE_INT masked_const;
4499 /* The cost will be related to two insns.
4500 First a load of the constant (MOV or LDR), then a multiply. */
4501 cost = 2;
4502 if (! const_ok)
4503 cost += 1; /* LDR is probably more expensive because
4504 of longer result latency. */
4505 masked_const = i & 0xffff8000;
4506 if (masked_const != 0 && masked_const != 0xffff8000)
4508 masked_const = i & 0xf8000000;
4509 if (masked_const == 0 || masked_const == 0xf8000000)
4510 cost += 1;
4511 else
4512 cost += 2;
4514 *total = cost;
4515 return true;
4518 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4519 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4520 return true;
4522 case COMPARE:
4523 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4524 will stall until the multiplication is complete. */
4525 if (GET_CODE (XEXP (x, 0)) == MULT)
4526 *total = 4 + rtx_cost (XEXP (x, 0), code);
4527 else
4528 *total = arm_rtx_costs_1 (x, code, outer_code);
4529 return true;
4531 default:
4532 *total = arm_rtx_costs_1 (x, code, outer_code);
4533 return true;
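/* The XScale multiply costing above distinguishes three classes of constant
   multiplier: one that fits in a signed 16-bit value (no extra penalty),
   one whose top five bits are all zeros or all ones (one extra unit), and
   anything else (two extra units); a further unit is added when the
   constant is not a valid ARM immediate and therefore has to be loaded
   with an LDR.  */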
4538 /* RTX costs for 9e (and later) cores. */
4540 static bool
4541 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4543 enum machine_mode mode = GET_MODE (x);
4544 int nonreg_cost;
4545 int cost;
4547 if (TARGET_THUMB)
4549 switch (code)
4551 case MULT:
4552 *total = COSTS_N_INSNS (3);
4553 return true;
4555 default:
4556 *total = thumb_rtx_costs (x, code, outer_code);
4557 return true;
4561 switch (code)
4563 case MULT:
4564 /* There is no point basing this on the tuning, since it is always the
4565 fast variant if it exists at all. */
4566 if (mode == DImode
4567 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4568 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4569 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4571 *total = 3;
4572 return true;
4576 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4578 *total = 30;
4579 return true;
4581 if (mode == DImode)
4583 cost = 7;
4584 nonreg_cost = 8;
4586 else
4588 cost = 2;
4589 nonreg_cost = 4;
4593 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4594 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4595 return true;
4597 default:
4598 *total = arm_rtx_costs_1 (x, code, outer_code);
4599 return true;
4602 /* All address computations that can be done are free, but rtx cost returns
4603 the same for practically all of them. So we weight the different types
4604 of address here in the order (most pref first):
4605 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4606 static inline int
4607 arm_arm_address_cost (rtx x)
4609 enum rtx_code c = GET_CODE (x);
4611 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4612 return 0;
4613 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4614 return 10;
4616 if (c == PLUS || c == MINUS)
4618 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4619 return 2;
4621 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4622 return 3;
4624 return 4;
4627 return 6;
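/* To make the ordering concrete: a post-increment address such as
   [r3], #4 gets weight 0 here, a register plus a shifted register 3, a
   register plus register (or plus constant) 4, a lone register 6 and a
   constant-pool SYMBOL_REF 10, so the cheaper auto-modify and offset forms
   are preferred when several legitimate addresses are available.  */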
4630 static inline int
4631 arm_thumb_address_cost (rtx x)
4633 enum rtx_code c = GET_CODE (x);
4635 if (c == REG)
4636 return 1;
4637 if (c == PLUS
4638 && GET_CODE (XEXP (x, 0)) == REG
4639 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4640 return 1;
4642 return 2;
4645 static int
4646 arm_address_cost (rtx x)
4648 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4651 static int
4652 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4654 rtx i_pat, d_pat;
4656 /* Some true dependencies can have a higher cost depending
4657 on precisely how certain input operands are used. */
4658 if (arm_tune_xscale
4659 && REG_NOTE_KIND (link) == 0
4660 && recog_memoized (insn) >= 0
4661 && recog_memoized (dep) >= 0)
4663 int shift_opnum = get_attr_shift (insn);
4664 enum attr_type attr_type = get_attr_type (dep);
4666 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4667 operand for INSN. If we have a shifted input operand and the
4668 instruction we depend on is another ALU instruction, then we may
4669 have to account for an additional stall. */
4670 if (shift_opnum != 0
4671 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4673 rtx shifted_operand;
4674 int opno;
4676 /* Get the shifted operand. */
4677 extract_insn (insn);
4678 shifted_operand = recog_data.operand[shift_opnum];
4680 /* Iterate over all the operands in DEP. If we write an operand
4681 that overlaps with SHIFTED_OPERAND, then we have to increase the
4682 cost of this dependency. */
4683 extract_insn (dep);
4684 preprocess_constraints ();
4685 for (opno = 0; opno < recog_data.n_operands; opno++)
4687 /* We can ignore strict inputs. */
4688 if (recog_data.operand_type[opno] == OP_IN)
4689 continue;
4691 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4692 shifted_operand))
4693 return 2;
4698 /* XXX This is not strictly true for the FPA. */
4699 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4700 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4701 return 0;
4703 /* Call insns don't incur a stall, even if they follow a load. */
4704 if (REG_NOTE_KIND (link) == 0
4705 && GET_CODE (insn) == CALL_INSN)
4706 return 1;
4708 if ((i_pat = single_set (insn)) != NULL
4709 && GET_CODE (SET_SRC (i_pat)) == MEM
4710 && (d_pat = single_set (dep)) != NULL
4711 && GET_CODE (SET_DEST (d_pat)) == MEM)
4713 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4714 /* This is a load after a store; there is no conflict if the load reads
4715 from a cached area. Assume that loads from the stack and from the
4716 constant pool are cached, and that others will miss. This is a
4717 hack. */
4719 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4720 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4721 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4722 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4723 return 1;
4726 return cost;
4729 static int fp_consts_inited = 0;
4731 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4732 static const char * const strings_fp[8] =
4734 "0", "1", "2", "3",
4735 "4", "5", "0.5", "10"
4738 static REAL_VALUE_TYPE values_fp[8];
4740 static void
4741 init_fp_table (void)
4743 int i;
4744 REAL_VALUE_TYPE r;
4746 if (TARGET_VFP)
4747 fp_consts_inited = 1;
4748 else
4749 fp_consts_inited = 8;
4751 for (i = 0; i < fp_consts_inited; i++)
4753 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4754 values_fp[i] = r;
4758 /* Return TRUE if rtx X is a valid immediate FP constant. */
4760 arm_const_double_rtx (rtx x)
4762 REAL_VALUE_TYPE r;
4763 int i;
4765 if (!fp_consts_inited)
4766 init_fp_table ();
4768 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4769 if (REAL_VALUE_MINUS_ZERO (r))
4770 return 0;
4772 for (i = 0; i < fp_consts_inited; i++)
4773 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4774 return 1;
4776 return 0;
4779 /* Return TRUE if the negated value of rtx X is a valid immediate FPA constant. */
4781 neg_const_double_rtx_ok_for_fpa (rtx x)
4783 REAL_VALUE_TYPE r;
4784 int i;
4786 if (!fp_consts_inited)
4787 init_fp_table ();
4789 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4790 r = REAL_VALUE_NEGATE (r);
4791 if (REAL_VALUE_MINUS_ZERO (r))
4792 return 0;
4794 for (i = 0; i < 8; i++)
4795 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4796 return 1;
4798 return 0;
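/* In other words: the FPA coprocessor can encode the eight constants in
   strings_fp directly as instruction immediates, so e.g. 2.0 costs nothing
   extra, whereas for VFP only 0.0 is accepted because init_fp_table sets
   fp_consts_inited to 1 and only the first table entry is checked.  */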
4801 /* Predicates for `match_operand' and `match_operator'. */
4803 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4805 cirrus_memory_offset (rtx op)
4807 /* Reject eliminable registers. */
4808 if (! (reload_in_progress || reload_completed)
4809 && ( reg_mentioned_p (frame_pointer_rtx, op)
4810 || reg_mentioned_p (arg_pointer_rtx, op)
4811 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4812 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4813 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4814 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4815 return 0;
4817 if (GET_CODE (op) == MEM)
4819 rtx ind;
4821 ind = XEXP (op, 0);
4823 /* Match: (mem (reg)). */
4824 if (GET_CODE (ind) == REG)
4825 return 1;
4827 /* Match:
4828 (mem (plus (reg)
4829 (const))). */
4830 if (GET_CODE (ind) == PLUS
4831 && GET_CODE (XEXP (ind, 0)) == REG
4832 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4833 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4834 return 1;
4837 return 0;
4840 /* Return TRUE if OP is a valid VFP memory address pattern.
4841 WB is true if writeback address modes are allowed. */
4844 arm_coproc_mem_operand (rtx op, bool wb)
4846 rtx ind;
4848 /* Reject eliminable registers. */
4849 if (! (reload_in_progress || reload_completed)
4850 && ( reg_mentioned_p (frame_pointer_rtx, op)
4851 || reg_mentioned_p (arg_pointer_rtx, op)
4852 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4853 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4854 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4855 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4856 return FALSE;
4858 /* Constants are converted into offsets from labels. */
4859 if (GET_CODE (op) != MEM)
4860 return FALSE;
4862 ind = XEXP (op, 0);
4864 if (reload_completed
4865 && (GET_CODE (ind) == LABEL_REF
4866 || (GET_CODE (ind) == CONST
4867 && GET_CODE (XEXP (ind, 0)) == PLUS
4868 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4869 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4870 return TRUE;
4872 /* Match: (mem (reg)). */
4873 if (GET_CODE (ind) == REG)
4874 return arm_address_register_rtx_p (ind, 0);
4876 /* Autoincrement addressing modes. */
4877 if (wb
4878 && (GET_CODE (ind) == PRE_INC
4879 || GET_CODE (ind) == POST_INC
4880 || GET_CODE (ind) == PRE_DEC
4881 || GET_CODE (ind) == POST_DEC))
4882 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4884 if (wb
4885 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4886 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4887 && GET_CODE (XEXP (ind, 1)) == PLUS
4888 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4889 ind = XEXP (ind, 1);
4891 /* Match:
4892 (plus (reg)
4893 (const)). */
4894 if (GET_CODE (ind) == PLUS
4895 && GET_CODE (XEXP (ind, 0)) == REG
4896 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4897 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4898 && INTVAL (XEXP (ind, 1)) > -1024
4899 && INTVAL (XEXP (ind, 1)) < 1024
4900 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4901 return TRUE;
4903 return FALSE;
4906 /* Return true if X is a register that will be eliminated later on. */
4908 arm_eliminable_register (rtx x)
4910 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4911 || REGNO (x) == ARG_POINTER_REGNUM
4912 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4913 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4916 /* Return GENERAL_REGS if a scratch register is required to reload X to/from
4917 VFP registers. Otherwise return NO_REGS. */
4919 enum reg_class
4920 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4922 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4923 return NO_REGS;
4925 return GENERAL_REGS;
4928 /* Values which must be returned in the most-significant end of the return
4929 register. */
4931 static bool
4932 arm_return_in_msb (tree valtype)
4934 return (TARGET_AAPCS_BASED
4935 && BYTES_BIG_ENDIAN
4936 && (AGGREGATE_TYPE_P (valtype)
4937 || TREE_CODE (valtype) == COMPLEX_TYPE));
4940 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4941 Used by the Cirrus Maverick code, which has to work around
4942 a hardware bug triggered by such instructions. */
4943 static bool
4944 arm_memory_load_p (rtx insn)
4946 rtx body, lhs, rhs;
4948 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4949 return false;
4951 body = PATTERN (insn);
4953 if (GET_CODE (body) != SET)
4954 return false;
4956 lhs = XEXP (body, 0);
4957 rhs = XEXP (body, 1);
4959 lhs = REG_OR_SUBREG_RTX (lhs);
4961 /* If the destination is not a general purpose
4962 register we do not have to worry. */
4963 if (GET_CODE (lhs) != REG
4964 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4965 return false;
4967 /* As well as loads from memory we also have to react
4968 to loads of invalid constants which will be turned
4969 into loads from the minipool. */
4970 return (GET_CODE (rhs) == MEM
4971 || GET_CODE (rhs) == SYMBOL_REF
4972 || note_invalid_constants (insn, -1, false));
4975 /* Return TRUE if INSN is a Cirrus instruction. */
4976 static bool
4977 arm_cirrus_insn_p (rtx insn)
4979 enum attr_cirrus attr;
4981 /* get_attr aborts on USE and CLOBBER. */
4982 if (!insn
4983 || GET_CODE (insn) != INSN
4984 || GET_CODE (PATTERN (insn)) == USE
4985 || GET_CODE (PATTERN (insn)) == CLOBBER)
4986 return 0;
4988 attr = get_attr_cirrus (insn);
4990 return attr != CIRRUS_NOT;
4993 /* Cirrus reorg for invalid instruction combinations. */
4994 static void
4995 cirrus_reorg (rtx first)
4997 enum attr_cirrus attr;
4998 rtx body = PATTERN (first);
4999 rtx t;
5000 int nops;
5002 /* Any branch must be followed by 2 non Cirrus instructions. */
5003 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5005 nops = 0;
5006 t = next_nonnote_insn (first);
5008 if (arm_cirrus_insn_p (t))
5009 ++ nops;
5011 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5012 ++ nops;
5014 while (nops --)
5015 emit_insn_after (gen_nop (), first);
5017 return;
5020 /* (float (blah)) is in parallel with a clobber. */
5021 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5022 body = XVECEXP (body, 0, 0);
5024 if (GET_CODE (body) == SET)
5026 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5028 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5029 be followed by a non Cirrus insn. */
5030 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5032 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5033 emit_insn_after (gen_nop (), first);
5035 return;
5037 else if (arm_memory_load_p (first))
5039 unsigned int arm_regno;
5041 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5042 ldr/cfmv64hr combination where the Rd field is the same
5043 in both instructions must be split with a non Cirrus
5044 insn. Example:
5046 ldr r0, blah
5048 cfmvsr mvf0, r0. */
5050 /* Get Arm register number for ldr insn. */
5051 if (GET_CODE (lhs) == REG)
5052 arm_regno = REGNO (lhs);
5053 else if (GET_CODE (rhs) == REG)
5054 arm_regno = REGNO (rhs);
5055 else
5056 abort ();
5058 /* Next insn. */
5059 first = next_nonnote_insn (first);
5061 if (! arm_cirrus_insn_p (first))
5062 return;
5064 body = PATTERN (first);
5066 /* (float (blah)) is in parallel with a clobber. */
5067 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5068 body = XVECEXP (body, 0, 0);
5070 if (GET_CODE (body) == FLOAT)
5071 body = XEXP (body, 0);
5073 if (get_attr_cirrus (first) == CIRRUS_MOVE
5074 && GET_CODE (XEXP (body, 1)) == REG
5075 && arm_regno == REGNO (XEXP (body, 1)))
5076 emit_insn_after (gen_nop (), first);
5078 return;
5082 /* get_attr aborts on USE and CLOBBER. */
5083 if (!first
5084 || GET_CODE (first) != INSN
5085 || GET_CODE (PATTERN (first)) == USE
5086 || GET_CODE (PATTERN (first)) == CLOBBER)
5087 return;
5089 attr = get_attr_cirrus (first);
5091 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5092 must be followed by a non-coprocessor instruction. */
5093 if (attr == CIRRUS_COMPARE)
5095 nops = 0;
5097 t = next_nonnote_insn (first);
5099 if (arm_cirrus_insn_p (t))
5100 ++ nops;
5102 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5103 ++ nops;
5105 while (nops --)
5106 emit_insn_after (gen_nop (), first);
5108 return;
5112 /* Return TRUE if X references a SYMBOL_REF. */
5114 symbol_mentioned_p (rtx x)
5116 const char * fmt;
5117 int i;
5119 if (GET_CODE (x) == SYMBOL_REF)
5120 return 1;
5122 fmt = GET_RTX_FORMAT (GET_CODE (x));
5124 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5126 if (fmt[i] == 'E')
5128 int j;
5130 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5131 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5132 return 1;
5134 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5135 return 1;
5138 return 0;
5141 /* Return TRUE if X references a LABEL_REF. */
5143 label_mentioned_p (rtx x)
5145 const char * fmt;
5146 int i;
5148 if (GET_CODE (x) == LABEL_REF)
5149 return 1;
5151 fmt = GET_RTX_FORMAT (GET_CODE (x));
5152 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5154 if (fmt[i] == 'E')
5156 int j;
5158 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5159 if (label_mentioned_p (XVECEXP (x, i, j)))
5160 return 1;
5162 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5163 return 1;
5166 return 0;
5169 enum rtx_code
5170 minmax_code (rtx x)
5172 enum rtx_code code = GET_CODE (x);
5174 if (code == SMAX)
5175 return GE;
5176 else if (code == SMIN)
5177 return LE;
5178 else if (code == UMIN)
5179 return LEU;
5180 else if (code == UMAX)
5181 return GEU;
5183 abort ();
5186 /* Return 1 if memory locations are adjacent. */
5188 adjacent_mem_locations (rtx a, rtx b)
5190 /* We don't guarantee to preserve the order of these memory refs. */
5191 if (volatile_refs_p (a) || volatile_refs_p (b))
5192 return 0;
5194 if ((GET_CODE (XEXP (a, 0)) == REG
5195 || (GET_CODE (XEXP (a, 0)) == PLUS
5196 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5197 && (GET_CODE (XEXP (b, 0)) == REG
5198 || (GET_CODE (XEXP (b, 0)) == PLUS
5199 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5201 HOST_WIDE_INT val0 = 0, val1 = 0;
5202 rtx reg0, reg1;
5203 int val_diff;
5205 if (GET_CODE (XEXP (a, 0)) == PLUS)
5207 reg0 = XEXP (XEXP (a, 0), 0);
5208 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5210 else
5211 reg0 = XEXP (a, 0);
5213 if (GET_CODE (XEXP (b, 0)) == PLUS)
5215 reg1 = XEXP (XEXP (b, 0), 0);
5216 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5218 else
5219 reg1 = XEXP (b, 0);
5221 /* Don't accept any offset that will require multiple
5222 instructions to handle, since this would cause the
5223 arith_adjacentmem pattern to output an overlong sequence. */
5224 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
5225 return 0;
5227 /* Don't allow an eliminable register: register elimination can make
5228 the offset too large. */
5229 if (arm_eliminable_register (reg0))
5230 return 0;
5232 val_diff = val1 - val0;
5234 if (arm_ld_sched)
5236 /* If the target has load delay slots, then there's no benefit
5237 to using an ldm instruction unless the offset is zero and
5238 we are optimizing for size. */
5239 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5240 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5241 && (val_diff == 4 || val_diff == -4));
5244 return ((REGNO (reg0) == REGNO (reg1))
5245 && (val_diff == 4 || val_diff == -4));
5248 return 0;
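/* Analyse a candidate load-multiple.  OPERANDS holds NOPS destination
   registers followed by NOPS memory references, all of which must be
   immediate offsets from the same base register.  On success the register
   numbers, sorted by ascending memory offset, are stored in REGS, the base
   register number in *BASE and the lowest offset in *LOAD_OFFSET.  The
   return value selects the ldm variant to use (see the numbered returns
   below), or 0 if a load-multiple cannot or should not be used.  */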
5252 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5253 HOST_WIDE_INT *load_offset)
5255 int unsorted_regs[4];
5256 HOST_WIDE_INT unsorted_offsets[4];
5257 int order[4];
5258 int base_reg = -1;
5259 int i;
5261 /* Can only handle 2, 3, or 4 insns at present,
5262 though could be easily extended if required. */
5263 if (nops < 2 || nops > 4)
5264 abort ();
5266 /* Loop over the operands and check that the memory references are
5267 suitable (i.e. immediate offsets from the same base register). At
5268 the same time, extract the target register, and the memory
5269 offsets. */
5270 for (i = 0; i < nops; i++)
5272 rtx reg;
5273 rtx offset;
5275 /* Convert a subreg of a mem into the mem itself. */
5276 if (GET_CODE (operands[nops + i]) == SUBREG)
5277 operands[nops + i] = alter_subreg (operands + (nops + i));
5279 if (GET_CODE (operands[nops + i]) != MEM)
5280 abort ();
5282 /* Don't reorder volatile memory references; it doesn't seem worth
5283 looking for the case where the order is ok anyway. */
5284 if (MEM_VOLATILE_P (operands[nops + i]))
5285 return 0;
5287 offset = const0_rtx;
5289 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5290 || (GET_CODE (reg) == SUBREG
5291 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5292 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5293 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5294 == REG)
5295 || (GET_CODE (reg) == SUBREG
5296 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5297 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5298 == CONST_INT)))
5300 if (i == 0)
5302 base_reg = REGNO (reg);
5303 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5304 ? REGNO (operands[i])
5305 : REGNO (SUBREG_REG (operands[i])));
5306 order[0] = 0;
5308 else
5310 if (base_reg != (int) REGNO (reg))
5311 /* Not addressed from the same base register. */
5312 return 0;
5314 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5315 ? REGNO (operands[i])
5316 : REGNO (SUBREG_REG (operands[i])));
5317 if (unsorted_regs[i] < unsorted_regs[order[0]])
5318 order[0] = i;
5321 /* If it isn't an integer register, or if it overwrites the
5322 base register but isn't the last insn in the list, then
5323 we can't do this. */
5324 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5325 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5326 return 0;
5328 unsorted_offsets[i] = INTVAL (offset);
5330 else
5331 /* Not a suitable memory address. */
5332 return 0;
5335 /* All the useful information has now been extracted from the
5336 operands into unsorted_regs and unsorted_offsets; additionally,
5337 order[0] has been set to the lowest numbered register in the
5338 list. Sort the registers into order, and check that the memory
5339 offsets are ascending and adjacent. */
5341 for (i = 1; i < nops; i++)
5343 int j;
5345 order[i] = order[i - 1];
5346 for (j = 0; j < nops; j++)
5347 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5348 && (order[i] == order[i - 1]
5349 || unsorted_regs[j] < unsorted_regs[order[i]]))
5350 order[i] = j;
5352 /* Have we found a suitable register? If not, one must be used more
5353 than once. */
5354 if (order[i] == order[i - 1])
5355 return 0;
5357 /* Is the memory address adjacent and ascending? */
5358 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5359 return 0;
5362 if (base)
5364 *base = base_reg;
5366 for (i = 0; i < nops; i++)
5367 regs[i] = unsorted_regs[order[i]];
5369 *load_offset = unsorted_offsets[order[0]];
5372 if (unsorted_offsets[order[0]] == 0)
5373 return 1; /* ldmia */
5375 if (unsorted_offsets[order[0]] == 4)
5376 return 2; /* ldmib */
5378 if (unsorted_offsets[order[nops - 1]] == 0)
5379 return 3; /* ldmda */
5381 if (unsorted_offsets[order[nops - 1]] == -4)
5382 return 4; /* ldmdb */
5384 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5385 if the offset isn't small enough. The reason 2 ldrs are faster
5386 is because these ARMs are able to do more than one cache access
5387 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5388 whilst the ARM8 has a double bandwidth cache. This means that
5389 these cores can do both an instruction fetch and a data fetch in
5390 a single cycle, so the trick of calculating the address into a
5391 scratch register (one of the result regs) and then doing a load
5392 multiple actually becomes slower (and no smaller in code size).
5393 That is the transformation
5395 ldr rd1, [rbase + offset]
5396 ldr rd2, [rbase + offset + 4]
to
5400 add rd1, rbase, offset
5401 ldmia rd1, {rd1, rd2}
5403 produces worse code -- '3 cycles + any stalls on rd2' instead of
5404 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5405 access per cycle, the first sequence could never complete in less
5406 than 6 cycles, whereas the ldm sequence would only take 5 and
5407 would make better use of sequential accesses if not hitting the
5408 cache.
5410 We cheat here and test 'arm_ld_sched' which we currently know to
5411 only be true for the ARM8, ARM9 and StrongARM. If this ever
5412 changes, then the test below needs to be reworked. */
5413 if (nops == 2 && arm_ld_sched)
5414 return 0;
5416 /* Can't do it without setting up the offset; only do this if it takes
5417 no more than one insn. */
5418 return (const_ok_for_arm (unsorted_offsets[order[0]])
5419 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
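/* A worked example of the interface above (hypothetical operands): loading
   r0, r1, r2 from [r4], [r4, #4], [r4, #8] returns 1 and emit_ldm_seq
   prints "ldmia r4, {r0, r1, r2}"; the same loads starting at [r4, #4]
   return 2 (ldmib); if the first offset is, say, #8 the result is 5 and
   emit_ldm_seq first emits "add r0, r4, #8" and then uses r0 as the ldmia
   base.  */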
5422 const char *
5423 emit_ldm_seq (rtx *operands, int nops)
5425 int regs[4];
5426 int base_reg;
5427 HOST_WIDE_INT offset;
5428 char buf[100];
5429 int i;
5431 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5433 case 1:
5434 strcpy (buf, "ldm%?ia\t");
5435 break;
5437 case 2:
5438 strcpy (buf, "ldm%?ib\t");
5439 break;
5441 case 3:
5442 strcpy (buf, "ldm%?da\t");
5443 break;
5445 case 4:
5446 strcpy (buf, "ldm%?db\t");
5447 break;
5449 case 5:
5450 if (offset >= 0)
5451 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5452 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5453 (long) offset);
5454 else
5455 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5456 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5457 (long) -offset);
5458 output_asm_insn (buf, operands);
5459 base_reg = regs[0];
5460 strcpy (buf, "ldm%?ia\t");
5461 break;
5463 default:
5464 abort ();
5467 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5468 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5470 for (i = 1; i < nops; i++)
5471 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5472 reg_names[regs[i]]);
5474 strcat (buf, "}\t%@ phole ldm");
5476 output_asm_insn (buf, operands);
5477 return "";
5481 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5482 HOST_WIDE_INT * load_offset)
5484 int unsorted_regs[4];
5485 HOST_WIDE_INT unsorted_offsets[4];
5486 int order[4];
5487 int base_reg = -1;
5488 int i;
5490 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5491 extended if required. */
5492 if (nops < 2 || nops > 4)
5493 abort ();
5495 /* Loop over the operands and check that the memory references are
5496 suitable (i.e. immediate offsets from the same base register). At
5497 the same time, extract the target register, and the memory
5498 offsets. */
5499 for (i = 0; i < nops; i++)
5501 rtx reg;
5502 rtx offset;
5504 /* Convert a subreg of a mem into the mem itself. */
5505 if (GET_CODE (operands[nops + i]) == SUBREG)
5506 operands[nops + i] = alter_subreg (operands + (nops + i));
5508 if (GET_CODE (operands[nops + i]) != MEM)
5509 abort ();
5511 /* Don't reorder volatile memory references; it doesn't seem worth
5512 looking for the case where the order is ok anyway. */
5513 if (MEM_VOLATILE_P (operands[nops + i]))
5514 return 0;
5516 offset = const0_rtx;
5518 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5519 || (GET_CODE (reg) == SUBREG
5520 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5521 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5522 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5523 == REG)
5524 || (GET_CODE (reg) == SUBREG
5525 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5526 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5527 == CONST_INT)))
5529 if (i == 0)
5531 base_reg = REGNO (reg);
5532 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5533 ? REGNO (operands[i])
5534 : REGNO (SUBREG_REG (operands[i])));
5535 order[0] = 0;
5537 else
5539 if (base_reg != (int) REGNO (reg))
5540 /* Not addressed from the same base register. */
5541 return 0;
5543 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5544 ? REGNO (operands[i])
5545 : REGNO (SUBREG_REG (operands[i])));
5546 if (unsorted_regs[i] < unsorted_regs[order[0]])
5547 order[0] = i;
5550 /* If it isn't an integer register, then we can't do this. */
5551 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5552 return 0;
5554 unsorted_offsets[i] = INTVAL (offset);
5556 else
5557 /* Not a suitable memory address. */
5558 return 0;
5561 /* All the useful information has now been extracted from the
5562 operands into unsorted_regs and unsorted_offsets; additionally,
5563 order[0] has been set to the lowest numbered register in the
5564 list. Sort the registers into order, and check that the memory
5565 offsets are ascending and adjacent. */
5567 for (i = 1; i < nops; i++)
5569 int j;
5571 order[i] = order[i - 1];
5572 for (j = 0; j < nops; j++)
5573 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5574 && (order[i] == order[i - 1]
5575 || unsorted_regs[j] < unsorted_regs[order[i]]))
5576 order[i] = j;
5578 /* Have we found a suitable register? If not, one must be used more
5579 than once. */
5580 if (order[i] == order[i - 1])
5581 return 0;
5583 /* Is the memory address adjacent and ascending? */
5584 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5585 return 0;
5588 if (base)
5590 *base = base_reg;
5592 for (i = 0; i < nops; i++)
5593 regs[i] = unsorted_regs[order[i]];
5595 *load_offset = unsorted_offsets[order[0]];
5598 if (unsorted_offsets[order[0]] == 0)
5599 return 1; /* stmia */
5601 if (unsorted_offsets[order[0]] == 4)
5602 return 2; /* stmib */
5604 if (unsorted_offsets[order[nops - 1]] == 0)
5605 return 3; /* stmda */
5607 if (unsorted_offsets[order[nops - 1]] == -4)
5608 return 4; /* stmdb */
5610 return 0;
5613 const char *
5614 emit_stm_seq (rtx *operands, int nops)
5616 int regs[4];
5617 int base_reg;
5618 HOST_WIDE_INT offset;
5619 char buf[100];
5620 int i;
5622 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5624 case 1:
5625 strcpy (buf, "stm%?ia\t");
5626 break;
5628 case 2:
5629 strcpy (buf, "stm%?ib\t");
5630 break;
5632 case 3:
5633 strcpy (buf, "stm%?da\t");
5634 break;
5636 case 4:
5637 strcpy (buf, "stm%?db\t");
5638 break;
5640 default:
5641 abort ();
5644 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5645 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5647 for (i = 1; i < nops; i++)
5648 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5649 reg_names[regs[i]]);
5651 strcat (buf, "}\t%@ phole stm");
5653 output_asm_insn (buf, operands);
5654 return "";
5658 /* Routines for use in generating RTL. */
5661 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5662 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5664 HOST_WIDE_INT offset = *offsetp;
5665 int i = 0, j;
5666 rtx result;
5667 int sign = up ? 1 : -1;
5668 rtx mem, addr;
5670 /* XScale has load-store double instructions, but they have stricter
5671 alignment requirements than load-store multiple, so we cannot
5672 use them.
5674 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5675 the pipeline until completion.
5677 NREGS CYCLES
1 3
2 4
3 5
4 6
5683 An ldr instruction takes 1-3 cycles, but does not block the
5684 pipeline.
5686 NREGS CYCLES
5687 1 1-3
5688 2 2-6
5689 3 3-9
5690 4 4-12
5692 Best case ldr will always win. However, the more ldr instructions
5693 we issue, the less likely we are to be able to schedule them well.
5694 Using ldr instructions also increases code size.
5696 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5697 for counts of 3 or 4 regs. */
5698 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5700 rtx seq;
5702 start_sequence ();
5704 for (i = 0; i < count; i++)
5706 addr = plus_constant (from, i * 4 * sign);
5707 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5708 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5709 offset += 4 * sign;
5712 if (write_back)
5714 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5715 *offsetp = offset;
5718 seq = get_insns ();
5719 end_sequence ();
5721 return seq;
5724 result = gen_rtx_PARALLEL (VOIDmode,
5725 rtvec_alloc (count + (write_back ? 1 : 0)));
5726 if (write_back)
5728 XVECEXP (result, 0, 0)
5729 = gen_rtx_SET (GET_MODE (from), from,
5730 plus_constant (from, count * 4 * sign));
5731 i = 1;
5732 count++;
5735 for (j = 0; i < count; i++, j++)
5737 addr = plus_constant (from, j * 4 * sign);
5738 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5739 XVECEXP (result, 0, i)
5740 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5741 offset += 4 * sign;
5744 if (write_back)
5745 *offsetp = offset;
5747 return result;
5751 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5752 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5754 HOST_WIDE_INT offset = *offsetp;
5755 int i = 0, j;
5756 rtx result;
5757 int sign = up ? 1 : -1;
5758 rtx mem, addr;
5760 /* See arm_gen_load_multiple for discussion of
5761 the pros/cons of ldm/stm usage for XScale. */
5762 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5764 rtx seq;
5766 start_sequence ();
5768 for (i = 0; i < count; i++)
5770 addr = plus_constant (to, i * 4 * sign);
5771 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5772 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5773 offset += 4 * sign;
5776 if (write_back)
5778 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5779 *offsetp = offset;
5782 seq = get_insns ();
5783 end_sequence ();
5785 return seq;
5788 result = gen_rtx_PARALLEL (VOIDmode,
5789 rtvec_alloc (count + (write_back ? 1 : 0)));
5790 if (write_back)
5792 XVECEXP (result, 0, 0)
5793 = gen_rtx_SET (GET_MODE (to), to,
5794 plus_constant (to, count * 4 * sign));
5795 i = 1;
5796 count++;
5799 for (j = 0; i < count; i++, j++)
5801 addr = plus_constant (to, j * 4 * sign);
5802 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5803 XVECEXP (result, 0, i)
5804 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5805 offset += 4 * sign;
5808 if (write_back)
5809 *offsetp = offset;
5811 return result;
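/* Expand a constant-length block copy (the movmemqi pattern).  OPERANDS[0]
   and OPERANDS[1] are the destination and source MEMs, OPERANDS[2] is the
   byte count and OPERANDS[3] the alignment.  Only constant counts of at
   most 64 bytes with word-aligned operands are handled; the function
   returns 1 on success and 0 to make the caller fall back to a generic
   copy.  */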
5815 arm_gen_movmemqi (rtx *operands)
5817 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5818 HOST_WIDE_INT srcoffset, dstoffset;
5819 int i;
5820 rtx src, dst, srcbase, dstbase;
5821 rtx part_bytes_reg = NULL;
5822 rtx mem;
5824 if (GET_CODE (operands[2]) != CONST_INT
5825 || GET_CODE (operands[3]) != CONST_INT
5826 || INTVAL (operands[2]) > 64
5827 || INTVAL (operands[3]) & 3)
5828 return 0;
5830 dstbase = operands[0];
5831 srcbase = operands[1];
5833 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5834 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5836 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5837 out_words_to_go = INTVAL (operands[2]) / 4;
5838 last_bytes = INTVAL (operands[2]) & 3;
5839 dstoffset = srcoffset = 0;
5841 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5842 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5844 for (i = 0; in_words_to_go >= 2; i+=4)
5846 if (in_words_to_go > 4)
5847 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5848 srcbase, &srcoffset));
5849 else
5850 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5851 FALSE, srcbase, &srcoffset));
5853 if (out_words_to_go)
5855 if (out_words_to_go > 4)
5856 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5857 dstbase, &dstoffset));
5858 else if (out_words_to_go != 1)
5859 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5860 dst, TRUE,
5861 (last_bytes == 0
5862 ? FALSE : TRUE),
5863 dstbase, &dstoffset));
5864 else
5866 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5867 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5868 if (last_bytes != 0)
5870 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5871 dstoffset += 4;
5876 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5877 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5880 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5881 if (out_words_to_go)
5883 rtx sreg;
5885 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5886 sreg = copy_to_reg (mem);
5888 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5889 emit_move_insn (mem, sreg);
5890 in_words_to_go--;
5892 if (in_words_to_go) /* Sanity check */
5893 abort ();
5896 if (in_words_to_go)
5898 if (in_words_to_go < 0)
5899 abort ();
5901 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5902 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5905 if (last_bytes && part_bytes_reg == NULL)
5906 abort ();
5908 if (BYTES_BIG_ENDIAN && last_bytes)
5910 rtx tmp = gen_reg_rtx (SImode);
5912 /* The bytes we want are in the top end of the word. */
5913 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5914 GEN_INT (8 * (4 - last_bytes))));
5915 part_bytes_reg = tmp;
5917 while (last_bytes)
5919 mem = adjust_automodify_address (dstbase, QImode,
5920 plus_constant (dst, last_bytes - 1),
5921 dstoffset + last_bytes - 1);
5922 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5924 if (--last_bytes)
5926 tmp = gen_reg_rtx (SImode);
5927 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5928 part_bytes_reg = tmp;
5933 else
5935 if (last_bytes > 1)
5937 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5938 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5939 last_bytes -= 2;
5940 if (last_bytes)
5942 rtx tmp = gen_reg_rtx (SImode);
5943 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5944 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5945 part_bytes_reg = tmp;
5946 dstoffset += 2;
5950 if (last_bytes)
5952 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5953 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5957 return 1;
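/* Tracing the code above for a 14-byte, word-aligned copy on a
   little-endian target (an illustrative case): four words are loaded with
   a single ldmia (r0-r3), three of them are stored back with a write-back
   stmia (r0-r2), and the trailing two bytes are stored with an strh of the
   low half of r3, the last word loaded.  */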
5960 /* Generate a memory reference for a half word, such that it will be loaded
5961 into the top 16 bits of the word. We can assume that the address is
5962 known to be alignable and of the form reg, or plus (reg, const). */
5965 arm_gen_rotated_half_load (rtx memref)
5967 HOST_WIDE_INT offset = 0;
5968 rtx base = XEXP (memref, 0);
5970 if (GET_CODE (base) == PLUS)
5972 offset = INTVAL (XEXP (base, 1));
5973 base = XEXP (base, 0);
5976 /* If we aren't allowed to generate unaligned addresses, then fail. */
5977 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5978 return NULL;
5980 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5982 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5983 return base;
5985 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5988 /* Select a dominance comparison mode if possible for a test of the general
5989 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5990 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5991 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5992 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5993 In all cases OP will be either EQ or NE, but we don't need to know which
5994 here. If we are unable to support a dominance comparison we return
5995 CC mode. This will then fail to match for the RTL expressions that
5996 generate this call. */
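/* For example, under DOM_CC_X_OR_Y the pair (EQ, LE) collapses to a
   single LE test, since EQ being true implies LE is true, and
   CC_DLEmode is returned. */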
5997 enum machine_mode
5998 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6000 enum rtx_code cond1, cond2;
6001 int swapped = 0;
6003 /* Currently we will probably get the wrong result if the individual
6004 comparisons are not simple. This also ensures that it is safe to
6005 reverse a comparison if necessary. */
6006 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6007 != CCmode)
6008 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6009 != CCmode))
6010 return CCmode;
6012 /* The if_then_else variant of this tests the second condition if the
6013 first passes, but is true if the first fails. Reverse the first
6014 condition to get a true "inclusive-or" expression. */
6015 if (cond_or == DOM_CC_NX_OR_Y)
6016 cond1 = reverse_condition (cond1);
6018 /* If the comparisons are not equal, and one doesn't dominate the other,
6019 then we can't do this. */
6020 if (cond1 != cond2
6021 && !comparison_dominates_p (cond1, cond2)
6022 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6023 return CCmode;
6025 if (swapped)
6027 enum rtx_code temp = cond1;
6028 cond1 = cond2;
6029 cond2 = temp;
6032 switch (cond1)
6034 case EQ:
6035 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
6036 return CC_DEQmode;
6038 switch (cond2)
6040 case LE: return CC_DLEmode;
6041 case LEU: return CC_DLEUmode;
6042 case GE: return CC_DGEmode;
6043 case GEU: return CC_DGEUmode;
6044 default: break;
6047 break;
6049 case LT:
6050 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6051 return CC_DLTmode;
6052 if (cond2 == LE)
6053 return CC_DLEmode;
6054 if (cond2 == NE)
6055 return CC_DNEmode;
6056 break;
6058 case GT:
6059 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6060 return CC_DGTmode;
6061 if (cond2 == GE)
6062 return CC_DGEmode;
6063 if (cond2 == NE)
6064 return CC_DNEmode;
6065 break;
6067 case LTU:
6068 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6069 return CC_DLTUmode;
6070 if (cond2 == LEU)
6071 return CC_DLEUmode;
6072 if (cond2 == NE)
6073 return CC_DNEmode;
6074 break;
6076 case GTU:
6077 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6078 return CC_DGTUmode;
6079 if (cond2 == GEU)
6080 return CC_DGEUmode;
6081 if (cond2 == NE)
6082 return CC_DNEmode;
6083 break;
6085 /* The remaining cases only occur when both comparisons are the
6086 same. */
6087 case NE:
6088 return CC_DNEmode;
6090 case LE:
6091 return CC_DLEmode;
6093 case GE:
6094 return CC_DGEmode;
6096 case LEU:
6097 return CC_DLEUmode;
6099 case GEU:
6100 return CC_DGEUmode;
6102 default:
6103 break;
6106 abort ();
6109 enum machine_mode
6110 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6112 /* All floating point compares return CCFP if it is an equality
6113 comparison, and CCFPE otherwise. */
6114 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6116 switch (op)
6118 case EQ:
6119 case NE:
6120 case UNORDERED:
6121 case ORDERED:
6122 case UNLT:
6123 case UNLE:
6124 case UNGT:
6125 case UNGE:
6126 case UNEQ:
6127 case LTGT:
6128 return CCFPmode;
6130 case LT:
6131 case LE:
6132 case GT:
6133 case GE:
6134 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6135 return CCFPmode;
6136 return CCFPEmode;
6138 default:
6139 abort ();
6143 /* A compare with a shifted operand. Because of canonicalization, the
6144 comparison will have to be swapped when we emit the assembler. */
6145 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6146 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6147 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6148 || GET_CODE (x) == ROTATERT))
6149 return CC_SWPmode;
6151 /* This operation is performed swapped, but since we only rely on the Z
6152 flag we don't need an additional mode. */
6153 if (GET_MODE (y) == SImode && REG_P (y)
6154 && GET_CODE (x) == NEG
6155 && (op == EQ || op == NE))
6156 return CC_Zmode;
6158 /* This is a special case that is used by combine to allow a
6159 comparison of a shifted byte load to be split into a zero-extend
6160 followed by a comparison of the shifted integer (only valid for
6161 equalities and unsigned inequalities). */
6162 if (GET_MODE (x) == SImode
6163 && GET_CODE (x) == ASHIFT
6164 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6165 && GET_CODE (XEXP (x, 0)) == SUBREG
6166 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6167 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6168 && (op == EQ || op == NE
6169 || op == GEU || op == GTU || op == LTU || op == LEU)
6170 && GET_CODE (y) == CONST_INT)
6171 return CC_Zmode;
6173 /* A construct for a conditional compare, if the false arm contains
6174 0, then both conditions must be true, otherwise either condition
6175 must be true. Not all conditions are possible, so CCmode is
6176 returned if it can't be done. */
6177 if (GET_CODE (x) == IF_THEN_ELSE
6178 && (XEXP (x, 2) == const0_rtx
6179 || XEXP (x, 2) == const1_rtx)
6180 && COMPARISON_P (XEXP (x, 0))
6181 && COMPARISON_P (XEXP (x, 1)))
6182 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6183 INTVAL (XEXP (x, 2)));
6185 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6186 if (GET_CODE (x) == AND
6187 && COMPARISON_P (XEXP (x, 0))
6188 && COMPARISON_P (XEXP (x, 1)))
6189 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6190 DOM_CC_X_AND_Y);
6192 if (GET_CODE (x) == IOR
6193 && COMPARISON_P (XEXP (x, 0))
6194 && COMPARISON_P (XEXP (x, 1)))
6195 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6196 DOM_CC_X_OR_Y);
6198 /* An operation (on Thumb) where we want to test for a single bit.
6199 This is done by shifting that bit up into the top bit of a
6200 scratch register; we can then branch on the sign bit. */
6201 if (TARGET_THUMB
6202 && GET_MODE (x) == SImode
6203 && (op == EQ || op == NE)
6204 && (GET_CODE (x) == ZERO_EXTRACT))
6205 return CC_Nmode;
6207 /* For an operation that sets the condition codes as a side-effect, the
6208 V flag is not set correctly, so we can only use comparisons where
6209 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6210 instead.) */
6211 if (GET_MODE (x) == SImode
6212 && y == const0_rtx
6213 && (op == EQ || op == NE || op == LT || op == GE)
6214 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6215 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6216 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6217 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6218 || GET_CODE (x) == LSHIFTRT
6219 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6220 || GET_CODE (x) == ROTATERT
6221 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6222 return CC_NOOVmode;
6224 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6225 return CC_Zmode;
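/* An unsigned overflow test of the form (a + b) < a, or its inverse
   (a + b) >= a, needs only the carry flag from the addition. */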
6227 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6228 && GET_CODE (x) == PLUS
6229 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6230 return CC_Cmode;
6232 return CCmode;
6235 /* X and Y are two things to compare using CODE. Emit the compare insn and
6236 return the rtx for the CC register in the proper mode. */
6239 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6241 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6242 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6244 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6245 gen_rtx_COMPARE (mode, x, y)));
6247 return cc_reg;
6250 /* Generate a sequence of insns that will generate the correct return
6251 address mask depending on the physical architecture that the program
6252 is running on. */
6254 arm_gen_return_addr_mask (void)
6256 rtx reg = gen_reg_rtx (Pmode);
6258 emit_insn (gen_return_addr_mask (reg));
6259 return reg;
6262 void
6263 arm_reload_in_hi (rtx *operands)
6265 rtx ref = operands[1];
6266 rtx base, scratch;
6267 HOST_WIDE_INT offset = 0;
6269 if (GET_CODE (ref) == SUBREG)
6271 offset = SUBREG_BYTE (ref);
6272 ref = SUBREG_REG (ref);
6275 if (GET_CODE (ref) == REG)
6277 /* We have a pseudo which has been spilt onto the stack; there
6278 are two cases here: the first where there is a simple
6279 stack-slot replacement and a second where the stack-slot is
6280 out of range, or is used as a subreg. */
6281 if (reg_equiv_mem[REGNO (ref)])
6283 ref = reg_equiv_mem[REGNO (ref)];
6284 base = find_replacement (&XEXP (ref, 0));
6286 else
6287 /* The slot is out of range, or was dressed up in a SUBREG. */
6288 base = reg_equiv_address[REGNO (ref)];
6290 else
6291 base = find_replacement (&XEXP (ref, 0));
6293 /* Handle the case where the address is too complex to be offset by 1. */
6294 if (GET_CODE (base) == MINUS
6295 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6297 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6299 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6300 base = base_plus;
6302 else if (GET_CODE (base) == PLUS)
6304 /* The addend must be CONST_INT, or we would have dealt with it above. */
6305 HOST_WIDE_INT hi, lo;
6307 offset += INTVAL (XEXP (base, 1));
6308 base = XEXP (base, 0);
6310 /* Rework the address into a legal sequence of insns. */
6311 /* Valid range for lo is -4095 -> 4095 */
6312 lo = (offset >= 0
6313 ? (offset & 0xfff)
6314 : -((-offset) & 0xfff));
6316 /* Corner case, if lo is the max offset then we would be out of range
6317 once we have added the additional 1 below, so bump the msb into the
6318 pre-loading insn(s). */
6319 if (lo == 4095)
6320 lo &= 0x7ff;
6322 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6323 ^ (HOST_WIDE_INT) 0x80000000)
6324 - (HOST_WIDE_INT) 0x80000000);
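/* For example, offset == 0x2345 splits into lo == 0x345 and
   hi == 0x2000; HI is added into BASE below so that the two byte
   loads at LO and LO + 1 both stay within the +/-4095 immediate
   range. */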
6326 if (hi + lo != offset)
6327 abort ();
6329 if (hi != 0)
6331 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6333 /* Get the base address; addsi3 knows how to handle constants
6334 that require more than one insn. */
6335 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6336 base = base_plus;
6337 offset = lo;
6341 /* Operands[2] may overlap operands[0] (though it won't overlap
6342 operands[1]), that's why we asked for a DImode reg -- so we can
6343 use the bit that does not overlap. */
6344 if (REGNO (operands[2]) == REGNO (operands[0]))
6345 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6346 else
6347 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6349 emit_insn (gen_zero_extendqisi2 (scratch,
6350 gen_rtx_MEM (QImode,
6351 plus_constant (base,
6352 offset))));
6353 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6354 gen_rtx_MEM (QImode,
6355 plus_constant (base,
6356 offset + 1))));
6357 if (!BYTES_BIG_ENDIAN)
6358 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6359 gen_rtx_IOR (SImode,
6360 gen_rtx_ASHIFT
6361 (SImode,
6362 gen_rtx_SUBREG (SImode, operands[0], 0),
6363 GEN_INT (8)),
6364 scratch)));
6365 else
6366 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6367 gen_rtx_IOR (SImode,
6368 gen_rtx_ASHIFT (SImode, scratch,
6369 GEN_INT (8)),
6370 gen_rtx_SUBREG (SImode, operands[0],
6371 0))));
6374 /* Handle storing a half-word to memory during reload by synthesizing as two
6375 byte stores. Take care not to clobber the input values until after we
6376 have moved them somewhere safe. This code assumes that if the DImode
6377 scratch in operands[2] overlaps either the input value or output address
6378 in some way, then that value must die in this insn (we absolutely need
6379 two scratch registers for some corner cases). */
6380 void
6381 arm_reload_out_hi (rtx *operands)
6383 rtx ref = operands[0];
6384 rtx outval = operands[1];
6385 rtx base, scratch;
6386 HOST_WIDE_INT offset = 0;
6388 if (GET_CODE (ref) == SUBREG)
6390 offset = SUBREG_BYTE (ref);
6391 ref = SUBREG_REG (ref);
6394 if (GET_CODE (ref) == REG)
6396 /* We have a pseudo which has been spilt onto the stack; there
6397 are two cases here: the first where there is a simple
6398 stack-slot replacement and a second where the stack-slot is
6399 out of range, or is used as a subreg. */
6400 if (reg_equiv_mem[REGNO (ref)])
6402 ref = reg_equiv_mem[REGNO (ref)];
6403 base = find_replacement (&XEXP (ref, 0));
6405 else
6406 /* The slot is out of range, or was dressed up in a SUBREG. */
6407 base = reg_equiv_address[REGNO (ref)];
6409 else
6410 base = find_replacement (&XEXP (ref, 0));
6412 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6414 /* Handle the case where the address is too complex to be offset by 1. */
6415 if (GET_CODE (base) == MINUS
6416 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6418 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6420 /* Be careful not to destroy OUTVAL. */
6421 if (reg_overlap_mentioned_p (base_plus, outval))
6423 /* Updating base_plus might destroy outval, see if we can
6424 swap the scratch and base_plus. */
6425 if (!reg_overlap_mentioned_p (scratch, outval))
6427 rtx tmp = scratch;
6428 scratch = base_plus;
6429 base_plus = tmp;
6431 else
6433 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6435 /* Be conservative and copy OUTVAL into the scratch now,
6436 this should only be necessary if outval is a subreg
6437 of something larger than a word. */
6438 /* XXX Might this clobber base? I can't see how it can,
6439 since scratch is known to overlap with OUTVAL, and
6440 must be wider than a word. */
6441 emit_insn (gen_movhi (scratch_hi, outval));
6442 outval = scratch_hi;
6446 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6447 base = base_plus;
6449 else if (GET_CODE (base) == PLUS)
6451 /* The addend must be CONST_INT, or we would have dealt with it above. */
6452 HOST_WIDE_INT hi, lo;
6454 offset += INTVAL (XEXP (base, 1));
6455 base = XEXP (base, 0);
6457 /* Rework the address into a legal sequence of insns. */
6458 /* Valid range for lo is -4095 -> 4095 */
6459 lo = (offset >= 0
6460 ? (offset & 0xfff)
6461 : -((-offset) & 0xfff));
6463 /* Corner case, if lo is the max offset then we would be out of range
6464 once we have added the additional 1 below, so bump the msb into the
6465 pre-loading insn(s). */
6466 if (lo == 4095)
6467 lo &= 0x7ff;
6469 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6470 ^ (HOST_WIDE_INT) 0x80000000)
6471 - (HOST_WIDE_INT) 0x80000000);
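/* Split exactly as in arm_reload_in_hi: HI is folded into BASE so
   that the two byte stores at LO and LO + 1 remain within the
   immediate offset range. */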
6473 if (hi + lo != offset)
6474 abort ();
6476 if (hi != 0)
6478 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6480 /* Be careful not to destroy OUTVAL. */
6481 if (reg_overlap_mentioned_p (base_plus, outval))
6483 /* Updating base_plus might destroy outval, see if we
6484 can swap the scratch and base_plus. */
6485 if (!reg_overlap_mentioned_p (scratch, outval))
6487 rtx tmp = scratch;
6488 scratch = base_plus;
6489 base_plus = tmp;
6491 else
6493 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6495 /* Be conservative and copy outval into scratch now,
6496 this should only be necessary if outval is a
6497 subreg of something larger than a word. */
6498 /* XXX Might this clobber base? I can't see how it
6499 can, since scratch is known to overlap with
6500 outval. */
6501 emit_insn (gen_movhi (scratch_hi, outval));
6502 outval = scratch_hi;
6506 /* Get the base address; addsi3 knows how to handle constants
6507 that require more than one insn. */
6508 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6509 base = base_plus;
6510 offset = lo;
6514 if (BYTES_BIG_ENDIAN)
6516 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6517 plus_constant (base, offset + 1)),
6518 gen_lowpart (QImode, outval)));
6519 emit_insn (gen_lshrsi3 (scratch,
6520 gen_rtx_SUBREG (SImode, outval, 0),
6521 GEN_INT (8)));
6522 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6523 gen_lowpart (QImode, scratch)));
6525 else
6527 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6528 gen_lowpart (QImode, outval)));
6529 emit_insn (gen_lshrsi3 (scratch,
6530 gen_rtx_SUBREG (SImode, outval, 0),
6531 GEN_INT (8)));
6532 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6533 plus_constant (base, offset + 1)),
6534 gen_lowpart (QImode, scratch)));
6538 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
6539 (padded to the size of a word) should be passed in a register. */
6541 static bool
6542 arm_must_pass_in_stack (enum machine_mode mode, tree type)
6544 if (TARGET_AAPCS_BASED)
6545 return must_pass_in_stack_var_size (mode, type);
6546 else
6547 return must_pass_in_stack_var_size_or_pad (mode, type);
6551 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
6552 Return true if an argument passed on the stack should be padded upwards,
6553 i.e. if the least-significant byte has useful data. */
6555 bool
6556 arm_pad_arg_upward (enum machine_mode mode, tree type)
6558 if (!TARGET_AAPCS_BASED)
6559 return DEFAULT_FUNCTION_ARG_PADDING(mode, type);
6561 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
6562 return false;
6564 return true;
6568 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
6569 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
6570 byte of the register has useful data, and return the opposite if the
6571 most significant byte does.
6572 For AAPCS, small aggregates and small complex types are always padded
6573 upwards. */
6575 bool
6576 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
6577 tree type, int first ATTRIBUTE_UNUSED)
6579 if (TARGET_AAPCS_BASED
6580 && BYTES_BIG_ENDIAN
6581 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
6582 && int_size_in_bytes (type) <= 4)
6583 return true;
6585 /* Otherwise, use default padding. */
6586 return !BYTES_BIG_ENDIAN;
6591 /* Print a symbolic form of X to the debug file, F. */
6592 static void
6593 arm_print_value (FILE *f, rtx x)
6595 switch (GET_CODE (x))
6597 case CONST_INT:
6598 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6599 return;
6601 case CONST_DOUBLE:
6602 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6603 return;
6605 case CONST_VECTOR:
6607 int i;
6609 fprintf (f, "<");
6610 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6612 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6613 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6614 fputc (',', f);
6616 fprintf (f, ">");
6618 return;
6620 case CONST_STRING:
6621 fprintf (f, "\"%s\"", XSTR (x, 0));
6622 return;
6624 case SYMBOL_REF:
6625 fprintf (f, "`%s'", XSTR (x, 0));
6626 return;
6628 case LABEL_REF:
6629 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6630 return;
6632 case CONST:
6633 arm_print_value (f, XEXP (x, 0));
6634 return;
6636 case PLUS:
6637 arm_print_value (f, XEXP (x, 0));
6638 fprintf (f, "+");
6639 arm_print_value (f, XEXP (x, 1));
6640 return;
6642 case PC:
6643 fprintf (f, "pc");
6644 return;
6646 default:
6647 fprintf (f, "????");
6648 return;
6652 /* Routines for manipulation of the constant pool. */
6654 /* Arm instructions cannot load a large constant directly into a
6655 register; they have to come from a pc relative load. The constant
6656 must therefore be placed in the addressable range of the pc
6657 relative load. Depending on the precise pc relative load
6658 instruction the range is somewhere between 256 bytes and 4k. This
6659 means that we often have to dump a constant inside a function, and
6660 generate code to branch around it.
6662 It is important to minimize this, since the branches will slow
6663 things down and make the code larger.
6665 Normally we can hide the table after an existing unconditional
6666 branch so that there is no interruption of the flow, but in the
6667 worst case the code looks like this:
6669 ldr rn, L1
6671 b L2
6672 align
6673 L1: .long value
6677 ldr rn, L3
6679 b L4
6680 align
6681 L3: .long value
6685 We fix this by performing a scan after scheduling, which notices
6686 which instructions need to have their operands fetched from the
6687 constant table and builds the table.
6689 The algorithm starts by building a table of all the constants that
6690 need fixing up and all the natural barriers in the function (places
6691 where a constant table can be dropped without breaking the flow).
6692 For each fixup we note how far the pc-relative replacement will be
6693 able to reach and the offset of the instruction into the function.
6695 Having built the table we then group the fixes together to form
6696 tables that are as large as possible (subject to addressing
6697 constraints) and emit each table of constants after the last
6698 barrier that is within range of all the instructions in the group.
6699 If a group does not contain a barrier, then we forcibly create one
6700 by inserting a jump instruction into the flow. Once the table has
6701 been inserted, the insns are then modified to reference the
6702 relevant entry in the pool.
6704 Possible enhancements to the algorithm (not implemented) are:
6706 1) For some processors and object formats, there may be benefit in
6707 aligning the pools to the start of cache lines; this alignment
6708 would need to be taken into account when calculating addressability
6709 of a pool. */
6711 /* These typedefs are located at the start of this file, so that
6712 they can be used in the prototypes there. This comment is to
6713 remind readers of that fact so that the following structures
6714 can be understood more easily.
6716 typedef struct minipool_node Mnode;
6717 typedef struct minipool_fixup Mfix; */
6719 struct minipool_node
6721 /* Doubly linked chain of entries. */
6722 Mnode * next;
6723 Mnode * prev;
6724 /* The maximum offset into the code at which this entry can be placed. While
6725 pushing fixes for forward references, all entries are sorted in order
6726 of increasing max_address. */
6727 HOST_WIDE_INT max_address;
6728 /* Similarly for an entry inserted for a backwards ref. */
6729 HOST_WIDE_INT min_address;
6730 /* The number of fixes referencing this entry. This can become zero
6731 if we "unpush" an entry. In this case we ignore the entry when we
6732 come to emit the code. */
6733 int refcount;
6734 /* The offset from the start of the minipool. */
6735 HOST_WIDE_INT offset;
6736 /* The value in table. */
6737 rtx value;
6738 /* The mode of value. */
6739 enum machine_mode mode;
6740 /* The size of the value. With iWMMXt enabled
6741 sizes > 4 also imply an alignment of 8 bytes. */
6742 int fix_size;
6745 struct minipool_fixup
6747 Mfix * next;
6748 rtx insn;
6749 HOST_WIDE_INT address;
6750 rtx * loc;
6751 enum machine_mode mode;
6752 int fix_size;
6753 rtx value;
6754 Mnode * minipool;
6755 HOST_WIDE_INT forwards;
6756 HOST_WIDE_INT backwards;
6759 /* Fixes less than a word need padding out to a word boundary. */
6760 #define MINIPOOL_FIX_SIZE(mode) \
6761 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
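/* For example, a QImode or HImode fix is padded up to a full 4-byte
   slot, while DImode and DFmode fixes keep their natural 8-byte size. */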
6763 static Mnode * minipool_vector_head;
6764 static Mnode * minipool_vector_tail;
6765 static rtx minipool_vector_label;
6767 /* The linked list of all minipool fixes required for this function. */
6768 Mfix * minipool_fix_head;
6769 Mfix * minipool_fix_tail;
6770 /* The fix entry for the current minipool, once it has been placed. */
6771 Mfix * minipool_barrier;
6773 /* Determines if INSN is the start of a jump table. Returns the end
6774 of the TABLE or NULL_RTX. */
6775 static rtx
6776 is_jump_table (rtx insn)
6778 rtx table;
6780 if (GET_CODE (insn) == JUMP_INSN
6781 && JUMP_LABEL (insn) != NULL
6782 && ((table = next_real_insn (JUMP_LABEL (insn)))
6783 == next_real_insn (insn))
6784 && table != NULL
6785 && GET_CODE (table) == JUMP_INSN
6786 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6787 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6788 return table;
6790 return NULL_RTX;
6793 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6794 #define JUMP_TABLES_IN_TEXT_SECTION 0
6795 #endif
6797 static HOST_WIDE_INT
6798 get_jump_table_size (rtx insn)
6800 /* ADDR_VECs only take room if read-only data goes into the text
6801 section. */
6802 if (JUMP_TABLES_IN_TEXT_SECTION
6803 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6804 || 1
6805 #endif
6808 rtx body = PATTERN (insn);
6809 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6811 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6814 return 0;
6817 /* Move a minipool fix MP from its current location to before MAX_MP.
6818 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6819 constraints may need updating. */
6820 static Mnode *
6821 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6822 HOST_WIDE_INT max_address)
6824 /* This should never be true and the code below assumes these are
6825 different. */
6826 if (mp == max_mp)
6827 abort ();
6829 if (max_mp == NULL)
6831 if (max_address < mp->max_address)
6832 mp->max_address = max_address;
6834 else
6836 if (max_address > max_mp->max_address - mp->fix_size)
6837 mp->max_address = max_mp->max_address - mp->fix_size;
6838 else
6839 mp->max_address = max_address;
6841 /* Unlink MP from its current position. Since max_mp is non-null,
6842 mp->prev must be non-null. */
6843 mp->prev->next = mp->next;
6844 if (mp->next != NULL)
6845 mp->next->prev = mp->prev;
6846 else
6847 minipool_vector_tail = mp->prev;
6849 /* Re-insert it before MAX_MP. */
6850 mp->next = max_mp;
6851 mp->prev = max_mp->prev;
6852 max_mp->prev = mp;
6854 if (mp->prev != NULL)
6855 mp->prev->next = mp;
6856 else
6857 minipool_vector_head = mp;
6860 /* Save the new entry. */
6861 max_mp = mp;
6863 /* Scan over the preceding entries and adjust their addresses as
6864 required. */
6865 while (mp->prev != NULL
6866 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6868 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6869 mp = mp->prev;
6872 return max_mp;
6875 /* Add a constant to the minipool for a forward reference. Returns the
6876 node added or NULL if the constant will not fit in this pool. */
6877 static Mnode *
6878 add_minipool_forward_ref (Mfix *fix)
6880 /* If set, max_mp is the first pool_entry that has a lower
6881 constraint than the one we are trying to add. */
6882 Mnode * max_mp = NULL;
6883 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6884 Mnode * mp;
6886 /* If this fix's address is greater than the address of the first
6887 entry, then we can't put the fix in this pool. We subtract the
6888 size of the current fix to ensure that if the table is fully
6889 packed we still have enough room to insert this value by shuffling
6890 the other fixes forwards. */
6891 if (minipool_vector_head &&
6892 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6893 return NULL;
6895 /* Scan the pool to see if a constant with the same value has
6896 already been added. While we are doing this, also note the
6897 location where we must insert the constant if it doesn't already
6898 exist. */
6899 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6901 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6902 && fix->mode == mp->mode
6903 && (GET_CODE (fix->value) != CODE_LABEL
6904 || (CODE_LABEL_NUMBER (fix->value)
6905 == CODE_LABEL_NUMBER (mp->value)))
6906 && rtx_equal_p (fix->value, mp->value))
6908 /* More than one fix references this entry. */
6909 mp->refcount++;
6910 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6913 /* Note the insertion point if necessary. */
6914 if (max_mp == NULL
6915 && mp->max_address > max_address)
6916 max_mp = mp;
6918 /* If we are inserting an 8-byte aligned quantity and
6919 we have not already found an insertion point, then
6920 make sure that all such 8-byte aligned quantities are
6921 placed at the start of the pool. */
6922 if (ARM_DOUBLEWORD_ALIGN
6923 && max_mp == NULL
6924 && fix->fix_size == 8
6925 && mp->fix_size != 8)
6927 max_mp = mp;
6928 max_address = mp->max_address;
6932 /* The value is not currently in the minipool, so we need to create
6933 a new entry for it. If MAX_MP is NULL, the entry will be put on
6934 the end of the list since the placement is less constrained than
6935 any existing entry. Otherwise, we insert the new fix before
6936 MAX_MP and, if necessary, adjust the constraints on the other
6937 entries. */
6938 mp = xmalloc (sizeof (* mp));
6939 mp->fix_size = fix->fix_size;
6940 mp->mode = fix->mode;
6941 mp->value = fix->value;
6942 mp->refcount = 1;
6943 /* Not yet required for a backwards ref. */
6944 mp->min_address = -65536;
6946 if (max_mp == NULL)
6948 mp->max_address = max_address;
6949 mp->next = NULL;
6950 mp->prev = minipool_vector_tail;
6952 if (mp->prev == NULL)
6954 minipool_vector_head = mp;
6955 minipool_vector_label = gen_label_rtx ();
6957 else
6958 mp->prev->next = mp;
6960 minipool_vector_tail = mp;
6962 else
6964 if (max_address > max_mp->max_address - mp->fix_size)
6965 mp->max_address = max_mp->max_address - mp->fix_size;
6966 else
6967 mp->max_address = max_address;
6969 mp->next = max_mp;
6970 mp->prev = max_mp->prev;
6971 max_mp->prev = mp;
6972 if (mp->prev != NULL)
6973 mp->prev->next = mp;
6974 else
6975 minipool_vector_head = mp;
6978 /* Save the new entry. */
6979 max_mp = mp;
6981 /* Scan over the preceding entries and adjust their addresses as
6982 required. */
6983 while (mp->prev != NULL
6984 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6986 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6987 mp = mp->prev;
6990 return max_mp;
6993 static Mnode *
6994 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6995 HOST_WIDE_INT min_address)
6997 HOST_WIDE_INT offset;
6999 /* This should never be true, and the code below assumes these are
7000 different. */
7001 if (mp == min_mp)
7002 abort ();
7004 if (min_mp == NULL)
7006 if (min_address > mp->min_address)
7007 mp->min_address = min_address;
7009 else
7011 /* We will adjust this below if it is too loose. */
7012 mp->min_address = min_address;
7014 /* Unlink MP from its current position. Since min_mp is non-null,
7015 mp->next must be non-null. */
7016 mp->next->prev = mp->prev;
7017 if (mp->prev != NULL)
7018 mp->prev->next = mp->next;
7019 else
7020 minipool_vector_head = mp->next;
7022 /* Reinsert it after MIN_MP. */
7023 mp->prev = min_mp;
7024 mp->next = min_mp->next;
7025 min_mp->next = mp;
7026 if (mp->next != NULL)
7027 mp->next->prev = mp;
7028 else
7029 minipool_vector_tail = mp;
7032 min_mp = mp;
7034 offset = 0;
7035 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7037 mp->offset = offset;
7038 if (mp->refcount > 0)
7039 offset += mp->fix_size;
7041 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7042 mp->next->min_address = mp->min_address + mp->fix_size;
7045 return min_mp;
7048 /* Add a constant to the minipool for a backward reference. Returns the
7049 node added or NULL if the constant will not fit in this pool.
7051 Note that the code for insertion for a backwards reference can be
7052 somewhat confusing because the calculated offsets for each fix do
7053 not take into account the size of the pool (which is still under
7054 construction). */
7055 static Mnode *
7056 add_minipool_backward_ref (Mfix *fix)
7058 /* If set, min_mp is the last pool_entry that has a lower constraint
7059 than the one we are trying to add. */
7060 Mnode *min_mp = NULL;
7061 /* This can be negative, since it is only a constraint. */
7062 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7063 Mnode *mp;
7065 /* If we can't reach the current pool from this insn, or if we can't
7066 insert this entry at the end of the pool without pushing other
7067 fixes out of range, then we don't try. This ensures that we
7068 can't fail later on. */
7069 if (min_address >= minipool_barrier->address
7070 || (minipool_vector_tail->min_address + fix->fix_size
7071 >= minipool_barrier->address))
7072 return NULL;
7074 /* Scan the pool to see if a constant with the same value has
7075 already been added. While we are doing this, also note the
7076 location where we must insert the constant if it doesn't already
7077 exist. */
7078 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7080 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7081 && fix->mode == mp->mode
7082 && (GET_CODE (fix->value) != CODE_LABEL
7083 || (CODE_LABEL_NUMBER (fix->value)
7084 == CODE_LABEL_NUMBER (mp->value)))
7085 && rtx_equal_p (fix->value, mp->value)
7086 /* Check that there is enough slack to move this entry to the
7087 end of the table (this is conservative). */
7088 && (mp->max_address
7089 > (minipool_barrier->address
7090 + minipool_vector_tail->offset
7091 + minipool_vector_tail->fix_size)))
7093 mp->refcount++;
7094 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7097 if (min_mp != NULL)
7098 mp->min_address += fix->fix_size;
7099 else
7101 /* Note the insertion point if necessary. */
7102 if (mp->min_address < min_address)
7104 /* For now, we do not allow the insertion of nodes requiring
7105 8-byte alignment anywhere but at the start of the pool. */
7106 if (ARM_DOUBLEWORD_ALIGN
7107 && fix->fix_size == 8 && mp->fix_size != 8)
7108 return NULL;
7109 else
7110 min_mp = mp;
7112 else if (mp->max_address
7113 < minipool_barrier->address + mp->offset + fix->fix_size)
7115 /* Inserting before this entry would push the fix beyond
7116 its maximum address (which can happen if we have
7117 re-located a forwards fix); force the new fix to come
7118 after it. */
7119 min_mp = mp;
7120 min_address = mp->min_address + fix->fix_size;
7122 /* If we are inserting an 8-byte aligned quantity and
7123 we have not already found an insertion point, then
7124 make sure that all such 8-byte aligned quantities are
7125 placed at the start of the pool. */
7126 else if (ARM_DOUBLEWORD_ALIGN
7127 && min_mp == NULL
7128 && fix->fix_size == 8
7129 && mp->fix_size < 8)
7131 min_mp = mp;
7132 min_address = mp->min_address + fix->fix_size;
7137 /* We need to create a new entry. */
7138 mp = xmalloc (sizeof (* mp));
7139 mp->fix_size = fix->fix_size;
7140 mp->mode = fix->mode;
7141 mp->value = fix->value;
7142 mp->refcount = 1;
7143 mp->max_address = minipool_barrier->address + 65536;
7145 mp->min_address = min_address;
7147 if (min_mp == NULL)
7149 mp->prev = NULL;
7150 mp->next = minipool_vector_head;
7152 if (mp->next == NULL)
7154 minipool_vector_tail = mp;
7155 minipool_vector_label = gen_label_rtx ();
7157 else
7158 mp->next->prev = mp;
7160 minipool_vector_head = mp;
7162 else
7164 mp->next = min_mp->next;
7165 mp->prev = min_mp;
7166 min_mp->next = mp;
7168 if (mp->next != NULL)
7169 mp->next->prev = mp;
7170 else
7171 minipool_vector_tail = mp;
7174 /* Save the new entry. */
7175 min_mp = mp;
7177 if (mp->prev)
7178 mp = mp->prev;
7179 else
7180 mp->offset = 0;
7182 /* Scan over the following entries and adjust their offsets. */
7183 while (mp->next != NULL)
7185 if (mp->next->min_address < mp->min_address + mp->fix_size)
7186 mp->next->min_address = mp->min_address + mp->fix_size;
7188 if (mp->refcount)
7189 mp->next->offset = mp->offset + mp->fix_size;
7190 else
7191 mp->next->offset = mp->offset;
7193 mp = mp->next;
7196 return min_mp;
7199 static void
7200 assign_minipool_offsets (Mfix *barrier)
7202 HOST_WIDE_INT offset = 0;
7203 Mnode *mp;
7205 minipool_barrier = barrier;
7207 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7209 mp->offset = offset;
7211 if (mp->refcount > 0)
7212 offset += mp->fix_size;
7216 /* Output the literal table */
7217 static void
7218 dump_minipool (rtx scan)
7220 Mnode * mp;
7221 Mnode * nmp;
7222 int align64 = 0;
7224 if (ARM_DOUBLEWORD_ALIGN)
7225 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7226 if (mp->refcount > 0 && mp->fix_size == 8)
7228 align64 = 1;
7229 break;
7232 if (dump_file)
7233 fprintf (dump_file,
7234 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7235 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7237 scan = emit_label_after (gen_label_rtx (), scan);
7238 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7239 scan = emit_label_after (minipool_vector_label, scan);
7241 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7243 if (mp->refcount > 0)
7245 if (dump_file)
7247 fprintf (dump_file,
7248 ";; Offset %u, min %ld, max %ld ",
7249 (unsigned) mp->offset, (unsigned long) mp->min_address,
7250 (unsigned long) mp->max_address);
7251 arm_print_value (dump_file, mp->value);
7252 fputc ('\n', dump_file);
7255 switch (mp->fix_size)
7257 #ifdef HAVE_consttable_1
7258 case 1:
7259 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7260 break;
7262 #endif
7263 #ifdef HAVE_consttable_2
7264 case 2:
7265 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7266 break;
7268 #endif
7269 #ifdef HAVE_consttable_4
7270 case 4:
7271 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7272 break;
7274 #endif
7275 #ifdef HAVE_consttable_8
7276 case 8:
7277 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7278 break;
7280 #endif
7281 default:
7282 abort ();
7283 break;
7287 nmp = mp->next;
7288 free (mp);
7291 minipool_vector_head = minipool_vector_tail = NULL;
7292 scan = emit_insn_after (gen_consttable_end (), scan);
7293 scan = emit_barrier_after (scan);
7296 /* Return the cost of forcibly inserting a barrier after INSN. */
7297 static int
7298 arm_barrier_cost (rtx insn)
7300 /* Basing the location of the pool on the loop depth is preferable,
7301 but at the moment, the basic block information seems to be
7302 corrupt by this stage of the compilation. */
7303 int base_cost = 50;
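/* A lower returned cost marks a more attractive location;
   create_fix_barrier picks the cheapest point within range, so the
   discount below favours dumping the pool just before a label. */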
7304 rtx next = next_nonnote_insn (insn);
7306 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7307 base_cost -= 20;
7309 switch (GET_CODE (insn))
7311 case CODE_LABEL:
7312 /* It will always be better to place the table before the label, rather
7313 than after it. */
7314 return 50;
7316 case INSN:
7317 case CALL_INSN:
7318 return base_cost;
7320 case JUMP_INSN:
7321 return base_cost - 10;
7323 default:
7324 return base_cost + 10;
7328 /* Find the best place in the insn stream in the range
7329 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7330 Create the barrier by inserting a jump and add a new fix entry for
7331 it. */
7332 static Mfix *
7333 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7335 HOST_WIDE_INT count = 0;
7336 rtx barrier;
7337 rtx from = fix->insn;
7338 rtx selected = from;
7339 int selected_cost;
7340 HOST_WIDE_INT selected_address;
7341 Mfix * new_fix;
7342 HOST_WIDE_INT max_count = max_address - fix->address;
7343 rtx label = gen_label_rtx ();
7345 selected_cost = arm_barrier_cost (from);
7346 selected_address = fix->address;
7348 while (from && count < max_count)
7350 rtx tmp;
7351 int new_cost;
7353 /* This code shouldn't have been called if there was a natural barrier
7354 within range. */
7355 if (GET_CODE (from) == BARRIER)
7356 abort ();
7358 /* Count the length of this insn. */
7359 count += get_attr_length (from);
7361 /* If there is a jump table, add its length. */
7362 tmp = is_jump_table (from);
7363 if (tmp != NULL)
7365 count += get_jump_table_size (tmp);
7367 /* Jump tables aren't in a basic block, so base the cost on
7368 the dispatch insn. If we select this location, we will
7369 still put the pool after the table. */
7370 new_cost = arm_barrier_cost (from);
7372 if (count < max_count && new_cost <= selected_cost)
7374 selected = tmp;
7375 selected_cost = new_cost;
7376 selected_address = fix->address + count;
7379 /* Continue after the dispatch table. */
7380 from = NEXT_INSN (tmp);
7381 continue;
7384 new_cost = arm_barrier_cost (from);
7386 if (count < max_count && new_cost <= selected_cost)
7388 selected = from;
7389 selected_cost = new_cost;
7390 selected_address = fix->address + count;
7393 from = NEXT_INSN (from);
7396 /* Create a new JUMP_INSN that branches around a barrier. */
7397 from = emit_jump_insn_after (gen_jump (label), selected);
7398 JUMP_LABEL (from) = label;
7399 barrier = emit_barrier_after (from);
7400 emit_label_after (label, barrier);
7402 /* Create a minipool barrier entry for the new barrier. */
7403 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7404 new_fix->insn = barrier;
7405 new_fix->address = selected_address;
7406 new_fix->next = fix->next;
7407 fix->next = new_fix;
7409 return new_fix;
7412 /* Record that there is a natural barrier in the insn stream at
7413 ADDRESS. */
7414 static void
7415 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7417 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7419 fix->insn = insn;
7420 fix->address = address;
7422 fix->next = NULL;
7423 if (minipool_fix_head != NULL)
7424 minipool_fix_tail->next = fix;
7425 else
7426 minipool_fix_head = fix;
7428 minipool_fix_tail = fix;
7431 /* Record INSN, which will need fixing up to load a value from the
7432 minipool. ADDRESS is the offset of the insn since the start of the
7433 function; LOC is a pointer to the part of the insn which requires
7434 fixing; VALUE is the constant that must be loaded, which is of type
7435 MODE. */
7436 static void
7437 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7438 enum machine_mode mode, rtx value)
7440 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7442 #ifdef AOF_ASSEMBLER
7443 /* PIC symbol references need to be converted into offsets into the
7444 based area. */
7445 /* XXX This shouldn't be done here. */
7446 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7447 value = aof_pic_entry (value);
7448 #endif /* AOF_ASSEMBLER */
7450 fix->insn = insn;
7451 fix->address = address;
7452 fix->loc = loc;
7453 fix->mode = mode;
7454 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7455 fix->value = value;
7456 fix->forwards = get_attr_pool_range (insn);
7457 fix->backwards = get_attr_neg_pool_range (insn);
7458 fix->minipool = NULL;
7460 /* If an insn doesn't have a range defined for it, then it isn't
7461 expecting to be reworked by this code. Better to abort now than
7462 to generate duff assembly code. */
7463 if (fix->forwards == 0 && fix->backwards == 0)
7464 abort ();
7466 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7467 So there might be an empty word before the start of the pool.
7468 Hence we reduce the forward range by 4 to allow for this
7469 possibility. */
7470 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7471 fix->forwards -= 4;
7473 if (dump_file)
7475 fprintf (dump_file,
7476 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7477 GET_MODE_NAME (mode),
7478 INSN_UID (insn), (unsigned long) address,
7479 -1 * (long)fix->backwards, (long)fix->forwards);
7480 arm_print_value (dump_file, fix->value);
7481 fprintf (dump_file, "\n");
7484 /* Add it to the chain of fixes. */
7485 fix->next = NULL;
7487 if (minipool_fix_head != NULL)
7488 minipool_fix_tail->next = fix;
7489 else
7490 minipool_fix_head = fix;
7492 minipool_fix_tail = fix;
7495 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7496 Returns the number of insns needed, or 99 if we don't know how to
7497 do it. */
7499 arm_const_double_inline_cost (rtx val)
7501 rtx lowpart, highpart;
7502 enum machine_mode mode;
7504 mode = GET_MODE (val);
7506 if (mode == VOIDmode)
7507 mode = DImode;
7509 gcc_assert (GET_MODE_SIZE (mode) == 8);
7511 lowpart = gen_lowpart (SImode, val);
7512 highpart = gen_highpart_mode (SImode, mode, val);
7514 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7515 gcc_assert (GET_CODE (highpart) == CONST_INT);
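/* The total is simply the number of insns needed to synthesize each
   32-bit half independently; e.g. a value whose two halves are both
   simple ARM immediates typically costs just two insns. */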
7517 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7518 NULL_RTX, NULL_RTX, 0, 0)
7519 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7520 NULL_RTX, NULL_RTX, 0, 0));
7523 /* Return true if it is worthwhile to split a 64-bit constant into two
7524 32-bit operations. This is the case if optimizing for size, or
7525 if we have load delay slots, or if one 32-bit part can be done with
7526 a single data operation. */
7527 bool
7528 arm_const_double_by_parts (rtx val)
7530 enum machine_mode mode = GET_MODE (val);
7531 rtx part;
7533 if (optimize_size || arm_ld_sched)
7534 return true;
7536 if (mode == VOIDmode)
7537 mode = DImode;
7539 part = gen_highpart_mode (SImode, mode, val);
7541 gcc_assert (GET_CODE (part) == CONST_INT);
7543 if (const_ok_for_arm (INTVAL (part))
7544 || const_ok_for_arm (~INTVAL (part)))
7545 return true;
7547 part = gen_lowpart (SImode, val);
7549 gcc_assert (GET_CODE (part) == CONST_INT);
7551 if (const_ok_for_arm (INTVAL (part))
7552 || const_ok_for_arm (~INTVAL (part)))
7553 return true;
7555 return false;
7558 /* Scan INSN and note any of its operands that need fixing.
7559 If DO_PUSHES is false we do not actually push any of the fixups
7560 needed. The function returns TRUE if any fixups were needed/pushed.
7561 This is used by arm_memory_load_p() which needs to know about loads
7562 of constants that will be converted into minipool loads. */
7563 static bool
7564 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7566 bool result = false;
7567 int opno;
7569 extract_insn (insn);
7571 if (!constrain_operands (1))
7572 fatal_insn_not_found (insn);
7574 if (recog_data.n_alternatives == 0)
7575 return false;
7577 /* Fill in recog_op_alt with information about the constraints of
7578 this insn. */
7579 preprocess_constraints ();
7581 for (opno = 0; opno < recog_data.n_operands; opno++)
7583 /* Things we need to fix can only occur in inputs. */
7584 if (recog_data.operand_type[opno] != OP_IN)
7585 continue;
7587 /* If this alternative is a memory reference, then any mention
7588 of constants in this alternative is really to fool reload
7589 into allowing us to accept one there. We need to fix them up
7590 now so that we output the right code. */
7591 if (recog_op_alt[opno][which_alternative].memory_ok)
7593 rtx op = recog_data.operand[opno];
7595 if (CONSTANT_P (op))
7597 if (do_pushes)
7598 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7599 recog_data.operand_mode[opno], op);
7600 result = true;
7602 else if (GET_CODE (op) == MEM
7603 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7604 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7606 if (do_pushes)
7608 rtx cop = avoid_constant_pool_reference (op);
7610 /* Casting the address of something to a mode narrower
7611 than a word can cause avoid_constant_pool_reference()
7612 to return the pool reference itself. That's no good to
7613 us here. Let's just hope that we can use the
7614 constant pool value directly. */
7615 if (op == cop)
7616 cop = get_pool_constant (XEXP (op, 0));
7618 push_minipool_fix (insn, address,
7619 recog_data.operand_loc[opno],
7620 recog_data.operand_mode[opno], cop);
7623 result = true;
7628 return result;
7631 /* Gcc puts the pool in the wrong place for ARM, since we can only
7632 load addresses a limited distance around the pc. We do some
7633 special munging to move the constant pool values to the correct
7634 point in the code. */
7635 static void
7636 arm_reorg (void)
7638 rtx insn;
7639 HOST_WIDE_INT address = 0;
7640 Mfix * fix;
7642 minipool_fix_head = minipool_fix_tail = NULL;
7644 /* The first insn must always be a note, or the code below won't
7645 scan it properly. */
7646 insn = get_insns ();
7647 if (GET_CODE (insn) != NOTE)
7648 abort ();
7650 /* Scan all the insns and record the operands that will need fixing. */
7651 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7653 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7654 && (arm_cirrus_insn_p (insn)
7655 || GET_CODE (insn) == JUMP_INSN
7656 || arm_memory_load_p (insn)))
7657 cirrus_reorg (insn);
7659 if (GET_CODE (insn) == BARRIER)
7660 push_minipool_barrier (insn, address);
7661 else if (INSN_P (insn))
7663 rtx table;
7665 note_invalid_constants (insn, address, true);
7666 address += get_attr_length (insn);
7668 /* If the insn is a vector jump, add the size of the table
7669 and skip the table. */
7670 if ((table = is_jump_table (insn)) != NULL)
7672 address += get_jump_table_size (table);
7673 insn = table;
7678 fix = minipool_fix_head;
7680 /* Now scan the fixups and perform the required changes. */
7681 while (fix)
7683 Mfix * ftmp;
7684 Mfix * fdel;
7685 Mfix * last_added_fix;
7686 Mfix * last_barrier = NULL;
7687 Mfix * this_fix;
7689 /* Skip any further barriers before the next fix. */
7690 while (fix && GET_CODE (fix->insn) == BARRIER)
7691 fix = fix->next;
7693 /* No more fixes. */
7694 if (fix == NULL)
7695 break;
7697 last_added_fix = NULL;
7699 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7701 if (GET_CODE (ftmp->insn) == BARRIER)
7703 if (ftmp->address >= minipool_vector_head->max_address)
7704 break;
7706 last_barrier = ftmp;
7708 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7709 break;
7711 last_added_fix = ftmp; /* Keep track of the last fix added. */
7714 /* If we found a barrier, drop back to that; any fixes that we
7715 could have reached but come after the barrier will now go in
7716 the next mini-pool. */
7717 if (last_barrier != NULL)
7719 /* Reduce the refcount for those fixes that won't go into this
7720 pool after all. */
7721 for (fdel = last_barrier->next;
7722 fdel && fdel != ftmp;
7723 fdel = fdel->next)
7725 fdel->minipool->refcount--;
7726 fdel->minipool = NULL;
7729 ftmp = last_barrier;
7731 else
7733 /* ftmp is the first fix that we can't fit into this pool and
7734 there are no natural barriers that we could use. Insert a
7735 new barrier in the code somewhere between the previous
7736 fix and this one, and arrange to jump around it. */
7737 HOST_WIDE_INT max_address;
7739 /* The last item on the list of fixes must be a barrier, so
7740 we can never run off the end of the list of fixes without
7741 last_barrier being set. */
7742 if (ftmp == NULL)
7743 abort ();
7745 max_address = minipool_vector_head->max_address;
7746 /* Check that there isn't another fix that is in range that
7747 we couldn't fit into this pool because the pool was
7748 already too large: we need to put the pool before such an
7749 instruction. */
7750 if (ftmp->address < max_address)
7751 max_address = ftmp->address;
7753 last_barrier = create_fix_barrier (last_added_fix, max_address);
7756 assign_minipool_offsets (last_barrier);
7758 while (ftmp)
7760 if (GET_CODE (ftmp->insn) != BARRIER
7761 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7762 == NULL))
7763 break;
7765 ftmp = ftmp->next;
7768 /* Scan over the fixes we have identified for this pool, fixing them
7769 up and adding the constants to the pool itself. */
7770 for (this_fix = fix; this_fix && ftmp != this_fix;
7771 this_fix = this_fix->next)
7772 if (GET_CODE (this_fix->insn) != BARRIER)
7774 rtx addr
7775 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7776 minipool_vector_label),
7777 this_fix->minipool->offset);
7778 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7781 dump_minipool (last_barrier->insn);
7782 fix = ftmp;
7785 /* From now on we must synthesize any constants that we can't handle
7786 directly. This can happen if the RTL gets split during final
7787 instruction generation. */
7788 after_arm_reorg = 1;
7790 /* Free the minipool memory. */
7791 obstack_free (&minipool_obstack, minipool_startobj);
7794 /* Routines to output assembly language. */
7796 /* If the rtx is the correct value then return the string of the number.
7797 In this way we can ensure that valid double constants are generated even
7798 when cross compiling. */
7799 const char *
7800 fp_immediate_constant (rtx x)
7802 REAL_VALUE_TYPE r;
7803 int i;
7805 if (!fp_consts_inited)
7806 init_fp_table ();
7808 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7809 for (i = 0; i < 8; i++)
7810 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7811 return strings_fp[i];
7813 abort ();
7816 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7817 static const char *
7818 fp_const_from_val (REAL_VALUE_TYPE *r)
7820 int i;
7822 if (!fp_consts_inited)
7823 init_fp_table ();
7825 for (i = 0; i < 8; i++)
7826 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7827 return strings_fp[i];
7829 abort ();
7832 /* Output the operands of a LDM/STM instruction to STREAM.
7833 MASK is the ARM register set mask of which only bits 0-15 are important.
7834 REG is the base register, either the frame pointer or the stack pointer,
7835 INSTR is the possibly suffixed load or store instruction. */
7837 static void
7838 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7839 unsigned long mask)
7841 unsigned i;
7842 bool not_first = FALSE;
7844 fputc ('\t', stream);
7845 asm_fprintf (stream, instr, reg);
7846 fputs (", {", stream);
7848 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7849 if (mask & (1 << i))
7851 if (not_first)
7852 fprintf (stream, ", ");
7854 asm_fprintf (stream, "%r", i);
7855 not_first = TRUE;
7858 fprintf (stream, "}\n");
7862 /* Output a FLDMX instruction to STREAM.
7863 BASE is the register containing the address.
7864 REG and COUNT specify the register range.
7865 Extra registers may be added to avoid hardware bugs. */
7867 static void
7868 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7870 int i;
7872 /* Workaround ARM10 VFPr1 bug. */
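/* The same two-pair restriction described in vfp_emit_fstmx below
   applies here, so the transfer is widened to three register pairs. */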
7873 if (count == 2 && !arm_arch6)
7875 if (reg == 15)
7876 reg--;
7877 count++;
7880 fputc ('\t', stream);
7881 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7883 for (i = reg; i < reg + count; i++)
7885 if (i > reg)
7886 fputs (", ", stream);
7887 asm_fprintf (stream, "d%d", i);
7889 fputs ("}\n", stream);
7894 /* Output the assembly for a store multiple. */
7896 const char *
7897 vfp_output_fstmx (rtx * operands)
7899 char pattern[100];
7900 int p;
7901 int base;
7902 int i;
7904 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7905 p = strlen (pattern);
7907 if (GET_CODE (operands[1]) != REG)
7908 abort ();
7910 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7911 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7913 p += sprintf (&pattern[p], ", d%d", base + i);
7915 strcpy (&pattern[p], "}");
7917 output_asm_insn (pattern, operands);
7918 return "";
7922 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
7923 number of bytes pushed. */
7925 static int
7926 vfp_emit_fstmx (int base_reg, int count)
7928 rtx par;
7929 rtx dwarf;
7930 rtx tmp, reg;
7931 int i;
7933 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
7934 register pairs are stored by a store multiple insn. We avoid this
7935 by pushing an extra pair. */
7936 if (count == 2 && !arm_arch6)
7938 if (base_reg == LAST_VFP_REGNUM - 3)
7939 base_reg -= 2;
7940 count++;
7943 /* ??? The frame layout is implementation defined. We describe
7944 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7945 We really need some way of representing the whole block so that the
7946 unwinder can figure it out at runtime. */
7947 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7948 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7950 reg = gen_rtx_REG (DFmode, base_reg);
7951 base_reg += 2;
7953 XVECEXP (par, 0, 0)
7954 = gen_rtx_SET (VOIDmode,
7955 gen_rtx_MEM (BLKmode,
7956 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7957 gen_rtx_UNSPEC (BLKmode,
7958 gen_rtvec (1, reg),
7959 UNSPEC_PUSH_MULT));
7961 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7962 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7963 GEN_INT (-(count * 8 + 4))));
7964 RTX_FRAME_RELATED_P (tmp) = 1;
7965 XVECEXP (dwarf, 0, 0) = tmp;
7967 tmp = gen_rtx_SET (VOIDmode,
7968 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7969 reg);
7970 RTX_FRAME_RELATED_P (tmp) = 1;
7971 XVECEXP (dwarf, 0, 1) = tmp;
7973 for (i = 1; i < count; i++)
7975 reg = gen_rtx_REG (DFmode, base_reg);
7976 base_reg += 2;
7977 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7979 tmp = gen_rtx_SET (VOIDmode,
7980 gen_rtx_MEM (DFmode,
7981 gen_rtx_PLUS (SImode,
7982 stack_pointer_rtx,
7983 GEN_INT (i * 8))),
7984 reg);
7985 RTX_FRAME_RELATED_P (tmp) = 1;
7986 XVECEXP (dwarf, 0, i + 1) = tmp;
7989 par = emit_insn (par);
7990 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7991 REG_NOTES (par));
7992 RTX_FRAME_RELATED_P (par) = 1;
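/* Each register pair occupies 8 bytes on the stack; the extra 4 bytes
   account for the unused pad word mentioned in the format comment above. */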
7994 return count * 8 + 4;
7998 /* Output a 'call' insn. */
7999 const char *
8000 output_call (rtx *operands)
8002 if (arm_arch5)
8003 abort (); /* Patterns should call blx <reg> directly. */
8005 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8006 if (REGNO (operands[0]) == LR_REGNUM)
8008 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8009 output_asm_insn ("mov%?\t%0, %|lr", operands);
8012 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
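/* Reading the pc yields an address two instructions ahead, so lr now
   holds the address of the insn following the branch emitted below --
   the correct return address. */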
8014 if (TARGET_INTERWORK || arm_arch4t)
8015 output_asm_insn ("bx%?\t%0", operands);
8016 else
8017 output_asm_insn ("mov%?\t%|pc, %0", operands);
8019 return "";
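/* Illustrative example (assumed operand): on a pre-v5 target without
   interworking, a call through r2 is emitted roughly as

       mov     lr, pc
       mov     pc, r2

   whereas interworking or ARMv4T targets replace the final move with
   "bx r2".  */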
8022 /* Output a 'call' insn that is a reference in memory. */
8023 const char *
8024 output_call_mem (rtx *operands)
8026 if (TARGET_INTERWORK && !arm_arch5)
8028 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8029 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8030 output_asm_insn ("bx%?\t%|ip", operands);
8032 else if (regno_use_in (LR_REGNUM, operands[0]))
8034 /* LR is used in the memory address. We load the address in the
8035 first instruction. It's safe to use IP as the target of the
8036 load since the call will kill it anyway. */
8037 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8038 if (arm_arch5)
8039 output_asm_insn ("blx%?\t%|ip", operands);
8040 else
8042 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8043 if (arm_arch4t)
8044 output_asm_insn ("bx%?\t%|ip", operands);
8045 else
8046 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8049 else
8051 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8052 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8055 return "";
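/* Illustrative example (assumed operand): for TARGET_INTERWORK on a
   pre-v5 core, a call through the memory word at [sp, #8] comes out
   roughly as

       ldr     ip, [sp, #8]
       mov     lr, pc
       bx      ip

   i.e. load the callee address into IP, save the return address in LR,
   then interwork via BX.  */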
8059 /* Output a move from arm registers to an fpa register.
8060 OPERANDS[0] is an fpa register.
8061 OPERANDS[1] is the first register of an arm register pair.  */
8062 const char *
8063 output_mov_long_double_fpa_from_arm (rtx *operands)
8065 int arm_reg0 = REGNO (operands[1]);
8066 rtx ops[3];
8068 if (arm_reg0 == IP_REGNUM)
8069 abort ();
8071 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8072 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8073 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8075 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8076 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8078 return "";
8081 /* Output a move from an fpa register to arm registers.
8082 OPERANDS[0] is the first register of an arm register pair.
8083 OPERANDS[1] is an fpa register. */
8084 const char *
8085 output_mov_long_double_arm_from_fpa (rtx *operands)
8087 int arm_reg0 = REGNO (operands[0]);
8088 rtx ops[3];
8090 if (arm_reg0 == IP_REGNUM)
8091 abort ();
8093 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8094 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8095 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8097 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8098 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8099 return "";
8102 /* Output a move of a long double from arm registers to arm registers.
8103 OPERANDS[0] is the destination.
8104 OPERANDS[1] is the source. */
8105 const char *
8106 output_mov_long_double_arm_from_arm (rtx *operands)
8108 /* We have to be careful here because the two might overlap. */
8109 int dest_start = REGNO (operands[0]);
8110 int src_start = REGNO (operands[1]);
8111 rtx ops[2];
8112 int i;
8114 if (dest_start < src_start)
8116 for (i = 0; i < 3; i++)
8118 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8119 ops[1] = gen_rtx_REG (SImode, src_start + i);
8120 output_asm_insn ("mov%?\t%0, %1", ops);
8123 else
8125 for (i = 2; i >= 0; i--)
8127 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8128 ops[1] = gen_rtx_REG (SImode, src_start + i);
8129 output_asm_insn ("mov%?\t%0, %1", ops);
8133 return "";
8137 /* Output a move from arm registers to an fpa register.
8138 OPERANDS[0] is an fpa register.
8139 OPERANDS[1] is the first register of an arm register pair.  */
8140 const char *
8141 output_mov_double_fpa_from_arm (rtx *operands)
8143 int arm_reg0 = REGNO (operands[1]);
8144 rtx ops[2];
8146 if (arm_reg0 == IP_REGNUM)
8147 abort ();
8149 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8150 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8151 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8152 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8153 return "";
8156 /* Output a move from an fpa register to arm registers.
8157 OPERANDS[0] is the first register of an arm register pair.
8158 OPERANDS[1] is an fpa register. */
8159 const char *
8160 output_mov_double_arm_from_fpa (rtx *operands)
8162 int arm_reg0 = REGNO (operands[0]);
8163 rtx ops[2];
8165 if (arm_reg0 == IP_REGNUM)
8166 abort ();
8168 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8169 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8170 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8171 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8172 return "";
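/* Illustrative example (assumed register allocation): with OPERANDS[0]
   being the pair r0/r1 and OPERANDS[1] being f1, the two templates above
   expand to roughly

       stfd    f1, [sp, #-8]!
       ldmfd   sp!, {r0, r1}

   i.e. the fpa value is bounced through the stack into the arm register
   pair.  */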
8175 /* Output a move between double words.
8176 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8177 or MEM<-REG and all MEMs must be offsettable addresses. */
8178 const char *
8179 output_move_double (rtx *operands)
8181 enum rtx_code code0 = GET_CODE (operands[0]);
8182 enum rtx_code code1 = GET_CODE (operands[1]);
8183 rtx otherops[3];
8185 if (code0 == REG)
8187 int reg0 = REGNO (operands[0]);
8189 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8191 if (code1 == MEM)
8193 switch (GET_CODE (XEXP (operands[1], 0)))
8195 case REG:
8196 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8197 break;
8199 case PRE_INC:
8200 if (!TARGET_LDRD)
8201 abort (); /* Should never happen now. */
8202 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8203 break;
8205 case PRE_DEC:
8206 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8207 break;
8209 case POST_INC:
8210 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8211 break;
8213 case POST_DEC:
8214 if (!TARGET_LDRD)
8215 abort (); /* Should never happen now. */
8216 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8217 break;
8219 case PRE_MODIFY:
8220 case POST_MODIFY:
8221 otherops[0] = operands[0];
8222 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8223 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8225 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8227 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8229 /* Registers overlap so split out the increment. */
8230 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8231 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8233 else
8234 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8236 else
8238 /* We only allow constant increments, so this is safe. */
8239 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8241 break;
8243 case LABEL_REF:
8244 case CONST:
8245 output_asm_insn ("adr%?\t%0, %1", operands);
8246 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8247 break;
8249 default:
8250 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8251 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8253 otherops[0] = operands[0];
8254 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8255 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8257 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8259 if (GET_CODE (otherops[2]) == CONST_INT)
8261 switch ((int) INTVAL (otherops[2]))
8263 case -8:
8264 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8265 return "";
8266 case -4:
8267 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8268 return "";
8269 case 4:
8270 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8271 return "";
8274 if (TARGET_LDRD
8275 && (GET_CODE (otherops[2]) == REG
8276 || (GET_CODE (otherops[2]) == CONST_INT
8277 && INTVAL (otherops[2]) > -256
8278 && INTVAL (otherops[2]) < 256)))
8280 if (reg_overlap_mentioned_p (otherops[0],
8281 otherops[2]))
8283 /* Swap base and index registers over to
8284 avoid a conflict. */
8285 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8286 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8289 /* If both registers conflict, it will usually
8290 have been fixed by a splitter. */
8291 if (reg_overlap_mentioned_p (otherops[0],
8292 otherops[2]))
8294 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8295 output_asm_insn ("ldr%?d\t%0, [%1]",
8296 otherops);
8297 return "";
8299 else
8301 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8302 otherops);
8303 return "";
8306 if (GET_CODE (otherops[2]) == CONST_INT)
8308 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8309 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8310 else
8311 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8313 else
8314 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8316 else
8317 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8319 return "ldm%?ia\t%0, %M0";
8321 else
8323 otherops[1] = adjust_address (operands[1], SImode, 4);
8324 /* Take care of overlapping base/data reg. */
8325 if (reg_mentioned_p (operands[0], operands[1]))
8327 output_asm_insn ("ldr%?\t%0, %1", otherops);
8328 output_asm_insn ("ldr%?\t%0, %1", operands);
8330 else
8332 output_asm_insn ("ldr%?\t%0, %1", operands);
8333 output_asm_insn ("ldr%?\t%0, %1", otherops);
8338 else
8339 abort (); /* Constraints should prevent this. */
8341 else if (code0 == MEM && code1 == REG)
8343 if (REGNO (operands[1]) == IP_REGNUM)
8344 abort ();
8346 switch (GET_CODE (XEXP (operands[0], 0)))
8348 case REG:
8349 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8350 break;
8352 case PRE_INC:
8353 if (!TARGET_LDRD)
8354 abort (); /* Should never happen now. */
8355 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8356 break;
8358 case PRE_DEC:
8359 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8360 break;
8362 case POST_INC:
8363 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8364 break;
8366 case POST_DEC:
8367 if (!TARGET_LDRD)
8368 abort (); /* Should never happen now. */
8369 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8370 break;
8372 case PRE_MODIFY:
8373 case POST_MODIFY:
8374 otherops[0] = operands[1];
8375 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8376 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8378 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8379 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8380 else
8381 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8382 break;
8384 case PLUS:
8385 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8386 if (GET_CODE (otherops[2]) == CONST_INT)
8388 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8390 case -8:
8391 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8392 return "";
8394 case -4:
8395 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8396 return "";
8398 case 4:
8399 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8400 return "";
8403 if (TARGET_LDRD
8404 && (GET_CODE (otherops[2]) == REG
8405 || (GET_CODE (otherops[2]) == CONST_INT
8406 && INTVAL (otherops[2]) > -256
8407 && INTVAL (otherops[2]) < 256)))
8409 otherops[0] = operands[1];
8410 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8411 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8412 return "";
8414 /* Fall through */
8416 default:
8417 otherops[0] = adjust_address (operands[0], SImode, 4);
8418 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8419 output_asm_insn ("str%?\t%1, %0", operands);
8420 output_asm_insn ("str%?\t%1, %0", otherops);
8423 else
8424 /* Constraints should prevent this. */
8425 abort ();
8427 return "";
8430 /* Output an ADD r, s, #n where n may be too big for one instruction.
8431 If adding zero to one register, output nothing. */
8432 const char *
8433 output_add_immediate (rtx *operands)
8435 HOST_WIDE_INT n = INTVAL (operands[2]);
8437 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8439 if (n < 0)
8440 output_multi_immediate (operands,
8441 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8442 -n);
8443 else
8444 output_multi_immediate (operands,
8445 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8449 return "";
8452 /* Output a multiple immediate operation.
8453 OPERANDS is the vector of operands referred to in the output patterns.
8454 INSTR1 is the output pattern to use for the first constant.
8455 INSTR2 is the output pattern to use for subsequent constants.
8456 IMMED_OP is the index of the constant slot in OPERANDS.
8457 N is the constant value. */
8458 static const char *
8459 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8460 int immed_op, HOST_WIDE_INT n)
8462 #if HOST_BITS_PER_WIDE_INT > 32
8463 n &= 0xffffffff;
8464 #endif
8466 if (n == 0)
8468 /* Quick and easy output. */
8469 operands[immed_op] = const0_rtx;
8470 output_asm_insn (instr1, operands);
8472 else
8474 int i;
8475 const char * instr = instr1;
8477 /* Note that n is never zero here (which would give no output). */
8478 for (i = 0; i < 32; i += 2)
8480 if (n & (3 << i))
8482 operands[immed_op] = GEN_INT (n & (255 << i));
8483 output_asm_insn (instr, operands);
8484 instr = instr2;
8485 i += 6;
8490 return "";
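/* Worked example (illustrative): N = 0x10004 has set bits in two separate
   byte windows, so the loop above splits it into the valid ARM immediates
   4 and 0x10000; for an addition the emitted sequence would be roughly

       add     r0, r1, #4
       add     r0, r0, #65536

   with INSTR1 used for the first constant and INSTR2 for the rest.  */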
8493 /* Return the appropriate ARM instruction for the operation code.
8494 The returned result should not be overwritten. OP is the rtx of the
8495 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8496 was shifted. */
8497 const char *
8498 arithmetic_instr (rtx op, int shift_first_arg)
8500 switch (GET_CODE (op))
8502 case PLUS:
8503 return "add";
8505 case MINUS:
8506 return shift_first_arg ? "rsb" : "sub";
8508 case IOR:
8509 return "orr";
8511 case XOR:
8512 return "eor";
8514 case AND:
8515 return "and";
8517 default:
8518 abort ();
8522 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8523 for the operation code. The returned result should not be overwritten.
8524 OP is the rtx of the shift.
8525 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8526 constant shift amount otherwise.  */
8527 static const char *
8528 shift_op (rtx op, HOST_WIDE_INT *amountp)
8530 const char * mnem;
8531 enum rtx_code code = GET_CODE (op);
8533 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8534 *amountp = -1;
8535 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8536 *amountp = INTVAL (XEXP (op, 1));
8537 else
8538 abort ();
8540 switch (code)
8542 case ASHIFT:
8543 mnem = "asl";
8544 break;
8546 case ASHIFTRT:
8547 mnem = "asr";
8548 break;
8550 case LSHIFTRT:
8551 mnem = "lsr";
8552 break;
8554 case ROTATE:
8555 if (*amountp == -1)
8556 abort ();
8557 *amountp = 32 - *amountp;
8559 /* Fall through. */
8561 case ROTATERT:
8562 mnem = "ror";
8563 break;
8565 case MULT:
8566 /* We never have to worry about the amount being other than a
8567 power of 2, since this case can never be reloaded from a reg. */
8568 if (*amountp != -1)
8569 *amountp = int_log2 (*amountp);
8570 else
8571 abort ();
8572 return "asl";
8574 default:
8575 abort ();
8578 if (*amountp != -1)
8580 /* This is not 100% correct, but follows from the desire to merge
8581 multiplication by a power of 2 with the recognizer for a
8582 shift. >=32 is not a valid shift for "asl", so we must try and
8583 output a shift that produces the correct arithmetical result.
8584 Using lsr #32 is identical except for the fact that the carry bit
8585 is not set correctly if we set the flags; but we never use the
8586 carry bit from such an operation, so we can ignore that. */
8587 if (code == ROTATERT)
8588 /* Rotate is just modulo 32. */
8589 *amountp &= 31;
8590 else if (*amountp != (*amountp & 31))
8592 if (code == ASHIFT)
8593 mnem = "lsr";
8594 *amountp = 32;
8597 /* Shifts of 0 are no-ops. */
8598 if (*amountp == 0)
8599 return NULL;
8602 return mnem;
8605 /* Obtain the shift count from POWER, which must be a power of two.  */
8607 static HOST_WIDE_INT
8608 int_log2 (HOST_WIDE_INT power)
8610 HOST_WIDE_INT shift = 0;
8612 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8614 if (shift > 31)
8615 abort ();
8616 shift++;
8619 return shift;
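/* For example (illustrative), int_log2 (8) returns 3, since 1 << 3 == 8.  */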
8622 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8623 because /bin/as is horribly restrictive. The judgement about
8624 whether or not each character is 'printable' (and can be output as
8625 is) or not (and must be printed with an octal escape) must be made
8626 with reference to the *host* character set -- the situation is
8627 similar to that discussed in the comments above pp_c_char in
8628 c-pretty-print.c. */
8630 #define MAX_ASCII_LEN 51
8632 void
8633 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8635 int i;
8636 int len_so_far = 0;
8638 fputs ("\t.ascii\t\"", stream);
8640 for (i = 0; i < len; i++)
8642 int c = p[i];
8644 if (len_so_far >= MAX_ASCII_LEN)
8646 fputs ("\"\n\t.ascii\t\"", stream);
8647 len_so_far = 0;
8650 if (ISPRINT (c))
8652 if (c == '\\' || c == '\"')
8654 putc ('\\', stream);
8655 len_so_far++;
8657 putc (c, stream);
8658 len_so_far++;
8660 else
8662 fprintf (stream, "\\%03o", c);
8663 len_so_far += 4;
8667 fputs ("\"\n", stream);
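/* Illustrative example: for the three input bytes 'a', '"' and '\n' the
   routine above emits

       .ascii "a\"\012"

   -- printable characters pass through (with quotes and backslashes
   escaped), non-printable ones become octal escapes.  */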
8670 /* Compute the register save mask for registers 0 through 12
8671 inclusive. This code is used by arm_compute_save_reg_mask. */
8673 static unsigned long
8674 arm_compute_save_reg0_reg12_mask (void)
8676 unsigned long func_type = arm_current_func_type ();
8677 unsigned long save_reg_mask = 0;
8678 unsigned int reg;
8680 if (IS_INTERRUPT (func_type))
8682 unsigned int max_reg;
8683 /* Interrupt functions must not corrupt any registers,
8684 even call clobbered ones. If this is a leaf function
8685 we can just examine the registers used by the RTL, but
8686 otherwise we have to assume that whatever function is
8687 called might clobber anything, and so we have to save
8688 all the call-clobbered registers as well. */
8689 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8690 /* FIQ handlers have registers r8 - r12 banked, so
8691 we only need to check r0 - r7.  Normal ISRs only
8692 bank r14 and r15, so we must check up to r12.
8693 r13 is the stack pointer, which is always preserved,
8694 so we do not need to consider it here. */
8695 max_reg = 7;
8696 else
8697 max_reg = 12;
8699 for (reg = 0; reg <= max_reg; reg++)
8700 if (regs_ever_live[reg]
8701 || (! current_function_is_leaf && call_used_regs [reg]))
8702 save_reg_mask |= (1 << reg);
8704 /* Also save the pic base register if necessary. */
8705 if (flag_pic
8706 && !TARGET_SINGLE_PIC_BASE
8707 && current_function_uses_pic_offset_table)
8708 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8710 else
8712 /* In the normal case we only need to save those registers
8713 which are call saved and which are used by this function. */
8714 for (reg = 0; reg <= 10; reg++)
8715 if (regs_ever_live[reg] && ! call_used_regs [reg])
8716 save_reg_mask |= (1 << reg);
8718 /* Handle the frame pointer as a special case. */
8719 if (! TARGET_APCS_FRAME
8720 && ! frame_pointer_needed
8721 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8722 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8723 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8725 /* If we aren't loading the PIC register,
8726 don't stack it even though it may be live. */
8727 if (flag_pic
8728 && !TARGET_SINGLE_PIC_BASE
8729 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8730 || current_function_uses_pic_offset_table))
8731 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8734 /* Save registers so the exception handler can modify them. */
8735 if (current_function_calls_eh_return)
8737 unsigned int i;
8739 for (i = 0; ; i++)
8741 reg = EH_RETURN_DATA_REGNO (i);
8742 if (reg == INVALID_REGNUM)
8743 break;
8744 save_reg_mask |= 1 << reg;
8748 return save_reg_mask;
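/* Illustrative example (assumed register usage): for a normal, non-PIC
   function in which only the call-saved registers r4 and r6 are live,
   the loop above produces a mask of (1 << 4) | (1 << 6) == 0x50.  */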
8751 /* Compute a bit mask of which registers need to be
8752 saved on the stack for the current function. */
8754 static unsigned long
8755 arm_compute_save_reg_mask (void)
8757 unsigned int save_reg_mask = 0;
8758 unsigned long func_type = arm_current_func_type ();
8760 if (IS_NAKED (func_type))
8761 /* This should never really happen. */
8762 return 0;
8764 /* If we are creating a stack frame, then we must save the frame pointer,
8765 IP (which will hold the old stack pointer), LR and the PC. */
8766 if (frame_pointer_needed)
8767 save_reg_mask |=
8768 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8769 | (1 << IP_REGNUM)
8770 | (1 << LR_REGNUM)
8771 | (1 << PC_REGNUM);
8773 /* Volatile functions do not return, so there
8774 is no need to save any other registers. */
8775 if (IS_VOLATILE (func_type))
8776 return save_reg_mask;
8778 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8780 /* Decide if we need to save the link register.
8781 Interrupt routines have their own banked link register,
8782 so they never need to save it.
8783 Otherwise if we do not use the link register we do not need to save
8784 it. If we are pushing other registers onto the stack however, we
8785 can save an instruction in the epilogue by pushing the link register
8786 now and then popping it back into the PC. This incurs extra memory
8787 accesses though, so we only do it when optimizing for size, and only
8788 if we know that we will not need a fancy return sequence. */
8789 if (regs_ever_live [LR_REGNUM]
8790 || (save_reg_mask
8791 && optimize_size
8792 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8793 && !current_function_calls_eh_return))
8794 save_reg_mask |= 1 << LR_REGNUM;
8796 if (cfun->machine->lr_save_eliminated)
8797 save_reg_mask &= ~ (1 << LR_REGNUM);
8799 if (TARGET_REALLY_IWMMXT
8800 && ((bit_count (save_reg_mask)
8801 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8803 unsigned int reg;
8805 /* The total number of registers that are going to be pushed
8806 onto the stack is odd. We need to ensure that the stack
8807 is 64-bit aligned before we start to save iWMMXt registers,
8808 and also before we start to create locals. (A local variable
8809 might be a double or long long which we will load/store using
8810 an iWMMXt instruction). Therefore we need to push another
8811 ARM register, so that the stack will be 64-bit aligned. We
8812 try to avoid using the arg registers (r0 - r3) as they might be
8813 used to pass values in a tail call. */
8814 for (reg = 4; reg <= 12; reg++)
8815 if ((save_reg_mask & (1 << reg)) == 0)
8816 break;
8818 if (reg <= 12)
8819 save_reg_mask |= (1 << reg);
8820 else
8822 cfun->machine->sibcall_blocked = 1;
8823 save_reg_mask |= (1 << 3);
8827 return save_reg_mask;
8831 /* Compute a bit mask of which registers need to be
8832 saved on the stack for the current function. */
8833 static unsigned long
8834 thumb_compute_save_reg_mask (void)
8836 unsigned long mask;
8837 unsigned reg;
8839 mask = 0;
8840 for (reg = 0; reg < 12; reg ++)
8841 if (regs_ever_live[reg] && !call_used_regs[reg])
8842 mask |= 1 << reg;
8844 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8845 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8847 if (TARGET_SINGLE_PIC_BASE)
8848 mask &= ~(1 << arm_pic_register);
8850 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8851 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8852 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8854 /* LR will also be pushed if any lo regs are pushed. */
8855 if (mask & 0xff || thumb_force_lr_save ())
8856 mask |= (1 << LR_REGNUM);
8858 /* Make sure we have a low work register if we need one.
8859 We will need one if we are going to push a high register,
8860 but we are not currently intending to push a low register. */
8861 if ((mask & 0xff) == 0
8862 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8864 /* Use thumb_find_work_register to choose which register
8865 we will use. If the register is live then we will
8866 have to push it. Use LAST_LO_REGNUM as our fallback
8867 choice for the register to select. */
8868 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8870 if (! call_used_regs[reg])
8871 mask |= 1 << reg;
8874 return mask;
8878 /* Return the number of bytes required to save VFP registers. */
8879 static int
8880 arm_get_vfp_saved_size (void)
8882 unsigned int regno;
8883 int count;
8884 int saved;
8886 saved = 0;
8887 /* Space for saved VFP registers. */
8888 if (TARGET_HARD_FLOAT && TARGET_VFP)
8890 count = 0;
8891 for (regno = FIRST_VFP_REGNUM;
8892 regno < LAST_VFP_REGNUM;
8893 regno += 2)
8895 if ((!regs_ever_live[regno] || call_used_regs[regno])
8896 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8898 if (count > 0)
8900 /* Workaround ARM10 VFPr1 bug. */
8901 if (count == 2 && !arm_arch6)
8902 count++;
8903 saved += count * 8 + 4;
8905 count = 0;
8907 else
8908 count++;
8910 if (count > 0)
8912 if (count == 2 && !arm_arch6)
8913 count++;
8914 saved += count * 8 + 4;
8917 return saved;
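/* Worked example (illustrative, arm_arch6 assumed so the ARM10 workaround
   does not apply): one contiguous block of a single live register pair plus
   another block of three pairs costs (1 * 8 + 4) + (3 * 8 + 4) = 40 bytes,
   since each block is saved by its own FSTMX and carries its own pad
   word.  */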
8921 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8922 everything bar the final return instruction. */
8923 const char *
8924 output_return_instruction (rtx operand, int really_return, int reverse)
8926 char conditional[10];
8927 char instr[100];
8928 unsigned reg;
8929 unsigned long live_regs_mask;
8930 unsigned long func_type;
8931 arm_stack_offsets *offsets;
8933 func_type = arm_current_func_type ();
8935 if (IS_NAKED (func_type))
8936 return "";
8938 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8940 /* If this function was declared non-returning, and we have
8941 found a tail call, then we have to trust that the called
8942 function won't return. */
8943 if (really_return)
8945 rtx ops[2];
8947 /* Otherwise, trap an attempted return by aborting. */
8948 ops[0] = operand;
8949 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8950 : "abort");
8951 assemble_external_libcall (ops[1]);
8952 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8955 return "";
8958 if (current_function_calls_alloca && !really_return)
8959 abort ();
8961 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8963 return_used_this_function = 1;
8965 live_regs_mask = arm_compute_save_reg_mask ();
8967 if (live_regs_mask)
8969 const char * return_reg;
8971 /* If we do not have any special requirements for function exit
8972 (e.g. interworking, or ISR) then we can load the return address
8973 directly into the PC. Otherwise we must load it into LR. */
8974 if (really_return
8975 && ! TARGET_INTERWORK)
8976 return_reg = reg_names[PC_REGNUM];
8977 else
8978 return_reg = reg_names[LR_REGNUM];
8980 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8982 /* There are three possible reasons for the IP register
8983 being saved. 1) a stack frame was created, in which case
8984 IP contains the old stack pointer, or 2) an ISR routine
8985 corrupted it, or 3) it was saved to align the stack on
8986 iWMMXt. In case 1, restore IP into SP, otherwise just
8987 restore IP. */
8988 if (frame_pointer_needed)
8990 live_regs_mask &= ~ (1 << IP_REGNUM);
8991 live_regs_mask |= (1 << SP_REGNUM);
8993 else
8995 if (! IS_INTERRUPT (func_type)
8996 && ! TARGET_REALLY_IWMMXT)
8997 abort ();
9001 /* On some ARM architectures it is faster to use LDR rather than
9002 LDM to load a single register. On other architectures, the
9003 cost is the same. In 26 bit mode, or for exception handlers,
9004 we have to use LDM to load the PC so that the CPSR is also
9005 restored. */
9006 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9007 if (live_regs_mask == (1U << reg))
9008 break;
9010 if (reg <= LAST_ARM_REGNUM
9011 && (reg != LR_REGNUM
9012 || ! really_return
9013 || ! IS_INTERRUPT (func_type)))
9015 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9016 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9018 else
9020 char *p;
9021 int first = 1;
9023 /* Generate the load multiple instruction to restore the
9024 registers. Note we can get here, even if
9025 frame_pointer_needed is true, but only if sp already
9026 points to the base of the saved core registers. */
9027 if (live_regs_mask & (1 << SP_REGNUM))
9029 unsigned HOST_WIDE_INT stack_adjust;
9031 offsets = arm_get_frame_offsets ();
9032 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9033 if (stack_adjust != 0 && stack_adjust != 4)
9034 abort ();
9036 if (stack_adjust && arm_arch5)
9037 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9038 else
9040 /* If we can't use ldmib (SA110 bug),
9041 then try to pop r3 instead. */
9042 if (stack_adjust)
9043 live_regs_mask |= 1 << 3;
9044 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9047 else
9048 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9050 p = instr + strlen (instr);
9052 for (reg = 0; reg <= SP_REGNUM; reg++)
9053 if (live_regs_mask & (1 << reg))
9055 int l = strlen (reg_names[reg]);
9057 if (first)
9058 first = 0;
9059 else
9061 memcpy (p, ", ", 2);
9062 p += 2;
9065 memcpy (p, "%|", 2);
9066 memcpy (p + 2, reg_names[reg], l);
9067 p += l + 2;
9070 if (live_regs_mask & (1 << LR_REGNUM))
9072 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9073 /* If returning from an interrupt, restore the CPSR. */
9074 if (IS_INTERRUPT (func_type))
9075 strcat (p, "^");
9077 else
9078 strcpy (p, "}");
9081 output_asm_insn (instr, & operand);
9083 /* See if we need to generate an extra instruction to
9084 perform the actual function return. */
9085 if (really_return
9086 && func_type != ARM_FT_INTERWORKED
9087 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9089 /* The return has already been handled
9090 by loading the LR into the PC. */
9091 really_return = 0;
9095 if (really_return)
9097 switch ((int) ARM_FUNC_TYPE (func_type))
9099 case ARM_FT_ISR:
9100 case ARM_FT_FIQ:
9101 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9102 break;
9104 case ARM_FT_INTERWORKED:
9105 sprintf (instr, "bx%s\t%%|lr", conditional);
9106 break;
9108 case ARM_FT_EXCEPTION:
9109 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9110 break;
9112 default:
9113 /* Use bx if it's available. */
9114 if (arm_arch5 || arm_arch4t)
9115 sprintf (instr, "bx%s\t%%|lr", conditional);
9116 else
9117 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9118 break;
9121 output_asm_insn (instr, & operand);
9124 return "";
9127 /* Write the function name into the code section, directly preceding
9128 the function prologue.
9130 Code will be output similar to this:
9131 t0
9132 .ascii "arm_poke_function_name", 0
9133 .align
9134 t1
9135 .word 0xff000000 + (t1 - t0)
9136 arm_poke_function_name
9137 mov ip, sp
9138 stmfd sp!, {fp, ip, lr, pc}
9139 sub fp, ip, #4
9141 When performing a stack backtrace, code can inspect the value
9142 of 'pc' stored at 'fp' + 0. If the trace function then looks
9143 at location pc - 12 and the top 8 bits are set, then we know
9144 that there is a function name embedded immediately preceding this
9145 location, and that its length is given by ((pc[-3]) & ~0xff000000).
9147 We assume that pc is declared as a pointer to an unsigned long.
9149 It is of no benefit to output the function name if we are assembling
9150 a leaf function.  Such functions do not contain a stack
9151 backtrace structure, so it is not possible to determine the
9152 function name. */
9153 void
9154 arm_poke_function_name (FILE *stream, const char *name)
9156 unsigned long alignlength;
9157 unsigned long length;
9158 rtx x;
9160 length = strlen (name) + 1;
9161 alignlength = ROUND_UP_WORD (length);
9163 ASM_OUTPUT_ASCII (stream, name, length);
9164 ASM_OUTPUT_ALIGN (stream, 2);
9165 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9166 assemble_aligned_integer (UNITS_PER_WORD, x);
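/* Worked example (illustrative): for NAME == "fred", length is 5 (including
   the terminating NUL), which rounds up to an alignlength of 8, so the
   marker word emitted above is 0xff000000 + 8 == 0xff000008.  */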
9169 /* Place some comments into the assembler stream
9170 describing the current function. */
9171 static void
9172 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9174 unsigned long func_type;
9176 if (!TARGET_ARM)
9178 thumb_output_function_prologue (f, frame_size);
9179 return;
9182 /* Sanity check. */
9183 if (arm_ccfsm_state || arm_target_insn)
9184 abort ();
9186 func_type = arm_current_func_type ();
9188 switch ((int) ARM_FUNC_TYPE (func_type))
9190 default:
9191 case ARM_FT_NORMAL:
9192 break;
9193 case ARM_FT_INTERWORKED:
9194 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9195 break;
9196 case ARM_FT_ISR:
9197 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9198 break;
9199 case ARM_FT_FIQ:
9200 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9201 break;
9202 case ARM_FT_EXCEPTION:
9203 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9204 break;
9207 if (IS_NAKED (func_type))
9208 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9210 if (IS_VOLATILE (func_type))
9211 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9213 if (IS_NESTED (func_type))
9214 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9216 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9217 current_function_args_size,
9218 current_function_pretend_args_size, frame_size);
9220 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9221 frame_pointer_needed,
9222 cfun->machine->uses_anonymous_args);
9224 if (cfun->machine->lr_save_eliminated)
9225 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9227 if (current_function_calls_eh_return)
9228 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9230 #ifdef AOF_ASSEMBLER
9231 if (flag_pic)
9232 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9233 #endif
9235 return_used_this_function = 0;
9238 const char *
9239 arm_output_epilogue (rtx sibling)
9241 int reg;
9242 unsigned long saved_regs_mask;
9243 unsigned long func_type;
9244 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9245 frame that is $fp + 4 for a non-variadic function. */
9246 int floats_offset = 0;
9247 rtx operands[3];
9248 FILE * f = asm_out_file;
9249 unsigned int lrm_count = 0;
9250 int really_return = (sibling == NULL);
9251 int start_reg;
9252 arm_stack_offsets *offsets;
9254 /* If we have already generated the return instruction
9255 then it is futile to generate anything else. */
9256 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9257 return "";
9259 func_type = arm_current_func_type ();
9261 if (IS_NAKED (func_type))
9262 /* Naked functions don't have epilogues. */
9263 return "";
9265 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9267 rtx op;
9269 /* A volatile function should never return. Call abort. */
9270 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9271 assemble_external_libcall (op);
9272 output_asm_insn ("bl\t%a0", &op);
9274 return "";
9277 if (current_function_calls_eh_return
9278 && ! really_return)
9279 /* If we are throwing an exception, then we really must
9280 be doing a return, so we can't tail-call. */
9281 abort ();
9283 offsets = arm_get_frame_offsets ();
9284 saved_regs_mask = arm_compute_save_reg_mask ();
9286 if (TARGET_IWMMXT)
9287 lrm_count = bit_count (saved_regs_mask);
9289 floats_offset = offsets->saved_args;
9290 /* Compute how far away the floats will be. */
9291 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9292 if (saved_regs_mask & (1 << reg))
9293 floats_offset += 4;
9295 if (frame_pointer_needed)
9297 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9298 int vfp_offset = offsets->frame;
9300 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9302 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9303 if (regs_ever_live[reg] && !call_used_regs[reg])
9305 floats_offset += 12;
9306 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9307 reg, FP_REGNUM, floats_offset - vfp_offset);
9310 else
9312 start_reg = LAST_FPA_REGNUM;
9314 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9316 if (regs_ever_live[reg] && !call_used_regs[reg])
9318 floats_offset += 12;
9320 /* We can't unstack more than four registers at once. */
9321 if (start_reg - reg == 3)
9323 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9324 reg, FP_REGNUM, floats_offset - vfp_offset);
9325 start_reg = reg - 1;
9328 else
9330 if (reg != start_reg)
9331 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9332 reg + 1, start_reg - reg,
9333 FP_REGNUM, floats_offset - vfp_offset);
9334 start_reg = reg - 1;
9338 /* Just in case the last register checked also needs unstacking. */
9339 if (reg != start_reg)
9340 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9341 reg + 1, start_reg - reg,
9342 FP_REGNUM, floats_offset - vfp_offset);
9345 if (TARGET_HARD_FLOAT && TARGET_VFP)
9347 int saved_size;
9349 /* The fldmx insn does not have base+offset addressing modes,
9350 so we use IP to hold the address. */
9351 saved_size = arm_get_vfp_saved_size ();
9353 if (saved_size > 0)
9355 floats_offset += saved_size;
9356 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9357 FP_REGNUM, floats_offset - vfp_offset);
9359 start_reg = FIRST_VFP_REGNUM;
9360 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9362 if ((!regs_ever_live[reg] || call_used_regs[reg])
9363 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9365 if (start_reg != reg)
9366 arm_output_fldmx (f, IP_REGNUM,
9367 (start_reg - FIRST_VFP_REGNUM) / 2,
9368 (reg - start_reg) / 2);
9369 start_reg = reg + 2;
9372 if (start_reg != reg)
9373 arm_output_fldmx (f, IP_REGNUM,
9374 (start_reg - FIRST_VFP_REGNUM) / 2,
9375 (reg - start_reg) / 2);
9378 if (TARGET_IWMMXT)
9380 /* The frame pointer is guaranteed to be non-double-word aligned.
9381 This is because it is set to (old_stack_pointer - 4) and the
9382 old_stack_pointer was double word aligned. Thus the offset to
9383 the iWMMXt registers to be loaded must also be non-double-word
9384 sized, so that the resultant address *is* double-word aligned.
9385 We can ignore floats_offset since that was already included in
9386 the live_regs_mask. */
9387 lrm_count += (lrm_count % 2 ? 2 : 1);
9389 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9390 if (regs_ever_live[reg] && !call_used_regs[reg])
9392 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9393 reg, FP_REGNUM, lrm_count * 4);
9394 lrm_count += 2;
9398 /* saved_regs_mask should contain the IP, which at the time of stack
9399 frame generation actually contains the old stack pointer. So a
9400 quick way to unwind the stack is just pop the IP register directly
9401 into the stack pointer. */
9402 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9403 abort ();
9404 saved_regs_mask &= ~ (1 << IP_REGNUM);
9405 saved_regs_mask |= (1 << SP_REGNUM);
9407 /* There are two registers left in saved_regs_mask - LR and PC. We
9408 only need to restore the LR register (the return address), but to
9409 save time we can load it directly into the PC, unless we need a
9410 special function exit sequence, or we are not really returning. */
9411 if (really_return
9412 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9413 && !current_function_calls_eh_return)
9414 /* Delete the LR from the register mask, so that the LR on
9415 the stack is loaded into the PC in the register mask. */
9416 saved_regs_mask &= ~ (1 << LR_REGNUM);
9417 else
9418 saved_regs_mask &= ~ (1 << PC_REGNUM);
9420 /* We must use SP as the base register, because SP is one of the
9421 registers being restored. If an interrupt or page fault
9422 happens in the ldm instruction, the SP might or might not
9423 have been restored. That would be bad, as then SP will no
9424 longer indicate the safe area of stack, and we can get stack
9425 corruption. Using SP as the base register means that it will
9426 be reset correctly to the original value, should an interrupt
9427 occur. If the stack pointer already points at the right
9428 place, then omit the subtraction. */
9429 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9430 || current_function_calls_alloca)
9431 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9432 4 * bit_count (saved_regs_mask));
9433 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9435 if (IS_INTERRUPT (func_type))
9436 /* Interrupt handlers will have pushed the
9437 IP onto the stack, so restore it now. */
9438 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9440 else
9442 /* Restore stack pointer if necessary. */
9443 if (offsets->outgoing_args != offsets->saved_regs)
9445 operands[0] = operands[1] = stack_pointer_rtx;
9446 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9447 output_add_immediate (operands);
9450 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9452 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9453 if (regs_ever_live[reg] && !call_used_regs[reg])
9454 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9455 reg, SP_REGNUM);
9457 else
9459 start_reg = FIRST_FPA_REGNUM;
9461 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9463 if (regs_ever_live[reg] && !call_used_regs[reg])
9465 if (reg - start_reg == 3)
9467 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9468 start_reg, SP_REGNUM);
9469 start_reg = reg + 1;
9472 else
9474 if (reg != start_reg)
9475 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9476 start_reg, reg - start_reg,
9477 SP_REGNUM);
9479 start_reg = reg + 1;
9483 /* Just in case the last register checked also needs unstacking. */
9484 if (reg != start_reg)
9485 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9486 start_reg, reg - start_reg, SP_REGNUM);
9489 if (TARGET_HARD_FLOAT && TARGET_VFP)
9491 start_reg = FIRST_VFP_REGNUM;
9492 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9494 if ((!regs_ever_live[reg] || call_used_regs[reg])
9495 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9497 if (start_reg != reg)
9498 arm_output_fldmx (f, SP_REGNUM,
9499 (start_reg - FIRST_VFP_REGNUM) / 2,
9500 (reg - start_reg) / 2);
9501 start_reg = reg + 2;
9504 if (start_reg != reg)
9505 arm_output_fldmx (f, SP_REGNUM,
9506 (start_reg - FIRST_VFP_REGNUM) / 2,
9507 (reg - start_reg) / 2);
9509 if (TARGET_IWMMXT)
9510 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9511 if (regs_ever_live[reg] && !call_used_regs[reg])
9512 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9514 /* If we can, restore the LR into the PC. */
9515 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9516 && really_return
9517 && current_function_pretend_args_size == 0
9518 && saved_regs_mask & (1 << LR_REGNUM)
9519 && !current_function_calls_eh_return)
9521 saved_regs_mask &= ~ (1 << LR_REGNUM);
9522 saved_regs_mask |= (1 << PC_REGNUM);
9525 /* Load the registers off the stack. If we only have one register
9526 to load use the LDR instruction - it is faster. */
9527 if (saved_regs_mask == (1 << LR_REGNUM))
9529 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9531 else if (saved_regs_mask)
9533 if (saved_regs_mask & (1 << SP_REGNUM))
9534 /* Note - write back to the stack register is not enabled
9535 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9536 in the list of registers and if we add writeback the
9537 instruction becomes UNPREDICTABLE. */
9538 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9539 else
9540 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9543 if (current_function_pretend_args_size)
9545 /* Unwind the pre-pushed regs. */
9546 operands[0] = operands[1] = stack_pointer_rtx;
9547 operands[2] = GEN_INT (current_function_pretend_args_size);
9548 output_add_immediate (operands);
9552 /* We may have already restored PC directly from the stack. */
9553 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9554 return "";
9556 /* Stack adjustment for exception handler. */
9557 if (current_function_calls_eh_return)
9558 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9559 ARM_EH_STACKADJ_REGNUM);
9561 /* Generate the return instruction. */
9562 switch ((int) ARM_FUNC_TYPE (func_type))
9564 case ARM_FT_ISR:
9565 case ARM_FT_FIQ:
9566 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9567 break;
9569 case ARM_FT_EXCEPTION:
9570 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9571 break;
9573 case ARM_FT_INTERWORKED:
9574 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9575 break;
9577 default:
9578 if (arm_arch5 || arm_arch4t)
9579 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9580 else
9581 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9582 break;
9585 return "";
9588 static void
9589 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9590 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9592 arm_stack_offsets *offsets;
9594 if (TARGET_THUMB)
9596 int regno;
9598 /* Emit any call-via-reg trampolines that are needed for v4t support
9599 of call_reg and call_value_reg type insns. */
9600 for (regno = 0; regno < LR_REGNUM; regno++)
9602 rtx label = cfun->machine->call_via[regno];
9604 if (label != NULL)
9606 function_section (current_function_decl);
9607 targetm.asm_out.internal_label (asm_out_file, "L",
9608 CODE_LABEL_NUMBER (label));
9609 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9613 /* ??? Probably not safe to set this here, since it assumes that a
9614 function will be emitted as assembly immediately after we generate
9615 RTL for it. This does not happen for inline functions. */
9616 return_used_this_function = 0;
9618 else
9620 /* We need to take into account any stack-frame rounding. */
9621 offsets = arm_get_frame_offsets ();
9623 if (use_return_insn (FALSE, NULL)
9624 && return_used_this_function
9625 && offsets->saved_regs != offsets->outgoing_args
9626 && !frame_pointer_needed)
9627 abort ();
9629 /* Reset the ARM-specific per-function variables. */
9630 after_arm_reorg = 0;
9634 /* Generate and emit an insn that we will recognize as a push_multi.
9635 Unfortunately, since this insn does not reflect very well the actual
9636 semantics of the operation, we need to annotate the insn for the benefit
9637 of DWARF2 frame unwind information. */
9638 static rtx
9639 emit_multi_reg_push (unsigned long mask)
9641 int num_regs = 0;
9642 int num_dwarf_regs;
9643 int i, j;
9644 rtx par;
9645 rtx dwarf;
9646 int dwarf_par_index;
9647 rtx tmp, reg;
9649 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9650 if (mask & (1 << i))
9651 num_regs++;
9653 if (num_regs == 0 || num_regs > 16)
9654 abort ();
9656 /* We don't record the PC in the dwarf frame information. */
9657 num_dwarf_regs = num_regs;
9658 if (mask & (1 << PC_REGNUM))
9659 num_dwarf_regs--;
9661 /* For the body of the insn we are going to generate an UNSPEC in
9662 parallel with several USEs. This allows the insn to be recognized
9663 by the push_multi pattern in the arm.md file. The insn looks
9664 something like this:
9666 (parallel [
9667 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9668 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9669 (use (reg:SI 11 fp))
9670 (use (reg:SI 12 ip))
9671 (use (reg:SI 14 lr))
9672 (use (reg:SI 15 pc))
9675 For the frame note however, we try to be more explicit and actually
9676 show each register being stored into the stack frame, plus a (single)
9677 decrement of the stack pointer. We do it this way in order to be
9678 friendly to the stack unwinding code, which only wants to see a single
9679 stack decrement per instruction. The RTL we generate for the note looks
9680 something like this:
9682 (sequence [
9683 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9684 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9685 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9686 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9687 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9690 This sequence is used both by the code to support stack unwinding for
9691 exceptions handlers and the code to generate dwarf2 frame debugging. */
9693 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9694 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9695 dwarf_par_index = 1;
9697 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9699 if (mask & (1 << i))
9701 reg = gen_rtx_REG (SImode, i);
9703 XVECEXP (par, 0, 0)
9704 = gen_rtx_SET (VOIDmode,
9705 gen_rtx_MEM (BLKmode,
9706 gen_rtx_PRE_DEC (BLKmode,
9707 stack_pointer_rtx)),
9708 gen_rtx_UNSPEC (BLKmode,
9709 gen_rtvec (1, reg),
9710 UNSPEC_PUSH_MULT));
9712 if (i != PC_REGNUM)
9714 tmp = gen_rtx_SET (VOIDmode,
9715 gen_rtx_MEM (SImode, stack_pointer_rtx),
9716 reg);
9717 RTX_FRAME_RELATED_P (tmp) = 1;
9718 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9719 dwarf_par_index++;
9722 break;
9726 for (j = 1, i++; j < num_regs; i++)
9728 if (mask & (1 << i))
9730 reg = gen_rtx_REG (SImode, i);
9732 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9734 if (i != PC_REGNUM)
9736 tmp = gen_rtx_SET (VOIDmode,
9737 gen_rtx_MEM (SImode,
9738 plus_constant (stack_pointer_rtx,
9739 4 * j)),
9740 reg);
9741 RTX_FRAME_RELATED_P (tmp) = 1;
9742 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9745 j++;
9749 par = emit_insn (par);
9751 tmp = gen_rtx_SET (SImode,
9752 stack_pointer_rtx,
9753 gen_rtx_PLUS (SImode,
9754 stack_pointer_rtx,
9755 GEN_INT (-4 * num_regs)));
9756 RTX_FRAME_RELATED_P (tmp) = 1;
9757 XVECEXP (dwarf, 0, 0) = tmp;
9759 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9760 REG_NOTES (par));
9761 return par;
9764 static rtx
9765 emit_sfm (int base_reg, int count)
9767 rtx par;
9768 rtx dwarf;
9769 rtx tmp, reg;
9770 int i;
9772 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9773 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9775 reg = gen_rtx_REG (XFmode, base_reg++);
9777 XVECEXP (par, 0, 0)
9778 = gen_rtx_SET (VOIDmode,
9779 gen_rtx_MEM (BLKmode,
9780 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9781 gen_rtx_UNSPEC (BLKmode,
9782 gen_rtvec (1, reg),
9783 UNSPEC_PUSH_MULT));
9784 tmp = gen_rtx_SET (VOIDmode,
9785 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9786 RTX_FRAME_RELATED_P (tmp) = 1;
9787 XVECEXP (dwarf, 0, 1) = tmp;
9789 for (i = 1; i < count; i++)
9791 reg = gen_rtx_REG (XFmode, base_reg++);
9792 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9794 tmp = gen_rtx_SET (VOIDmode,
9795 gen_rtx_MEM (XFmode,
9796 plus_constant (stack_pointer_rtx,
9797 i * 12)),
9798 reg);
9799 RTX_FRAME_RELATED_P (tmp) = 1;
9800 XVECEXP (dwarf, 0, i + 1) = tmp;
9803 tmp = gen_rtx_SET (VOIDmode,
9804 stack_pointer_rtx,
9805 gen_rtx_PLUS (SImode,
9806 stack_pointer_rtx,
9807 GEN_INT (-12 * count)));
9808 RTX_FRAME_RELATED_P (tmp) = 1;
9809 XVECEXP (dwarf, 0, 0) = tmp;
9811 par = emit_insn (par);
9812 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9813 REG_NOTES (par));
9814 return par;
9818 /* Return true if the current function needs to save/restore LR. */
9820 static bool
9821 thumb_force_lr_save (void)
9823 return !cfun->machine->lr_save_eliminated
9824 && (!leaf_function_p ()
9825 || thumb_far_jump_used_p ()
9826 || regs_ever_live [LR_REGNUM]);
9830 /* Compute the distance from register FROM to register TO.
9831 These can be the arg pointer (26), the soft frame pointer (25),
9832 the stack pointer (13) or the hard frame pointer (11).
9833 In thumb mode r7 is used as the soft frame pointer, if needed.
9834 Typical stack layout looks like this:
9836 old stack pointer -> | |
9837 ----
9838 | | \
9839 | | saved arguments for
9840 | | vararg functions
9841 | | /
9843 hard FP & arg pointer -> | | \
9844 | | stack
9845 | | frame
9846 | | /
9848 | | \
9849 | | call saved
9850 | | registers
9851 soft frame pointer -> | | /
9853 | | \
9854 | | local
9855 | | variables
9856 | | /
9858 | | \
9859 | | outgoing
9860 | | arguments
9861 current stack pointer -> | | /
9864 For a given function some or all of these stack components
9865 may not be needed, giving rise to the possibility of
9866 eliminating some of the registers.
9868 The values returned by this function must reflect the behavior
9869 of arm_expand_prologue() and arm_compute_save_reg_mask().
9871 The sign of the number returned reflects the direction of stack
9872 growth, so the values are positive for all eliminations except
9873 from the soft frame pointer to the hard frame pointer.
9875 SFP may point just inside the local variables block to ensure correct
9876 alignment. */
9879 /* Calculate stack offsets. These are used to calculate register elimination
9880 offsets and in prologue/epilogue code. */
9882 static arm_stack_offsets *
9883 arm_get_frame_offsets (void)
9885 struct arm_stack_offsets *offsets;
9886 unsigned long func_type;
9887 int leaf;
9888 int saved;
9889 HOST_WIDE_INT frame_size;
9891 offsets = &cfun->machine->stack_offsets;
9893 /* We need to know if we are a leaf function. Unfortunately, it
9894 is possible to be called after start_sequence has been called,
9895 which causes get_insns to return the insns for the sequence,
9896 not the function, which will cause leaf_function_p to return
9897 the incorrect result.  Fortunately, we do not need
9899 to know about leaf functions once reload has completed, and the
9900 frame size cannot be changed after that time, so we can safely
9901 use the cached value. */
9903 if (reload_completed)
9904 return offsets;
9906 /* Initially this is the size of the local variables.  It will be translated
9907 into an offset once we have determined the size of preceding data. */
9908 frame_size = ROUND_UP_WORD (get_frame_size ());
9910 leaf = leaf_function_p ();
9912 /* Space for variadic functions. */
9913 offsets->saved_args = current_function_pretend_args_size;
9915 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9917 if (TARGET_ARM)
9919 unsigned int regno;
9921 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9923 /* We know that SP will be doubleword aligned on entry, and we must
9924 preserve that condition at any subroutine call. We also require the
9925 soft frame pointer to be doubleword aligned. */
9927 if (TARGET_REALLY_IWMMXT)
9929 /* Check for the call-saved iWMMXt registers. */
9930 for (regno = FIRST_IWMMXT_REGNUM;
9931 regno <= LAST_IWMMXT_REGNUM;
9932 regno++)
9933 if (regs_ever_live [regno] && ! call_used_regs [regno])
9934 saved += 8;
9937 func_type = arm_current_func_type ();
9938 if (! IS_VOLATILE (func_type))
9940 /* Space for saved FPA registers. */
9941 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9942 if (regs_ever_live[regno] && ! call_used_regs[regno])
9943 saved += 12;
9945 /* Space for saved VFP registers. */
9946 if (TARGET_HARD_FLOAT && TARGET_VFP)
9947 saved += arm_get_vfp_saved_size ();
9950 else /* TARGET_THUMB */
9952 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
9953 if (TARGET_BACKTRACE)
9954 saved += 16;
9957 /* Saved registers include the stack frame. */
9958 offsets->saved_regs = offsets->saved_args + saved;
9959 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
9960 /* A leaf function does not need any stack alignment if it has nothing
9961 on the stack. */
9962 if (leaf && frame_size == 0)
9964 offsets->outgoing_args = offsets->soft_frame;
9965 return offsets;
9968 /* Ensure SFP has the correct alignment. */
9969 if (ARM_DOUBLEWORD_ALIGN
9970 && (offsets->soft_frame & 7))
9971 offsets->soft_frame += 4;
9973 offsets->outgoing_args = offsets->soft_frame + frame_size
9974 + current_function_outgoing_args_size;
9976 if (ARM_DOUBLEWORD_ALIGN)
9978 /* Ensure SP remains doubleword aligned. */
9979 if (offsets->outgoing_args & 7)
9980 offsets->outgoing_args += 4;
9981 if (offsets->outgoing_args & 7)
9982 abort ();
9985 return offsets;
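/* Worked example (illustrative; assumes CALLER_INTERWORKING_SLOT_SIZE == 0
   and no iWMMXt, FPA or VFP saves): a non-leaf ARM function with no pretend
   args, four core registers to save, 8 bytes of locals and no outgoing
   arguments gets

       saved_args    = 0
       saved_regs    = 0 + 4 * 4  = 16
       soft_frame    = 16              (already doubleword aligned)
       outgoing_args = 16 + 8 + 0 = 24

   and 24 is already a multiple of 8, so no further padding is needed.  */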
9989 /* Calculate the relative offsets for the different stack pointers. Positive
9990 offsets are in the direction of stack growth. */
9992 HOST_WIDE_INT
9993 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9995 arm_stack_offsets *offsets;
9997 offsets = arm_get_frame_offsets ();
9999 /* OK, now we have enough information to compute the distances.
10000 There must be an entry in these switch tables for each pair
10001 of registers in ELIMINABLE_REGS, even if some of the entries
10002 seem to be redundant or useless. */
10003 switch (from)
10005 case ARG_POINTER_REGNUM:
10006 switch (to)
10008 case THUMB_HARD_FRAME_POINTER_REGNUM:
10009 return 0;
10011 case FRAME_POINTER_REGNUM:
10012 /* This is the reverse of the soft frame pointer
10013 to hard frame pointer elimination below. */
10014 return offsets->soft_frame - offsets->saved_args;
10016 case ARM_HARD_FRAME_POINTER_REGNUM:
10017 /* If there is no stack frame then the hard
10018 frame pointer and the arg pointer coincide. */
10019 if (offsets->frame == offsets->saved_regs)
10020 return 0;
10021 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10022 return (frame_pointer_needed
10023 && cfun->static_chain_decl != NULL
10024 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10026 case STACK_POINTER_REGNUM:
10027 /* If nothing has been pushed on the stack at all
10028 then this will return -4. This *is* correct! */
10029 return offsets->outgoing_args - (offsets->saved_args + 4);
10031 default:
10032 abort ();
10034 break;
10036 case FRAME_POINTER_REGNUM:
10037 switch (to)
10039 case THUMB_HARD_FRAME_POINTER_REGNUM:
10040 return 0;
10042 case ARM_HARD_FRAME_POINTER_REGNUM:
10043 /* The hard frame pointer points to the top entry in the
10044 stack frame. The soft frame pointer to the bottom entry
10045 in the stack frame. If there is no stack frame at all,
10046 then they are identical. */
10048 return offsets->frame - offsets->soft_frame;
10050 case STACK_POINTER_REGNUM:
10051 return offsets->outgoing_args - offsets->soft_frame;
10053 default:
10054 abort ();
10056 break;
10058 default:
10059 /* You cannot eliminate from the stack pointer.
10060 In theory you could eliminate from the hard frame
10061 pointer to the stack pointer, but this will never
10062 happen, since if a stack frame is not needed the
10063 hard frame pointer will never be used. */
10064 abort ();
10069 /* Generate the prologue instructions for entry into an ARM function. */
10070 void
10071 arm_expand_prologue (void)
10073 int reg;
10074 rtx amount;
10075 rtx insn;
10076 rtx ip_rtx;
10077 unsigned long live_regs_mask;
10078 unsigned long func_type;
10079 int fp_offset = 0;
10080 int saved_pretend_args = 0;
10081 int saved_regs = 0;
10082 unsigned HOST_WIDE_INT args_to_push;
10083 arm_stack_offsets *offsets;
10085 func_type = arm_current_func_type ();
10087 /* Naked functions don't have prologues. */
10088 if (IS_NAKED (func_type))
10089 return;
10091 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10092 args_to_push = current_function_pretend_args_size;
10094 /* Compute which registers we will have to save onto the stack. */
10095 live_regs_mask = arm_compute_save_reg_mask ();
10097 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10099 if (frame_pointer_needed)
10101 if (IS_INTERRUPT (func_type))
10103 /* Interrupt functions must not corrupt any registers.
10104 Creating a frame pointer, however, corrupts the IP
10105 register, so we must push it first. */
10106 insn = emit_multi_reg_push (1 << IP_REGNUM);
10108 /* Do not set RTX_FRAME_RELATED_P on this insn.
10109 The dwarf stack unwinding code only wants to see one
10110 stack decrement per function, and this is not it. If
10111 this instruction is labeled as being part of the frame
10112 creation sequence then dwarf2out_frame_debug_expr will
10113 abort when it encounters the assignment of IP to FP
10114 later on, since the use of SP here establishes SP as
10115 the CFA register and not IP.
10117 Anyway this instruction is not really part of the stack
10118 frame creation although it is part of the prologue. */
10120 else if (IS_NESTED (func_type))
10122 /* The static chain register is the same as the IP register, which is
10123 used as a scratch register during stack frame creation.
10124 To get around this we need to find somewhere to store IP
10125 whilst the frame is being created. We try the following
10126 places in order:
10128 1. The last argument register.
10129 2. A slot on the stack above the frame. (This only
10130 works if the function is not a varargs function).
10131 3. Register r3, after pushing the argument registers
10132 onto the stack.
10134 Note - we only need to tell the dwarf2 backend about the SP
10135 adjustment in the second variant; the static chain register
10136 doesn't need to be unwound, as it doesn't contain a value
10137 inherited from the caller. */
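 /* Illustrative sketch (not from the original source): for variant 1 the
    generated code is roughly "mov r3, ip" here and "mov ip, r3" once the
    frame has been created; for variant 2 it is roughly "str ip, [sp, #-4]!"
    here and "ldr ip, [fp, #4]" afterwards.  The exact offsets depend on the
    frame layout computed below.  */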
10139 if (regs_ever_live[3] == 0)
10141 insn = gen_rtx_REG (SImode, 3);
10142 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10143 insn = emit_insn (insn);
10145 else if (args_to_push == 0)
10147 rtx dwarf;
10148 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10149 insn = gen_rtx_MEM (SImode, insn);
10150 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10151 insn = emit_insn (insn);
10153 fp_offset = 4;
10155 /* Just tell the dwarf backend that we adjusted SP. */
10156 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10157 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10158 GEN_INT (-fp_offset)));
10159 RTX_FRAME_RELATED_P (insn) = 1;
10160 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10161 dwarf, REG_NOTES (insn));
10163 else
10165 /* Store the args on the stack. */
10166 if (cfun->machine->uses_anonymous_args)
10167 insn = emit_multi_reg_push
10168 ((0xf0 >> (args_to_push / 4)) & 0xf);
10169 else
10170 insn = emit_insn
10171 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10172 GEN_INT (- args_to_push)));
10174 RTX_FRAME_RELATED_P (insn) = 1;
10176 saved_pretend_args = 1;
10177 fp_offset = args_to_push;
10178 args_to_push = 0;
10180 /* Now reuse r3 to preserve IP. */
10181 insn = gen_rtx_REG (SImode, 3);
10182 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10183 (void) emit_insn (insn);
10187 if (fp_offset)
10189 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10190 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10192 else
10193 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10195 insn = emit_insn (insn);
10196 RTX_FRAME_RELATED_P (insn) = 1;
10199 if (args_to_push)
10201 /* Push the argument registers, or reserve space for them. */
10202 if (cfun->machine->uses_anonymous_args)
10203 insn = emit_multi_reg_push
10204 ((0xf0 >> (args_to_push / 4)) & 0xf);
10205 else
10206 insn = emit_insn
10207 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10208 GEN_INT (- args_to_push)));
10209 RTX_FRAME_RELATED_P (insn) = 1;
10212 /* If this is an interrupt service routine, and the link register
10213 is going to be pushed, and we are not creating a stack frame
10214 (which would involve an extra push of IP and a pop in the epilogue)
10215 subtracting four from LR now will mean that the function return
10216 can be done with a single instruction. */
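 /* For example (illustrative): with LR already biased by -4 here, a simple
    ISR epilogue can return with a single "ldmfd sp!, {..., pc}^" instead of
    first re-adjusting the saved return address.  */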
10217 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10218 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10219 && ! frame_pointer_needed)
10220 emit_insn (gen_rtx_SET (SImode,
10221 gen_rtx_REG (SImode, LR_REGNUM),
10222 gen_rtx_PLUS (SImode,
10223 gen_rtx_REG (SImode, LR_REGNUM),
10224 GEN_INT (-4))));
10226 if (live_regs_mask)
10228 insn = emit_multi_reg_push (live_regs_mask);
10229 saved_regs += bit_count (live_regs_mask) * 4;
10230 RTX_FRAME_RELATED_P (insn) = 1;
10233 if (TARGET_IWMMXT)
10234 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10235 if (regs_ever_live[reg] && ! call_used_regs [reg])
10237 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10238 insn = gen_rtx_MEM (V2SImode, insn);
10239 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10240 gen_rtx_REG (V2SImode, reg)));
10241 RTX_FRAME_RELATED_P (insn) = 1;
10242 saved_regs += 8;
10245 if (! IS_VOLATILE (func_type))
10247 int start_reg;
10249 /* Save any floating point call-saved registers used by this
10250 function. */
10251 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10253 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10254 if (regs_ever_live[reg] && !call_used_regs[reg])
10256 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10257 insn = gen_rtx_MEM (XFmode, insn);
10258 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10259 gen_rtx_REG (XFmode, reg)));
10260 RTX_FRAME_RELATED_P (insn) = 1;
10261 saved_regs += 12;
10264 else
10266 start_reg = LAST_FPA_REGNUM;
10268 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10270 if (regs_ever_live[reg] && !call_used_regs[reg])
10272 if (start_reg - reg == 3)
10274 insn = emit_sfm (reg, 4);
10275 RTX_FRAME_RELATED_P (insn) = 1;
10276 saved_regs += 48;
10277 start_reg = reg - 1;
10280 else
10282 if (start_reg != reg)
10284 insn = emit_sfm (reg + 1, start_reg - reg);
10285 RTX_FRAME_RELATED_P (insn) = 1;
10286 saved_regs += (start_reg - reg) * 12;
10288 start_reg = reg - 1;
10292 if (start_reg != reg)
10294 insn = emit_sfm (reg + 1, start_reg - reg);
10295 saved_regs += (start_reg - reg) * 12;
10296 RTX_FRAME_RELATED_P (insn) = 1;
10299 if (TARGET_HARD_FLOAT && TARGET_VFP)
10301 start_reg = FIRST_VFP_REGNUM;
10303 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10305 if ((!regs_ever_live[reg] || call_used_regs[reg])
10306 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10308 if (start_reg != reg)
10309 saved_regs += vfp_emit_fstmx (start_reg,
10310 (reg - start_reg) / 2);
10311 start_reg = reg + 2;
10314 if (start_reg != reg)
10315 saved_regs += vfp_emit_fstmx (start_reg,
10316 (reg - start_reg) / 2);
10320 if (frame_pointer_needed)
10322 /* Create the new frame pointer. */
10323 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10324 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10325 RTX_FRAME_RELATED_P (insn) = 1;
10327 if (IS_NESTED (func_type))
10329 /* Recover the static chain register. */
10330 if (regs_ever_live [3] == 0
10331 || saved_pretend_args)
10332 insn = gen_rtx_REG (SImode, 3);
10333 else /* if (current_function_pretend_args_size == 0) */
10335 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10336 GEN_INT (4));
10337 insn = gen_rtx_MEM (SImode, insn);
10340 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10341 /* Add a USE to stop propagate_one_insn() from barfing. */
10342 emit_insn (gen_prologue_use (ip_rtx));
10346 offsets = arm_get_frame_offsets ();
10347 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10349 /* This add can produce multiple insns for a large constant, so we
10350 need to get tricky. */
10351 rtx last = get_last_insn ();
10353 amount = GEN_INT (offsets->saved_args + saved_regs
10354 - offsets->outgoing_args);
10356 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10357 amount));
10360 last = last ? NEXT_INSN (last) : get_insns ();
10361 RTX_FRAME_RELATED_P (last) = 1;
10363 while (last != insn);
10365 /* If the frame pointer is needed, emit a special barrier that
10366 will prevent the scheduler from moving stores to the frame
10367 before the stack adjustment. */
10368 if (frame_pointer_needed)
10369 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10370 hard_frame_pointer_rtx));
10374 if (flag_pic)
10375 arm_load_pic_register (INVALID_REGNUM);
10377 /* If we are profiling, make sure no instructions are scheduled before
10378 the call to mcount. Similarly if the user has requested no
10379 scheduling in the prolog. */
10380 if (current_function_profile || TARGET_NO_SCHED_PRO)
10381 emit_insn (gen_blockage ());
10383 /* If the link register is being kept alive, with the return address in it,
10384 then make sure that it does not get reused by the ce2 pass. */
10385 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10387 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10388 cfun->machine->lr_save_eliminated = 1;
10392 /* If CODE is 'd', then X is a condition operand and the instruction
10393 should only be executed if the condition is true.
10394 If CODE is 'D', then X is a condition operand and the instruction
10395 should only be executed if the condition is false: however, if the mode
10396 of the comparison is CCFPEmode, then always execute the instruction -- we
10397 do this because in these circumstances !GE does not necessarily imply LT;
10398 in these cases the instruction pattern will take care to make sure that
10399 an instruction containing %d will follow, thereby undoing the effects of
10400 doing this instruction unconditionally.
10401 If CODE is 'N' then X is a floating point operand that must be negated
10402 before output.
10403 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10404 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
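/* A couple of illustrative (hypothetical) uses: with X = (const_int 5),
   code 'B' prints -6 (the sign-extended bitwise inverse); with X a DImode
   value held in r4, code 'M' prints "{r4-r5}".  */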
10405 void
10406 arm_print_operand (FILE *stream, rtx x, int code)
10408 switch (code)
10410 case '@':
10411 fputs (ASM_COMMENT_START, stream);
10412 return;
10414 case '_':
10415 fputs (user_label_prefix, stream);
10416 return;
10418 case '|':
10419 fputs (REGISTER_PREFIX, stream);
10420 return;
10422 case '?':
10423 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10425 if (TARGET_THUMB)
10427 output_operand_lossage ("predicated Thumb instruction");
10428 break;
10430 if (current_insn_predicate != NULL)
10432 output_operand_lossage
10433 ("predicated instruction in conditional sequence");
10434 break;
10437 fputs (arm_condition_codes[arm_current_cc], stream);
10439 else if (current_insn_predicate)
10441 enum arm_cond_code code;
10443 if (TARGET_THUMB)
10445 output_operand_lossage ("predicated Thumb instruction");
10446 break;
10449 code = get_arm_condition_code (current_insn_predicate);
10450 fputs (arm_condition_codes[code], stream);
10452 return;
10454 case 'N':
10456 REAL_VALUE_TYPE r;
10457 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10458 r = REAL_VALUE_NEGATE (r);
10459 fprintf (stream, "%s", fp_const_from_val (&r));
10461 return;
10463 case 'B':
10464 if (GET_CODE (x) == CONST_INT)
10466 HOST_WIDE_INT val;
10467 val = ARM_SIGN_EXTEND (~INTVAL (x));
10468 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10470 else
10472 putc ('~', stream);
10473 output_addr_const (stream, x);
10475 return;
10477 case 'i':
10478 fprintf (stream, "%s", arithmetic_instr (x, 1));
10479 return;
10481 /* Truncate Cirrus shift counts. */
10482 case 's':
10483 if (GET_CODE (x) == CONST_INT)
10485 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10486 return;
10488 arm_print_operand (stream, x, 0);
10489 return;
10491 case 'I':
10492 fprintf (stream, "%s", arithmetic_instr (x, 0));
10493 return;
10495 case 'S':
10497 HOST_WIDE_INT val;
10498 const char * shift = shift_op (x, &val);
10500 if (shift)
10502 fprintf (stream, ", %s ", shift_op (x, &val));
10503 if (val == -1)
10504 arm_print_operand (stream, XEXP (x, 1), 0);
10505 else
10506 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10509 return;
10511 /* An explanation of the 'Q', 'R' and 'H' register operands:
10513 In a pair of registers containing a DI or DF value the 'Q'
10514 operand returns the register number of the register containing
10515 the least significant part of the value. The 'R' operand returns
10516 the register number of the register containing the most
10517 significant part of the value.
10519 The 'H' operand returns the higher of the two register numbers.
10520 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10521 same as the 'Q' operand, since the most significant part of the
10522 value is held in the lower number register. The reverse is true
10523 on systems where WORDS_BIG_ENDIAN is false.
10525 The purpose of these operands is to distinguish between cases
10526 where the endian-ness of the values is important (for example
10527 when they are added together), and cases where the endian-ness
10528 is irrelevant, but the order of register operations is important.
10529 For example when loading a value from memory into a register
10530 pair, the endian-ness does not matter. Provided that the value
10531 from the lower memory address is put into the lower numbered
10532 register, and the value from the higher address is put into the
10533 higher numbered register, the load will work regardless of whether
10534 the value being loaded is big-wordian or little-wordian. The
10535 order of the two register loads can matter however, if the address
10536 of the memory location is actually held in one of the registers
10537 being overwritten by the load. */
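 /* Worked example (illustrative): for a DImode value held in r0/r1, on a
    target where WORDS_BIG_ENDIAN is false %Q prints r0, %R prints r1 and
    %H prints r1; where WORDS_BIG_ENDIAN is true, %Q prints r1, %R prints
    r0 and %H still prints r1.  */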
10538 case 'Q':
10539 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10541 output_operand_lossage ("invalid operand for code '%c'", code);
10542 return;
10545 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10546 return;
10548 case 'R':
10549 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10551 output_operand_lossage ("invalid operand for code '%c'", code);
10552 return;
10555 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10556 return;
10558 case 'H':
10559 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10561 output_operand_lossage ("invalid operand for code '%c'", code);
10562 return;
10565 asm_fprintf (stream, "%r", REGNO (x) + 1);
10566 return;
10568 case 'm':
10569 asm_fprintf (stream, "%r",
10570 GET_CODE (XEXP (x, 0)) == REG
10571 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10572 return;
10574 case 'M':
10575 asm_fprintf (stream, "{%r-%r}",
10576 REGNO (x),
10577 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10578 return;
10580 case 'd':
10581 /* CONST_TRUE_RTX means always -- that's the default. */
10582 if (x == const_true_rtx)
10583 return;
10585 if (!COMPARISON_P (x))
10587 output_operand_lossage ("invalid operand for code '%c'", code);
10588 return;
10591 fputs (arm_condition_codes[get_arm_condition_code (x)],
10592 stream);
10593 return;
10595 case 'D':
10596 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10597 want to do that. */
10598 if (x == const_true_rtx)
10600 output_operand_lossage ("instruction never executed");
10601 return;
10603 if (!COMPARISON_P (x))
10605 output_operand_lossage ("invalid operand for code '%c'", code);
10606 return;
10609 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10610 (get_arm_condition_code (x))],
10611 stream);
10612 return;
10614 /* Cirrus registers can be accessed in a variety of ways:
10615 single floating point (f)
10616 double floating point (d)
10617 32bit integer (fx)
10618 64bit integer (dx). */
10619 case 'W': /* Cirrus register in F mode. */
10620 case 'X': /* Cirrus register in D mode. */
10621 case 'Y': /* Cirrus register in FX mode. */
10622 case 'Z': /* Cirrus register in DX mode. */
10623 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10624 abort ();
10626 fprintf (stream, "mv%s%s",
10627 code == 'W' ? "f"
10628 : code == 'X' ? "d"
10629 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10631 return;
10633 /* Print a Cirrus register in the mode specified by the register's mode. */
10634 case 'V':
10636 int mode = GET_MODE (x);
10638 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10640 output_operand_lossage ("invalid operand for code '%c'", code);
10641 return;
10644 fprintf (stream, "mv%s%s",
10645 mode == DFmode ? "d"
10646 : mode == SImode ? "fx"
10647 : mode == DImode ? "dx"
10648 : "f", reg_names[REGNO (x)] + 2);
10650 return;
10653 case 'U':
10654 if (GET_CODE (x) != REG
10655 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10656 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10657 /* Bad value for wCG register number. */
10659 output_operand_lossage ("invalid operand for code '%c'", code);
10660 return;
10663 else
10664 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10665 return;
10667 /* Print an iWMMXt control register name. */
10668 case 'w':
10669 if (GET_CODE (x) != CONST_INT
10670 || INTVAL (x) < 0
10671 || INTVAL (x) >= 16)
10672 /* Bad value for wC register number. */
10674 output_operand_lossage ("invalid operand for code '%c'", code);
10675 return;
10678 else
10680 static const char * wc_reg_names [16] =
10682 "wCID", "wCon", "wCSSF", "wCASF",
10683 "wC4", "wC5", "wC6", "wC7",
10684 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10685 "wC12", "wC13", "wC14", "wC15"
10688 fprintf (stream, "%s", wc_reg_names [INTVAL (x)]);
10690 return;
10692 /* Print a VFP double precision register name. */
10693 case 'P':
10695 int mode = GET_MODE (x);
10696 int num;
10698 if (mode != DImode && mode != DFmode)
10700 output_operand_lossage ("invalid operand for code '%c'", code);
10701 return;
10704 if (GET_CODE (x) != REG
10705 || !IS_VFP_REGNUM (REGNO (x)))
10707 output_operand_lossage ("invalid operand for code '%c'", code);
10708 return;
10711 num = REGNO(x) - FIRST_VFP_REGNUM;
10712 if (num & 1)
10714 output_operand_lossage ("invalid operand for code '%c'", code);
10715 return;
10718 fprintf (stream, "d%d", num >> 1);
10720 return;
10722 default:
10723 if (x == 0)
10725 output_operand_lossage ("missing operand");
10726 return;
10729 if (GET_CODE (x) == REG)
10730 asm_fprintf (stream, "%r", REGNO (x));
10731 else if (GET_CODE (x) == MEM)
10733 output_memory_reference_mode = GET_MODE (x);
10734 output_address (XEXP (x, 0));
10736 else if (GET_CODE (x) == CONST_DOUBLE)
10737 fprintf (stream, "#%s", fp_immediate_constant (x));
10738 else if (GET_CODE (x) == NEG)
10739 abort (); /* This should never happen now. */
10740 else
10742 fputc ('#', stream);
10743 output_addr_const (stream, x);
10748 #ifndef AOF_ASSEMBLER
10749 /* Target hook for assembling integer objects. The ARM version needs to
10750 handle word-sized values specially. */
10751 static bool
10752 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10754 if (size == UNITS_PER_WORD && aligned_p)
10756 fputs ("\t.word\t", asm_out_file);
10757 output_addr_const (asm_out_file, x);
10759 /* Mark symbols as position independent. We only do this in the
10760 .text segment, not in the .data segment. */
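 /* For example (illustrative; "foo" is a hypothetical symbol): a local
    symbol in the constant pool is emitted as "\t.word\tfoo(GOTOFF)",
    while a global one is emitted as "\t.word\tfoo(GOT)".  */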
10761 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10762 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10764 if (GET_CODE (x) == SYMBOL_REF
10765 && (CONSTANT_POOL_ADDRESS_P (x)
10766 || SYMBOL_REF_LOCAL_P (x)))
10767 fputs ("(GOTOFF)", asm_out_file);
10768 else if (GET_CODE (x) == LABEL_REF)
10769 fputs ("(GOTOFF)", asm_out_file);
10770 else
10771 fputs ("(GOT)", asm_out_file);
10773 fputc ('\n', asm_out_file);
10774 return true;
10777 if (arm_vector_mode_supported_p (GET_MODE (x)))
10779 int i, units;
10781 if (GET_CODE (x) != CONST_VECTOR)
10782 abort ();
10784 units = CONST_VECTOR_NUNITS (x);
10786 switch (GET_MODE (x))
10788 case V2SImode: size = 4; break;
10789 case V4HImode: size = 2; break;
10790 case V8QImode: size = 1; break;
10791 default:
10792 abort ();
10795 for (i = 0; i < units; i++)
10797 rtx elt;
10799 elt = CONST_VECTOR_ELT (x, i);
10800 assemble_integer
10801 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10804 return true;
10807 return default_assemble_integer (x, size, aligned_p);
10809 #endif
10811 /* A finite state machine takes care of noticing whether or not instructions
10812 can be conditionally executed, thus decreasing execution time and code
10813 size by deleting branch instructions. The fsm is controlled by
10814 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10816 /* The states of the fsm controlling condition codes are:
10817 0: normal, do nothing special
10818 1: make ASM_OUTPUT_OPCODE not output this instruction
10819 2: make ASM_OUTPUT_OPCODE not output this instruction
10820 3: make instructions conditional
10821 4: make instructions conditional
10823 State transitions (state->state by whom under condition):
10824 0 -> 1 final_prescan_insn if the `target' is a label
10825 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10826 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10827 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10828 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10829 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10830 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10831 (the target insn is arm_target_insn).
10833 If the jump clobbers the conditions then we use states 2 and 4.
10835 A similar thing can be done with conditional return insns.
10837 XXX In case the `target' is an unconditional branch, this conditionalising
10838 of the instructions always reduces code size, but not always execution
10839 time. But then, I want to reduce the code size to somewhere near what
10840 /bin/cc produces. */
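/* Illustrative example (not from the original source): a sequence such as

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   passes through states 0 -> 1 -> 3 and is emitted as

	cmp	r0, #0
	addne	r1, r1, #1

   with both the branch and the label dropped.  */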
10842 /* Returns the index of the ARM condition code string in
10843 `arm_condition_codes'. COMPARISON should be an rtx like
10844 `(eq (...) (...))'. */
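/* For instance (illustrative), a COMPARISON of
   (eq (reg:CC CC_REGNUM) (const_int 0)) selects the CCmode table below
   and yields ARM_EQ, i.e. the "eq" condition suffix.  */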
10845 static enum arm_cond_code
10846 get_arm_condition_code (rtx comparison)
10848 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10849 int code;
10850 enum rtx_code comp_code = GET_CODE (comparison);
10852 if (GET_MODE_CLASS (mode) != MODE_CC)
10853 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10854 XEXP (comparison, 1));
10856 switch (mode)
10858 case CC_DNEmode: code = ARM_NE; goto dominance;
10859 case CC_DEQmode: code = ARM_EQ; goto dominance;
10860 case CC_DGEmode: code = ARM_GE; goto dominance;
10861 case CC_DGTmode: code = ARM_GT; goto dominance;
10862 case CC_DLEmode: code = ARM_LE; goto dominance;
10863 case CC_DLTmode: code = ARM_LT; goto dominance;
10864 case CC_DGEUmode: code = ARM_CS; goto dominance;
10865 case CC_DGTUmode: code = ARM_HI; goto dominance;
10866 case CC_DLEUmode: code = ARM_LS; goto dominance;
10867 case CC_DLTUmode: code = ARM_CC;
10869 dominance:
10870 if (comp_code != EQ && comp_code != NE)
10871 abort ();
10873 if (comp_code == EQ)
10874 return ARM_INVERSE_CONDITION_CODE (code);
10875 return code;
10877 case CC_NOOVmode:
10878 switch (comp_code)
10880 case NE: return ARM_NE;
10881 case EQ: return ARM_EQ;
10882 case GE: return ARM_PL;
10883 case LT: return ARM_MI;
10884 default: abort ();
10887 case CC_Zmode:
10888 switch (comp_code)
10890 case NE: return ARM_NE;
10891 case EQ: return ARM_EQ;
10892 default: abort ();
10895 case CC_Nmode:
10896 switch (comp_code)
10898 case NE: return ARM_MI;
10899 case EQ: return ARM_PL;
10900 default: abort ();
10903 case CCFPEmode:
10904 case CCFPmode:
10905 /* These encodings assume that AC=1 in the FPA system control
10906 byte. This allows us to handle all cases except UNEQ and
10907 LTGT. */
10908 switch (comp_code)
10910 case GE: return ARM_GE;
10911 case GT: return ARM_GT;
10912 case LE: return ARM_LS;
10913 case LT: return ARM_MI;
10914 case NE: return ARM_NE;
10915 case EQ: return ARM_EQ;
10916 case ORDERED: return ARM_VC;
10917 case UNORDERED: return ARM_VS;
10918 case UNLT: return ARM_LT;
10919 case UNLE: return ARM_LE;
10920 case UNGT: return ARM_HI;
10921 case UNGE: return ARM_PL;
10922 /* UNEQ and LTGT do not have a representation. */
10923 case UNEQ: /* Fall through. */
10924 case LTGT: /* Fall through. */
10925 default: abort ();
10928 case CC_SWPmode:
10929 switch (comp_code)
10931 case NE: return ARM_NE;
10932 case EQ: return ARM_EQ;
10933 case GE: return ARM_LE;
10934 case GT: return ARM_LT;
10935 case LE: return ARM_GE;
10936 case LT: return ARM_GT;
10937 case GEU: return ARM_LS;
10938 case GTU: return ARM_CC;
10939 case LEU: return ARM_CS;
10940 case LTU: return ARM_HI;
10941 default: abort ();
10944 case CC_Cmode:
10945 switch (comp_code)
10947 case LTU: return ARM_CS;
10948 case GEU: return ARM_CC;
10949 default: abort ();
10952 case CCmode:
10953 switch (comp_code)
10955 case NE: return ARM_NE;
10956 case EQ: return ARM_EQ;
10957 case GE: return ARM_GE;
10958 case GT: return ARM_GT;
10959 case LE: return ARM_LE;
10960 case LT: return ARM_LT;
10961 case GEU: return ARM_CS;
10962 case GTU: return ARM_HI;
10963 case LEU: return ARM_LS;
10964 case LTU: return ARM_CC;
10965 default: abort ();
10968 default: abort ();
10971 abort ();
10974 void
10975 arm_final_prescan_insn (rtx insn)
10977 /* BODY will hold the body of INSN. */
10978 rtx body = PATTERN (insn);
10980 /* This will be 1 if trying to repeat the trick, and things need to be
10981 reversed if it appears to fail. */
10982 int reverse = 0;
10984 /* JUMP_CLOBBERS will be one if the condition codes are clobbered when
10985 the branch is taken, even if the rtl suggests otherwise. It also
10986 means that we have to grub around within the jump expression to find
10987 out what the conditions are when the jump isn't taken. */
10988 int jump_clobbers = 0;
10990 /* If we start with a return insn, we only succeed if we find another one. */
10991 int seeking_return = 0;
10993 /* START_INSN will hold the insn from where we start looking. This is the
10994 first insn after the following code_label if REVERSE is true. */
10995 rtx start_insn = insn;
10997 /* If in state 4, check if the target branch is reached, in order to
10998 change back to state 0. */
10999 if (arm_ccfsm_state == 4)
11001 if (insn == arm_target_insn)
11003 arm_target_insn = NULL;
11004 arm_ccfsm_state = 0;
11006 return;
11009 /* If in state 3, it is possible to repeat the trick, if this insn is an
11010 unconditional branch to a label, and immediately following this branch
11011 is the previous target label which is only used once, and the label this
11012 branch jumps to is not too far off. */
11013 if (arm_ccfsm_state == 3)
11015 if (simplejump_p (insn))
11017 start_insn = next_nonnote_insn (start_insn);
11018 if (GET_CODE (start_insn) == BARRIER)
11020 /* XXX Isn't this always a barrier? */
11021 start_insn = next_nonnote_insn (start_insn);
11023 if (GET_CODE (start_insn) == CODE_LABEL
11024 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11025 && LABEL_NUSES (start_insn) == 1)
11026 reverse = TRUE;
11027 else
11028 return;
11030 else if (GET_CODE (body) == RETURN)
11032 start_insn = next_nonnote_insn (start_insn);
11033 if (GET_CODE (start_insn) == BARRIER)
11034 start_insn = next_nonnote_insn (start_insn);
11035 if (GET_CODE (start_insn) == CODE_LABEL
11036 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11037 && LABEL_NUSES (start_insn) == 1)
11039 reverse = TRUE;
11040 seeking_return = 1;
11042 else
11043 return;
11045 else
11046 return;
11049 if (arm_ccfsm_state != 0 && !reverse)
11050 abort ();
11051 if (GET_CODE (insn) != JUMP_INSN)
11052 return;
11054 /* This jump might be paralleled with a clobber of the condition codes;
11055 the jump should always come first. */
11056 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11057 body = XVECEXP (body, 0, 0);
11059 if (reverse
11060 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11061 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11063 int insns_skipped;
11064 int fail = FALSE, succeed = FALSE;
11065 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11066 int then_not_else = TRUE;
11067 rtx this_insn = start_insn, label = 0;
11069 /* If the jump cannot be done with one instruction, we cannot
11070 conditionally execute the instruction in the inverse case. */
11071 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11073 jump_clobbers = 1;
11074 return;
11077 /* Register the insn jumped to. */
11078 if (reverse)
11080 if (!seeking_return)
11081 label = XEXP (SET_SRC (body), 0);
11083 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11084 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11085 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11087 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11088 then_not_else = FALSE;
11090 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11091 seeking_return = 1;
11092 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11094 seeking_return = 1;
11095 then_not_else = FALSE;
11097 else
11098 abort ();
11100 /* See how many insns this branch skips, and what kind of insns. If all
11101 insns are okay, and the label or unconditional branch to the same
11102 label is not too far away, succeed. */
11103 for (insns_skipped = 0;
11104 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11106 rtx scanbody;
11108 this_insn = next_nonnote_insn (this_insn);
11109 if (!this_insn)
11110 break;
11112 switch (GET_CODE (this_insn))
11114 case CODE_LABEL:
11115 /* Succeed if it is the target label, otherwise fail since
11116 control falls in from somewhere else. */
11117 if (this_insn == label)
11119 if (jump_clobbers)
11121 arm_ccfsm_state = 2;
11122 this_insn = next_nonnote_insn (this_insn);
11124 else
11125 arm_ccfsm_state = 1;
11126 succeed = TRUE;
11128 else
11129 fail = TRUE;
11130 break;
11132 case BARRIER:
11133 /* Succeed if the following insn is the target label.
11134 Otherwise fail.
11135 If return insns are used then the last insn in a function
11136 will be a barrier. */
11137 this_insn = next_nonnote_insn (this_insn);
11138 if (this_insn && this_insn == label)
11140 if (jump_clobbers)
11142 arm_ccfsm_state = 2;
11143 this_insn = next_nonnote_insn (this_insn);
11145 else
11146 arm_ccfsm_state = 1;
11147 succeed = TRUE;
11149 else
11150 fail = TRUE;
11151 break;
11153 case CALL_INSN:
11154 /* The AAPCS says that conditional calls should not be
11155 used since they make interworking inefficient (the
11156 linker can't transform BL<cond> into BLX). That's
11157 only a problem if the machine has BLX. */
11158 if (arm_arch5)
11160 fail = TRUE;
11161 break;
11164 /* Succeed if the following insn is the target label, or
11165 if the following two insns are a barrier and the
11166 target label. */
11167 this_insn = next_nonnote_insn (this_insn);
11168 if (this_insn && GET_CODE (this_insn) == BARRIER)
11169 this_insn = next_nonnote_insn (this_insn);
11171 if (this_insn && this_insn == label
11172 && insns_skipped < max_insns_skipped)
11174 if (jump_clobbers)
11176 arm_ccfsm_state = 2;
11177 this_insn = next_nonnote_insn (this_insn);
11179 else
11180 arm_ccfsm_state = 1;
11181 succeed = TRUE;
11183 else
11184 fail = TRUE;
11185 break;
11187 case JUMP_INSN:
11188 /* If this is an unconditional branch to the same label, succeed.
11189 If it is to another label, do nothing. If it is conditional,
11190 fail. */
11191 /* XXX Probably, the tests for SET and the PC are
11192 unnecessary. */
11194 scanbody = PATTERN (this_insn);
11195 if (GET_CODE (scanbody) == SET
11196 && GET_CODE (SET_DEST (scanbody)) == PC)
11198 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11199 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11201 arm_ccfsm_state = 2;
11202 succeed = TRUE;
11204 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11205 fail = TRUE;
11207 /* Fail if a conditional return is undesirable (e.g. on a
11208 StrongARM), but still allow this if optimizing for size. */
11209 else if (GET_CODE (scanbody) == RETURN
11210 && !use_return_insn (TRUE, NULL)
11211 && !optimize_size)
11212 fail = TRUE;
11213 else if (GET_CODE (scanbody) == RETURN
11214 && seeking_return)
11216 arm_ccfsm_state = 2;
11217 succeed = TRUE;
11219 else if (GET_CODE (scanbody) == PARALLEL)
11221 switch (get_attr_conds (this_insn))
11223 case CONDS_NOCOND:
11224 break;
11225 default:
11226 fail = TRUE;
11227 break;
11230 else
11231 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11233 break;
11235 case INSN:
11236 /* Instructions using or affecting the condition codes make it
11237 fail. */
11238 scanbody = PATTERN (this_insn);
11239 if (!(GET_CODE (scanbody) == SET
11240 || GET_CODE (scanbody) == PARALLEL)
11241 || get_attr_conds (this_insn) != CONDS_NOCOND)
11242 fail = TRUE;
11244 /* A conditional Cirrus instruction must be followed by
11245 a non-Cirrus instruction. However, since we
11246 conditionalize instructions in this function, and since
11247 by the time we get here we can no longer add instructions
11248 (nops), because shorten_branches () has already been
11249 called, we simply disable conditionalizing Cirrus
11250 instructions, to be safe. */
11251 if (GET_CODE (scanbody) != USE
11252 && GET_CODE (scanbody) != CLOBBER
11253 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11254 fail = TRUE;
11255 break;
11257 default:
11258 break;
11261 if (succeed)
11263 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11264 arm_target_label = CODE_LABEL_NUMBER (label);
11265 else if (seeking_return || arm_ccfsm_state == 2)
11267 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11269 this_insn = next_nonnote_insn (this_insn);
11270 if (this_insn && (GET_CODE (this_insn) == BARRIER
11271 || GET_CODE (this_insn) == CODE_LABEL))
11272 abort ();
11274 if (!this_insn)
11276 /* Oh, dear! We ran off the end... give up. */
11277 recog (PATTERN (insn), insn, NULL);
11278 arm_ccfsm_state = 0;
11279 arm_target_insn = NULL;
11280 return;
11282 arm_target_insn = this_insn;
11284 else
11285 abort ();
11286 if (jump_clobbers)
11288 if (reverse)
11289 abort ();
11290 arm_current_cc =
11291 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11292 0), 0), 1));
11293 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11294 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11295 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11296 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11298 else
11300 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11301 what it was. */
11302 if (!reverse)
11303 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11304 0));
11307 if (reverse || then_not_else)
11308 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11311 /* Restore recog_data (getting the attributes of other insns can
11312 destroy this array, but final.c assumes that it remains intact
11313 across this call; since the insn has been recognized already we
11314 call recog direct). */
11315 recog (PATTERN (insn), insn, NULL);
11319 /* Returns true if REGNO is a valid register
11320 for holding a quantity of type MODE. */
11321 int
11322 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11324 if (GET_MODE_CLASS (mode) == MODE_CC)
11325 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11327 if (TARGET_THUMB)
11328 /* For the Thumb we only allow values bigger than SImode in
11329 registers 0 - 6, so that there is always a second low
11330 register available to hold the upper part of the value.
11331 We probably ought to ensure that the register is the
11332 start of an even numbered register pair. */
11333 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11335 if (IS_CIRRUS_REGNUM (regno))
11336 /* We have outlawed SI values in Cirrus registers because they
11337 reside in the lower 32 bits, but SF values reside in the
11338 upper 32 bits. This causes gcc all sorts of grief. We can't
11339 even split the registers into pairs because Cirrus SI values
11340 get sign extended to 64bits-- aldyh. */
11341 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11343 if (IS_VFP_REGNUM (regno))
11345 if (mode == SFmode || mode == SImode)
11346 return TRUE;
11348 /* DFmode values are only valid in even register pairs. */
11349 if (mode == DFmode)
11350 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11351 return FALSE;
11354 if (IS_IWMMXT_GR_REGNUM (regno))
11355 return mode == SImode;
11357 if (IS_IWMMXT_REGNUM (regno))
11358 return VALID_IWMMXT_REG_MODE (mode);
11360 /* We allow any value to be stored in the general registers.
11361 Restrict doubleword quantities to even register pairs so that we can
11362 use ldrd. */
11363 if (regno <= LAST_ARM_REGNUM)
11364 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11366 if ( regno == FRAME_POINTER_REGNUM
11367 || regno == ARG_POINTER_REGNUM)
11368 /* We only allow integers in the fake hard registers. */
11369 return GET_MODE_CLASS (mode) == MODE_INT;
11371 /* The only registers left are the FPA registers
11372 which we only allow to hold FP values. */
11373 return GET_MODE_CLASS (mode) == MODE_FLOAT
11374 && regno >= FIRST_FPA_REGNUM
11375 && regno <= LAST_FPA_REGNUM;
11378 enum reg_class
11379 arm_regno_class (int regno)
11381 if (TARGET_THUMB)
11383 if (regno == STACK_POINTER_REGNUM)
11384 return STACK_REG;
11385 if (regno == CC_REGNUM)
11386 return CC_REG;
11387 if (regno < 8)
11388 return LO_REGS;
11389 return HI_REGS;
11392 if ( regno <= LAST_ARM_REGNUM
11393 || regno == FRAME_POINTER_REGNUM
11394 || regno == ARG_POINTER_REGNUM)
11395 return GENERAL_REGS;
11397 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11398 return NO_REGS;
11400 if (IS_CIRRUS_REGNUM (regno))
11401 return CIRRUS_REGS;
11403 if (IS_VFP_REGNUM (regno))
11404 return VFP_REGS;
11406 if (IS_IWMMXT_REGNUM (regno))
11407 return IWMMXT_REGS;
11409 if (IS_IWMMXT_GR_REGNUM (regno))
11410 return IWMMXT_GR_REGS;
11412 return FPA_REGS;
11415 /* Handle a special case when computing the offset
11416 of an argument from the frame pointer. */
11417 int
11418 arm_debugger_arg_offset (int value, rtx addr)
11420 rtx insn;
11422 /* We are only interested if dbxout_parms() failed to compute the offset. */
11423 if (value != 0)
11424 return 0;
11426 /* We can only cope with the case where the address is held in a register. */
11427 if (GET_CODE (addr) != REG)
11428 return 0;
11430 /* If we are using the frame pointer to point at the argument, then
11431 an offset of 0 is correct. */
11432 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11433 return 0;
11435 /* If we are using the stack pointer to point at the
11436 argument, then an offset of 0 is correct. */
11437 if ((TARGET_THUMB || !frame_pointer_needed)
11438 && REGNO (addr) == SP_REGNUM)
11439 return 0;
11441 /* Oh dear. The argument is pointed to by a register rather
11442 than being held in a register, or being stored at a known
11443 offset from the frame pointer. Since GDB only understands
11444 those two kinds of argument we must translate the address
11445 held in the register into an offset from the frame pointer.
11446 We do this by searching through the insns for the function
11447 looking to see where this register gets its value. If the
11448 register is initialized from the frame pointer plus an offset
11449 then we are in luck and we can continue, otherwise we give up.
11451 This code is exercised by producing debugging information
11452 for a function with arguments like this:
11454 double func (double a, double b, int c, double d) {return d;}
11456 Without this code the stab for parameter 'd' will be set to
11457 an offset of 0 from the frame pointer, rather than 8. */
11459 /* The if() statement says:
11461 If the insn is a normal instruction
11462 and if the insn is setting the value in a register
11463 and if the register being set is the register holding the address of the argument
11464 and if the address is computed by an addition
11465 that involves adding to a register
11466 which is the frame pointer
11467 a constant integer
11469 then... */
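 /* In RTL terms the insn we are looking for is roughly (illustrative):

	(set (reg Rn) (plus (reg HARD_FRAME_POINTER_REGNUM) (const_int 8)))

    in which case VALUE becomes 8, as in the example above.  */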
11471 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11473 if ( GET_CODE (insn) == INSN
11474 && GET_CODE (PATTERN (insn)) == SET
11475 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11476 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11477 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11478 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11479 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11482 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11484 break;
11488 if (value == 0)
11490 debug_rtx (addr);
11491 warning (0, "unable to compute real location of stacked parameter");
11492 value = 8; /* XXX magic hack */
11495 return value;
11498 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11499 do \
11501 if ((MASK) & insn_flags) \
11502 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11503 BUILT_IN_MD, NULL, NULL_TREE); \
11505 while (0)
11507 struct builtin_description
11509 const unsigned int mask;
11510 const enum insn_code icode;
11511 const char * const name;
11512 const enum arm_builtins code;
11513 const enum rtx_code comparison;
11514 const unsigned int flag;
11517 static const struct builtin_description bdesc_2arg[] =
11519 #define IWMMXT_BUILTIN(code, string, builtin) \
11520 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11521 ARM_BUILTIN_##builtin, 0, 0 },
11523 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11524 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11525 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11526 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11527 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11528 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11529 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11530 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11531 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11532 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11533 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11534 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11535 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11536 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11537 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11538 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11539 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11540 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11541 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11542 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11543 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11544 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11545 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11546 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11547 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11548 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11549 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11550 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11551 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11552 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11553 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11554 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11555 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11556 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11557 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11558 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11559 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11560 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11561 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11562 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11563 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11564 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11565 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11566 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11567 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11568 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11569 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11570 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11571 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11572 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11573 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11574 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11575 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11576 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11577 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11578 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11579 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11580 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11582 #define IWMMXT_BUILTIN2(code, builtin) \
11583 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11585 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11586 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11587 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11588 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11589 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11590 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11591 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11592 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11593 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11594 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11595 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11596 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11597 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11598 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11599 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11600 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11601 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11602 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11603 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11604 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11605 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11606 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11607 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11608 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11609 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11610 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11611 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11612 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11613 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11614 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11615 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11616 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11619 static const struct builtin_description bdesc_1arg[] =
11621 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11622 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11623 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11624 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11625 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11626 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11627 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11628 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11629 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11630 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11631 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11632 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11633 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11634 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11635 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11636 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11637 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11638 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11641 /* Set up all the iWMMXt builtins. This is
11642 not called if TARGET_IWMMXT is zero. */
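/* Once these are registered, user code built with iWMMXt support enabled
   can (illustratively) call e.g. __builtin_arm_wzero () or
   __builtin_arm_waddb (a, b) on the 8x8-bit vector type; the builtin names
   come from the bdesc tables above.  */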
11644 static void
11645 arm_init_iwmmxt_builtins (void)
11647 const struct builtin_description * d;
11648 size_t i;
11649 tree endlink = void_list_node;
11651 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11652 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11653 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11655 tree int_ftype_int
11656 = build_function_type (integer_type_node,
11657 tree_cons (NULL_TREE, integer_type_node, endlink));
11658 tree v8qi_ftype_v8qi_v8qi_int
11659 = build_function_type (V8QI_type_node,
11660 tree_cons (NULL_TREE, V8QI_type_node,
11661 tree_cons (NULL_TREE, V8QI_type_node,
11662 tree_cons (NULL_TREE,
11663 integer_type_node,
11664 endlink))));
11665 tree v4hi_ftype_v4hi_int
11666 = build_function_type (V4HI_type_node,
11667 tree_cons (NULL_TREE, V4HI_type_node,
11668 tree_cons (NULL_TREE, integer_type_node,
11669 endlink)));
11670 tree v2si_ftype_v2si_int
11671 = build_function_type (V2SI_type_node,
11672 tree_cons (NULL_TREE, V2SI_type_node,
11673 tree_cons (NULL_TREE, integer_type_node,
11674 endlink)));
11675 tree v2si_ftype_di_di
11676 = build_function_type (V2SI_type_node,
11677 tree_cons (NULL_TREE, long_long_integer_type_node,
11678 tree_cons (NULL_TREE, long_long_integer_type_node,
11679 endlink)));
11680 tree di_ftype_di_int
11681 = build_function_type (long_long_integer_type_node,
11682 tree_cons (NULL_TREE, long_long_integer_type_node,
11683 tree_cons (NULL_TREE, integer_type_node,
11684 endlink)));
11685 tree di_ftype_di_int_int
11686 = build_function_type (long_long_integer_type_node,
11687 tree_cons (NULL_TREE, long_long_integer_type_node,
11688 tree_cons (NULL_TREE, integer_type_node,
11689 tree_cons (NULL_TREE,
11690 integer_type_node,
11691 endlink))));
11692 tree int_ftype_v8qi
11693 = build_function_type (integer_type_node,
11694 tree_cons (NULL_TREE, V8QI_type_node,
11695 endlink));
11696 tree int_ftype_v4hi
11697 = build_function_type (integer_type_node,
11698 tree_cons (NULL_TREE, V4HI_type_node,
11699 endlink));
11700 tree int_ftype_v2si
11701 = build_function_type (integer_type_node,
11702 tree_cons (NULL_TREE, V2SI_type_node,
11703 endlink));
11704 tree int_ftype_v8qi_int
11705 = build_function_type (integer_type_node,
11706 tree_cons (NULL_TREE, V8QI_type_node,
11707 tree_cons (NULL_TREE, integer_type_node,
11708 endlink)));
11709 tree int_ftype_v4hi_int
11710 = build_function_type (integer_type_node,
11711 tree_cons (NULL_TREE, V4HI_type_node,
11712 tree_cons (NULL_TREE, integer_type_node,
11713 endlink)));
11714 tree int_ftype_v2si_int
11715 = build_function_type (integer_type_node,
11716 tree_cons (NULL_TREE, V2SI_type_node,
11717 tree_cons (NULL_TREE, integer_type_node,
11718 endlink)));
11719 tree v8qi_ftype_v8qi_int_int
11720 = build_function_type (V8QI_type_node,
11721 tree_cons (NULL_TREE, V8QI_type_node,
11722 tree_cons (NULL_TREE, integer_type_node,
11723 tree_cons (NULL_TREE,
11724 integer_type_node,
11725 endlink))));
11726 tree v4hi_ftype_v4hi_int_int
11727 = build_function_type (V4HI_type_node,
11728 tree_cons (NULL_TREE, V4HI_type_node,
11729 tree_cons (NULL_TREE, integer_type_node,
11730 tree_cons (NULL_TREE,
11731 integer_type_node,
11732 endlink))));
11733 tree v2si_ftype_v2si_int_int
11734 = build_function_type (V2SI_type_node,
11735 tree_cons (NULL_TREE, V2SI_type_node,
11736 tree_cons (NULL_TREE, integer_type_node,
11737 tree_cons (NULL_TREE,
11738 integer_type_node,
11739 endlink))));
11740 /* Miscellaneous. */
11741 tree v8qi_ftype_v4hi_v4hi
11742 = build_function_type (V8QI_type_node,
11743 tree_cons (NULL_TREE, V4HI_type_node,
11744 tree_cons (NULL_TREE, V4HI_type_node,
11745 endlink)));
11746 tree v4hi_ftype_v2si_v2si
11747 = build_function_type (V4HI_type_node,
11748 tree_cons (NULL_TREE, V2SI_type_node,
11749 tree_cons (NULL_TREE, V2SI_type_node,
11750 endlink)));
11751 tree v2si_ftype_v4hi_v4hi
11752 = build_function_type (V2SI_type_node,
11753 tree_cons (NULL_TREE, V4HI_type_node,
11754 tree_cons (NULL_TREE, V4HI_type_node,
11755 endlink)));
11756 tree v2si_ftype_v8qi_v8qi
11757 = build_function_type (V2SI_type_node,
11758 tree_cons (NULL_TREE, V8QI_type_node,
11759 tree_cons (NULL_TREE, V8QI_type_node,
11760 endlink)));
11761 tree v4hi_ftype_v4hi_di
11762 = build_function_type (V4HI_type_node,
11763 tree_cons (NULL_TREE, V4HI_type_node,
11764 tree_cons (NULL_TREE,
11765 long_long_integer_type_node,
11766 endlink)));
11767 tree v2si_ftype_v2si_di
11768 = build_function_type (V2SI_type_node,
11769 tree_cons (NULL_TREE, V2SI_type_node,
11770 tree_cons (NULL_TREE,
11771 long_long_integer_type_node,
11772 endlink)));
11773 tree void_ftype_int_int
11774 = build_function_type (void_type_node,
11775 tree_cons (NULL_TREE, integer_type_node,
11776 tree_cons (NULL_TREE, integer_type_node,
11777 endlink)));
11778 tree di_ftype_void
11779 = build_function_type (long_long_unsigned_type_node, endlink);
11780 tree di_ftype_v8qi
11781 = build_function_type (long_long_integer_type_node,
11782 tree_cons (NULL_TREE, V8QI_type_node,
11783 endlink));
11784 tree di_ftype_v4hi
11785 = build_function_type (long_long_integer_type_node,
11786 tree_cons (NULL_TREE, V4HI_type_node,
11787 endlink));
11788 tree di_ftype_v2si
11789 = build_function_type (long_long_integer_type_node,
11790 tree_cons (NULL_TREE, V2SI_type_node,
11791 endlink));
11792 tree v2si_ftype_v4hi
11793 = build_function_type (V2SI_type_node,
11794 tree_cons (NULL_TREE, V4HI_type_node,
11795 endlink));
11796 tree v4hi_ftype_v8qi
11797 = build_function_type (V4HI_type_node,
11798 tree_cons (NULL_TREE, V8QI_type_node,
11799 endlink));
11801 tree di_ftype_di_v4hi_v4hi
11802 = build_function_type (long_long_unsigned_type_node,
11803 tree_cons (NULL_TREE,
11804 long_long_unsigned_type_node,
11805 tree_cons (NULL_TREE, V4HI_type_node,
11806 tree_cons (NULL_TREE,
11807 V4HI_type_node,
11808 endlink))));
11810 tree di_ftype_v4hi_v4hi
11811 = build_function_type (long_long_unsigned_type_node,
11812 tree_cons (NULL_TREE, V4HI_type_node,
11813 tree_cons (NULL_TREE, V4HI_type_node,
11814 endlink)));
11816 /* Normal vector binops. */
11817 tree v8qi_ftype_v8qi_v8qi
11818 = build_function_type (V8QI_type_node,
11819 tree_cons (NULL_TREE, V8QI_type_node,
11820 tree_cons (NULL_TREE, V8QI_type_node,
11821 endlink)));
11822 tree v4hi_ftype_v4hi_v4hi
11823 = build_function_type (V4HI_type_node,
11824 tree_cons (NULL_TREE, V4HI_type_node,
11825 tree_cons (NULL_TREE, V4HI_type_node,
11826 endlink)));
11827 tree v2si_ftype_v2si_v2si
11828 = build_function_type (V2SI_type_node,
11829 tree_cons (NULL_TREE, V2SI_type_node,
11830 tree_cons (NULL_TREE, V2SI_type_node,
11831 endlink)));
11832 tree di_ftype_di_di
11833 = build_function_type (long_long_unsigned_type_node,
11834 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11835 tree_cons (NULL_TREE,
11836 long_long_unsigned_type_node,
11837 endlink)));
11839 /* Add all builtins that are more or less simple operations on two
11840 operands. */
11841 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11843 /* Use one of the operands; the target can have a different mode for
11844 mask-generating compares. */
11845 enum machine_mode mode;
11846 tree type;
11848 if (d->name == 0)
11849 continue;
11851 mode = insn_data[d->icode].operand[1].mode;
11853 switch (mode)
11855 case V8QImode:
11856 type = v8qi_ftype_v8qi_v8qi;
11857 break;
11858 case V4HImode:
11859 type = v4hi_ftype_v4hi_v4hi;
11860 break;
11861 case V2SImode:
11862 type = v2si_ftype_v2si_v2si;
11863 break;
11864 case DImode:
11865 type = di_ftype_di_di;
11866 break;
11868 default:
11869 abort ();
11872 def_mbuiltin (d->mask, d->name, type, d->code);
11875 /* Add the remaining iWMMXt insns with somewhat more complicated types. */
11876 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11877 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11878 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11880 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11881 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11882 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11883 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11884 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11885 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11887 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11888 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11889 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11890 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11891 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11892 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11894 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11895 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11896 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11897 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11898 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11899 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11901 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11902 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11903 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11904 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11905 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11906 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11908 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11910 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11911 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11912 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11913 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11915 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11916 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11917 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11918 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11919 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11920 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11921 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11922 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11923 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11925 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11926 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11927 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11929 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11930 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11931 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11933 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11934 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11935 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11936 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11937 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11938 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11940 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11941 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11942 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11943 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11944 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11945 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11946 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11947 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11948 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11949 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11950 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11951 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11953 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11954 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11955 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11956 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11958 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11959 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11960 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11961 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11962 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11963 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
11967 static void
11968 arm_init_builtins (void)
11970 if (TARGET_REALLY_IWMMXT)
11971 arm_init_iwmmxt_builtins ();
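/* Usage sketch (illustrative only, not part of the compiler): on an
   iWMMXt-enabled target the builtins registered above become directly
   callable from user code.  The wrapper name and vector typedef below are
   invented for this example; most users would instead go through the
   intrinsic wrappers provided by the target's mmintrin.h.

       typedef char v8qi __attribute__ ((vector_size (8)));

       unsigned long long
       sum_bytes (v8qi x)
       {
         return __builtin_arm_waccb (x);
       }

   WACCB accumulates all eight bytes of its operand into a 64-bit total.
   Each such call is routed through arm_expand_builtin below, which selects
   the matching iWMMXt insn pattern and emits it.  */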
11974 /* Errors in the source file can cause expand_expr to return const0_rtx
11975 where we expect a vector. To avoid crashing, use one of the vector
11976 clear instructions. */
11978 static rtx
11979 safe_vector_operand (rtx x, enum machine_mode mode)
11981 if (x != const0_rtx)
11982 return x;
11983 x = gen_reg_rtx (mode);
11985 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11986 : gen_rtx_SUBREG (DImode, x, 0)));
11987 return x;
11990 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11992 static rtx
11993 arm_expand_binop_builtin (enum insn_code icode,
11994 tree arglist, rtx target)
11996 rtx pat;
11997 tree arg0 = TREE_VALUE (arglist);
11998 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11999 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12000 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12001 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12002 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12003 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12005 if (VECTOR_MODE_P (mode0))
12006 op0 = safe_vector_operand (op0, mode0);
12007 if (VECTOR_MODE_P (mode1))
12008 op1 = safe_vector_operand (op1, mode1);
12010 if (! target
12011 || GET_MODE (target) != tmode
12012 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12013 target = gen_reg_rtx (tmode);
12015 /* In case the insn wants input operands in modes different from
12016 the result, abort. */
12017 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12018 abort ();
12020 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12021 op0 = copy_to_mode_reg (mode0, op0);
12022 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12023 op1 = copy_to_mode_reg (mode1, op1);
12025 pat = GEN_FCN (icode) (target, op0, op1);
12026 if (! pat)
12027 return 0;
12028 emit_insn (pat);
12029 return target;
12032 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12034 static rtx
12035 arm_expand_unop_builtin (enum insn_code icode,
12036 tree arglist, rtx target, int do_load)
12038 rtx pat;
12039 tree arg0 = TREE_VALUE (arglist);
12040 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12041 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12042 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12044 if (! target
12045 || GET_MODE (target) != tmode
12046 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12047 target = gen_reg_rtx (tmode);
12048 if (do_load)
12049 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12050 else
12052 if (VECTOR_MODE_P (mode0))
12053 op0 = safe_vector_operand (op0, mode0);
12055 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12056 op0 = copy_to_mode_reg (mode0, op0);
12059 pat = GEN_FCN (icode) (target, op0);
12060 if (! pat)
12061 return 0;
12062 emit_insn (pat);
12063 return target;
12066 /* Expand an expression EXP that calls a built-in function,
12067 with result going to TARGET if that's convenient
12068 (and in mode MODE if that's convenient).
12069 SUBTARGET may be used as the target for computing one of EXP's operands.
12070 IGNORE is nonzero if the value is to be ignored. */
12072 static rtx
12073 arm_expand_builtin (tree exp,
12074 rtx target,
12075 rtx subtarget ATTRIBUTE_UNUSED,
12076 enum machine_mode mode ATTRIBUTE_UNUSED,
12077 int ignore ATTRIBUTE_UNUSED)
12079 const struct builtin_description * d;
12080 enum insn_code icode;
12081 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12082 tree arglist = TREE_OPERAND (exp, 1);
12083 tree arg0;
12084 tree arg1;
12085 tree arg2;
12086 rtx op0;
12087 rtx op1;
12088 rtx op2;
12089 rtx pat;
12090 int fcode = DECL_FUNCTION_CODE (fndecl);
12091 size_t i;
12092 enum machine_mode tmode;
12093 enum machine_mode mode0;
12094 enum machine_mode mode1;
12095 enum machine_mode mode2;
12097 switch (fcode)
12099 case ARM_BUILTIN_TEXTRMSB:
12100 case ARM_BUILTIN_TEXTRMUB:
12101 case ARM_BUILTIN_TEXTRMSH:
12102 case ARM_BUILTIN_TEXTRMUH:
12103 case ARM_BUILTIN_TEXTRMSW:
12104 case ARM_BUILTIN_TEXTRMUW:
12105 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12106 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12107 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12108 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12109 : CODE_FOR_iwmmxt_textrmw);
12111 arg0 = TREE_VALUE (arglist);
12112 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12113 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12114 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12115 tmode = insn_data[icode].operand[0].mode;
12116 mode0 = insn_data[icode].operand[1].mode;
12117 mode1 = insn_data[icode].operand[2].mode;
12119 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12120 op0 = copy_to_mode_reg (mode0, op0);
12121 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12123 /* @@@ better error message */
12124 error ("selector must be an immediate");
12125 return gen_reg_rtx (tmode);
12127 if (target == 0
12128 || GET_MODE (target) != tmode
12129 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12130 target = gen_reg_rtx (tmode);
12131 pat = GEN_FCN (icode) (target, op0, op1);
12132 if (! pat)
12133 return 0;
12134 emit_insn (pat);
12135 return target;
12137 case ARM_BUILTIN_TINSRB:
12138 case ARM_BUILTIN_TINSRH:
12139 case ARM_BUILTIN_TINSRW:
12140 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12141 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12142 : CODE_FOR_iwmmxt_tinsrw);
12143 arg0 = TREE_VALUE (arglist);
12144 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12145 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12146 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12147 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12148 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12149 tmode = insn_data[icode].operand[0].mode;
12150 mode0 = insn_data[icode].operand[1].mode;
12151 mode1 = insn_data[icode].operand[2].mode;
12152 mode2 = insn_data[icode].operand[3].mode;
12154 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12155 op0 = copy_to_mode_reg (mode0, op0);
12156 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12157 op1 = copy_to_mode_reg (mode1, op1);
12158 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12160 /* @@@ better error message */
12161 error ("selector must be an immediate");
12162 return const0_rtx;
12164 if (target == 0
12165 || GET_MODE (target) != tmode
12166 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12167 target = gen_reg_rtx (tmode);
12168 pat = GEN_FCN (icode) (target, op0, op1, op2);
12169 if (! pat)
12170 return 0;
12171 emit_insn (pat);
12172 return target;
12174 case ARM_BUILTIN_SETWCX:
12175 arg0 = TREE_VALUE (arglist);
12176 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12177 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12178 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12179 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12180 return 0;
12182 case ARM_BUILTIN_GETWCX:
12183 arg0 = TREE_VALUE (arglist);
12184 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12185 target = gen_reg_rtx (SImode);
12186 emit_insn (gen_iwmmxt_tmrc (target, op0));
12187 return target;
12189 case ARM_BUILTIN_WSHUFH:
12190 icode = CODE_FOR_iwmmxt_wshufh;
12191 arg0 = TREE_VALUE (arglist);
12192 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12193 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12194 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12195 tmode = insn_data[icode].operand[0].mode;
12196 mode1 = insn_data[icode].operand[1].mode;
12197 mode2 = insn_data[icode].operand[2].mode;
12199 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12200 op0 = copy_to_mode_reg (mode1, op0);
12201 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12203 /* @@@ better error message */
12204 error ("mask must be an immediate");
12205 return const0_rtx;
12207 if (target == 0
12208 || GET_MODE (target) != tmode
12209 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12210 target = gen_reg_rtx (tmode);
12211 pat = GEN_FCN (icode) (target, op0, op1);
12212 if (! pat)
12213 return 0;
12214 emit_insn (pat);
12215 return target;
12217 case ARM_BUILTIN_WSADB:
12218 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12219 case ARM_BUILTIN_WSADH:
12220 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12221 case ARM_BUILTIN_WSADBZ:
12222 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12223 case ARM_BUILTIN_WSADHZ:
12224 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12226 /* Several three-argument builtins. */
12227 case ARM_BUILTIN_WMACS:
12228 case ARM_BUILTIN_WMACU:
12229 case ARM_BUILTIN_WALIGN:
12230 case ARM_BUILTIN_TMIA:
12231 case ARM_BUILTIN_TMIAPH:
12232 case ARM_BUILTIN_TMIATT:
12233 case ARM_BUILTIN_TMIATB:
12234 case ARM_BUILTIN_TMIABT:
12235 case ARM_BUILTIN_TMIABB:
12236 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12237 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12238 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12239 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12240 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12241 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12242 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12243 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12244 : CODE_FOR_iwmmxt_walign);
12245 arg0 = TREE_VALUE (arglist);
12246 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12247 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12248 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12249 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12250 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12251 tmode = insn_data[icode].operand[0].mode;
12252 mode0 = insn_data[icode].operand[1].mode;
12253 mode1 = insn_data[icode].operand[2].mode;
12254 mode2 = insn_data[icode].operand[3].mode;
12256 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12257 op0 = copy_to_mode_reg (mode0, op0);
12258 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12259 op1 = copy_to_mode_reg (mode1, op1);
12260 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12261 op2 = copy_to_mode_reg (mode2, op2);
12262 if (target == 0
12263 || GET_MODE (target) != tmode
12264 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12265 target = gen_reg_rtx (tmode);
12266 pat = GEN_FCN (icode) (target, op0, op1, op2);
12267 if (! pat)
12268 return 0;
12269 emit_insn (pat);
12270 return target;
12272 case ARM_BUILTIN_WZERO:
12273 target = gen_reg_rtx (DImode);
12274 emit_insn (gen_iwmmxt_clrdi (target));
12275 return target;
12277 default:
12278 break;
12281 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12282 if (d->code == (const enum arm_builtins) fcode)
12283 return arm_expand_binop_builtin (d->icode, arglist, target);
12285 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12286 if (d->code == (const enum arm_builtins) fcode)
12287 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12289 /* @@@ Should really do something sensible here. */
12290 return NULL_RTX;
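/* Dispatch example (illustrative): a source-level call to
   __builtin_arm_wsadb (a, b) arrives here with fcode == ARM_BUILTIN_WSADB
   and is handed to arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb,
   arglist, target), which forces both operands into V8QImode registers and
   emits the iwmmxt_wsadb pattern.  Builtins without an explicit case above
   fall through to the bdesc_2arg / bdesc_1arg table scans.  */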
12293 /* Return the number (counting from 0) of
12294 the least significant set bit in MASK. */
12296 inline static int
12297 number_of_first_bit_set (unsigned mask)
12299 int bit;
12301 for (bit = 0;
12302 (mask & (1 << bit)) == 0;
12303 ++bit)
12304 continue;
12306 return bit;
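/* For example, number_of_first_bit_set (0x28) is 3, since 0x28 is binary
   101000 and bit 3 is the lowest bit set; a mask of 1 yields 0.  Callers
   must pass a nonzero MASK, otherwise the loop above never terminates.  */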
12309 /* Emit code to push or pop registers to or from the stack. F is the
12310 assembly file. MASK is the registers to push or pop. PUSH is
12311 nonzero if we should push, and zero if we should pop. For debugging
12312 output, if pushing, adjust CFA_OFFSET by the amount of space added
12313 to the stack. REAL_REGS should have the same number of bits set as
12314 MASK, and will be used instead (in the same order) to describe which
12315 registers were saved - this is used to mark the save slots when we
12316 push high registers after moving them to low registers. */
12317 static void
12318 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12319 unsigned long real_regs)
12321 int regno;
12322 int lo_mask = mask & 0xFF;
12323 int pushed_words = 0;
12325 if (mask == 0)
12326 abort ();
12328 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12330 /* Special case.  Do not generate a POP PC statement here; do it in
12331 thumb_exit ().  */
12332 thumb_exit (f, -1);
12333 return;
12336 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12338 /* Look at the low registers first. */
12339 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12341 if (lo_mask & 1)
12343 asm_fprintf (f, "%r", regno);
12345 if ((lo_mask & ~1) != 0)
12346 fprintf (f, ", ");
12348 pushed_words++;
12352 if (push && (mask & (1 << LR_REGNUM)))
12354 /* Catch pushing the LR. */
12355 if (mask & 0xFF)
12356 fprintf (f, ", ");
12358 asm_fprintf (f, "%r", LR_REGNUM);
12360 pushed_words++;
12362 else if (!push && (mask & (1 << PC_REGNUM)))
12364 /* Catch popping the PC. */
12365 if (TARGET_INTERWORK || TARGET_BACKTRACE
12366 || current_function_calls_eh_return)
12368 /* The PC is never popped directly; instead
12369 it is popped into r3 and then BX is used. */
12370 fprintf (f, "}\n");
12372 thumb_exit (f, -1);
12374 return;
12376 else
12378 if (mask & 0xFF)
12379 fprintf (f, ", ");
12381 asm_fprintf (f, "%r", PC_REGNUM);
12385 fprintf (f, "}\n");
12387 if (push && pushed_words && dwarf2out_do_frame ())
12389 char *l = dwarf2out_cfi_label ();
12390 int pushed_mask = real_regs;
12392 *cfa_offset += pushed_words * 4;
12393 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12395 pushed_words = 0;
12396 pushed_mask = real_regs;
12397 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12399 if (pushed_mask & 1)
12400 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
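/* Example (illustrative; register choice invented): the call

       thumb_pushpop (f, (1 << 4) | (1 << 5) | (1 << LR_REGNUM), 1,
                      &cfa_offset, (1 << 4) | (1 << 5) | (1 << LR_REGNUM));

   writes "push {r4, r5, lr}" to the assembly file and, if DWARF frame
   information is being generated, advances *cfa_offset by 12 (three words)
   and records one save slot per bit set in REAL_REGS.  */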
12405 /* Generate code to return from a thumb function.
12406 If 'reg_containing_return_addr' is -1, then the return address is
12407 actually on the stack, at the stack pointer. */
12408 static void
12409 thumb_exit (FILE *f, int reg_containing_return_addr)
12411 unsigned regs_available_for_popping;
12412 unsigned regs_to_pop;
12413 int pops_needed;
12414 unsigned available;
12415 unsigned required;
12416 int mode;
12417 int size;
12418 int restore_a4 = FALSE;
12420 /* Compute the registers we need to pop. */
12421 regs_to_pop = 0;
12422 pops_needed = 0;
12424 if (reg_containing_return_addr == -1)
12426 regs_to_pop |= 1 << LR_REGNUM;
12427 ++pops_needed;
12430 if (TARGET_BACKTRACE)
12432 /* Restore the (ARM) frame pointer and stack pointer. */
12433 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12434 pops_needed += 2;
12437 /* If there is nothing to pop then just emit the BX instruction and
12438 return. */
12439 if (pops_needed == 0)
12441 if (current_function_calls_eh_return)
12442 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12444 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12445 return;
12447 /* Otherwise if we are not supporting interworking and we have not created
12448 a backtrace structure and the function was not entered in ARM mode then
12449 just pop the return address straight into the PC. */
12450 else if (!TARGET_INTERWORK
12451 && !TARGET_BACKTRACE
12452 && !is_called_in_ARM_mode (current_function_decl)
12453 && !current_function_calls_eh_return)
12455 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12456 return;
12459 /* Find out how many of the (return) argument registers we can corrupt. */
12460 regs_available_for_popping = 0;
12462 /* If returning via __builtin_eh_return, the bottom three registers
12463 all contain information needed for the return. */
12464 if (current_function_calls_eh_return)
12465 size = 12;
12466 else
12468 /* Deduce the registers used from the function's
12469 return value.  This is more reliable than examining
12470 regs_ever_live[] because that will be set if the register is
12471 ever used in the function, not just if the register is used
12472 to hold a return value. */
12474 if (current_function_return_rtx != 0)
12475 mode = GET_MODE (current_function_return_rtx);
12476 else
12477 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12479 size = GET_MODE_SIZE (mode);
12481 if (size == 0)
12483 /* In a void function we can use any argument register.
12484 In a function that returns a structure on the stack
12485 we can use the second and third argument registers. */
12486 if (mode == VOIDmode)
12487 regs_available_for_popping =
12488 (1 << ARG_REGISTER (1))
12489 | (1 << ARG_REGISTER (2))
12490 | (1 << ARG_REGISTER (3));
12491 else
12492 regs_available_for_popping =
12493 (1 << ARG_REGISTER (2))
12494 | (1 << ARG_REGISTER (3));
12496 else if (size <= 4)
12497 regs_available_for_popping =
12498 (1 << ARG_REGISTER (2))
12499 | (1 << ARG_REGISTER (3));
12500 else if (size <= 8)
12501 regs_available_for_popping =
12502 (1 << ARG_REGISTER (3));
12505 /* Match registers to be popped with registers into which we pop them. */
12506 for (available = regs_available_for_popping,
12507 required = regs_to_pop;
12508 required != 0 && available != 0;
12509 available &= ~(available & - available),
12510 required &= ~(required & - required))
12511 -- pops_needed;
12513 /* If we have any popping registers left over, remove them. */
12514 if (available > 0)
12515 regs_available_for_popping &= ~available;
12517 /* Otherwise if we need another popping register we can use
12518 the fourth argument register. */
12519 else if (pops_needed)
12521 /* If we have not found any free argument registers and
12522 reg a4 contains the return address, we must move it. */
12523 if (regs_available_for_popping == 0
12524 && reg_containing_return_addr == LAST_ARG_REGNUM)
12526 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12527 reg_containing_return_addr = LR_REGNUM;
12529 else if (size > 12)
12531 /* Register a4 is being used to hold part of the return value,
12532 but we have dire need of a free, low register. */
12533 restore_a4 = TRUE;
12535 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12538 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12540 /* The fourth argument register is available. */
12541 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12543 --pops_needed;
12547 /* Pop as many registers as we can. */
12548 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12549 regs_available_for_popping);
12551 /* Process the registers we popped. */
12552 if (reg_containing_return_addr == -1)
12554 /* The return address was popped into the lowest numbered register. */
12555 regs_to_pop &= ~(1 << LR_REGNUM);
12557 reg_containing_return_addr =
12558 number_of_first_bit_set (regs_available_for_popping);
12560 /* Remove this register from the mask of available registers, so that
12561 the return address will not be corrupted by further pops. */
12562 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12565 /* If we popped other registers then handle them here. */
12566 if (regs_available_for_popping)
12568 int frame_pointer;
12570 /* Work out which register currently contains the frame pointer. */
12571 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12573 /* Move it into the correct place. */
12574 asm_fprintf (f, "\tmov\t%r, %r\n",
12575 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12577 /* (Temporarily) remove it from the mask of popped registers. */
12578 regs_available_for_popping &= ~(1 << frame_pointer);
12579 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12581 if (regs_available_for_popping)
12583 int stack_pointer;
12585 /* We popped the stack pointer as well,
12586 find the register that contains it. */
12587 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12589 /* Move it into the stack register. */
12590 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12592 /* At this point we have popped all necessary registers, so
12593 do not worry about restoring regs_available_for_popping
12594 to its correct value:
12596 assert (pops_needed == 0)
12597 assert (regs_available_for_popping == (1 << frame_pointer))
12598 assert (regs_to_pop == (1 << STACK_POINTER)) */
12600 else
12602 /* Since we have just moved the popped value into the frame
12603 pointer, the popping register is available for reuse, and
12604 we know that we still have the stack pointer left to pop. */
12605 regs_available_for_popping |= (1 << frame_pointer);
12609 /* If we still have registers left on the stack, but we no longer have
12610 any registers into which we can pop them, then we must move the return
12611 address into the link register and make available the register that
12612 contained it. */
12613 if (regs_available_for_popping == 0 && pops_needed > 0)
12615 regs_available_for_popping |= 1 << reg_containing_return_addr;
12617 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12618 reg_containing_return_addr);
12620 reg_containing_return_addr = LR_REGNUM;
12623 /* If we have registers left on the stack then pop some more.
12624 We know that at most we will want to pop FP and SP. */
12625 if (pops_needed > 0)
12627 int popped_into;
12628 int move_to;
12630 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12631 regs_available_for_popping);
12633 /* We have popped either FP or SP.
12634 Move whichever one it is into the correct register. */
12635 popped_into = number_of_first_bit_set (regs_available_for_popping);
12636 move_to = number_of_first_bit_set (regs_to_pop);
12638 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12640 regs_to_pop &= ~(1 << move_to);
12642 --pops_needed;
12645 /* If we still have not popped everything then we must have only
12646 had one register available to us and we are now popping the SP. */
12647 if (pops_needed > 0)
12649 int popped_into;
12651 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12652 regs_available_for_popping);
12654 popped_into = number_of_first_bit_set (regs_available_for_popping);
12656 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12658 assert (regs_to_pop == (1 << STACK_POINTER))
12659 assert (pops_needed == 1)
12663 /* If necessary restore the a4 register. */
12664 if (restore_a4)
12666 if (reg_containing_return_addr != LR_REGNUM)
12668 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12669 reg_containing_return_addr = LR_REGNUM;
12672 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12675 if (current_function_calls_eh_return)
12676 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12678 /* Return to caller. */
12679 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
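/* For illustration: when the return address is still on the stack
   (reg_containing_return_addr == -1) and there is no interworking, no
   backtrace structure and no eh_return, the function above emits just
   "pop {pc}".  In the interworking case the return address is instead
   popped into a free argument register and the routine finishes with a
   "bx" through that register.  */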
12683 void
12684 thumb_final_prescan_insn (rtx insn)
12686 if (flag_print_asm_name)
12687 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12688 INSN_ADDRESSES (INSN_UID (insn)));
12692 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12694 unsigned HOST_WIDE_INT mask = 0xff;
12695 int i;
12697 if (val == 0) /* XXX */
12698 return 0;
12700 for (i = 0; i < 25; i++)
12701 if ((val & (mask << i)) == val)
12702 return 1;
12704 return 0;
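/* Examples (illustrative): 0xFF000000 is accepted, because it is 0xFF
   shifted left by 24 and so can be built with a move and a shift; 0x101
   is rejected because its set bits do not fit in any single 8-bit window;
   zero is rejected explicitly above.  */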
12707 /* Returns nonzero if the current function contains,
12708 or might contain a far jump. */
12709 static int
12710 thumb_far_jump_used_p (void)
12712 rtx insn;
12714 /* This test is only important for leaf functions. */
12715 /* assert (!leaf_function_p ()); */
12717 /* If we have already decided that far jumps may be used,
12718 do not bother checking again, and always return true even if
12719 it turns out that they are not being used. Once we have made
12720 the decision that far jumps are present (and that hence the link
12721 register will be pushed onto the stack) we cannot go back on it. */
12722 if (cfun->machine->far_jump_used)
12723 return 1;
12725 /* If this function is not being called from the prologue/epilogue
12726 generation code then it must be being called from the
12727 INITIAL_ELIMINATION_OFFSET macro. */
12728 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12730 /* In this case we know that we are being asked about the elimination
12731 of the arg pointer register. If that register is not being used,
12732 then there are no arguments on the stack, and we do not have to
12733 worry that a far jump might force the prologue to push the link
12734 register, changing the stack offsets. In this case we can just
12735 return false, since the presence of far jumps in the function will
12736 not affect stack offsets.
12738 If the arg pointer is live (or if it was live, but has now been
12739 eliminated and so set to dead) then we do have to test to see if
12740 the function might contain a far jump. This test can lead to some
12741 false negatives, since before reload is completed, the length of
12742 branch instructions is not known, so gcc defaults to returning their
12743 longest length, which in turn sets the far jump attribute to true.
12745 A false negative will not result in bad code being generated, but it
12746 will result in a needless push and pop of the link register. We
12747 hope that this does not occur too often.
12749 If we need doubleword stack alignment this could affect the other
12750 elimination offsets so we can't risk getting it wrong. */
12751 if (regs_ever_live [ARG_POINTER_REGNUM])
12752 cfun->machine->arg_pointer_live = 1;
12753 else if (!cfun->machine->arg_pointer_live)
12754 return 0;
12757 /* Check to see if the function contains a branch
12758 insn with the far jump attribute set. */
12759 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12761 if (GET_CODE (insn) == JUMP_INSN
12762 /* Ignore tablejump patterns. */
12763 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12764 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12765 && get_attr_far_jump (insn) == FAR_JUMP_YES
12768 /* Record the fact that we have decided that
12769 the function does use far jumps. */
12770 cfun->machine->far_jump_used = 1;
12771 return 1;
12775 return 0;
12778 /* Return nonzero if FUNC must be entered in ARM mode. */
12780 is_called_in_ARM_mode (tree func)
12782 if (TREE_CODE (func) != FUNCTION_DECL)
12783 abort ();
12785 /* Ignore the problem about functions whose address is taken. */
12786 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12787 return TRUE;
12789 #ifdef ARM_PE
12790 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12791 #else
12792 return FALSE;
12793 #endif
12796 /* The bits which aren't usefully expanded as rtl. */
12797 const char *
12798 thumb_unexpanded_epilogue (void)
12800 int regno;
12801 unsigned long live_regs_mask = 0;
12802 int high_regs_pushed = 0;
12803 int had_to_push_lr;
12804 int size;
12805 int mode;
12807 if (return_used_this_function)
12808 return "";
12810 if (IS_NAKED (arm_current_func_type ()))
12811 return "";
12813 live_regs_mask = thumb_compute_save_reg_mask ();
12814 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12816 /* Deduce the registers used from the function's return value.
12817 This is more reliable than examining regs_ever_live[] because that
12818 will be set if the register is ever used in the function, not just if
12819 the register is used to hold a return value. */
12821 if (current_function_return_rtx != 0)
12822 mode = GET_MODE (current_function_return_rtx);
12823 else
12824 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12826 size = GET_MODE_SIZE (mode);
12828 /* The prologue may have pushed some high registers to use as
12829 work registers.  For example, the testsuite file:
12830 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12831 compiles to produce:
12832 push {r4, r5, r6, r7, lr}
12833 mov r7, r9
12834 mov r6, r8
12835 push {r6, r7}
12836 as part of the prologue. We have to undo that pushing here. */
12838 if (high_regs_pushed)
12840 unsigned long mask = live_regs_mask & 0xff;
12841 int next_hi_reg;
12843 /* The available low registers depend on the size of the value we are
12844 returning. */
12845 if (size <= 12)
12846 mask |= 1 << 3;
12847 if (size <= 8)
12848 mask |= 1 << 2;
12850 if (mask == 0)
12851 /* Oh dear! We have no low registers into which we can pop
12852 high registers! */
12853 internal_error
12854 ("no low registers available for popping high registers");
12856 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12857 if (live_regs_mask & (1 << next_hi_reg))
12858 break;
12860 while (high_regs_pushed)
12862 /* Find lo register(s) into which the high register(s) can
12863 be popped. */
12864 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12866 if (mask & (1 << regno))
12867 high_regs_pushed--;
12868 if (high_regs_pushed == 0)
12869 break;
12872 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12874 /* Pop the values into the low register(s). */
12875 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12877 /* Move the value(s) into the high registers. */
12878 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12880 if (mask & (1 << regno))
12882 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12883 regno);
12885 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12886 if (live_regs_mask & (1 << next_hi_reg))
12887 break;
12891 live_regs_mask &= ~0x0f00;
12894 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12895 live_regs_mask &= 0xff;
12897 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12899 /* Pop the return address into the PC. */
12900 if (had_to_push_lr)
12901 live_regs_mask |= 1 << PC_REGNUM;
12903 /* Either no argument registers were pushed or a backtrace
12904 structure was created which includes an adjusted stack
12905 pointer, so just pop everything. */
12906 if (live_regs_mask)
12907 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12908 live_regs_mask);
12910 /* We have either just popped the return address into the
12911 PC or it was kept in LR for the entire function. */
12912 if (!had_to_push_lr)
12913 thumb_exit (asm_out_file, LR_REGNUM);
12915 else
12917 /* Pop everything but the return address. */
12918 if (live_regs_mask)
12919 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12920 live_regs_mask);
12922 if (had_to_push_lr)
12924 if (size > 12)
12926 /* We have no free low regs, so save one. */
12927 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
12928 LAST_ARG_REGNUM);
12931 /* Get the return address into a temporary register. */
12932 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12933 1 << LAST_ARG_REGNUM);
12935 if (size > 12)
12937 /* Move the return address to lr. */
12938 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
12939 LAST_ARG_REGNUM);
12940 /* Restore the low register. */
12941 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
12942 IP_REGNUM);
12943 regno = LR_REGNUM;
12945 else
12946 regno = LAST_ARG_REGNUM;
12948 else
12949 regno = LR_REGNUM;
12951 /* Remove the argument registers that were pushed onto the stack. */
12952 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12953 SP_REGNUM, SP_REGNUM,
12954 current_function_pretend_args_size);
12956 thumb_exit (asm_out_file, regno);
12959 return "";
12962 /* Functions to save and restore machine-specific function data. */
12963 static struct machine_function *
12964 arm_init_machine_status (void)
12966 struct machine_function *machine;
12967 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12969 #if ARM_FT_UNKNOWN != 0
12970 machine->func_type = ARM_FT_UNKNOWN;
12971 #endif
12972 return machine;
12975 /* Return an RTX indicating where the return address to the
12976 calling function can be found. */
12978 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12980 if (count != 0)
12981 return NULL_RTX;
12983 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
12986 /* Do anything needed before RTL is emitted for each function. */
12987 void
12988 arm_init_expanders (void)
12990 /* Arrange to initialize and mark the machine per-function status. */
12991 init_machine_status = arm_init_machine_status;
12993 /* This is to stop the combine pass optimizing away the alignment
12994 adjustment of va_arg. */
12995 /* ??? It is claimed that this should not be necessary. */
12996 if (cfun)
12997 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13001 /* Like arm_compute_initial_elimination_offset.  Simpler because
13002 THUMB_HARD_FRAME_POINTER isn't actually the ABI-specified frame pointer. */
13004 HOST_WIDE_INT
13005 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13007 arm_stack_offsets *offsets;
13009 offsets = arm_get_frame_offsets ();
13011 switch (from)
13013 case ARG_POINTER_REGNUM:
13014 switch (to)
13016 case STACK_POINTER_REGNUM:
13017 return offsets->outgoing_args - offsets->saved_args;
13019 case FRAME_POINTER_REGNUM:
13020 return offsets->soft_frame - offsets->saved_args;
13022 case THUMB_HARD_FRAME_POINTER_REGNUM:
13023 case ARM_HARD_FRAME_POINTER_REGNUM:
13024 return offsets->saved_regs - offsets->saved_args;
13026 default:
13027 abort();
13029 break;
13031 case FRAME_POINTER_REGNUM:
13032 switch (to)
13034 case STACK_POINTER_REGNUM:
13035 return offsets->outgoing_args - offsets->soft_frame;
13037 case THUMB_HARD_FRAME_POINTER_REGNUM:
13038 case ARM_HARD_FRAME_POINTER_REGNUM:
13039 return offsets->saved_regs - offsets->soft_frame;
13041 default:
13042 abort();
13044 break;
13046 default:
13047 abort ();
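/* Worked example (offsets invented for illustration): with
   offsets->saved_args == 0, offsets->saved_regs == 16,
   offsets->soft_frame == 16 and offsets->outgoing_args == 40, eliminating
   ARG_POINTER_REGNUM to STACK_POINTER_REGNUM yields 40 - 0 = 40, while
   FRAME_POINTER_REGNUM to STACK_POINTER_REGNUM yields 40 - 16 = 24.  */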
13052 /* Generate the rest of a function's prologue. */
13053 void
13054 thumb_expand_prologue (void)
13056 rtx insn, dwarf;
13058 HOST_WIDE_INT amount;
13059 arm_stack_offsets *offsets;
13060 unsigned long func_type;
13061 int regno;
13062 unsigned long live_regs_mask;
13064 func_type = arm_current_func_type ();
13066 /* Naked functions don't have prologues. */
13067 if (IS_NAKED (func_type))
13068 return;
13070 if (IS_INTERRUPT (func_type))
13072 error ("interrupt Service Routines cannot be coded in Thumb mode");
13073 return;
13076 live_regs_mask = thumb_compute_save_reg_mask ();
13077 /* Load the pic register before setting the frame pointer,
13078 so we can use r7 as a temporary work register. */
13079 if (flag_pic)
13080 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13082 offsets = arm_get_frame_offsets ();
13084 if (frame_pointer_needed)
13086 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13087 stack_pointer_rtx));
13088 RTX_FRAME_RELATED_P (insn) = 1;
13090 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13091 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13092 stack_pointer_rtx);
13094 amount = offsets->outgoing_args - offsets->saved_regs;
13095 if (amount)
13097 if (amount < 512)
13099 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13100 GEN_INT (- amount)));
13101 RTX_FRAME_RELATED_P (insn) = 1;
13103 else
13105 rtx reg;
13107 /* The stack decrement is too big for an immediate value in a single
13108 insn. In theory we could issue multiple subtracts, but after
13109 three of them it becomes more space efficient to place the full
13110 value in the constant pool and load into a register. (Also the
13111 ARM debugger really likes to see only one stack decrement per
13112 function). So instead we look for a scratch register into which
13113 we can load the decrement, and then we subtract this from the
13114 stack pointer. Unfortunately on the thumb the only available
13115 scratch registers are the argument registers, and we cannot use
13116 these as they may hold arguments to the function. Instead we
13117 attempt to locate a call preserved register which is used by this
13118 function. If we can find one, then we know that it will have
13119 been pushed at the start of the prologue and so we can corrupt
13120 it now. */
13121 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13122 if (live_regs_mask & (1 << regno)
13123 && !(frame_pointer_needed
13124 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13125 break;
13127 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13129 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13131 /* Choose an arbitrary, non-argument low register. */
13132 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13134 /* Save it by copying it into a high, scratch register. */
13135 emit_insn (gen_movsi (spare, reg));
13136 /* Add a USE to stop propagate_one_insn() from barfing. */
13137 emit_insn (gen_prologue_use (spare));
13139 /* Decrement the stack. */
13140 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13141 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13142 stack_pointer_rtx, reg));
13143 RTX_FRAME_RELATED_P (insn) = 1;
13144 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13145 plus_constant (stack_pointer_rtx,
13146 -amount));
13147 RTX_FRAME_RELATED_P (dwarf) = 1;
13148 REG_NOTES (insn)
13149 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13150 REG_NOTES (insn));
13152 /* Restore the low register's original value. */
13153 emit_insn (gen_movsi (reg, spare));
13155 /* Emit a USE of the restored scratch register, so that flow
13156 analysis will not consider the restore redundant. The
13157 register won't be used again in this function and isn't
13158 restored by the epilogue. */
13159 emit_insn (gen_prologue_use (reg));
13161 else
13163 reg = gen_rtx_REG (SImode, regno);
13165 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13167 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13168 stack_pointer_rtx, reg));
13169 RTX_FRAME_RELATED_P (insn) = 1;
13170 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13171 plus_constant (stack_pointer_rtx,
13172 -amount));
13173 RTX_FRAME_RELATED_P (dwarf) = 1;
13174 REG_NOTES (insn)
13175 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13176 REG_NOTES (insn));
13179 /* If the frame pointer is needed, emit a special barrier that
13180 will prevent the scheduler from moving stores to the frame
13181 before the stack adjustment. */
13182 if (frame_pointer_needed)
13183 emit_insn (gen_stack_tie (stack_pointer_rtx,
13184 hard_frame_pointer_rtx));
13187 if (current_function_profile || TARGET_NO_SCHED_PRO)
13188 emit_insn (gen_blockage ());
13190 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13191 if (live_regs_mask & 0xff)
13192 cfun->machine->lr_save_eliminated = 0;
13194 /* If the link register is being kept alive, with the return address in it,
13195 then make sure that it does not get reused by the ce2 pass. */
13196 if (cfun->machine->lr_save_eliminated)
13197 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
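/* Illustration of the large-frame path above (register and size chosen
   arbitrarily): if the frame needs 4096 bytes beyond the saved registers
   and r4 is a pushed call-saved register, the code loads the negated
   amount into r4 (via the literal pool, roughly "ldr r4, .LCn" where .LCn
   holds -4096) and then emits "add sp, sp, r4", attaching a
   REG_FRAME_RELATED_EXPR note that describes the net sp = sp - 4096
   adjustment for the unwinder.  */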
13201 void
13202 thumb_expand_epilogue (void)
13204 HOST_WIDE_INT amount;
13205 arm_stack_offsets *offsets;
13206 int regno;
13208 /* Naked functions don't have epilogues. */
13209 if (IS_NAKED (arm_current_func_type ()))
13210 return;
13212 offsets = arm_get_frame_offsets ();
13213 amount = offsets->outgoing_args - offsets->saved_regs;
13215 if (frame_pointer_needed)
13216 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13217 else if (amount)
13219 if (amount < 512)
13220 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13221 GEN_INT (amount)));
13222 else
13224 /* r3 is always free in the epilogue. */
13225 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13227 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13228 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13232 /* Emit a USE (stack_pointer_rtx), so that
13233 the stack adjustment will not be deleted. */
13234 emit_insn (gen_prologue_use (stack_pointer_rtx));
13236 if (current_function_profile || TARGET_NO_SCHED_PRO)
13237 emit_insn (gen_blockage ());
13239 /* Emit a clobber for each insn that will be restored in the epilogue,
13240 so that flow2 will get register lifetimes correct. */
13241 for (regno = 0; regno < 13; regno++)
13242 if (regs_ever_live[regno] && !call_used_regs[regno])
13243 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13245 if (! regs_ever_live[LR_REGNUM])
13246 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13249 static void
13250 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13252 unsigned long live_regs_mask = 0;
13253 unsigned long l_mask;
13254 unsigned high_regs_pushed = 0;
13255 int cfa_offset = 0;
13256 int regno;
13258 if (IS_NAKED (arm_current_func_type ()))
13259 return;
13261 if (is_called_in_ARM_mode (current_function_decl))
13263 const char * name;
13265 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13266 abort ();
13267 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13268 abort ();
13269 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13271 /* Generate code sequence to switch us into Thumb mode. */
13272 /* The .code 32 directive has already been emitted by
13273 ASM_DECLARE_FUNCTION_NAME. */
13274 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13275 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13277 /* Generate a label, so that the debugger will notice the
13278 change in instruction sets. This label is also used by
13279 the assembler to bypass the ARM code when this function
13280 is called from a Thumb encoded function elsewhere in the
13281 same file. Hence the definition of STUB_NAME here must
13282 agree with the definition in gas/config/tc-arm.c. */
13284 #define STUB_NAME ".real_start_of"
13286 fprintf (f, "\t.code\t16\n");
13287 #ifdef ARM_PE
13288 if (arm_dllexport_name_p (name))
13289 name = arm_strip_name_encoding (name);
13290 #endif
13291 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13292 fprintf (f, "\t.thumb_func\n");
13293 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13296 if (current_function_pretend_args_size)
13298 if (cfun->machine->uses_anonymous_args)
13300 int num_pushes;
13302 fprintf (f, "\tpush\t{");
13304 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13306 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13307 regno <= LAST_ARG_REGNUM;
13308 regno++)
13309 asm_fprintf (f, "%r%s", regno,
13310 regno == LAST_ARG_REGNUM ? "" : ", ");
13312 fprintf (f, "}\n");
13314 else
13315 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13316 SP_REGNUM, SP_REGNUM,
13317 current_function_pretend_args_size);
13319 /* We don't need to record the stores for unwinding (would it
13320 help the debugger any if we did?), but record the change in
13321 the stack pointer. */
13322 if (dwarf2out_do_frame ())
13324 char *l = dwarf2out_cfi_label ();
13326 cfa_offset = cfa_offset + current_function_pretend_args_size;
13327 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13331 /* Get the registers we are going to push. */
13332 live_regs_mask = thumb_compute_save_reg_mask ();
13333 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13334 l_mask = live_regs_mask & 0x40ff;
13335 /* Then count how many other high registers will need to be pushed. */
13336 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13338 if (TARGET_BACKTRACE)
13340 unsigned offset;
13341 unsigned work_register;
13343 /* We have been asked to create a stack backtrace structure.
13344 The code looks like this:
13346 0 .align 2
13347 0 func:
13348 0 sub SP, #16 Reserve space for 4 registers.
13349 2 push {R7} Push low registers.
13350 4 add R7, SP, #20 Get the stack pointer before the push.
13351 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13352 8 mov R7, PC Get hold of the start of this code plus 12.
13353 10 str R7, [SP, #16] Store it.
13354 12 mov R7, FP Get hold of the current frame pointer.
13355 14 str R7, [SP, #4] Store it.
13356 16 mov R7, LR Get hold of the current return address.
13357 18 str R7, [SP, #12] Store it.
13358 20 add R7, SP, #16 Point at the start of the backtrace structure.
13359 22 mov FP, R7 Put this value into the frame pointer. */
13361 work_register = thumb_find_work_register (live_regs_mask);
13363 asm_fprintf
13364 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13365 SP_REGNUM, SP_REGNUM);
13367 if (dwarf2out_do_frame ())
13369 char *l = dwarf2out_cfi_label ();
13371 cfa_offset = cfa_offset + 16;
13372 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13375 if (l_mask)
13377 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13378 offset = bit_count (l_mask);
13380 else
13381 offset = 0;
13383 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13384 offset + 16 + current_function_pretend_args_size);
13386 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13387 offset + 4);
13389 /* Make sure that the instruction fetching the PC is in the right place
13390 to calculate "start of backtrace creation code + 12". */
13391 if (l_mask)
13393 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13394 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13395 offset + 12);
13396 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13397 ARM_HARD_FRAME_POINTER_REGNUM);
13398 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13399 offset);
13401 else
13403 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13404 ARM_HARD_FRAME_POINTER_REGNUM);
13405 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13406 offset);
13407 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13408 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13409 offset + 12);
13412 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13413 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13414 offset + 8);
13415 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13416 offset + 12);
13417 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13418 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13420 /* Optimisation: If we are not pushing any low registers but we are going
13421 to push some high registers then delay our first push. This will just
13422 be a push of LR and we can combine it with the push of the first high
13423 register. */
13424 else if ((l_mask & 0xff) != 0
13425 || (high_regs_pushed == 0 && l_mask))
13426 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13428 if (high_regs_pushed)
13430 unsigned pushable_regs;
13431 unsigned next_hi_reg;
13433 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13434 if (live_regs_mask & (1 << next_hi_reg))
13435 break;
13437 pushable_regs = l_mask & 0xff;
13439 if (pushable_regs == 0)
13440 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13442 while (high_regs_pushed > 0)
13444 unsigned long real_regs_mask = 0;
13446 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13448 if (pushable_regs & (1 << regno))
13450 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13452 high_regs_pushed --;
13453 real_regs_mask |= (1 << next_hi_reg);
13455 if (high_regs_pushed)
13457 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13458 next_hi_reg --)
13459 if (live_regs_mask & (1 << next_hi_reg))
13460 break;
13462 else
13464 pushable_regs &= ~((1 << regno) - 1);
13465 break;
13470 /* If we had to find a work register and we have not yet
13471 saved the LR then add it to the list of regs to push. */
13472 if (l_mask == (1 << LR_REGNUM))
13474 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13475 1, &cfa_offset,
13476 real_regs_mask | (1 << LR_REGNUM));
13477 l_mask = 0;
13479 else
13480 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13485 /* Handle the case of a double word load into a low register from
13486 a computed memory address. The computed address may involve a
13487 register which is overwritten by the load. */
13488 const char *
13489 thumb_load_double_from_address (rtx *operands)
13491 rtx addr;
13492 rtx base;
13493 rtx offset;
13494 rtx arg1;
13495 rtx arg2;
13497 if (GET_CODE (operands[0]) != REG)
13498 abort ();
13500 if (GET_CODE (operands[1]) != MEM)
13501 abort ();
13503 /* Get the memory address. */
13504 addr = XEXP (operands[1], 0);
13506 /* Work out how the memory address is computed. */
13507 switch (GET_CODE (addr))
13509 case REG:
13510 operands[2] = gen_rtx_MEM (SImode,
13511 plus_constant (XEXP (operands[1], 0), 4));
13513 if (REGNO (operands[0]) == REGNO (addr))
13515 output_asm_insn ("ldr\t%H0, %2", operands);
13516 output_asm_insn ("ldr\t%0, %1", operands);
13518 else
13520 output_asm_insn ("ldr\t%0, %1", operands);
13521 output_asm_insn ("ldr\t%H0, %2", operands);
13523 break;
13525 case CONST:
13526 /* Compute <address> + 4 for the high order load. */
13527 operands[2] = gen_rtx_MEM (SImode,
13528 plus_constant (XEXP (operands[1], 0), 4));
13530 output_asm_insn ("ldr\t%0, %1", operands);
13531 output_asm_insn ("ldr\t%H0, %2", operands);
13532 break;
13534 case PLUS:
13535 arg1 = XEXP (addr, 0);
13536 arg2 = XEXP (addr, 1);
13538 if (CONSTANT_P (arg1))
13539 base = arg2, offset = arg1;
13540 else
13541 base = arg1, offset = arg2;
13543 if (GET_CODE (base) != REG)
13544 abort ();
13546 /* Catch the case of <address> = <reg> + <reg> */
13547 if (GET_CODE (offset) == REG)
13549 int reg_offset = REGNO (offset);
13550 int reg_base = REGNO (base);
13551 int reg_dest = REGNO (operands[0]);
13553 /* Add the base and offset registers together into the
13554 higher destination register. */
13555 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13556 reg_dest + 1, reg_base, reg_offset);
13558 /* Load the lower destination register from the address in
13559 the higher destination register. */
13560 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13561 reg_dest, reg_dest + 1);
13563 /* Load the higher destination register from its own address
13564 plus 4. */
13565 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13566 reg_dest + 1, reg_dest + 1);
13568 else
13570 /* Compute <address> + 4 for the high order load. */
13571 operands[2] = gen_rtx_MEM (SImode,
13572 plus_constant (XEXP (operands[1], 0), 4));
13574 /* If the computed address is held in the low order register
13575 then load the high order register first, otherwise always
13576 load the low order register first. */
13577 if (REGNO (operands[0]) == REGNO (base))
13579 output_asm_insn ("ldr\t%H0, %2", operands);
13580 output_asm_insn ("ldr\t%0, %1", operands);
13582 else
13584 output_asm_insn ("ldr\t%0, %1", operands);
13585 output_asm_insn ("ldr\t%H0, %2", operands);
13588 break;
13590 case LABEL_REF:
13591 /* With no registers to worry about we can just load the value
13592 directly. */
13593 operands[2] = gen_rtx_MEM (SImode,
13594 plus_constant (XEXP (operands[1], 0), 4));
13596 output_asm_insn ("ldr\t%H0, %2", operands);
13597 output_asm_insn ("ldr\t%0, %1", operands);
13598 break;
13600 default:
13601 abort ();
13602 break;
13605 return "";
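/* Illustration (register numbers invented): if operands[0] is r2 and
   operands[1] is the memory reference [r2 + r3], the PLUS-with-register
   case above emits

       add  r3, r2, r3
       ldr  r2, [r3, #0]
       ldr  r3, [r3, #4]

   so the computed address survives in the high half of the destination
   pair until both words have been loaded.  */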
13608 const char *
13609 thumb_output_move_mem_multiple (int n, rtx *operands)
13611 rtx tmp;
13613 switch (n)
13615 case 2:
13616 if (REGNO (operands[4]) > REGNO (operands[5]))
13618 tmp = operands[4];
13619 operands[4] = operands[5];
13620 operands[5] = tmp;
13622 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13623 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13624 break;
13626 case 3:
13627 if (REGNO (operands[4]) > REGNO (operands[5]))
13629 tmp = operands[4];
13630 operands[4] = operands[5];
13631 operands[5] = tmp;
13633 if (REGNO (operands[5]) > REGNO (operands[6]))
13635 tmp = operands[5];
13636 operands[5] = operands[6];
13637 operands[6] = tmp;
13639 if (REGNO (operands[4]) > REGNO (operands[5]))
13641 tmp = operands[4];
13642 operands[4] = operands[5];
13643 operands[5] = tmp;
13646 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13647 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13648 break;
13650 default:
13651 abort ();
13654 return "";
13657 /* Output a call-via instruction for thumb state. */
13658 const char *
13659 thumb_call_via_reg (rtx reg)
13661 int regno = REGNO (reg);
13662 rtx *labelp;
13664 gcc_assert (regno < LR_REGNUM);
13666 /* If we are in the normal text section we can use a single instance
13667 per compilation unit. If we are doing function sections, then we need
13668 an entry per section, since we can't rely on reachability. */
13669 if (in_text_section ())
13671 thumb_call_reg_needed = 1;
13673 if (thumb_call_via_label[regno] == NULL)
13674 thumb_call_via_label[regno] = gen_label_rtx ();
13675 labelp = thumb_call_via_label + regno;
13677 else
13679 if (cfun->machine->call_via[regno] == NULL)
13680 cfun->machine->call_via[regno] = gen_label_rtx ();
13681 labelp = cfun->machine->call_via + regno;
13684 output_asm_insn ("bl\t%a0", labelp);
13685 return "";
13688 /* Routines for generating rtl. */
13689 void
13690 thumb_expand_movmemqi (rtx *operands)
13692 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13693 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13694 HOST_WIDE_INT len = INTVAL (operands[2]);
13695 HOST_WIDE_INT offset = 0;
13697 while (len >= 12)
13699 emit_insn (gen_movmem12b (out, in, out, in));
13700 len -= 12;
13703 if (len >= 8)
13705 emit_insn (gen_movmem8b (out, in, out, in));
13706 len -= 8;
13709 if (len >= 4)
13711 rtx reg = gen_reg_rtx (SImode);
13712 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13713 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13714 len -= 4;
13715 offset += 4;
13718 if (len >= 2)
13720 rtx reg = gen_reg_rtx (HImode);
13721 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13722 plus_constant (in, offset))));
13723 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13724 reg));
13725 len -= 2;
13726 offset += 2;
13729 if (len)
13731 rtx reg = gen_reg_rtx (QImode);
13732 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13733 plus_constant (in, offset))));
13734 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13735 reg));
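/* Illustrative sketch (editor's note): for a 7-byte copy the expansion
   above emits one word move through a fresh SImode register, then a
   halfword move at offset 4 and a byte move at offset 6.  A 27-byte
   copy would first use two 12-byte ldmia/stmia blocks (movmem12b) and
   then a halfword and byte tail relative to the advanced pointers.  */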
13739 void
13740 thumb_reload_out_hi (rtx *operands)
13742 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13745 /* Handle reading a half-word from memory during reload. */
13746 void
13747 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13749 abort ();
13752 /* Return the length of a function name prefix
13753 that starts with the character 'c'. */
13754 static int
13755 arm_get_strip_length (int c)
13757 switch (c)
13759 ARM_NAME_ENCODING_LENGTHS
13760 default: return 0;
13764 /* Return a pointer to a function's name with any
13765 and all prefix encodings stripped from it. */
13766 const char *
13767 arm_strip_name_encoding (const char *name)
13769 int skip;
13771 while ((skip = arm_get_strip_length (* name)))
13772 name += skip;
13774 return name;
13777 /* If there is a '*' anywhere in the name's prefix, then
13778 emit the stripped name verbatim, otherwise prepend an
13779 underscore if leading underscores are being used. */
13780 void
13781 arm_asm_output_labelref (FILE *stream, const char *name)
13783 int skip;
13784 int verbatim = 0;
13786 while ((skip = arm_get_strip_length (* name)))
13788 verbatim |= (*name == '*');
13789 name += skip;
13792 if (verbatim)
13793 fputs (name, stream);
13794 else
13795 asm_fprintf (stream, "%U%s", name);
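/* Illustrative sketch (editor's note; assumes '*' is one of the
   one-character prefixes listed in ARM_NAME_ENCODING_LENGTHS): a name
   such as "*foo" would be emitted verbatim as "foo", while a plain
   "foo" is emitted with the user-label prefix, e.g. "_foo" on targets
   that prepend an underscore.  */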
13798 static void
13799 arm_file_end (void)
13801 int regno;
13803 if (! thumb_call_reg_needed)
13804 return;
13806 text_section ();
13807 asm_fprintf (asm_out_file, "\t.code 16\n");
13808 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13810 for (regno = 0; regno < LR_REGNUM; regno++)
13812 rtx label = thumb_call_via_label[regno];
13814 if (label != 0)
13816 targetm.asm_out.internal_label (asm_out_file, "L",
13817 CODE_LABEL_NUMBER (label));
13818 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
13823 rtx aof_pic_label;
13825 #ifdef AOF_ASSEMBLER
13826 /* Special functions only needed when producing AOF syntax assembler. */
13828 struct pic_chain
13830 struct pic_chain * next;
13831 const char * symname;
13834 static struct pic_chain * aof_pic_chain = NULL;
13836 rtx
13837 aof_pic_entry (rtx x)
13839 struct pic_chain ** chainp;
13840 int offset;
13842 if (aof_pic_label == NULL_RTX)
13844 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13847 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13848 offset += 4, chainp = &(*chainp)->next)
13849 if ((*chainp)->symname == XSTR (x, 0))
13850 return plus_constant (aof_pic_label, offset);
13852 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13853 (*chainp)->next = NULL;
13854 (*chainp)->symname = XSTR (x, 0);
13855 return plus_constant (aof_pic_label, offset);
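/* Illustrative sketch (editor's note): the first symbol interned above is
   addressed as x$adcons + 0, the second as x$adcons + 4, and so on;
   aof_dump_pic_table below emits one DCD entry per chained symbol in the
   same order, so the offsets remain valid.  */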
13858 void
13859 aof_dump_pic_table (FILE *f)
13861 struct pic_chain * chain;
13863 if (aof_pic_chain == NULL)
13864 return;
13866 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13867 PIC_OFFSET_TABLE_REGNUM,
13868 PIC_OFFSET_TABLE_REGNUM);
13869 fputs ("|x$adcons|\n", f);
13871 for (chain = aof_pic_chain; chain; chain = chain->next)
13873 fputs ("\tDCD\t", f);
13874 assemble_name (f, chain->symname);
13875 fputs ("\n", f);
13879 int arm_text_section_count = 1;
13881 char *
13882 aof_text_section (void)
13884 static char buf[100];
13885 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13886 arm_text_section_count++);
13887 if (flag_pic)
13888 strcat (buf, ", PIC, REENTRANT");
13889 return buf;
13892 static int arm_data_section_count = 1;
13894 char *
13895 aof_data_section (void)
13897 static char buf[100];
13898 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13899 return buf;
13902 /* The AOF assembler is religiously strict about declarations of
13903 imported and exported symbols, so that it is impossible to declare
13904 a function as imported near the beginning of the file, and then to
13905 export it later on. It is, however, possible to delay the decision
13906 until all the functions in the file have been compiled. To get
13907 around this, we maintain a list of the imports and exports, and
13908 delete from it any that are subsequently defined. At the end of
13909 compilation we spit the remainder of the list out before the END
13910 directive. */
13912 struct import
13914 struct import * next;
13915 const char * name;
13918 static struct import * imports_list = NULL;
13920 void
13921 aof_add_import (const char *name)
13923 struct import * new;
13925 for (new = imports_list; new; new = new->next)
13926 if (new->name == name)
13927 return;
13929 new = (struct import *) xmalloc (sizeof (struct import));
13930 new->next = imports_list;
13931 imports_list = new;
13932 new->name = name;
13935 void
13936 aof_delete_import (const char *name)
13938 struct import ** old;
13940 for (old = &imports_list; *old; old = & (*old)->next)
13942 if ((*old)->name == name)
13944 *old = (*old)->next;
13945 return;
13950 int arm_main_function = 0;
13952 static void
13953 aof_dump_imports (FILE *f)
13955 /* The AOF assembler needs this to cause the startup code to be extracted
13956 from the library. Bringing in __main causes the whole thing to work
13957 automagically. */
13958 if (arm_main_function)
13960 text_section ();
13961 fputs ("\tIMPORT __main\n", f);
13962 fputs ("\tDCD __main\n", f);
13965 /* Now dump the remaining imports. */
13966 while (imports_list)
13968 fprintf (f, "\tIMPORT\t");
13969 assemble_name (f, imports_list->name);
13970 fputc ('\n', f);
13971 imports_list = imports_list->next;
13975 static void
13976 aof_globalize_label (FILE *stream, const char *name)
13978 default_globalize_label (stream, name);
13979 if (! strcmp (name, "main"))
13980 arm_main_function = 1;
13983 static void
13984 aof_file_start (void)
13986 fputs ("__r0\tRN\t0\n", asm_out_file);
13987 fputs ("__a1\tRN\t0\n", asm_out_file);
13988 fputs ("__a2\tRN\t1\n", asm_out_file);
13989 fputs ("__a3\tRN\t2\n", asm_out_file);
13990 fputs ("__a4\tRN\t3\n", asm_out_file);
13991 fputs ("__v1\tRN\t4\n", asm_out_file);
13992 fputs ("__v2\tRN\t5\n", asm_out_file);
13993 fputs ("__v3\tRN\t6\n", asm_out_file);
13994 fputs ("__v4\tRN\t7\n", asm_out_file);
13995 fputs ("__v5\tRN\t8\n", asm_out_file);
13996 fputs ("__v6\tRN\t9\n", asm_out_file);
13997 fputs ("__sl\tRN\t10\n", asm_out_file);
13998 fputs ("__fp\tRN\t11\n", asm_out_file);
13999 fputs ("__ip\tRN\t12\n", asm_out_file);
14000 fputs ("__sp\tRN\t13\n", asm_out_file);
14001 fputs ("__lr\tRN\t14\n", asm_out_file);
14002 fputs ("__pc\tRN\t15\n", asm_out_file);
14003 fputs ("__f0\tFN\t0\n", asm_out_file);
14004 fputs ("__f1\tFN\t1\n", asm_out_file);
14005 fputs ("__f2\tFN\t2\n", asm_out_file);
14006 fputs ("__f3\tFN\t3\n", asm_out_file);
14007 fputs ("__f4\tFN\t4\n", asm_out_file);
14008 fputs ("__f5\tFN\t5\n", asm_out_file);
14009 fputs ("__f6\tFN\t6\n", asm_out_file);
14010 fputs ("__f7\tFN\t7\n", asm_out_file);
14011 text_section ();
14014 static void
14015 aof_file_end (void)
14017 if (flag_pic)
14018 aof_dump_pic_table (asm_out_file);
14019 arm_file_end ();
14020 aof_dump_imports (asm_out_file);
14021 fputs ("\tEND\n", asm_out_file);
14023 #endif /* AOF_ASSEMBLER */
14025 #ifndef ARM_PE
14026 /* Symbols in the text segment can be accessed without indirecting via the
14027 constant pool; it may take an extra binary operation, but this is still
14028 faster than indirecting via memory. Don't do this when not optimizing,
14029 since we won't be calculating all of the offsets necessary to do this
14030 simplification. */
14032 static void
14033 arm_encode_section_info (tree decl, rtx rtl, int first)
14035 /* This doesn't work with AOF syntax, since the string table may be in
14036 a different AREA. */
14037 #ifndef AOF_ASSEMBLER
14038 if (optimize > 0 && TREE_CONSTANT (decl))
14039 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14040 #endif
14042 /* If we are referencing a function that is weak then encode a long call
14043 flag in the function name, otherwise if the function is static or
14044 known to be defined in this file then encode a short call flag. */
14045 if (first && DECL_P (decl))
14047 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14048 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14049 else if (! TREE_PUBLIC (decl))
14050 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14053 #endif /* !ARM_PE */
14055 static void
14056 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14058 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14059 && !strcmp (prefix, "L"))
14061 arm_ccfsm_state = 0;
14062 arm_target_insn = NULL;
14064 default_internal_label (stream, prefix, labelno);
14067 /* Output code to add DELTA to the first argument, and then jump
14068 to FUNCTION. Used for C++ multiple inheritance. */
14069 static void
14070 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14071 HOST_WIDE_INT delta,
14072 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14073 tree function)
14075 static int thunk_label = 0;
14076 char label[256];
14077 int mi_delta = delta;
14078 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14079 int shift = 0;
14080 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14081 ? 1 : 0);
14082 if (mi_delta < 0)
14083 mi_delta = - mi_delta;
14084 if (TARGET_THUMB)
14086 int labelno = thunk_label++;
14087 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14088 fputs ("\tldr\tr12, ", file);
14089 assemble_name (file, label);
14090 fputc ('\n', file);
14092 while (mi_delta != 0)
14094 if ((mi_delta & (3 << shift)) == 0)
14095 shift += 2;
14096 else
14098 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14099 mi_op, this_regno, this_regno,
14100 mi_delta & (0xff << shift));
14101 mi_delta &= ~(0xff << shift);
14102 shift += 8;
14105 if (TARGET_THUMB)
14107 fprintf (file, "\tbx\tr12\n");
14108 ASM_OUTPUT_ALIGN (file, 2);
14109 assemble_name (file, label);
14110 fputs (":\n", file);
14111 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14113 else
14115 fputs ("\tb\t", file);
14116 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14117 if (NEED_PLT_RELOC)
14118 fputs ("(PLT)", file);
14119 fputc ('\n', file);
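/* Illustrative sketch (editor's note; register and delta values are
   assumptions): the loop above splits DELTA into immediates that fit the
   ARM 8-bit rotated-immediate form.  For an ARM-state thunk with `this'
   in r0 and delta = 4100 (0x1004) it would emit:

       add     r0, r0, #4
       add     r0, r0, #4096

   followed by the tail branch to FUNCTION (with a "(PLT)" suffix when
   PLT relocations are needed).  */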
14123 int
14124 arm_emit_vector_const (FILE *file, rtx x)
14126 int i;
14127 const char * pattern;
14129 if (GET_CODE (x) != CONST_VECTOR)
14130 abort ();
14132 switch (GET_MODE (x))
14134 case V2SImode: pattern = "%08x"; break;
14135 case V4HImode: pattern = "%04x"; break;
14136 case V8QImode: pattern = "%02x"; break;
14137 default: abort ();
14140 fprintf (file, "0x");
14141 for (i = CONST_VECTOR_NUNITS (x); i--;)
14143 rtx element;
14145 element = CONST_VECTOR_ELT (x, i);
14146 fprintf (file, pattern, INTVAL (element));
14149 return 1;
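/* Illustrative sketch (editor's note; element values are assumptions):
   elements are printed from the highest-numbered lane downwards, so a
   V4HImode constant whose elements 0..3 are 1, 2, 3, 4 is emitted as
   the single literal 0x0004000300020001.  */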
14152 const char *
14153 arm_output_load_gr (rtx *operands)
14155 rtx reg;
14156 rtx offset;
14157 rtx wcgr;
14158 rtx sum;
14160 if (GET_CODE (operands [1]) != MEM
14161 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14162 || GET_CODE (reg = XEXP (sum, 0)) != REG
14163 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14164 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14165 return "wldrw%?\t%0, %1";
14167 /* Fix up an out-of-range load of a GR register. */
14168 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14169 wcgr = operands[0];
14170 operands[0] = reg;
14171 output_asm_insn ("ldr%?\t%0, %1", operands);
14173 operands[0] = wcgr;
14174 operands[1] = reg;
14175 output_asm_insn ("tmcr%?\t%0, %1", operands);
14176 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14178 return "";
14181 static rtx
14182 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14183 int incoming ATTRIBUTE_UNUSED)
14185 #if 0
14186 /* FIXME: The ARM backend has special code to handle structure
14187 returns, and will reserve its own hidden first argument. So
14188 if this macro is enabled a *second* hidden argument will be
14189 reserved, which will break binary compatibility with old
14190 toolchains and also thunk handling. One day this should be
14191 fixed. */
14192 return 0;
14193 #else
14194 /* Register in which address to store a structure value
14195 is passed to a function. */
14196 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14197 #endif
14200 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14202 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14203 named arg and all anonymous args onto the stack.
14204 XXX I know the prologue shouldn't be pushing registers, but it is faster
14205 that way. */
14207 static void
14208 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14209 enum machine_mode mode ATTRIBUTE_UNUSED,
14210 tree type ATTRIBUTE_UNUSED,
14211 int *pretend_size,
14212 int second_time ATTRIBUTE_UNUSED)
14214 cfun->machine->uses_anonymous_args = 1;
14215 if (cum->nregs < NUM_ARG_REGS)
14216 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
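/* Illustrative sketch (editor's note; assumes the usual four 4-byte
   argument registers): for a varargs function whose named arguments
   occupy r0 and r1 (cum->nregs == 2), *pretend_size becomes
   (4 - 2) * 4 = 8, so the prologue pushes r2 and r3 and every anonymous
   argument ends up contiguous with the caller's stack arguments.  */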
14219 /* Return nonzero if the CONSUMER instruction (a store) does not need
14220 PRODUCER's value to calculate the address. */
14222 int
14223 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14225 rtx value = PATTERN (producer);
14226 rtx addr = PATTERN (consumer);
14228 if (GET_CODE (value) == COND_EXEC)
14229 value = COND_EXEC_CODE (value);
14230 if (GET_CODE (value) == PARALLEL)
14231 value = XVECEXP (value, 0, 0);
14232 value = XEXP (value, 0);
14233 if (GET_CODE (addr) == COND_EXEC)
14234 addr = COND_EXEC_CODE (addr);
14235 if (GET_CODE (addr) == PARALLEL)
14236 addr = XVECEXP (addr, 0, 0);
14237 addr = XEXP (addr, 0);
14239 return !reg_overlap_mentioned_p (value, addr);
14242 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14243 have an early register shift value or amount dependency on the
14244 result of PRODUCER. */
14246 int
14247 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14249 rtx value = PATTERN (producer);
14250 rtx op = PATTERN (consumer);
14251 rtx early_op;
14253 if (GET_CODE (value) == COND_EXEC)
14254 value = COND_EXEC_CODE (value);
14255 if (GET_CODE (value) == PARALLEL)
14256 value = XVECEXP (value, 0, 0);
14257 value = XEXP (value, 0);
14258 if (GET_CODE (op) == COND_EXEC)
14259 op = COND_EXEC_CODE (op);
14260 if (GET_CODE (op) == PARALLEL)
14261 op = XVECEXP (op, 0, 0);
14262 op = XEXP (op, 1);
14264 early_op = XEXP (op, 0);
14265 /* This is either an actual independent shift, or a shift applied to
14266 the first operand of another operation. We want the whole shift
14267 operation. */
14268 if (GET_CODE (early_op) == REG)
14269 early_op = op;
14271 return !reg_overlap_mentioned_p (value, early_op);
14274 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14275 have an early register shift value dependency on the result of
14276 PRODUCER. */
14278 int
14279 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14281 rtx value = PATTERN (producer);
14282 rtx op = PATTERN (consumer);
14283 rtx early_op;
14285 if (GET_CODE (value) == COND_EXEC)
14286 value = COND_EXEC_CODE (value);
14287 if (GET_CODE (value) == PARALLEL)
14288 value = XVECEXP (value, 0, 0);
14289 value = XEXP (value, 0);
14290 if (GET_CODE (op) == COND_EXEC)
14291 op = COND_EXEC_CODE (op);
14292 if (GET_CODE (op) == PARALLEL)
14293 op = XVECEXP (op, 0, 0);
14294 op = XEXP (op, 1);
14296 early_op = XEXP (op, 0);
14298 /* This is either an actual independent shift, or a shift applied to
14299 the first operand of another operation. We want the value being
14300 shifted, in either case. */
14301 if (GET_CODE (early_op) != REG)
14302 early_op = XEXP (early_op, 0);
14304 return !reg_overlap_mentioned_p (value, early_op);
14307 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14308 have an early register mult dependency on the result of
14309 PRODUCER. */
14311 int
14312 arm_no_early_mul_dep (rtx producer, rtx consumer)
14314 rtx value = PATTERN (producer);
14315 rtx op = PATTERN (consumer);
14317 if (GET_CODE (value) == COND_EXEC)
14318 value = COND_EXEC_CODE (value);
14319 if (GET_CODE (value) == PARALLEL)
14320 value = XVECEXP (value, 0, 0);
14321 value = XEXP (value, 0);
14322 if (GET_CODE (op) == COND_EXEC)
14323 op = COND_EXEC_CODE (op);
14324 if (GET_CODE (op) == PARALLEL)
14325 op = XVECEXP (op, 0, 0);
14326 op = XEXP (op, 1);
14328 return (GET_CODE (op) == PLUS
14329 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14333 /* We can't rely on the caller doing the proper promotion when
14334 using APCS or ATPCS. */
14336 static bool
14337 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14339 return !TARGET_AAPCS_BASED;
14343 /* AAPCS based ABIs use short enums by default. */
14345 static bool
14346 arm_default_short_enums (void)
14348 return TARGET_AAPCS_BASED;
14352 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14354 static bool
14355 arm_align_anon_bitfield (void)
14357 return TARGET_AAPCS_BASED;
14361 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14363 static tree
14364 arm_cxx_guard_type (void)
14366 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14370 /* The EABI says test the least significant bit of a guard variable. */
14372 static bool
14373 arm_cxx_guard_mask_bit (void)
14375 return TARGET_AAPCS_BASED;
14379 /* The EABI specifies that all array cookies are 8 bytes long. */
14381 static tree
14382 arm_get_cookie_size (tree type)
14384 tree size;
14386 if (!TARGET_AAPCS_BASED)
14387 return default_cxx_get_cookie_size (type);
14389 size = build_int_cst (sizetype, 8);
14390 return size;
14394 /* The EABI says that array cookies should also contain the element size. */
14396 static bool
14397 arm_cookie_has_size (void)
14399 return TARGET_AAPCS_BASED;
14403 /* The EABI says constructors and destructors should return a pointer to
14404 the object constructed/destroyed. */
14406 static bool
14407 arm_cxx_cdtor_returns_this (void)
14409 return TARGET_AAPCS_BASED;
14412 /* The EABI says that an inline function may never be the key
14413 method. */
14415 static bool
14416 arm_cxx_key_method_may_be_inline (void)
14418 return !TARGET_AAPCS_BASED;
14421 static void
14422 arm_cxx_determine_class_data_visibility (tree decl)
14424 if (!TARGET_AAPCS_BASED)
14425 return;
14427 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
14428 is exported. However, on systems without dynamic vague linkage,
14429 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
14430 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
14431 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
14432 else
14433 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
14434 DECL_VISIBILITY_SPECIFIED (decl) = 1;
14437 static bool
14438 arm_cxx_class_data_always_comdat (void)
14440 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
14441 vague linkage if the class has no key function. */
14442 return !TARGET_AAPCS_BASED;
14446 /* The EABI says __aeabi_atexit should be used to register static
14447 destructors. */
14449 static bool
14450 arm_cxx_use_aeabi_atexit (void)
14452 return TARGET_AAPCS_BASED;
14456 void
14457 arm_set_return_address (rtx source, rtx scratch)
14459 arm_stack_offsets *offsets;
14460 HOST_WIDE_INT delta;
14461 rtx addr;
14462 unsigned long saved_regs;
14464 saved_regs = arm_compute_save_reg_mask ();
14466 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14467 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14468 else
14470 if (frame_pointer_needed)
14471 addr = plus_constant (hard_frame_pointer_rtx, -4);
14472 else
14474 /* LR will be the first saved register. */
14475 offsets = arm_get_frame_offsets ();
14476 delta = offsets->outgoing_args - (offsets->frame + 4);
14479 if (delta >= 4096)
14481 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14482 GEN_INT (delta & ~4095)));
14483 addr = scratch;
14484 delta &= 4095;
14486 else
14487 addr = stack_pointer_rtx;
14489 addr = plus_constant (addr, delta);
14491 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14496 void
14497 thumb_set_return_address (rtx source, rtx scratch)
14499 arm_stack_offsets *offsets;
14500 HOST_WIDE_INT delta;
14501 int reg;
14502 rtx addr;
14503 unsigned long mask;
14505 emit_insn (gen_rtx_USE (VOIDmode, source));
14507 mask = thumb_compute_save_reg_mask ();
14508 if (mask & (1 << LR_REGNUM))
14510 offsets = arm_get_frame_offsets ();
14512 /* Find the saved regs. */
14513 if (frame_pointer_needed)
14515 delta = offsets->soft_frame - offsets->saved_args;
14516 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14518 else
14520 delta = offsets->outgoing_args - offsets->saved_args;
14521 reg = SP_REGNUM;
14523 /* Allow for the stack frame. */
14524 if (TARGET_BACKTRACE)
14525 delta -= 16;
14526 /* The link register is always the first saved register. */
14527 delta -= 4;
14529 /* Construct the address. */
14530 addr = gen_rtx_REG (SImode, reg);
14531 if ((reg != SP_REGNUM && delta >= 128)
14532 || delta >= 1024)
14534 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14535 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14536 addr = scratch;
14538 else
14539 addr = plus_constant (addr, delta);
14541 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14543 else
14544 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14547 /* Implements target hook vector_mode_supported_p. */
14548 bool
14549 arm_vector_mode_supported_p (enum machine_mode mode)
14551 if ((mode == V2SImode)
14552 || (mode == V4HImode)
14553 || (mode == V8QImode))
14554 return true;
14556 return false;
14559 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14560 ARM insns and therefore guarantee that the shift count is modulo 256.
14561 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14562 guarantee no particular behavior for out-of-range counts. */
14564 static unsigned HOST_WIDE_INT
14565 arm_shift_truncation_mask (enum machine_mode mode)
14567 return mode == SImode ? 255 : 0;
14571 /* Map internal gcc register numbers to DWARF2 register numbers. */
14573 unsigned int
14574 arm_dbx_register_number (unsigned int regno)
14576 if (regno < 16)
14577 return regno;
14579 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14580 compatibility. The EABI defines them as registers 96-103. */
14581 if (IS_FPA_REGNUM (regno))
14582 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14584 if (IS_VFP_REGNUM (regno))
14585 return 64 + regno - FIRST_VFP_REGNUM;
14587 if (IS_IWMMXT_GR_REGNUM (regno))
14588 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14590 if (IS_IWMMXT_REGNUM (regno))
14591 return 112 + regno - FIRST_IWMMXT_REGNUM;
14593 abort ();
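/* Illustrative sketch (editor's note): under this mapping the core
   registers r0-r15 keep their own numbers, the first VFP register maps
   to DWARF register 64, the first iWMMXt GR register to 104 and the
   first iWMMXt data register to 112, while FPA registers map upwards
   from 16 (legacy) or 96 (EABI) depending on TARGET_AAPCS_BASED.  */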