1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
75 static rtx emit_sfm (int, int);
76 #ifndef AOF_ASSEMBLER
77 static bool arm_assemble_integer (rtx, unsigned int, int);
78 #endif
79 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
80 static arm_cc get_arm_condition_code (rtx);
81 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
82 static rtx is_jump_table (rtx);
83 static const char *output_multi_immediate (rtx *, const char *, const char *,
84 int, HOST_WIDE_INT);
85 static const char *shift_op (rtx, HOST_WIDE_INT *);
86 static struct machine_function *arm_init_machine_status (void);
87 static void thumb_exit (FILE *, int);
88 static rtx is_jump_table (rtx);
89 static HOST_WIDE_INT get_jump_table_size (rtx);
90 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
91 static Mnode *add_minipool_forward_ref (Mfix *);
92 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
93 static Mnode *add_minipool_backward_ref (Mfix *);
94 static void assign_minipool_offsets (Mfix *);
95 static void arm_print_value (FILE *, rtx);
96 static void dump_minipool (rtx);
97 static int arm_barrier_cost (rtx);
98 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
99 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
100 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
101 rtx);
102 static void arm_reorg (void);
103 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
104 static int current_file_function_operand (rtx);
105 static unsigned long arm_compute_save_reg0_reg12_mask (void);
106 static unsigned long arm_compute_save_reg_mask (void);
107 static unsigned long arm_isr_value (tree);
108 static unsigned long arm_compute_func_type (void);
109 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
110 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
111 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
112 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
113 #endif
114 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
115 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
116 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
117 static int arm_comp_type_attributes (tree, tree);
118 static void arm_set_default_type_attributes (tree);
119 static int arm_adjust_cost (rtx, rtx, rtx, int);
120 static int count_insns_for_constant (HOST_WIDE_INT, int);
121 static int arm_get_strip_length (int);
122 static bool arm_function_ok_for_sibcall (tree, tree);
123 static void arm_internal_label (FILE *, const char *, unsigned long);
124 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
125 tree);
126 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
127 static bool arm_size_rtx_costs (rtx, int, int, int *);
128 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
129 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
130 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
131 static bool arm_9e_rtx_costs (rtx, int, int, int *);
132 static int arm_address_cost (rtx);
133 static bool arm_memory_load_p (rtx);
134 static bool arm_cirrus_insn_p (rtx);
135 static void cirrus_reorg (rtx);
136 static void arm_init_builtins (void);
137 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
138 static void arm_init_iwmmxt_builtins (void);
139 static rtx safe_vector_operand (rtx, enum machine_mode);
140 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
141 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
142 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
143 static void emit_constant_insn (rtx cond, rtx pattern);
144 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
145 tree, bool);
147 #ifndef ARM_PE
148 static void arm_encode_section_info (tree, rtx, int);
149 #endif
151 static void arm_file_end (void);
153 #ifdef AOF_ASSEMBLER
154 static void aof_globalize_label (FILE *, const char *);
155 static void aof_dump_imports (FILE *);
156 static void aof_dump_pic_table (FILE *);
157 static void aof_file_start (void);
158 static void aof_file_end (void);
159 #endif
160 static rtx arm_struct_value_rtx (tree, int);
161 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
162 tree, int *, int);
163 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
164 enum machine_mode, tree, bool);
165 static bool arm_promote_prototypes (tree);
166 static bool arm_default_short_enums (void);
167 static bool arm_align_anon_bitfield (void);
168 static bool arm_return_in_msb (tree);
169 static bool arm_must_pass_in_stack (enum machine_mode, tree);
171 static tree arm_cxx_guard_type (void);
172 static bool arm_cxx_guard_mask_bit (void);
173 static tree arm_get_cookie_size (tree);
174 static bool arm_cookie_has_size (void);
175 static bool arm_cxx_cdtor_returns_this (void);
176 static bool arm_cxx_key_method_may_be_inline (void);
177 static void arm_cxx_determine_class_data_visibility (tree);
178 static bool arm_cxx_class_data_always_comdat (void);
179 static bool arm_cxx_use_aeabi_atexit (void);
180 static void arm_init_libfuncs (void);
181 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
183 /* Initialize the GCC target structure. */
184 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
185 #undef TARGET_MERGE_DECL_ATTRIBUTES
186 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
187 #endif
189 #undef TARGET_ATTRIBUTE_TABLE
190 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
192 #undef TARGET_ASM_FILE_END
193 #define TARGET_ASM_FILE_END arm_file_end
195 #ifdef AOF_ASSEMBLER
196 #undef TARGET_ASM_BYTE_OP
197 #define TARGET_ASM_BYTE_OP "\tDCB\t"
198 #undef TARGET_ASM_ALIGNED_HI_OP
199 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
200 #undef TARGET_ASM_ALIGNED_SI_OP
201 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
202 #undef TARGET_ASM_GLOBALIZE_LABEL
203 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
204 #undef TARGET_ASM_FILE_START
205 #define TARGET_ASM_FILE_START aof_file_start
206 #undef TARGET_ASM_FILE_END
207 #define TARGET_ASM_FILE_END aof_file_end
208 #else
209 #undef TARGET_ASM_ALIGNED_SI_OP
210 #define TARGET_ASM_ALIGNED_SI_OP NULL
211 #undef TARGET_ASM_INTEGER
212 #define TARGET_ASM_INTEGER arm_assemble_integer
213 #endif
215 #undef TARGET_ASM_FUNCTION_PROLOGUE
216 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
218 #undef TARGET_ASM_FUNCTION_EPILOGUE
219 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
221 #undef TARGET_COMP_TYPE_ATTRIBUTES
222 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
224 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
225 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
227 #undef TARGET_SCHED_ADJUST_COST
228 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
230 #undef TARGET_ENCODE_SECTION_INFO
231 #ifdef ARM_PE
232 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
233 #else
234 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
235 #endif
237 #undef TARGET_STRIP_NAME_ENCODING
238 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
240 #undef TARGET_ASM_INTERNAL_LABEL
241 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
243 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
244 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
246 #undef TARGET_ASM_OUTPUT_MI_THUNK
247 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
248 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
249 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
251 /* This will be overridden in arm_override_options. */
252 #undef TARGET_RTX_COSTS
253 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
254 #undef TARGET_ADDRESS_COST
255 #define TARGET_ADDRESS_COST arm_address_cost
257 #undef TARGET_SHIFT_TRUNCATION_MASK
258 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
259 #undef TARGET_VECTOR_MODE_SUPPORTED_P
260 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
262 #undef TARGET_MACHINE_DEPENDENT_REORG
263 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
265 #undef TARGET_INIT_BUILTINS
266 #define TARGET_INIT_BUILTINS arm_init_builtins
267 #undef TARGET_EXPAND_BUILTIN
268 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
270 #undef TARGET_INIT_LIBFUNCS
271 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
273 #undef TARGET_PROMOTE_FUNCTION_ARGS
274 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
275 #undef TARGET_PROMOTE_FUNCTION_RETURN
276 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
277 #undef TARGET_PROMOTE_PROTOTYPES
278 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
279 #undef TARGET_PASS_BY_REFERENCE
280 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
281 #undef TARGET_ARG_PARTIAL_BYTES
282 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
284 #undef TARGET_STRUCT_VALUE_RTX
285 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
287 #undef TARGET_SETUP_INCOMING_VARARGS
288 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
290 #undef TARGET_DEFAULT_SHORT_ENUMS
291 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
293 #undef TARGET_ALIGN_ANON_BITFIELD
294 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
296 #undef TARGET_CXX_GUARD_TYPE
297 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
299 #undef TARGET_CXX_GUARD_MASK_BIT
300 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
302 #undef TARGET_CXX_GET_COOKIE_SIZE
303 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
305 #undef TARGET_CXX_COOKIE_HAS_SIZE
306 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
308 #undef TARGET_CXX_CDTOR_RETURNS_THIS
309 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
311 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
312 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
314 #undef TARGET_CXX_USE_AEABI_ATEXIT
315 #define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit
317 #undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
318 #define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
319 arm_cxx_determine_class_data_visibility
321 #undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
322 #define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat
324 #undef TARGET_RETURN_IN_MSB
325 #define TARGET_RETURN_IN_MSB arm_return_in_msb
327 #undef TARGET_MUST_PASS_IN_STACK
328 #define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
330 struct gcc_target targetm = TARGET_INITIALIZER;
332 /* Obstack for minipool constant handling. */
333 static struct obstack minipool_obstack;
334 static char * minipool_startobj;
336 /* The maximum number of insns skipped which
337 will be conditionalised if possible. */
338 static int max_insns_skipped = 5;
340 extern FILE * asm_out_file;
342 /* True if we are currently building a constant table. */
343 int making_const_table;
345 /* Define the information needed to generate branch insns. This is
346 stored from the compare operation. */
347 rtx arm_compare_op0, arm_compare_op1;
349 /* The processor for which instructions should be scheduled. */
350 enum processor_type arm_tune = arm_none;
352 /* Which floating point model to use. */
353 enum arm_fp_model arm_fp_model;
355 /* Which floating point hardware is available. */
356 enum fputype arm_fpu_arch;
358 /* Which floating point hardware to schedule for. */
359 enum fputype arm_fpu_tune;
361 /* Whether to use floating point hardware. */
362 enum float_abi_type arm_float_abi;
364 /* Which ABI to use. */
365 enum arm_abi_type arm_abi;
367 /* Set by the -mfpu=... option. */
368 const char * target_fpu_name = NULL;
370 /* Set by the -mfpe=... option. */
371 const char * target_fpe_name = NULL;
373 /* Set by the -mfloat-abi=... option. */
374 const char * target_float_abi_name = NULL;
376 /* Set by the legacy -mhard-float and -msoft-float options. */
377 const char * target_float_switch = NULL;
379 /* Set by the -mabi=... option. */
380 const char * target_abi_name = NULL;
382 /* Used to parse -mstructure_size_boundary command line option. */
383 const char * structure_size_string = NULL;
384 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
386 /* Used for Thumb call_via trampolines. */
387 rtx thumb_call_via_label[14];
388 static int thumb_call_reg_needed;
390 /* Bit values used to identify processor capabilities. */
391 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
392 #define FL_ARCH3M (1 << 1) /* Extended multiply */
393 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
394 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
395 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
396 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
397 #define FL_THUMB (1 << 6) /* Thumb aware */
398 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
399 #define FL_STRONG (1 << 8) /* StrongARM */
400 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
401 #define FL_XSCALE (1 << 10) /* XScale */
402 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
403 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
404 media instructions. */
405 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
406 #define FL_WBUF (1 << 14) /* Schedule for write buffer ops.
407 Note: ARM6 & 7 derivatives only. */
409 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
411 #define FL_FOR_ARCH2 0
412 #define FL_FOR_ARCH3 FL_MODE32
413 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
414 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
415 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
416 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
417 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
418 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
419 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
420 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
421 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
422 #define FL_FOR_ARCH6J FL_FOR_ARCH6
423 #define FL_FOR_ARCH6K FL_FOR_ARCH6
424 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
425 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6
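/* As an illustration of how these accumulate: FL_FOR_ARCH5TE expands to
   FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB,
   i.e. every capability an ARMv5TE part is assumed to provide.  */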
427 /* The bits in this mask specify which
428 instructions we are allowed to generate. */
429 static unsigned long insn_flags = 0;
431 /* The bits in this mask specify which instruction scheduling options should
432 be used. */
433 static unsigned long tune_flags = 0;
435 /* The following are used in the arm.md file as equivalents to bits
436 in the above two flag variables. */
438 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
439 int arm_arch3m = 0;
441 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
442 int arm_arch4 = 0;
444 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
445 int arm_arch4t = 0;
447 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
448 int arm_arch5 = 0;
450 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
451 int arm_arch5e = 0;
453 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
454 int arm_arch6 = 0;
456 /* Nonzero if this chip can benefit from load scheduling. */
457 int arm_ld_sched = 0;
459 /* Nonzero if this chip is a StrongARM. */
460 int arm_tune_strongarm = 0;
462 /* Nonzero if this chip is a Cirrus variant. */
463 int arm_arch_cirrus = 0;
465 /* Nonzero if this chip supports Intel Wireless MMX technology. */
466 int arm_arch_iwmmxt = 0;
468 /* Nonzero if this chip is an XScale. */
469 int arm_arch_xscale = 0;
471 /* Nonzero if tuning for XScale */
472 int arm_tune_xscale = 0;
474 /* Nonzero if we want to tune for stores that access the write-buffer.
475 This typically means an ARM6 or ARM7 with MMU or MPU. */
476 int arm_tune_wbuf = 0;
478 /* Nonzero if generating Thumb instructions. */
479 int thumb_code = 0;
481 /* Nonzero if we should define __THUMB_INTERWORK__ in the
482 preprocessor.
483 XXX This is a bit of a hack, it's intended to help work around
484 problems in GLD which doesn't understand that armv5t code is
485 interworking clean. */
486 int arm_cpp_interwork = 0;
488 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
489 must report the mode of the memory reference from PRINT_OPERAND to
490 PRINT_OPERAND_ADDRESS. */
491 enum machine_mode output_memory_reference_mode;
493 /* The register number to be used for the PIC offset register. */
494 const char * arm_pic_register_string = NULL;
495 int arm_pic_register = INVALID_REGNUM;
497 /* Set to 1 when a return insn is output, this means that the epilogue
498 is not needed. */
499 int return_used_this_function;
501 /* Set to 1 after arm_reorg has started. Reset to start at the start of
502 the next function. */
503 static int after_arm_reorg = 0;
505 /* The maximum number of insns to be used when loading a constant. */
506 static int arm_constant_limit = 3;
508 /* For an explanation of these variables, see final_prescan_insn below. */
509 int arm_ccfsm_state;
510 enum arm_cond_code arm_current_cc;
511 rtx arm_target_insn;
512 int arm_target_label;
514 /* The condition codes of the ARM, and the inverse function. */
515 static const char * const arm_condition_codes[] =
517 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
518 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
521 #define streq(string1, string2) (strcmp (string1, string2) == 0)
523 /* Initialization code. */
525 struct processors
527 const char *const name;
528 enum processor_type core;
529 const char *arch;
530 const unsigned long flags;
531 bool (* rtx_costs) (rtx, int, int, int *);
534 /* Not all of these give usefully different compilation alternatives,
535 but there is no simple way of generalizing them. */
536 static const struct processors all_cores[] =
538 /* ARM Cores */
539 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
540 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
541 #include "arm-cores.def"
542 #undef ARM_CORE
543 {NULL, arm_none, NULL, 0, NULL}
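/* Each ARM_CORE (NAME, IDENT, ARCH, FLAGS, COSTS) line in arm-cores.def
   therefore becomes one entry above, with FL_FOR_ARCH##ARCH merged into
   its flags and arm_##COSTS##_rtx_costs used as its cost function.  */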
546 static const struct processors all_architectures[] =
548 /* ARM Architectures */
549 /* We don't specify rtx_costs here as it will be figured out
550 from the core. */
552 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
553 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
554 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
555 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
556 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
557 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
558 implementations that support it, so we will leave it out for now. */
559 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
560 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
561 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
562 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
563 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
564 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
565 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
566 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
567 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
568 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
569 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
570 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
571 {NULL, arm_none, NULL, 0 , NULL}
574 /* This is a magic structure. The 'string' field is magically filled in
575 with a pointer to the value specified by the user on the command line
576 assuming that the user has specified such a value. */
578 struct arm_cpu_select arm_select[] =
580 /* string name processors */
581 { NULL, "-mcpu=", all_cores },
582 { NULL, "-march=", all_architectures },
583 { NULL, "-mtune=", all_cores }
587 /* The name of the preprocessor macro to define for this architecture. */
589 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
591 struct fpu_desc
593 const char * name;
594 enum fputype fpu;
598 /* Available values for -mfpu=. */
600 static const struct fpu_desc all_fpus[] =
602 {"fpa", FPUTYPE_FPA},
603 {"fpe2", FPUTYPE_FPA_EMU2},
604 {"fpe3", FPUTYPE_FPA_EMU2},
605 {"maverick", FPUTYPE_MAVERICK},
606 {"vfp", FPUTYPE_VFP}
610 /* Floating point models used by the different hardware.
611 See fputype in arm.h. */
613 static const enum fputype fp_model_for_fpu[] =
615 /* No FP hardware. */
616 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
617 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
618 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
619 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
620 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
621 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
625 struct float_abi
627 const char * name;
628 enum float_abi_type abi_type;
632 /* Available values for -mfloat-abi=. */
634 static const struct float_abi all_float_abis[] =
636 {"soft", ARM_FLOAT_ABI_SOFT},
637 {"softfp", ARM_FLOAT_ABI_SOFTFP},
638 {"hard", ARM_FLOAT_ABI_HARD}
642 struct abi_name
644 const char *name;
645 enum arm_abi_type abi_type;
649 /* Available values for -mabi=. */
651 static const struct abi_name arm_all_abis[] =
653 {"apcs-gnu", ARM_ABI_APCS},
654 {"atpcs", ARM_ABI_ATPCS},
655 {"aapcs", ARM_ABI_AAPCS},
656 {"iwmmxt", ARM_ABI_IWMMXT}
659 /* Return the number of bits set in VALUE. */
660 static unsigned
661 bit_count (unsigned long value)
663 unsigned long count = 0;
665 while (value)
667 count++;
668 value &= value - 1; /* Clear the least-significant set bit. */
671 return count;
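/* Example: for value = 0x29 (binary 101001) the loop clears one set bit
   per iteration, 101001 -> 101000 -> 100000 -> 0, giving a count of 3;
   "value &= value - 1" always clears exactly the least significant set
   bit.  */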
674 /* Set up library functions unique to ARM. */
676 static void
677 arm_init_libfuncs (void)
679 /* There are no special library functions unless we are using the
680 ARM BPABI. */
681 if (!TARGET_BPABI)
682 return;
684 /* The functions below are described in Section 4 of the "Run-Time
685 ABI for the ARM architecture", Version 1.0. */
687 /* Double-precision floating-point arithmetic. Table 2. */
688 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
689 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
690 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
691 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
692 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
694 /* Double-precision comparisons. Table 3. */
695 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
696 set_optab_libfunc (ne_optab, DFmode, NULL);
697 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
698 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
699 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
700 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
701 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
703 /* Single-precision floating-point arithmetic. Table 4. */
704 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
705 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
706 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
707 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
708 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
710 /* Single-precision comparisons. Table 5. */
711 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
712 set_optab_libfunc (ne_optab, SFmode, NULL);
713 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
714 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
715 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
716 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
717 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
719 /* Floating-point to integer conversions. Table 6. */
720 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
721 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
722 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
723 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
724 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
725 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
726 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
727 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
729 /* Conversions between floating types. Table 7. */
730 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
731 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
733 /* Integer to floating-point conversions. Table 8. */
734 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
735 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
736 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
737 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
738 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
739 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
740 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
741 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
743 /* Long long. Table 9. */
744 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
745 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
746 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
747 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
748 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
749 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
750 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
751 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
753 /* Integer (32/32->32) division. \S 4.3.1. */
754 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
755 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
757 /* The divmod functions are designed so that they can be used for
758 plain division, even though they return both the quotient and the
759 remainder. The quotient is returned in the usual location (i.e.,
760 r0 for SImode, {r0, r1} for DImode), just as would be expected
761 for an ordinary division routine. Because the AAPCS calling
762 conventions specify that all of { r0, r1, r2, r3 } are
763 call-clobbered registers, there is no need to tell the compiler
764 explicitly that those registers are clobbered by these
765 routines. */
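/* Concretely, __aeabi_idivmod returns the quotient in r0 and the
   remainder in r1, and __aeabi_ldivmod returns the quotient in {r0, r1}
   and the remainder in {r2, r3}; a plain division simply ignores the
   remainder registers.  */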
766 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
767 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
768 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
769 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
771 /* We don't have mod libcalls. Fortunately gcc knows how to use the
772 divmod libcalls instead. */
773 set_optab_libfunc (smod_optab, DImode, NULL);
774 set_optab_libfunc (umod_optab, DImode, NULL);
775 set_optab_libfunc (smod_optab, SImode, NULL);
776 set_optab_libfunc (umod_optab, SImode, NULL);
779 /* Fix up any incompatible options that the user has specified.
780 This has now turned into a maze. */
781 void
782 arm_override_options (void)
784 unsigned i;
786 /* Set up the flags based on the cpu/architecture selected by the user. */
787 for (i = ARRAY_SIZE (arm_select); i--;)
789 struct arm_cpu_select * ptr = arm_select + i;
791 if (ptr->string != NULL && ptr->string[0] != '\0')
793 const struct processors * sel;
795 for (sel = ptr->processors; sel->name != NULL; sel++)
796 if (streq (ptr->string, sel->name))
798 /* Set the architecture define. */
799 if (i != 2)
800 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
802 /* Determine the processor core for which we should
803 tune code-generation. */
804 if (/* -mcpu= is a sensible default. */
805 i == 0
806 /* If -march= is used, and -mcpu= has not been used,
807 assume that we should tune for a representative
808 CPU from that architecture. */
809 || i == 1
810 /* -mtune= overrides -mcpu= and -march=. */
811 || i == 2)
812 arm_tune = (enum processor_type) (sel - ptr->processors);
814 if (i != 2)
816 /* If we have been given an architecture and a processor
817 make sure that they are compatible. We only generate
818 a warning though, and we prefer the CPU over the
819 architecture. */
820 if (insn_flags != 0 && (insn_flags ^ sel->flags))
821 warning (0, "switch -mcpu=%s conflicts with -march= switch",
822 ptr->string);
824 insn_flags = sel->flags;
827 break;
830 if (sel->name == NULL)
831 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
835 /* If the user did not specify a processor, choose one for them. */
836 if (insn_flags == 0)
838 const struct processors * sel;
839 unsigned int sought;
840 enum processor_type cpu;
842 cpu = TARGET_CPU_DEFAULT;
843 if (cpu == arm_none)
845 #ifdef SUBTARGET_CPU_DEFAULT
846 /* Use the subtarget default CPU if none was specified by
847 configure. */
848 cpu = SUBTARGET_CPU_DEFAULT;
849 #endif
850 /* Default to ARM6. */
851 if (cpu == arm_none)
852 cpu = arm6;
854 sel = &all_cores[cpu];
856 insn_flags = sel->flags;
858 /* Now check to see if the user has specified some command line
859 switch that requires certain abilities from the cpu. */
860 sought = 0;
862 if (TARGET_INTERWORK || TARGET_THUMB)
864 sought |= (FL_THUMB | FL_MODE32);
866 /* There are no ARM processors that support both APCS-26 and
867 interworking. Therefore we force FL_MODE26 to be removed
868 from insn_flags here (if it was set), so that the search
869 below will always be able to find a compatible processor. */
870 insn_flags &= ~FL_MODE26;
873 if (sought != 0 && ((sought & insn_flags) != sought))
875 /* Try to locate a CPU type that supports all of the abilities
876 of the default CPU, plus the extra abilities requested by
877 the user. */
878 for (sel = all_cores; sel->name != NULL; sel++)
879 if ((sel->flags & sought) == (sought | insn_flags))
880 break;
882 if (sel->name == NULL)
884 unsigned current_bit_count = 0;
885 const struct processors * best_fit = NULL;
887 /* Ideally we would like to issue an error message here
888 saying that it was not possible to find a CPU compatible
889 with the default CPU, but which also supports the command
890 line options specified by the programmer, and so they
891 ought to use the -mcpu=<name> command line option to
892 override the default CPU type.
894 If we cannot find a cpu that has both the
895 characteristics of the default cpu and the given
896 command line options we scan the array again looking
897 for a best match. */
898 for (sel = all_cores; sel->name != NULL; sel++)
899 if ((sel->flags & sought) == sought)
901 unsigned count;
903 count = bit_count (sel->flags & insn_flags);
905 if (count >= current_bit_count)
907 best_fit = sel;
908 current_bit_count = count;
912 gcc_assert (best_fit);
913 sel = best_fit;
916 insn_flags = sel->flags;
918 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
919 if (arm_tune == arm_none)
920 arm_tune = (enum processor_type) (sel - all_cores);
923 /* The processor for which we should tune should now have been
924 chosen. */
925 gcc_assert (arm_tune != arm_none);
927 tune_flags = all_cores[(int)arm_tune].flags;
928 if (optimize_size)
929 targetm.rtx_costs = arm_size_rtx_costs;
930 else
931 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
933 /* Make sure that the processor choice does not conflict with any of the
934 other command line choices. */
935 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
937 warning (0, "target CPU does not support interworking" );
938 target_flags &= ~ARM_FLAG_INTERWORK;
941 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
943 warning (0, "target CPU does not support THUMB instructions");
944 target_flags &= ~ARM_FLAG_THUMB;
947 if (TARGET_APCS_FRAME && TARGET_THUMB)
949 /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
950 target_flags &= ~ARM_FLAG_APCS_FRAME;
953 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
954 from here where no function is being compiled currently. */
955 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
956 && TARGET_ARM)
957 warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
959 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
960 warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
962 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
963 warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");
965 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
967 warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
968 target_flags |= ARM_FLAG_APCS_FRAME;
971 if (TARGET_POKE_FUNCTION_NAME)
972 target_flags |= ARM_FLAG_APCS_FRAME;
974 if (TARGET_APCS_REENT && flag_pic)
975 error ("-fpic and -mapcs-reent are incompatible");
977 if (TARGET_APCS_REENT)
978 warning (0, "APCS reentrant code not supported. Ignored");
980 /* If this target is normally configured to use APCS frames, warn if they
981 are turned off and debugging is turned on. */
982 if (TARGET_ARM
983 && write_symbols != NO_DEBUG
984 && !TARGET_APCS_FRAME
985 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
986 warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
988 /* If stack checking is disabled, we can use r10 as the PIC register,
989 which keeps r9 available. */
990 if (flag_pic)
991 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
993 if (TARGET_APCS_FLOAT)
994 warning (0, "passing floating point arguments in fp regs not yet supported");
996 /* Initialize boolean versions of the flags, for use in the arm.md file. */
997 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
998 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
999 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
1000 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
1001 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
1002 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
1003 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
1004 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
1006 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
1007 arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
1008 thumb_code = (TARGET_ARM == 0);
1009 arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
1010 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
1011 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
1013 /* V5 code we generate is completely interworking capable, so we turn off
1014 TARGET_INTERWORK here to avoid many tests later on. */
1016 /* XXX However, we must pass the right pre-processor defines to CPP
1017 or GLD can get confused. This is a hack. */
1018 if (TARGET_INTERWORK)
1019 arm_cpp_interwork = 1;
1021 if (arm_arch5)
1022 target_flags &= ~ARM_FLAG_INTERWORK;
1024 if (target_abi_name)
1026 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1028 if (streq (arm_all_abis[i].name, target_abi_name))
1030 arm_abi = arm_all_abis[i].abi_type;
1031 break;
1034 if (i == ARRAY_SIZE (arm_all_abis))
1035 error ("invalid ABI option: -mabi=%s", target_abi_name);
1037 else
1038 arm_abi = ARM_DEFAULT_ABI;
1040 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1041 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1043 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1044 error ("iwmmxt abi requires an iwmmxt capable cpu");
1046 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1047 if (target_fpu_name == NULL && target_fpe_name != NULL)
1049 if (streq (target_fpe_name, "2"))
1050 target_fpu_name = "fpe2";
1051 else if (streq (target_fpe_name, "3"))
1052 target_fpu_name = "fpe3";
1053 else
1054 error ("invalid floating point emulation option: -mfpe=%s",
1055 target_fpe_name);
1057 if (target_fpu_name != NULL)
1059 /* The user specified a FPU. */
1060 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1062 if (streq (all_fpus[i].name, target_fpu_name))
1064 arm_fpu_arch = all_fpus[i].fpu;
1065 arm_fpu_tune = arm_fpu_arch;
1066 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1067 break;
1070 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1071 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1073 else
1075 #ifdef FPUTYPE_DEFAULT
1076 /* Use the default if it is specified for this platform. */
1077 arm_fpu_arch = FPUTYPE_DEFAULT;
1078 arm_fpu_tune = FPUTYPE_DEFAULT;
1079 #else
1080 /* Pick one based on CPU type. */
1081 /* ??? Some targets assume FPA is the default.
1082 if ((insn_flags & FL_VFP) != 0)
1083 arm_fpu_arch = FPUTYPE_VFP;
1084 else
1085 */
1086 if (arm_arch_cirrus)
1087 arm_fpu_arch = FPUTYPE_MAVERICK;
1088 else
1089 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1090 #endif
1091 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1092 arm_fpu_tune = FPUTYPE_FPA;
1093 else
1094 arm_fpu_tune = arm_fpu_arch;
1095 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1096 gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
1099 if (target_float_abi_name != NULL)
1101 /* The user specified a FP ABI. */
1102 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1104 if (streq (all_float_abis[i].name, target_float_abi_name))
1106 arm_float_abi = all_float_abis[i].abi_type;
1107 break;
1110 if (i == ARRAY_SIZE (all_float_abis))
1111 error ("invalid floating point abi: -mfloat-abi=%s",
1112 target_float_abi_name);
1114 else if (target_float_switch)
1116 /* This is a bit of a hack to avoid needing target flags for these. */
1117 if (target_float_switch[0] == 'h')
1118 arm_float_abi = ARM_FLOAT_ABI_HARD;
1119 else
1120 arm_float_abi = ARM_FLOAT_ABI_SOFT;
1122 else
1123 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1125 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1126 sorry ("-mfloat-abi=hard and VFP");
1128 /* If soft-float is specified then don't use FPU. */
1129 if (TARGET_SOFT_FLOAT)
1130 arm_fpu_arch = FPUTYPE_NONE;
1132 /* For arm2/3 there is no need to do any scheduling if there is only
1133 a floating point emulator, or we are doing software floating-point. */
1134 if ((TARGET_SOFT_FLOAT
1135 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1136 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1137 && (tune_flags & FL_MODE32) == 0)
1138 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1140 /* Override the default structure alignment for AAPCS ABI. */
1141 if (arm_abi == ARM_ABI_AAPCS)
1142 arm_structure_size_boundary = 8;
1144 if (structure_size_string != NULL)
1146 int size = strtol (structure_size_string, NULL, 0);
1148 if (size == 8 || size == 32
1149 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1150 arm_structure_size_boundary = size;
1151 else
1152 warning (0, "structure size boundary can only be set to %s",
1153 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1156 if (arm_pic_register_string != NULL)
1158 int pic_register = decode_reg_name (arm_pic_register_string);
1160 if (!flag_pic)
1161 warning (0, "-mpic-register= is useless without -fpic");
1163 /* Prevent the user from choosing an obviously stupid PIC register. */
1164 else if (pic_register < 0 || call_used_regs[pic_register]
1165 || pic_register == HARD_FRAME_POINTER_REGNUM
1166 || pic_register == STACK_POINTER_REGNUM
1167 || pic_register >= PC_REGNUM)
1168 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1169 else
1170 arm_pic_register = pic_register;
1173 if (TARGET_THUMB && flag_schedule_insns)
1175 /* Don't warn since it's on by default in -O2. */
1176 flag_schedule_insns = 0;
1179 if (optimize_size)
1181 /* There's some dispute as to whether this should be 1 or 2. However,
1182 experiments seem to show that in pathological cases a setting of
1183 1 degrades less severely than a setting of 2. This could change if
1184 other parts of the compiler change their behavior. */
1185 arm_constant_limit = 1;
1187 /* If optimizing for size, bump the number of instructions that we
1188 are prepared to conditionally execute (even on a StrongARM). */
1189 max_insns_skipped = 6;
1191 else
1193 /* For processors with load scheduling, it never costs more than
1194 2 cycles to load a constant, and the load scheduler may well
1195 reduce that to 1. */
1196 if (arm_ld_sched)
1197 arm_constant_limit = 1;
1199 /* On XScale the longer latency of a load makes it more difficult
1200 to achieve a good schedule, so it's faster to synthesize
1201 constants that can be done in two insns. */
1202 if (arm_tune_xscale)
1203 arm_constant_limit = 2;
1205 /* StrongARM has early execution of branches, so a sequence
1206 that is worth skipping is shorter. */
1207 if (arm_tune_strongarm)
1208 max_insns_skipped = 3;
1211 /* Register global variables with the garbage collector. */
1212 arm_add_gc_roots ();
1215 static void
1216 arm_add_gc_roots (void)
1218 gcc_obstack_init(&minipool_obstack);
1219 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1222 /* A table of known ARM exception types.
1223 For use with the interrupt function attribute. */
1225 typedef struct
1227 const char *const arg;
1228 const unsigned long return_value;
1230 isr_attribute_arg;
1232 static const isr_attribute_arg isr_attribute_args [] =
1234 { "IRQ", ARM_FT_ISR },
1235 { "irq", ARM_FT_ISR },
1236 { "FIQ", ARM_FT_FIQ },
1237 { "fiq", ARM_FT_FIQ },
1238 { "ABORT", ARM_FT_ISR },
1239 { "abort", ARM_FT_ISR },
1240 { "ABORT", ARM_FT_ISR },
1241 { "abort", ARM_FT_ISR },
1242 { "UNDEF", ARM_FT_EXCEPTION },
1243 { "undef", ARM_FT_EXCEPTION },
1244 { "SWI", ARM_FT_EXCEPTION },
1245 { "swi", ARM_FT_EXCEPTION },
1246 { NULL, ARM_FT_NORMAL }
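/* These strings are matched against the argument of the function
   attribute, e.g.
   void handler (void) __attribute__ ((interrupt ("IRQ")));  */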
1249 /* Returns the (interrupt) function type of the current
1250 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1252 static unsigned long
1253 arm_isr_value (tree argument)
1255 const isr_attribute_arg * ptr;
1256 const char * arg;
1258 /* No argument - default to IRQ. */
1259 if (argument == NULL_TREE)
1260 return ARM_FT_ISR;
1262 /* Get the value of the argument. */
1263 if (TREE_VALUE (argument) == NULL_TREE
1264 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1265 return ARM_FT_UNKNOWN;
1267 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1269 /* Check it against the list of known arguments. */
1270 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1271 if (streq (arg, ptr->arg))
1272 return ptr->return_value;
1274 /* An unrecognized interrupt type. */
1275 return ARM_FT_UNKNOWN;
1278 /* Computes the type of the current function. */
1280 static unsigned long
1281 arm_compute_func_type (void)
1283 unsigned long type = ARM_FT_UNKNOWN;
1284 tree a;
1285 tree attr;
1287 gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
1289 /* Decide if the current function is volatile. Such functions
1290 never return, and many memory cycles can be saved by not storing
1291 register values that will never be needed again. This optimization
1292 was added to speed up context switching in a kernel application. */
1293 if (optimize > 0
1294 && TREE_NOTHROW (current_function_decl)
1295 && TREE_THIS_VOLATILE (current_function_decl))
1296 type |= ARM_FT_VOLATILE;
1298 if (cfun->static_chain_decl != NULL)
1299 type |= ARM_FT_NESTED;
1301 attr = DECL_ATTRIBUTES (current_function_decl);
1303 a = lookup_attribute ("naked", attr);
1304 if (a != NULL_TREE)
1305 type |= ARM_FT_NAKED;
1307 a = lookup_attribute ("isr", attr);
1308 if (a == NULL_TREE)
1309 a = lookup_attribute ("interrupt", attr);
1311 if (a == NULL_TREE)
1312 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1313 else
1314 type |= arm_isr_value (TREE_VALUE (a));
1316 return type;
1319 /* Returns the type of the current function. */
1321 unsigned long
1322 arm_current_func_type (void)
1324 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1325 cfun->machine->func_type = arm_compute_func_type ();
1327 return cfun->machine->func_type;
1330 /* Return 1 if it is possible to return using a single instruction.
1331 If SIBLING is non-null, this is a test for a return before a sibling
1332 call. SIBLING is the call insn, so we can examine its register usage. */
1335 use_return_insn (int iscond, rtx sibling)
1337 int regno;
1338 unsigned int func_type;
1339 unsigned long saved_int_regs;
1340 unsigned HOST_WIDE_INT stack_adjust;
1341 arm_stack_offsets *offsets;
1343 /* Never use a return instruction before reload has run. */
1344 if (!reload_completed)
1345 return 0;
1347 func_type = arm_current_func_type ();
1349 /* Naked functions and volatile functions need special
1350 consideration. */
1351 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1352 return 0;
1354 /* So do interrupt functions that use the frame pointer. */
1355 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1356 return 0;
1358 offsets = arm_get_frame_offsets ();
1359 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1361 /* As do variadic functions. */
1362 if (current_function_pretend_args_size
1363 || cfun->machine->uses_anonymous_args
1364 /* Or if the function calls __builtin_eh_return () */
1365 || current_function_calls_eh_return
1366 /* Or if the function calls alloca */
1367 || current_function_calls_alloca
1368 /* Or if there is a stack adjustment. However, if the stack pointer
1369 is saved on the stack, we can use a pre-incrementing stack load. */
1370 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1371 return 0;
1373 saved_int_regs = arm_compute_save_reg_mask ();
1375 /* Unfortunately, the insn
1377 ldmib sp, {..., sp, ...}
1379 triggers a bug on most SA-110 based devices, such that the stack
1380 pointer won't be correctly restored if the instruction takes a
1381 page fault. We work around this problem by popping r3 along with
1382 the other registers, since that is never slower than executing
1383 another instruction.
1385 We test for !arm_arch5 here, because code for any architecture
1386 less than this could potentially be run on one of the buggy
1387 chips. */
1388 if (stack_adjust == 4 && !arm_arch5)
1390 /* Validate that r3 is a call-clobbered register (always true in
1391 the default abi) ... */
1392 if (!call_used_regs[3])
1393 return 0;
1395 /* ... that it isn't being used for a return value (always true
1396 until we implement return-in-regs), or for a tail-call
1397 argument ... */
1398 if (sibling)
1400 gcc_assert (GET_CODE (sibling) == CALL_INSN);
1402 if (find_regno_fusage (sibling, USE, 3))
1403 return 0;
1406 /* ... and that there are no call-saved registers in r0-r2
1407 (always true in the default ABI). */
1408 if (saved_int_regs & 0x7)
1409 return 0;
1412 /* Can't be done if interworking with Thumb, and any registers have been
1413 stacked. */
1414 if (TARGET_INTERWORK && saved_int_regs != 0)
1415 return 0;
1417 /* On StrongARM, conditional returns are expensive if they aren't
1418 taken and multiple registers have been stacked. */
1419 if (iscond && arm_tune_strongarm)
1421 /* Conditional return when just the LR is stored is a simple
1422 conditional-load instruction, that's not expensive. */
1423 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1424 return 0;
1426 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1427 return 0;
1430 /* If there are saved registers but the LR isn't saved, then we need
1431 two instructions for the return. */
1432 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1433 return 0;
1435 /* Can't be done if any of the FPA regs are pushed,
1436 since this also requires an insn. */
1437 if (TARGET_HARD_FLOAT && TARGET_FPA)
1438 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1439 if (regs_ever_live[regno] && !call_used_regs[regno])
1440 return 0;
1442 /* Likewise VFP regs. */
1443 if (TARGET_HARD_FLOAT && TARGET_VFP)
1444 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1445 if (regs_ever_live[regno] && !call_used_regs[regno])
1446 return 0;
1448 if (TARGET_REALLY_IWMMXT)
1449 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1450 if (regs_ever_live[regno] && ! call_used_regs [regno])
1451 return 0;
1453 return 1;
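/* For example, a leaf function that saves no registers and needs no stack
   adjustment passes all of the tests above and can return with a single
   "mov pc, lr" (or "bx lr" when interworking).  */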
1456 /* Return TRUE if int I is a valid immediate ARM constant. */
1459 const_ok_for_arm (HOST_WIDE_INT i)
1461 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
1463 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1464 be all zero, or all one. */
1465 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1466 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1467 != ((~(unsigned HOST_WIDE_INT) 0)
1468 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1469 return FALSE;
1471 /* Fast return for 0 and powers of 2 */
1472 if ((i & (i - 1)) == 0)
1473 return TRUE;
1477 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
1478 return TRUE;
1479 mask =
1480 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
1481 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
1483 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
1485 return FALSE;
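/* The loop above encodes the ARM rule that a data-processing immediate
   must be an 8-bit value rotated right by an even amount: 0xff, 0xff00
   and 0xff000000 are all valid, whereas 0x101, whose set bits span nine
   bit positions, is not and must be synthesized with extra insns.  */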
1488 /* Return true if I is a valid constant for the operation CODE. */
1489 static int
1490 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1492 if (const_ok_for_arm (i))
1493 return 1;
1495 switch (code)
1497 case PLUS:
1498 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1500 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1501 case XOR:
1502 case IOR:
1503 return 0;
1505 case AND:
1506 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1508 default:
1509 gcc_unreachable ();
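/* For instance, (plus reg -1) is acceptable even though -1 is not a valid
   immediate, because the negated value 1 is: the operation can be output
   as a SUB instead of an ADD.  */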
1513 /* Emit a sequence of insns to handle a large constant.
1514 CODE is the code of the operation required, it can be any of SET, PLUS,
1515 IOR, AND, XOR, MINUS;
1516 MODE is the mode in which the operation is being performed;
1517 VAL is the integer to operate on;
1518 SOURCE is the other operand (a register, or a null-pointer for SET);
1519 SUBTARGETS means it is safe to create scratch registers if that will
1520 either produce a simpler sequence, or we will want to cse the values.
1521 Return value is the number of insns emitted. */
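/* For example, (set reg 0xfff) cannot be done with a single MOV because
   0xfff is not a valid immediate; it might instead be emitted as
   "mov reg, #0xf00" followed by "orr reg, reg, #0xff", i.e. two insns.  */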
1524 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1525 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1527 rtx cond;
1529 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1530 cond = COND_EXEC_TEST (PATTERN (insn));
1531 else
1532 cond = NULL_RTX;
1534 if (subtargets || code == SET
1535 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1536 && REGNO (target) != REGNO (source)))
1538 /* After arm_reorg has been called, we can't fix up expensive
1539 constants by pushing them into memory so we must synthesize
1540 them in-line, regardless of the cost. This is only likely to
1541 be more costly on chips that have load delay slots and we are
1542 compiling without running the scheduler (so no splitting
1543 occurred before the final instruction emission).
1545 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1547 if (!after_arm_reorg
1548 && !cond
1549 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1550 1, 0)
1551 > arm_constant_limit + (code != SET)))
1553 if (code == SET)
1555 /* Currently SET is the only monadic value for CODE, all
1556 the rest are dyadic. */
1557 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1558 return 1;
1560 else
1562 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1564 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1565 /* For MINUS, the value is subtracted from, since we never
1566 have subtraction of a constant. */
1567 if (code == MINUS)
1568 emit_insn (gen_rtx_SET (VOIDmode, target,
1569 gen_rtx_MINUS (mode, temp, source)));
1570 else
1571 emit_insn (gen_rtx_SET (VOIDmode, target,
1572 gen_rtx_fmt_ee (code, mode, source, temp)));
1573 return 2;
1578 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1579 1);
1582 static int
1583 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1585 HOST_WIDE_INT temp1;
1586 int num_insns = 0;
1589 int end;
1591 if (i <= 0)
1592 i += 32;
1593 if (remainder & (3 << (i - 2)))
1595 end = i - 8;
1596 if (end < 0)
1597 end += 32;
1598 temp1 = remainder & ((0x0ff << end)
1599 | ((i < end) ? (0xff >> (32 - end)) : 0));
1600 remainder &= ~temp1;
1601 num_insns++;
1602 i -= 6;
1604 i -= 2;
1605 } while (remainder);
1606 return num_insns;
1609 /* Emit an instruction with the indicated PATTERN. If COND is
1610 non-NULL, conditionalize the execution of the instruction on COND
1611 being true. */
1613 static void
1614 emit_constant_insn (rtx cond, rtx pattern)
1616 if (cond)
1617 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1618 emit_insn (pattern);
1621 /* As above, but extra parameter GENERATE which, if clear, suppresses
1622 RTL generation. */
1624 static int
1625 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1626 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1627 int generate)
1629 int can_invert = 0;
1630 int can_negate = 0;
1631 int can_negate_initial = 0;
1632 int can_shift = 0;
1633 int i;
1634 int num_bits_set = 0;
1635 int set_sign_bit_copies = 0;
1636 int clear_sign_bit_copies = 0;
1637 int clear_zero_bit_copies = 0;
1638 int set_zero_bit_copies = 0;
1639 int insns = 0;
1640 unsigned HOST_WIDE_INT temp1, temp2;
1641 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1643 /* Find out which operations are safe for a given CODE. Also do a quick
1644 check for degenerate cases; these can occur when DImode operations
1645 are split. */
1646 switch (code)
1648 case SET:
1649 can_invert = 1;
1650 can_shift = 1;
1651 can_negate = 1;
1652 break;
1654 case PLUS:
1655 can_negate = 1;
1656 can_negate_initial = 1;
1657 break;
1659 case IOR:
1660 if (remainder == 0xffffffff)
1662 if (generate)
1663 emit_constant_insn (cond,
1664 gen_rtx_SET (VOIDmode, target,
1665 GEN_INT (ARM_SIGN_EXTEND (val))));
1666 return 1;
1668 if (remainder == 0)
1670 if (reload_completed && rtx_equal_p (target, source))
1671 return 0;
1672 if (generate)
1673 emit_constant_insn (cond,
1674 gen_rtx_SET (VOIDmode, target, source));
1675 return 1;
1677 break;
1679 case AND:
1680 if (remainder == 0)
1682 if (generate)
1683 emit_constant_insn (cond,
1684 gen_rtx_SET (VOIDmode, target, const0_rtx));
1685 return 1;
1687 if (remainder == 0xffffffff)
1689 if (reload_completed && rtx_equal_p (target, source))
1690 return 0;
1691 if (generate)
1692 emit_constant_insn (cond,
1693 gen_rtx_SET (VOIDmode, target, source));
1694 return 1;
1696 can_invert = 1;
1697 break;
1699 case XOR:
1700 if (remainder == 0)
1702 if (reload_completed && rtx_equal_p (target, source))
1703 return 0;
1704 if (generate)
1705 emit_constant_insn (cond,
1706 gen_rtx_SET (VOIDmode, target, source));
1707 return 1;
1710 /* We don't know how to handle other cases yet. */
1711 gcc_assert (remainder == 0xffffffff);
1713 if (generate)
1714 emit_constant_insn (cond,
1715 gen_rtx_SET (VOIDmode, target,
1716 gen_rtx_NOT (mode, source)));
1717 return 1;
1719 case MINUS:
1720 /* We treat MINUS as (val - source), since (source - val) is always
1721 passed as (source + (-val)). */
1722 if (remainder == 0)
1724 if (generate)
1725 emit_constant_insn (cond,
1726 gen_rtx_SET (VOIDmode, target,
1727 gen_rtx_NEG (mode, source)));
1728 return 1;
1730 if (const_ok_for_arm (val))
1732 if (generate)
1733 emit_constant_insn (cond,
1734 gen_rtx_SET (VOIDmode, target,
1735 gen_rtx_MINUS (mode, GEN_INT (val),
1736 source)));
1737 return 1;
1739 can_negate = 1;
1741 break;
1743 default:
1744 gcc_unreachable ();
1747 /* If we can do it in one insn get out quickly. */
1748 if (const_ok_for_arm (val)
1749 || (can_negate_initial && const_ok_for_arm (-val))
1750 || (can_invert && const_ok_for_arm (~val)))
1752 if (generate)
1753 emit_constant_insn (cond,
1754 gen_rtx_SET (VOIDmode, target,
1755 (source
1756 ? gen_rtx_fmt_ee (code, mode, source,
1757 GEN_INT (val))
1758 : GEN_INT (val))));
1759 return 1;
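/* Roughly, const_ok_for_arm accepts any 8-bit value rotated right by an
   even amount, so e.g. 0x000000ff, 0xff000000 and 0x00000104 are one-insn
   immediates while 0x00000101 is not (illustrative, not exhaustive).  */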
1762 /* Calculate a few attributes that may be useful for specific
1763 optimizations. */
1764 for (i = 31; i >= 0; i--)
1766 if ((remainder & (1 << i)) == 0)
1767 clear_sign_bit_copies++;
1768 else
1769 break;
1772 for (i = 31; i >= 0; i--)
1774 if ((remainder & (1 << i)) != 0)
1775 set_sign_bit_copies++;
1776 else
1777 break;
1780 for (i = 0; i <= 31; i++)
1782 if ((remainder & (1 << i)) == 0)
1783 clear_zero_bit_copies++;
1784 else
1785 break;
1788 for (i = 0; i <= 31; i++)
1790 if ((remainder & (1 << i)) != 0)
1791 set_zero_bit_copies++;
1792 else
1793 break;
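/* Illustrative values: for 0x0ffffff0 both clear_sign_bit_copies and
   clear_zero_bit_copies are 4; for 0xfffffe00 set_sign_bit_copies is 23
   and clear_zero_bit_copies is 9.  */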
1796 switch (code)
1798 case SET:
1799 /* See if we can do this by sign_extending a constant that is known
1800 to be negative. This is a good way of doing it, since the shift
1801 may well merge into a subsequent insn. */
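/* Illustrative example: 0xfffffffe has 31 copies of the sign bit; shifted
   left by 30 it becomes 0x80000000, a valid immediate, so roughly
   "mov Rt, #0x80000000" followed by "mov Rd, Rt, asr #30" rebuilds the
   value in two insns.  */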
1802 if (set_sign_bit_copies > 1)
1804 if (const_ok_for_arm
1805 (temp1 = ARM_SIGN_EXTEND (remainder
1806 << (set_sign_bit_copies - 1))))
1808 if (generate)
1810 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1811 emit_constant_insn (cond,
1812 gen_rtx_SET (VOIDmode, new_src,
1813 GEN_INT (temp1)));
1814 emit_constant_insn (cond,
1815 gen_ashrsi3 (target, new_src,
1816 GEN_INT (set_sign_bit_copies - 1)));
1818 return 2;
1820 /* For an inverted constant, we will need to set the low bits;
1821 these will be shifted out of harm's way. */
1822 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1823 if (const_ok_for_arm (~temp1))
1825 if (generate)
1827 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1828 emit_constant_insn (cond,
1829 gen_rtx_SET (VOIDmode, new_src,
1830 GEN_INT (temp1)));
1831 emit_constant_insn (cond,
1832 gen_ashrsi3 (target, new_src,
1833 GEN_INT (set_sign_bit_copies - 1)));
1835 return 2;
1839 /* See if we can generate this by setting the bottom (or the top)
1840 16 bits, and then shifting these into the other half of the
1841 word. We only look for the simplest cases; to do more would cost
1842 too much. Be careful, however, not to generate this when the
1843 alternative would take fewer insns. */
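/* Illustrative example: 0x04030403 is built by synthesizing 0x0403 in a
   temporary (two insns, as 0x0403 is not a valid immediate) followed by
   roughly "orr Rd, Rt, Rt, lsl #16", three insns in total.  */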
1844 if (val & 0xffff0000)
1846 temp1 = remainder & 0xffff0000;
1847 temp2 = remainder & 0x0000ffff;
1849 /* Overlaps outside this range are best done using other methods. */
1850 for (i = 9; i < 24; i++)
1852 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1853 && !const_ok_for_arm (temp2))
1855 rtx new_src = (subtargets
1856 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1857 : target);
1858 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1859 source, subtargets, generate);
1860 source = new_src;
1861 if (generate)
1862 emit_constant_insn
1863 (cond,
1864 gen_rtx_SET
1865 (VOIDmode, target,
1866 gen_rtx_IOR (mode,
1867 gen_rtx_ASHIFT (mode, source,
1868 GEN_INT (i)),
1869 source)));
1870 return insns + 1;
1874 /* Don't duplicate cases already considered. */
1875 for (i = 17; i < 24; i++)
1877 if (((temp1 | (temp1 >> i)) == remainder)
1878 && !const_ok_for_arm (temp1))
1880 rtx new_src = (subtargets
1881 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1882 : target);
1883 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1884 source, subtargets, generate);
1885 source = new_src;
1886 if (generate)
1887 emit_constant_insn
1888 (cond,
1889 gen_rtx_SET (VOIDmode, target,
1890 gen_rtx_IOR
1891 (mode,
1892 gen_rtx_LSHIFTRT (mode, source,
1893 GEN_INT (i)),
1894 source)));
1895 return insns + 1;
1899 break;
1901 case IOR:
1902 case XOR:
1903 /* If we have IOR or XOR, and the constant can be loaded in a
1904 single instruction, and we can find a temporary to put it in,
1905 then this can be done in two instructions instead of 3-4. */
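/* Illustrative example: for r0 |= 0xffffff01, ~0xffffff01 == 0xfe is a
   valid immediate, so the constant is loaded with a single MVN into a
   scratch register and combined with one ORR, two insns in total.  */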
1906 if (subtargets
1907 /* TARGET can't be NULL if SUBTARGETS is 0.  */
1908 || (reload_completed && !reg_mentioned_p (target, source)))
1910 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1912 if (generate)
1914 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1916 emit_constant_insn (cond,
1917 gen_rtx_SET (VOIDmode, sub,
1918 GEN_INT (val)));
1919 emit_constant_insn (cond,
1920 gen_rtx_SET (VOIDmode, target,
1921 gen_rtx_fmt_ee (code, mode,
1922 source, sub)));
1924 return 2;
1928 if (code == XOR)
1929 break;
1931 if (set_sign_bit_copies > 8
1932 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1934 if (generate)
1936 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1937 rtx shift = GEN_INT (set_sign_bit_copies);
1939 emit_constant_insn
1940 (cond,
1941 gen_rtx_SET (VOIDmode, sub,
1942 gen_rtx_NOT (mode,
1943 gen_rtx_ASHIFT (mode,
1944 source,
1945 shift))));
1946 emit_constant_insn
1947 (cond,
1948 gen_rtx_SET (VOIDmode, target,
1949 gen_rtx_NOT (mode,
1950 gen_rtx_LSHIFTRT (mode, sub,
1951 shift))));
1953 return 2;
1956 if (set_zero_bit_copies > 8
1957 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1959 if (generate)
1961 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1962 rtx shift = GEN_INT (set_zero_bit_copies);
1964 emit_constant_insn
1965 (cond,
1966 gen_rtx_SET (VOIDmode, sub,
1967 gen_rtx_NOT (mode,
1968 gen_rtx_LSHIFTRT (mode,
1969 source,
1970 shift))));
1971 emit_constant_insn
1972 (cond,
1973 gen_rtx_SET (VOIDmode, target,
1974 gen_rtx_NOT (mode,
1975 gen_rtx_ASHIFT (mode, sub,
1976 shift))));
1978 return 2;
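/* Fallback sketch: when ~VAL is a valid immediate, SOURCE | VAL can still
   be computed without a separate constant load as ~(~SOURCE & ~VAL),
   roughly MVN, AND-with-immediate, MVN -- the three insns counted below.  */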
1981 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1983 if (generate)
1985 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1986 emit_constant_insn (cond,
1987 gen_rtx_SET (VOIDmode, sub,
1988 gen_rtx_NOT (mode, source)));
1989 source = sub;
1990 if (subtargets)
1991 sub = gen_reg_rtx (mode);
1992 emit_constant_insn (cond,
1993 gen_rtx_SET (VOIDmode, sub,
1994 gen_rtx_AND (mode, source,
1995 GEN_INT (temp1))));
1996 emit_constant_insn (cond,
1997 gen_rtx_SET (VOIDmode, target,
1998 gen_rtx_NOT (mode, sub)));
2000 return 3;
2002 break;
2004 case AND:
2005 /* See if two shifts will do 2 or more insns' worth of work.  */
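/* Illustrative example: x & 0x0000ffff has clear_sign_bit_copies == 16, so
   roughly "mov Rt, Rs, lsl #16; mov Rd, Rt, lsr #16" clears the top half
   in two insns without loading 0xffff; the clear_zero case below does the
   same with the shift directions swapped.  */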
2006 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2008 HOST_WIDE_INT shift_mask = ((0xffffffff
2009 << (32 - clear_sign_bit_copies))
2010 & 0xffffffff);
2012 if ((remainder | shift_mask) != 0xffffffff)
2014 if (generate)
2016 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2017 insns = arm_gen_constant (AND, mode, cond,
2018 remainder | shift_mask,
2019 new_src, source, subtargets, 1);
2020 source = new_src;
2022 else
2024 rtx targ = subtargets ? NULL_RTX : target;
2025 insns = arm_gen_constant (AND, mode, cond,
2026 remainder | shift_mask,
2027 targ, source, subtargets, 0);
2031 if (generate)
2033 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2034 rtx shift = GEN_INT (clear_sign_bit_copies);
2036 emit_insn (gen_ashlsi3 (new_src, source, shift));
2037 emit_insn (gen_lshrsi3 (target, new_src, shift));
2040 return insns + 2;
2043 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2045 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2047 if ((remainder | shift_mask) != 0xffffffff)
2049 if (generate)
2051 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2053 insns = arm_gen_constant (AND, mode, cond,
2054 remainder | shift_mask,
2055 new_src, source, subtargets, 1);
2056 source = new_src;
2058 else
2060 rtx targ = subtargets ? NULL_RTX : target;
2062 insns = arm_gen_constant (AND, mode, cond,
2063 remainder | shift_mask,
2064 targ, source, subtargets, 0);
2068 if (generate)
2070 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2071 rtx shift = GEN_INT (clear_zero_bit_copies);
2073 emit_insn (gen_lshrsi3 (new_src, source, shift));
2074 emit_insn (gen_ashlsi3 (target, new_src, shift));
2077 return insns + 2;
2080 break;
2082 default:
2083 break;
2086 for (i = 0; i < 32; i++)
2087 if (remainder & (1 << i))
2088 num_bits_set++;
2090 if (code == AND || (can_invert && num_bits_set > 16))
2091 remainder = (~remainder) & 0xffffffff;
2092 else if (code == PLUS && num_bits_set > 16)
2093 remainder = (-remainder) & 0xffffffff;
2094 else
2096 can_invert = 0;
2097 can_negate = 0;
2100 /* Now try and find a way of doing the job in either two or three
2101 instructions.
2102 We start by looking for the largest block of zeros that are aligned on
2103 a 2-bit boundary; we then fill up the temps, wrapping around to the
2104 top of the word when we drop off the bottom.
2105 In the worst case this code should produce no more than four insns. */
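/* Illustrative example: 0x00000fff is not a valid immediate, but it splits
   into the aligned 8-bit chunks 0xff0 and 0x00f, so a SET comes out as
   roughly "mov Rd, #0xff0; add Rd, Rd, #0x00f", two insns.  */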
2107 int best_start = 0;
2108 int best_consecutive_zeros = 0;
2110 for (i = 0; i < 32; i += 2)
2112 int consecutive_zeros = 0;
2114 if (!(remainder & (3 << i)))
2116 while ((i < 32) && !(remainder & (3 << i)))
2118 consecutive_zeros += 2;
2119 i += 2;
2121 if (consecutive_zeros > best_consecutive_zeros)
2123 best_consecutive_zeros = consecutive_zeros;
2124 best_start = i - consecutive_zeros;
2126 i -= 2;
2130 /* So long as it won't require any more insns to do so, it's
2131 desirable to emit a small constant (in bits 0...9) in the last
2132 insn. This way there is more chance that it can be combined with
2133 a later addressing insn to form a pre-indexed load or store
2134 operation. Consider:
2136 *((volatile int *)0xe0000100) = 1;
2137 *((volatile int *)0xe0000110) = 2;
2139 We want this to wind up as:
2141 mov rA, #0xe0000000
2142 mov rB, #1
2143 str rB, [rA, #0x100]
2144 mov rB, #2
2145 str rB, [rA, #0x110]
2147 rather than having to synthesize both large constants from scratch.
2149 Therefore, we calculate how many insns would be required to emit
2150 the constant starting from `best_start', and also starting from
2151 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2152 yield a shorter sequence, we may as well use zero. */
2153 if (best_start != 0
2154 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2155 && (count_insns_for_constant (remainder, 0) <=
2156 count_insns_for_constant (remainder, best_start)))
2157 best_start = 0;
2159 /* Now start emitting the insns. */
2160 i = best_start;
2163 int end;
2165 if (i <= 0)
2166 i += 32;
2167 if (remainder & (3 << (i - 2)))
2169 end = i - 8;
2170 if (end < 0)
2171 end += 32;
2172 temp1 = remainder & ((0x0ff << end)
2173 | ((i < end) ? (0xff >> (32 - end)) : 0));
2174 remainder &= ~temp1;
2176 if (generate)
2178 rtx new_src, temp1_rtx;
2180 if (code == SET || code == MINUS)
2182 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2183 if (can_invert && code != MINUS)
2184 temp1 = ~temp1;
2186 else
2188 if (remainder && subtargets)
2189 new_src = gen_reg_rtx (mode);
2190 else
2191 new_src = target;
2192 if (can_invert)
2193 temp1 = ~temp1;
2194 else if (can_negate)
2195 temp1 = -temp1;
2198 temp1 = trunc_int_for_mode (temp1, mode);
2199 temp1_rtx = GEN_INT (temp1);
2201 if (code == SET)
2203 else if (code == MINUS)
2204 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2205 else
2206 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2208 emit_constant_insn (cond,
2209 gen_rtx_SET (VOIDmode, new_src,
2210 temp1_rtx));
2211 source = new_src;
2214 if (code == SET)
2216 can_invert = 0;
2217 code = PLUS;
2219 else if (code == MINUS)
2220 code = PLUS;
2222 insns++;
2223 i -= 6;
2225 i -= 2;
2227 while (remainder);
2230 return insns;
2233 /* Canonicalize a comparison so that we are more likely to recognize it.
2234 This can be done for a few constant compares, where we can make the
2235 immediate value easier to load. */
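/* Illustrative example: (x > 0xfff) is rewritten as (x >= 0x1000), since
   0xfff is not a valid ARM immediate but 0x1000 is; the comparison code is
   adjusted (GT becomes GE) so the meaning is unchanged.  */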
2237 enum rtx_code
2238 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2240 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2242 switch (code)
2244 case EQ:
2245 case NE:
2246 return code;
2248 case GT:
2249 case LE:
2250 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2251 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2253 *op1 = GEN_INT (i + 1);
2254 return code == GT ? GE : LT;
2256 break;
2258 case GE:
2259 case LT:
2260 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2261 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2263 *op1 = GEN_INT (i - 1);
2264 return code == GE ? GT : LE;
2266 break;
2268 case GTU:
2269 case LEU:
2270 if (i != ~((unsigned HOST_WIDE_INT) 0)
2271 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2273 *op1 = GEN_INT (i + 1);
2274 return code == GTU ? GEU : LTU;
2276 break;
2278 case GEU:
2279 case LTU:
2280 if (i != 0
2281 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2283 *op1 = GEN_INT (i - 1);
2284 return code == GEU ? GTU : LEU;
2286 break;
2288 default:
2289 gcc_unreachable ();
2292 return code;
2296 /* Define how to find the value returned by a function. */
2299 rtx arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2301 enum machine_mode mode;
2302 int unsignedp ATTRIBUTE_UNUSED;
2303 rtx r ATTRIBUTE_UNUSED;
2305 mode = TYPE_MODE (type);
2306 /* Promote integer types. */
2307 if (INTEGRAL_TYPE_P (type))
2308 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2310 /* Promote small structs returned in a register to full-word size
2311 for big-endian AAPCS. */
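/* Illustrative example: on a big-endian AAPCS target a 3-byte struct is
   widened to SImode here, so its bytes end up in the most significant end
   of r0 as that ABI expects.  */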
2312 if (arm_return_in_msb (type))
2314 HOST_WIDE_INT size = int_size_in_bytes (type);
2315 if (size % UNITS_PER_WORD != 0)
2317 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2318 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2322 return LIBCALL_VALUE(mode);
2325 /* Determine the amount of memory needed to store the possible return
2326 registers of an untyped call. */
2328 int arm_apply_result_size (void)
2330 int size = 16;
2332 if (TARGET_ARM)
2334 if (TARGET_HARD_FLOAT_ABI)
2336 if (TARGET_FPA)
2337 size += 12;
2338 if (TARGET_MAVERICK)
2339 size += 8;
2341 if (TARGET_IWMMXT_ABI)
2342 size += 8;
2345 return size;
2348 /* Decide whether a type should be returned in memory (true)
2349 or in a register (false). This is called by the macro
2350 RETURN_IN_MEMORY. */
2352 int arm_return_in_memory (tree type)
2354 HOST_WIDE_INT size;
2356 if (!AGGREGATE_TYPE_P (type) &&
2357 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2358 /* All simple types are returned in registers.
2359 For AAPCS, complex types are treated the same as aggregates. */
2360 return 0;
2362 size = int_size_in_bytes (type);
2364 if (arm_abi != ARM_ABI_APCS)
2366 /* ATPCS and later return aggregate types in memory only if they are
2367 larger than a word (or are variable size). */
2368 return (size < 0 || size > UNITS_PER_WORD);
2371 /* For the arm-wince targets we choose to be compatible with Microsoft's
2372 ARM and Thumb compilers, which always return aggregates in memory. */
2373 #ifndef ARM_WINCE
2374 /* All structures/unions bigger than one word are returned in memory.
2375 Also catch the case where int_size_in_bytes returns -1. In this case
2376 the aggregate is either huge or of variable size, and in either case
2377 we will want to return it via memory and not in a register. */
2378 if (size < 0 || size > UNITS_PER_WORD)
2379 return 1;
2381 if (TREE_CODE (type) == RECORD_TYPE)
2383 tree field;
2385 /* For a struct the APCS says that we only return in a register
2386 if the type is 'integer like' and every addressable element
2387 has an offset of zero. For practical purposes this means
2388 that the structure can have at most one non-bit-field element
2389 and that this element must be the first one in the structure. */
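/* Illustrative examples of the APCS rules below: struct { int i; } is
   "integer like" and is returned in r0, whereas struct { float f; } and
   any struct larger than one word are returned in memory.  */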
2391 /* Find the first field, ignoring non FIELD_DECL things which will
2392 have been created by C++. */
2393 for (field = TYPE_FIELDS (type);
2394 field && TREE_CODE (field) != FIELD_DECL;
2395 field = TREE_CHAIN (field))
2396 continue;
2398 if (field == NULL)
2399 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2401 /* Check that the first field is valid for returning in a register. */
2403 /* ... Floats are not allowed.  */
2404 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2405 return 1;
2407 /* ... Aggregates that are not themselves valid for returning in
2408 a register are not allowed. */
2409 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2410 return 1;
2412 /* Now check the remaining fields, if any. Only bitfields are allowed,
2413 since they are not addressable. */
2414 for (field = TREE_CHAIN (field);
2415 field;
2416 field = TREE_CHAIN (field))
2418 if (TREE_CODE (field) != FIELD_DECL)
2419 continue;
2421 if (!DECL_BIT_FIELD_TYPE (field))
2422 return 1;
2425 return 0;
2428 if (TREE_CODE (type) == UNION_TYPE)
2430 tree field;
2432 /* Unions can be returned in registers if every element is
2433 integral, or can be returned in an integer register. */
2434 for (field = TYPE_FIELDS (type);
2435 field;
2436 field = TREE_CHAIN (field))
2438 if (TREE_CODE (field) != FIELD_DECL)
2439 continue;
2441 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2442 return 1;
2444 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2445 return 1;
2448 return 0;
2450 #endif /* not ARM_WINCE */
2452 /* Return all other types in memory. */
2453 return 1;
2456 /* Indicate whether or not words of a double are in big-endian order. */
2459 int arm_float_words_big_endian (void)
2461 if (TARGET_MAVERICK)
2462 return 0;
2464 /* For FPA, float words are always big-endian. For VFP, float words
2465 follow the memory system mode. */
2467 if (TARGET_FPA)
2469 return 1;
2472 if (TARGET_VFP)
2473 return (TARGET_BIG_END ? 1 : 0);
2475 return 1;
2478 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2479 for a call to a function whose data type is FNTYPE.
2480 For a library call, FNTYPE is NULL. */
2481 void
2482 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2483 rtx libname ATTRIBUTE_UNUSED,
2484 tree fndecl ATTRIBUTE_UNUSED)
2486 /* On the ARM, the offset starts at 0. */
2487 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2488 pcum->iwmmxt_nregs = 0;
2489 pcum->can_split = true;
2491 pcum->call_cookie = CALL_NORMAL;
2493 if (TARGET_LONG_CALLS)
2494 pcum->call_cookie = CALL_LONG;
2496 /* Check for long call/short call attributes. The attributes
2497 override any command line option. */
2498 if (fntype)
2500 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2501 pcum->call_cookie = CALL_SHORT;
2502 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2503 pcum->call_cookie = CALL_LONG;
2506 /* Varargs vectors are treated the same as long long.
2507 named_count avoids having to change the way ARM handles 'named'.  */
2508 pcum->named_count = 0;
2509 pcum->nargs = 0;
2511 if (TARGET_REALLY_IWMMXT && fntype)
2513 tree fn_arg;
2515 for (fn_arg = TYPE_ARG_TYPES (fntype);
2516 fn_arg;
2517 fn_arg = TREE_CHAIN (fn_arg))
2518 pcum->named_count += 1;
2520 if (! pcum->named_count)
2521 pcum->named_count = INT_MAX;
2526 /* Return true if mode/type need doubleword alignment. */
2527 bool
2528 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2530 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2531 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2535 /* Determine where to put an argument to a function.
2536 Value is zero to push the argument on the stack,
2537 or a hard register in which to store the argument.
2539 MODE is the argument's machine mode.
2540 TYPE is the data type of the argument (as a tree).
2541 This is null for libcalls where that information may
2542 not be available.
2543 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2544 the preceding args and about the function being called.
2545 NAMED is nonzero if this argument is a named parameter
2546 (otherwise it is an extra parameter matching an ellipsis). */
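/* Illustrative example: for f (int, long long) on an AAPCS target the int
   is passed in r0 and the long long, which needs doubleword alignment, is
   placed in r2/r3, leaving r1 unused.  */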
2549 rtx arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2550 tree type, int named)
2552 int nregs;
2554 /* Varargs vectors are treated the same as long long.
2555 named_count avoids having to change the way ARM handles 'named'.  */
2556 if (TARGET_IWMMXT_ABI
2557 && arm_vector_mode_supported_p (mode)
2558 && pcum->named_count > pcum->nargs + 1)
2560 if (pcum->iwmmxt_nregs <= 9)
2561 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2562 else
2564 pcum->can_split = false;
2565 return NULL_RTX;
2569 /* Put doubleword aligned quantities in even register pairs. */
2570 if (pcum->nregs & 1
2571 && ARM_DOUBLEWORD_ALIGN
2572 && arm_needs_doubleword_align (mode, type))
2573 pcum->nregs++;
2575 if (mode == VOIDmode)
2576 /* Compute operand 2 of the call insn. */
2577 return GEN_INT (pcum->call_cookie);
2579 /* Only allow splitting an arg between regs and memory if all preceding
2580 args were allocated to regs. For args passed by reference we only count
2581 the reference pointer. */
2582 if (pcum->can_split)
2583 nregs = 1;
2584 else
2585 nregs = ARM_NUM_REGS2 (mode, type);
2587 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2588 return NULL_RTX;
2590 return gen_rtx_REG (mode, pcum->nregs);
2593 static int
2594 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2595 tree type, bool named ATTRIBUTE_UNUSED)
2597 int nregs = pcum->nregs;
2599 if (arm_vector_mode_supported_p (mode))
2600 return 0;
2602 if (NUM_ARG_REGS > nregs
2603 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2604 && pcum->can_split)
2605 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2607 return 0;
2610 /* Variable sized types are passed by reference. This is a GCC
2611 extension to the ARM ABI. */
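/* For instance, a GNU C variable-length array type has a non-constant
   TYPE_SIZE, so an argument of such a type is passed by reference (as a
   pointer) rather than by value (illustrative).  */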
2613 static bool
2614 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2615 enum machine_mode mode ATTRIBUTE_UNUSED,
2616 tree type, bool named ATTRIBUTE_UNUSED)
2618 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2621 /* Encode the current state of the #pragma [no_]long_calls. */
2622 typedef enum
2624 OFF, /* No #pragma [no_]long_calls is in effect. */
2625 LONG, /* #pragma long_calls is in effect. */
2626 SHORT /* #pragma no_long_calls is in effect. */
2627 } arm_pragma_enum;
2629 static arm_pragma_enum arm_pragma_long_calls = OFF;
2631 void
2632 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2634 arm_pragma_long_calls = LONG;
2637 void
2638 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2640 arm_pragma_long_calls = SHORT;
2643 void
2644 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2646 arm_pragma_long_calls = OFF;
2649 /* Table of machine attributes. */
2650 const struct attribute_spec arm_attribute_table[] =
2652 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2653 /* Function calls made to this symbol must be done indirectly, because
2654 it may lie outside of the 26 bit addressing range of a normal function
2655 call. */
2656 { "long_call", 0, 0, false, true, true, NULL },
2657 /* Whereas these functions are always known to reside within the 26 bit
2658 addressing range. */
2659 { "short_call", 0, 0, false, true, true, NULL },
2660 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2661 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2662 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2663 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2664 #ifdef ARM_PE
2665 /* ARM/PE has three new attributes:
2666 interfacearm - ?
2667 dllexport - for exporting a function/variable that will live in a dll
2668 dllimport - for importing a function/variable from a dll
2670 Microsoft allows multiple declspecs in one __declspec, separating
2671 them with spaces. We do NOT support this. Instead, use __declspec
2672 multiple times.  */
2674 { "dllimport", 0, 0, true, false, false, NULL },
2675 { "dllexport", 0, 0, true, false, false, NULL },
2676 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2677 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2678 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2679 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2680 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2681 #endif
2682 { NULL, 0, 0, false, false, false, NULL }
2685 /* Handle an attribute requiring a FUNCTION_DECL;
2686 arguments as in struct attribute_spec.handler. */
2687 static tree
2688 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2689 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2691 if (TREE_CODE (*node) != FUNCTION_DECL)
2693 warning (0, "%qs attribute only applies to functions",
2694 IDENTIFIER_POINTER (name));
2695 *no_add_attrs = true;
2698 return NULL_TREE;
2701 /* Handle an "interrupt" or "isr" attribute;
2702 arguments as in struct attribute_spec.handler. */
2703 static tree
2704 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2705 bool *no_add_attrs)
2707 if (DECL_P (*node))
2709 if (TREE_CODE (*node) != FUNCTION_DECL)
2711 warning (0, "%qs attribute only applies to functions",
2712 IDENTIFIER_POINTER (name));
2713 *no_add_attrs = true;
2715 /* FIXME: the argument, if any, is checked for type attributes;
2716 should it be checked for decl ones? */
2718 else
2720 if (TREE_CODE (*node) == FUNCTION_TYPE
2721 || TREE_CODE (*node) == METHOD_TYPE)
2723 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2725 warning (0, "%qs attribute ignored", IDENTIFIER_POINTER (name));
2726 *no_add_attrs = true;
2729 else if (TREE_CODE (*node) == POINTER_TYPE
2730 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2731 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2732 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2734 *node = build_variant_type_copy (*node);
2735 TREE_TYPE (*node) = build_type_attribute_variant
2736 (TREE_TYPE (*node),
2737 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2738 *no_add_attrs = true;
2740 else
2742 /* Possibly pass this attribute on from the type to a decl. */
2743 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2744 | (int) ATTR_FLAG_FUNCTION_NEXT
2745 | (int) ATTR_FLAG_ARRAY_NEXT))
2747 *no_add_attrs = true;
2748 return tree_cons (name, args, NULL_TREE);
2750 else
2752 warning (0, "%qs attribute ignored", IDENTIFIER_POINTER (name));
2757 return NULL_TREE;
2760 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2761 /* Handle the "notshared" attribute. This attribute is another way of
2762 requesting hidden visibility. ARM's compiler supports
2763 "__declspec(notshared)"; we support the same thing via an
2764 attribute. */
2766 static tree
2767 arm_handle_notshared_attribute (tree *node,
2768 tree name ATTRIBUTE_UNUSED,
2769 tree args ATTRIBUTE_UNUSED,
2770 int flags ATTRIBUTE_UNUSED,
2771 bool *no_add_attrs)
2773 tree decl = TYPE_NAME (*node);
2775 if (decl)
2777 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2778 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2779 *no_add_attrs = false;
2781 return NULL_TREE;
2783 #endif
2785 /* Return 0 if the attributes for two types are incompatible, 1 if they
2786 are compatible, and 2 if they are nearly compatible (which causes a
2787 warning to be generated). */
2788 static int
2789 arm_comp_type_attributes (tree type1, tree type2)
2791 int l1, l2, s1, s2;
2793 /* Check for mismatch of non-default calling convention. */
2794 if (TREE_CODE (type1) != FUNCTION_TYPE)
2795 return 1;
2797 /* Check for mismatched call attributes. */
2798 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2799 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2800 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2801 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2803 /* Only bother to check if an attribute is defined. */
2804 if (l1 | l2 | s1 | s2)
2806 /* If one type has an attribute, the other must have the same attribute. */
2807 if ((l1 != l2) || (s1 != s2))
2808 return 0;
2810 /* Disallow mixed attributes. */
2811 if ((l1 & s2) || (l2 & s1))
2812 return 0;
2815 /* Check for mismatched ISR attribute. */
2816 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2817 if (! l1)
2818 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2819 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2820 if (! l2)
2821 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2822 if (l1 != l2)
2823 return 0;
2825 return 1;
2828 /* Encode long_call or short_call attribute by prefixing
2829 symbol name in DECL with a special character FLAG. */
2830 void
2831 arm_encode_call_attribute (tree decl, int flag)
2833 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2834 int len = strlen (str);
2835 char * newstr;
2837 /* Do not allow weak functions to be treated as short call. */
2838 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2839 return;
2841 newstr = alloca (len + 2);
2842 newstr[0] = flag;
2843 strcpy (newstr + 1, str);
2845 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2846 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2849 /* Assign default attributes to a newly defined type. This is used to
2850 set short_call/long_call attributes for function types of
2851 functions defined inside corresponding #pragma scopes. */
2852 static void
2853 arm_set_default_type_attributes (tree type)
2855 /* Add __attribute__ ((long_call)) to all functions when inside
2856 #pragma long_calls, or __attribute__ ((short_call)) when inside
2857 #pragma no_long_calls.  */
2858 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2860 tree type_attr_list, attr_name;
2861 type_attr_list = TYPE_ATTRIBUTES (type);
2863 if (arm_pragma_long_calls == LONG)
2864 attr_name = get_identifier ("long_call");
2865 else if (arm_pragma_long_calls == SHORT)
2866 attr_name = get_identifier ("short_call");
2867 else
2868 return;
2870 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2871 TYPE_ATTRIBUTES (type) = type_attr_list;
2875 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2876 defined within the current compilation unit. If this cannot be
2877 determined, then 0 is returned. */
2878 static int
2879 current_file_function_operand (rtx sym_ref)
2881 /* This is a bit of a fib. A function will have a short call flag
2882 applied to its name if it has the short call attribute, or it has
2883 already been defined within the current compilation unit. */
2884 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2885 return 1;
2887 /* The current function is always defined within the current compilation
2888 unit. If it is a weak definition, however, then this may not be the real
2889 definition of the function, and so we have to say no. */
2890 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2891 && !DECL_WEAK (current_function_decl))
2892 return 1;
2894 /* We cannot make the determination - default to returning 0. */
2895 return 0;
2898 /* Return nonzero if a 32 bit "long_call" should be generated for
2899 this call. We generate a long_call if the function:
2901 a. has an __attribute__ ((long_call))
2902 or b. is within the scope of a #pragma long_calls
2903 or c. the -mlong-calls command line switch has been specified
2904 . and either:
2905 1. -ffunction-sections is in effect
2906 or 2. the current function has __attribute__ ((section))
2907 or 3. the target function has __attribute__ ((section))
2909 However we do not generate a long call if the function:
2911 d. has an __attribute__ ((short_call))
2912 or e. is inside the scope of a #pragma no_long_calls
2913 or f. is defined within the current compilation unit.
2915 This function will be called by C fragments contained in the machine
2916 description file. SYM_REF and CALL_COOKIE correspond to the matched
2917 rtl operands. CALL_SYMBOL is used to distinguish between
2918 two different callers of the function. It is set to 1 in the
2919 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2920 and "call_value" patterns. This is because of the difference in the
2921 SYM_REFs passed by these patterns. */
2923 int arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2925 if (!call_symbol)
2927 if (GET_CODE (sym_ref) != MEM)
2928 return 0;
2930 sym_ref = XEXP (sym_ref, 0);
2933 if (GET_CODE (sym_ref) != SYMBOL_REF)
2934 return 0;
2936 if (call_cookie & CALL_SHORT)
2937 return 0;
2939 if (TARGET_LONG_CALLS)
2941 if (flag_function_sections
2942 || DECL_SECTION_NAME (current_function_decl))
2943 /* c.3 is handled by the definition of the
2944 ARM_DECLARE_FUNCTION_SIZE macro. */
2945 return 1;
2948 if (current_file_function_operand (sym_ref))
2949 return 0;
2951 return (call_cookie & CALL_LONG)
2952 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2953 || TARGET_LONG_CALLS;
2956 /* Return nonzero if it is ok to make a tail-call to DECL. */
2957 static bool
2958 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2960 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2962 if (cfun->machine->sibcall_blocked)
2963 return false;
2965 /* Never tailcall something for which we have no decl, or if we
2966 are in Thumb mode. */
2967 if (decl == NULL || TARGET_THUMB)
2968 return false;
2970 /* Get the calling method. */
2971 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2972 call_type = CALL_SHORT;
2973 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2974 call_type = CALL_LONG;
2976 /* Cannot tail-call to long calls, since these are out of range of
2977 a branch instruction. However, if not compiling PIC, we know
2978 we can reach the symbol if it is in this compilation unit. */
2979 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2980 return false;
2982 /* If we are interworking and the function is not declared static
2983 then we can't tail-call it unless we know that it exists in this
2984 compilation unit (since it might be a Thumb routine). */
2985 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2986 return false;
2988 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2989 if (IS_INTERRUPT (arm_current_func_type ()))
2990 return false;
2992 /* Everything else is ok. */
2993 return true;
2997 /* Addressing mode support functions. */
2999 /* Return nonzero if X is a legitimate immediate operand when compiling
3000 for PIC. */
3002 int legitimate_pic_operand_p (rtx x)
3004 if (CONSTANT_P (x)
3005 && flag_pic
3006 && (GET_CODE (x) == SYMBOL_REF
3007 || (GET_CODE (x) == CONST
3008 && GET_CODE (XEXP (x, 0)) == PLUS
3009 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
3010 return 0;
3012 return 1;
3016 rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3018 if (GET_CODE (orig) == SYMBOL_REF
3019 || GET_CODE (orig) == LABEL_REF)
3021 #ifndef AOF_ASSEMBLER
3022 rtx pic_ref, address;
3023 #endif
3024 rtx insn;
3025 int subregs = 0;
3027 if (reg == 0)
3029 gcc_assert (!no_new_pseudos);
3030 reg = gen_reg_rtx (Pmode);
3032 subregs = 1;
3035 #ifdef AOF_ASSEMBLER
3036 /* The AOF assembler can generate relocations for these directly, and
3037 understands that the PIC register has to be added into the offset. */
3038 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3039 #else
3040 if (subregs)
3041 address = gen_reg_rtx (Pmode);
3042 else
3043 address = reg;
3045 if (TARGET_ARM)
3046 emit_insn (gen_pic_load_addr_arm (address, orig));
3047 else
3048 emit_insn (gen_pic_load_addr_thumb (address, orig));
3050 if ((GET_CODE (orig) == LABEL_REF
3051 || (GET_CODE (orig) == SYMBOL_REF &&
3052 SYMBOL_REF_LOCAL_P (orig)))
3053 && NEED_GOT_RELOC)
3054 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3055 else
3057 pic_ref = gen_const_mem (Pmode,
3058 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3059 address));
3062 insn = emit_move_insn (reg, pic_ref);
3063 #endif
3064 current_function_uses_pic_offset_table = 1;
3065 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3066 by the loop pass.  */
3067 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3068 REG_NOTES (insn));
3069 return reg;
3071 else if (GET_CODE (orig) == CONST)
3073 rtx base, offset;
3075 if (GET_CODE (XEXP (orig, 0)) == PLUS
3076 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3077 return orig;
3079 if (reg == 0)
3081 gcc_assert (!no_new_pseudos);
3082 reg = gen_reg_rtx (Pmode);
3085 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3087 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3088 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3089 base == reg ? 0 : reg);
3091 if (GET_CODE (offset) == CONST_INT)
3093 /* The base register doesn't really matter; we only want to
3094 test the index for the appropriate mode. */
3095 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3097 gcc_assert (no_new_pseudos);
3098 offset = force_reg (Pmode, offset);
3101 if (GET_CODE (offset) == CONST_INT)
3102 return plus_constant (base, INTVAL (offset));
3105 if (GET_MODE_SIZE (mode) > 4
3106 && (GET_MODE_CLASS (mode) == MODE_INT
3107 || TARGET_SOFT_FLOAT))
3109 emit_insn (gen_addsi3 (reg, base, offset));
3110 return reg;
3113 return gen_rtx_PLUS (Pmode, base, offset);
3116 return orig;
3120 /* Find a spare low register to use during the prologue of a function. */
3122 static int
3123 thumb_find_work_register (unsigned long pushed_regs_mask)
3125 int reg;
3127 /* Check the argument registers first as these are call-used. The
3128 register allocation order means that sometimes r3 might be used
3129 but earlier argument registers might not, so check them all. */
3130 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3131 if (!regs_ever_live[reg])
3132 return reg;
3134 /* Before going on to check the call-saved registers we can try a couple
3135 more ways of deducing that r3 is available. The first is when we are
3136 pushing anonymous arguments onto the stack and we have fewer than 4
3137 registers' worth of fixed arguments (*). In this case r3 will be part of
3138 the variable argument list and so we can be sure that it will be
3139 pushed right at the start of the function. Hence it will be available
3140 for the rest of the prologue.
3141 (*): i.e. current_function_pretend_args_size is greater than 0.  */
3142 if (cfun->machine->uses_anonymous_args
3143 && current_function_pretend_args_size > 0)
3144 return LAST_ARG_REGNUM;
3146 /* The other case is when we have fixed arguments but fewer than 4 registers'
3147 worth. In this case r3 might be used in the body of the function, but
3148 it is not being used to convey an argument into the function. In theory
3149 we could just check current_function_args_size to see how many bytes are
3150 being passed in argument registers, but it seems that it is unreliable.
3151 Sometimes it will have the value 0 when in fact arguments are being
3152 passed. (See testcase execute/20021111-1.c for an example). So we also
3153 check the args_info.nregs field as well. The problem with this field is
3154 that it makes no allowances for arguments that are passed to the
3155 function but which are not used. Hence we could miss an opportunity
3156 when a function has an unused argument in r3. But it is better to be
3157 safe than to be sorry. */
3158 if (! cfun->machine->uses_anonymous_args
3159 && current_function_args_size >= 0
3160 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3161 && cfun->args_info.nregs < 4)
3162 return LAST_ARG_REGNUM;
3164 /* Otherwise look for a call-saved register that is going to be pushed. */
3165 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3166 if (pushed_regs_mask & (1 << reg))
3167 return reg;
3169 /* Something went wrong - thumb_compute_save_reg_mask()
3170 should have arranged for a suitable register to be pushed. */
3171 gcc_unreachable ();
3175 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3176 low register. */
3178 void
3179 arm_load_pic_register (unsigned int scratch)
3181 #ifndef AOF_ASSEMBLER
3182 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3183 rtx global_offset_table;
3185 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3186 return;
3188 gcc_assert (flag_pic);
3190 l1 = gen_label_rtx ();
3192 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3193 /* On the ARM the PC register contains 'dot + 8' at the time of the
3194 addition; on the Thumb it is 'dot + 4'.  */
3195 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3196 if (GOT_PCREL)
3197 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3198 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3199 else
3200 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3202 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3204 if (TARGET_ARM)
3206 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3207 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3209 else
3211 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3213 /* We will have pushed the pic register, so should always be
3214 able to find a work register. */
3215 pic_tmp = gen_rtx_REG (SImode, scratch);
3216 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3217 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3219 else
3220 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3221 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3224 /* Need to emit this whether or not we obey regdecls,
3225 since setjmp/longjmp can cause life info to screw up. */
3226 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3227 #endif /* AOF_ASSEMBLER */
3231 /* Return nonzero if X is valid as an ARM state addressing register. */
3232 static int
3233 arm_address_register_rtx_p (rtx x, int strict_p)
3235 int regno;
3237 if (GET_CODE (x) != REG)
3238 return 0;
3240 regno = REGNO (x);
3242 if (strict_p)
3243 return ARM_REGNO_OK_FOR_BASE_P (regno);
3245 return (regno <= LAST_ARM_REGNUM
3246 || regno >= FIRST_PSEUDO_REGISTER
3247 || regno == FRAME_POINTER_REGNUM
3248 || regno == ARG_POINTER_REGNUM);
3251 /* Return nonzero if X is a valid ARM state address operand. */
3253 int arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3254 int strict_p)
3256 bool use_ldrd;
3257 enum rtx_code code = GET_CODE (x);
3259 if (arm_address_register_rtx_p (x, strict_p))
3260 return 1;
3262 use_ldrd = (TARGET_LDRD
3263 && (mode == DImode
3264 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3266 if (code == POST_INC || code == PRE_DEC
3267 || ((code == PRE_INC || code == POST_DEC)
3268 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3269 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3271 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3272 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3273 && GET_CODE (XEXP (x, 1)) == PLUS
3274 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3276 rtx addend = XEXP (XEXP (x, 1), 1);
3278 /* Don't allow ldrd post increment by register because it's hard
3279 to fixup invalid register choices. */
3280 if (use_ldrd
3281 && GET_CODE (x) == POST_MODIFY
3282 && GET_CODE (addend) == REG)
3283 return 0;
3285 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3286 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3289 /* After reload constants split into minipools will have addresses
3290 from a LABEL_REF. */
3291 else if (reload_completed
3292 && (code == LABEL_REF
3293 || (code == CONST
3294 && GET_CODE (XEXP (x, 0)) == PLUS
3295 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3296 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3297 return 1;
3299 else if (mode == TImode)
3300 return 0;
3302 else if (code == PLUS)
3304 rtx xop0 = XEXP (x, 0);
3305 rtx xop1 = XEXP (x, 1);
3307 return ((arm_address_register_rtx_p (xop0, strict_p)
3308 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3309 || (arm_address_register_rtx_p (xop1, strict_p)
3310 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3313 #if 0
3314 /* Reload currently can't handle MINUS, so disable this for now */
3315 else if (GET_CODE (x) == MINUS)
3317 rtx xop0 = XEXP (x, 0);
3318 rtx xop1 = XEXP (x, 1);
3320 return (arm_address_register_rtx_p (xop0, strict_p)
3321 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3323 #endif
3325 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3326 && code == SYMBOL_REF
3327 && CONSTANT_POOL_ADDRESS_P (x)
3328 && ! (flag_pic
3329 && symbol_mentioned_p (get_pool_constant (x))))
3330 return 1;
3332 return 0;
3335 /* Return nonzero if INDEX is valid for an address index operand in
3336 ARM state. */
3337 static int
3338 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3339 int strict_p)
3341 HOST_WIDE_INT range;
3342 enum rtx_code code = GET_CODE (index);
3344 /* Standard coprocessor addressing modes. */
3345 if (TARGET_HARD_FLOAT
3346 && (TARGET_FPA || TARGET_MAVERICK)
3347 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3348 || (TARGET_MAVERICK && mode == DImode)))
3349 return (code == CONST_INT && INTVAL (index) < 1024
3350 && INTVAL (index) > -1024
3351 && (INTVAL (index) & 3) == 0);
3353 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3354 return (code == CONST_INT
3355 && INTVAL (index) < 1024
3356 && INTVAL (index) > -1024
3357 && (INTVAL (index) & 3) == 0);
3359 if (arm_address_register_rtx_p (index, strict_p)
3360 && (GET_MODE_SIZE (mode) <= 4))
3361 return 1;
3363 if (mode == DImode || mode == DFmode)
3365 if (code == CONST_INT)
3367 HOST_WIDE_INT val = INTVAL (index);
3369 if (TARGET_LDRD)
3370 return val > -256 && val < 256;
3371 else
3372 return val > -4096 && val < 4092;
3375 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3378 if (GET_MODE_SIZE (mode) <= 4
3379 && ! (arm_arch4
3380 && (mode == HImode
3381 || (mode == QImode && outer == SIGN_EXTEND))))
3383 if (code == MULT)
3385 rtx xiop0 = XEXP (index, 0);
3386 rtx xiop1 = XEXP (index, 1);
3388 return ((arm_address_register_rtx_p (xiop0, strict_p)
3389 && power_of_two_operand (xiop1, SImode))
3390 || (arm_address_register_rtx_p (xiop1, strict_p)
3391 && power_of_two_operand (xiop0, SImode)));
3393 else if (code == LSHIFTRT || code == ASHIFTRT
3394 || code == ASHIFT || code == ROTATERT)
3396 rtx op = XEXP (index, 1);
3398 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3399 && GET_CODE (op) == CONST_INT
3400 && INTVAL (op) > 0
3401 && INTVAL (op) <= 31);
3405 /* For ARM v4 we may be doing a sign-extend operation during the
3406 load. */
3407 if (arm_arch4)
3409 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3410 range = 256;
3411 else
3412 range = 4096;
3414 else
3415 range = (mode == HImode) ? 4095 : 4096;
3417 return (code == CONST_INT
3418 && INTVAL (index) < range
3419 && INTVAL (index) > -range);
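/* Rough summary of the ranges accepted above: word and unsigned byte
   accesses take immediate offsets up to +/-4095; ARMv4 halfword and
   signed byte loads only +/-255; LDRD-capable DImode/DFmode accesses
   +/-255; FPA/Maverick loads a word-aligned offset within (-1024, 1024).  */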
3422 /* Return nonzero if X is valid as a Thumb state base register. */
3423 static int
3424 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3426 int regno;
3428 if (GET_CODE (x) != REG)
3429 return 0;
3431 regno = REGNO (x);
3433 if (strict_p)
3434 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3436 return (regno <= LAST_LO_REGNUM
3437 || regno > LAST_VIRTUAL_REGISTER
3438 || regno == FRAME_POINTER_REGNUM
3439 || (GET_MODE_SIZE (mode) >= 4
3440 && (regno == STACK_POINTER_REGNUM
3441 || regno >= FIRST_PSEUDO_REGISTER
3442 || x == hard_frame_pointer_rtx
3443 || x == arg_pointer_rtx)));
3446 /* Return nonzero if x is a legitimate index register. This is the case
3447 for any base register that can access a QImode object. */
3448 inline static int
3449 thumb_index_register_rtx_p (rtx x, int strict_p)
3451 return thumb_base_register_rtx_p (x, QImode, strict_p);
3454 /* Return nonzero if x is a legitimate Thumb-state address.
3456 The AP may be eliminated to either the SP or the FP, so we use the
3457 least common denominator, e.g. SImode, and offsets from 0 to 64.
3459 ??? Verify whether the above is the right approach.
3461 ??? Also, the FP may be eliminated to the SP, so perhaps that
3462 needs special handling also.
3464 ??? Look at how the mips16 port solves this problem. It probably uses
3465 better ways to solve some of these problems.
3467 Although it is not incorrect, we don't accept QImode and HImode
3468 addresses based on the frame pointer or arg pointer until the
3469 reload pass starts. This is so that eliminating such addresses
3470 into stack based ones won't produce impossible code. */
3472 int thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3474 /* ??? Not clear if this is right. Experiment. */
3475 if (GET_MODE_SIZE (mode) < 4
3476 && !(reload_in_progress || reload_completed)
3477 && (reg_mentioned_p (frame_pointer_rtx, x)
3478 || reg_mentioned_p (arg_pointer_rtx, x)
3479 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3480 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3481 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3482 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3483 return 0;
3485 /* Accept any base register. SP only in SImode or larger. */
3486 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3487 return 1;
3489 /* This is PC relative data before arm_reorg runs. */
3490 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3491 && GET_CODE (x) == SYMBOL_REF
3492 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3493 return 1;
3495 /* This is PC relative data after arm_reorg runs. */
3496 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3497 && (GET_CODE (x) == LABEL_REF
3498 || (GET_CODE (x) == CONST
3499 && GET_CODE (XEXP (x, 0)) == PLUS
3500 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3501 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3502 return 1;
3504 /* Post-inc indexing only supported for SImode and larger. */
3505 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3506 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3507 return 1;
3509 else if (GET_CODE (x) == PLUS)
3511 /* REG+REG address can be any two index registers. */
3512 /* We disallow FRAME+REG addressing since we know that FRAME
3513 will be replaced with STACK, and SP relative addressing only
3514 permits SP+OFFSET. */
3515 if (GET_MODE_SIZE (mode) <= 4
3516 && XEXP (x, 0) != frame_pointer_rtx
3517 && XEXP (x, 1) != frame_pointer_rtx
3518 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3519 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3520 return 1;
3522 /* REG+const has 5-7 bit offset for non-SP registers. */
3523 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3524 || XEXP (x, 0) == arg_pointer_rtx)
3525 && GET_CODE (XEXP (x, 1)) == CONST_INT
3526 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3527 return 1;
3529 /* REG+const has 10 bit offset for SP, but only SImode and
3530 larger is supported. */
3531 /* ??? Should probably check for DI/DFmode overflow here
3532 just like GO_IF_LEGITIMATE_OFFSET does. */
3533 else if (GET_CODE (XEXP (x, 0)) == REG
3534 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3535 && GET_MODE_SIZE (mode) >= 4
3536 && GET_CODE (XEXP (x, 1)) == CONST_INT
3537 && INTVAL (XEXP (x, 1)) >= 0
3538 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3539 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3540 return 1;
3542 else if (GET_CODE (XEXP (x, 0)) == REG
3543 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3544 && GET_MODE_SIZE (mode) >= 4
3545 && GET_CODE (XEXP (x, 1)) == CONST_INT
3546 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3547 return 1;
3550 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3551 && GET_MODE_SIZE (mode) == 4
3552 && GET_CODE (x) == SYMBOL_REF
3553 && CONSTANT_POOL_ADDRESS_P (x)
3554 && !(flag_pic
3555 && symbol_mentioned_p (get_pool_constant (x))))
3556 return 1;
3558 return 0;
3561 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3562 instruction of mode MODE. */
3564 int thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3566 switch (GET_MODE_SIZE (mode))
3568 case 1:
3569 return val >= 0 && val < 32;
3571 case 2:
3572 return val >= 0 && val < 64 && (val & 1) == 0;
3574 default:
3575 return (val >= 0
3576 && (val + GET_MODE_SIZE (mode)) <= 128
3577 && (val & 3) == 0);
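/* These are roughly the 5-bit scaled immediates of the Thumb load/store
   encodings: byte accesses allow offsets 0..31, halfword accesses 0..62
   (even), and word accesses 0..124 (a multiple of 4).  */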
3581 /* Try machine-dependent ways of modifying an illegitimate address
3582 to be legitimate. If we find one, return the new, valid address. */
3584 rtx arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3586 if (GET_CODE (x) == PLUS)
3588 rtx xop0 = XEXP (x, 0);
3589 rtx xop1 = XEXP (x, 1);
3591 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3592 xop0 = force_reg (SImode, xop0);
3594 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3595 xop1 = force_reg (SImode, xop1);
3597 if (ARM_BASE_REGISTER_RTX_P (xop0)
3598 && GET_CODE (xop1) == CONST_INT)
3600 HOST_WIDE_INT n, low_n;
3601 rtx base_reg, val;
3602 n = INTVAL (xop1);
3604 /* VFP addressing modes actually allow greater offsets, but for
3605 now we just stick with the lowest common denominator. */
3606 if (mode == DImode
3607 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3609 low_n = n & 0x0f;
3610 n &= ~0x0f;
3611 if (low_n > 4)
3613 n += 16;
3614 low_n -= 16;
3617 else
3619 low_n = ((mode) == TImode ? 0
3620 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3621 n -= low_n;
3624 base_reg = gen_reg_rtx (SImode);
3625 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3626 GEN_INT (n)), NULL_RTX);
3627 emit_move_insn (base_reg, val);
3628 x = (low_n == 0 ? base_reg
3629 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3631 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3632 x = gen_rtx_PLUS (SImode, xop0, xop1);
3635 /* XXX We don't allow MINUS any more -- see comment in
3636 arm_legitimate_address_p (). */
3637 else if (GET_CODE (x) == MINUS)
3639 rtx xop0 = XEXP (x, 0);
3640 rtx xop1 = XEXP (x, 1);
3642 if (CONSTANT_P (xop0))
3643 xop0 = force_reg (SImode, xop0);
3645 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3646 xop1 = force_reg (SImode, xop1);
3648 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3649 x = gen_rtx_MINUS (SImode, xop0, xop1);
3652 if (flag_pic)
3654 /* We need to find and carefully transform any SYMBOL and LABEL
3655 references; so go back to the original address expression. */
3656 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3658 if (new_x != orig_x)
3659 x = new_x;
3662 return x;
3666 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3667 to be legitimate. If we find one, return the new, valid address. */
3669 rtx thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3671 if (GET_CODE (x) == PLUS
3672 && GET_CODE (XEXP (x, 1)) == CONST_INT
3673 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3674 || INTVAL (XEXP (x, 1)) < 0))
3676 rtx xop0 = XEXP (x, 0);
3677 rtx xop1 = XEXP (x, 1);
3678 HOST_WIDE_INT offset = INTVAL (xop1);
3680 /* Try and fold the offset into a biasing of the base register and
3681 then offsetting that. Don't do this when optimizing for space
3682 since it can cause too many CSEs. */
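/* Illustrative example: when the folding below applies, an SImode access
   at r0 + 260 is handled by biasing the base register by 252 and using
   the remaining #8 as the memory offset, keeping both constants small
   enough to encode cheaply.  */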
3683 if (optimize_size && offset >= 0
3684 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3686 HOST_WIDE_INT delta;
3688 if (offset >= 256)
3689 delta = offset - (256 - GET_MODE_SIZE (mode));
3690 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3691 delta = 31 * GET_MODE_SIZE (mode);
3692 else
3693 delta = offset & (~31 * GET_MODE_SIZE (mode));
3695 xop0 = force_operand (plus_constant (xop0, offset - delta),
3696 NULL_RTX);
3697 x = plus_constant (xop0, delta);
3699 else if (offset < 0 && offset > -256)
3700 /* Small negative offsets are best done with a subtract before the
3701 dereference; forcing these into a register normally takes two
3702 instructions. */
3703 x = force_operand (x, NULL_RTX);
3704 else
3706 /* For the remaining cases, force the constant into a register. */
3707 xop1 = force_reg (SImode, xop1);
3708 x = gen_rtx_PLUS (SImode, xop0, xop1);
3711 else if (GET_CODE (x) == PLUS
3712 && s_register_operand (XEXP (x, 1), SImode)
3713 && !s_register_operand (XEXP (x, 0), SImode))
3715 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3717 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3720 if (flag_pic)
3722 /* We need to find and carefully transform any SYMBOL and LABEL
3723 references; so go back to the original address expression. */
3724 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3726 if (new_x != orig_x)
3727 x = new_x;
3730 return x;
3735 #define REG_OR_SUBREG_REG(X) \
3736 (GET_CODE (X) == REG \
3737 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3739 #define REG_OR_SUBREG_RTX(X) \
3740 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3742 #ifndef COSTS_N_INSNS
3743 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3744 #endif
3745 static inline int
3746 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3748 enum machine_mode mode = GET_MODE (x);
3750 switch (code)
3752 case ASHIFT:
3753 case ASHIFTRT:
3754 case LSHIFTRT:
3755 case ROTATERT:
3756 case PLUS:
3757 case MINUS:
3758 case COMPARE:
3759 case NEG:
3760 case NOT:
3761 return COSTS_N_INSNS (1);
3763 case MULT:
3764 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3766 int cycles = 0;
3767 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3769 while (i)
3771 i >>= 2;
3772 cycles++;
3774 return COSTS_N_INSNS (2) + cycles;
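/* For example, a multiply by the constant 100 shifts I right two bits at
   a time four times (100 -> 25 -> 6 -> 1 -> 0), giving
   COSTS_N_INSNS (2) + 4.  */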
3776 return COSTS_N_INSNS (1) + 16;
3778 case SET:
3779 return (COSTS_N_INSNS (1)
3780 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3781 + (GET_CODE (SET_DEST (x)) == MEM)));
3783 case CONST_INT:
3784 if (outer == SET)
3786 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3787 return 0;
3788 if (thumb_shiftable_const (INTVAL (x)))
3789 return COSTS_N_INSNS (2);
3790 return COSTS_N_INSNS (3);
3792 else if ((outer == PLUS || outer == COMPARE)
3793 && INTVAL (x) < 256 && INTVAL (x) > -256)
3794 return 0;
3795 else if (outer == AND
3796 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3797 return COSTS_N_INSNS (1);
3798 else if (outer == ASHIFT || outer == ASHIFTRT
3799 || outer == LSHIFTRT)
3800 return 0;
3801 return COSTS_N_INSNS (2);
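/* For example, a constant below 256 used as the source of a SET is free
   here, since the SET itself already accounts for the single move;
   likewise a constant in (-256, 256) used directly as a PLUS or COMPARE
   operand costs nothing extra.  */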
3803 case CONST:
3804 case CONST_DOUBLE:
3805 case LABEL_REF:
3806 case SYMBOL_REF:
3807 return COSTS_N_INSNS (3);
3809 case UDIV:
3810 case UMOD:
3811 case DIV:
3812 case MOD:
3813 return 100;
3815 case TRUNCATE:
3816 return 99;
3818 case AND:
3819 case XOR:
3820 case IOR:
3821 /* XXX guess. */
3822 return 8;
3824 case MEM:
3825 /* XXX another guess. */
3826 /* Memory costs quite a lot for the first word, but subsequent words
3827 load at the equivalent of a single insn each. */
3828 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3829 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3830 ? 4 : 0));
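/* For example, with UNITS_PER_WORD == 4 an SImode load costs 10 under
   this formula and a DImode load costs 14: an extra 4 for each word
   after the first.  */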
3832 case IF_THEN_ELSE:
3833 /* XXX a guess. */
3834 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3835 return 14;
3836 return 2;
3838 case ZERO_EXTEND:
3839 /* XXX still guessing. */
3840 switch (GET_MODE (XEXP (x, 0)))
3842 case QImode:
3843 return (1 + (mode == DImode ? 4 : 0)
3844 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3846 case HImode:
3847 return (4 + (mode == DImode ? 4 : 0)
3848 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3850 case SImode:
3851 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3853 default:
3854 return 99;
3857 default:
3858 return 99;
3863 /* Worker routine for arm_rtx_costs. */
3864 static inline int
3865 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3867 enum machine_mode mode = GET_MODE (x);
3868 enum rtx_code subcode;
3869 int extra_cost;
3871 switch (code)
3873 case MEM:
3874 /* Memory costs quite a lot for the first word, but subsequent words
3875 load at the equivalent of a single insn each. */
3876 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3877 + (GET_CODE (x) == SYMBOL_REF
3878 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3880 case DIV:
3881 case MOD:
3882 case UDIV:
3883 case UMOD:
3884 return optimize_size ? COSTS_N_INSNS (2) : 100;
3886 case ROTATE:
3887 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3888 return 4;
3889 /* Fall through */
3890 case ROTATERT:
3891 if (mode != SImode)
3892 return 8;
3893 /* Fall through */
3894 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3895 if (mode == DImode)
3896 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3897 + ((GET_CODE (XEXP (x, 0)) == REG
3898 || (GET_CODE (XEXP (x, 0)) == SUBREG
3899 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3900 ? 0 : 8));
3901 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3902 || (GET_CODE (XEXP (x, 0)) == SUBREG
3903 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3904 ? 0 : 4)
3905 + ((GET_CODE (XEXP (x, 1)) == REG
3906 || (GET_CODE (XEXP (x, 1)) == SUBREG
3907 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3908 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3909 ? 0 : 4));
3911 case MINUS:
3912 if (mode == DImode)
3913 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3914 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3915 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3916 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3917 ? 0 : 8));
3919 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3920 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3921 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3922 && arm_const_double_rtx (XEXP (x, 1))))
3923 ? 0 : 8)
3924 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3925 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3926 && arm_const_double_rtx (XEXP (x, 0))))
3927 ? 0 : 8));
3929 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3930 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3931 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3932 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3933 || subcode == ASHIFTRT || subcode == LSHIFTRT
3934 || subcode == ROTATE || subcode == ROTATERT
3935 || (subcode == MULT
3936 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3937 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3938 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3939 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3940 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3941 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3942 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3943 return 1;
3944 /* Fall through */
3946 case PLUS:
3947 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3948 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3949 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3950 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3951 && arm_const_double_rtx (XEXP (x, 1))))
3952 ? 0 : 8));
3954 /* Fall through */
3955 case AND: case XOR: case IOR:
3956 extra_cost = 0;
3958 /* Normally the frame registers will be split into reg+const during
3959 reload, so it is a bad idea to combine them with other instructions,
3960 since then they might not be moved outside of loops. As a compromise
3961 we allow integration with ops that have a constant as their second
3962 operand. */
3963 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3964 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3965 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3966 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3967 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3968 extra_cost = 4;
3970 if (mode == DImode)
3971 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3972 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3973 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3974 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3975 ? 0 : 8));
3977 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3978 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3979 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3980 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3981 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3982 ? 0 : 4));
3984 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3985 return (1 + extra_cost
3986 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3987 || subcode == LSHIFTRT || subcode == ASHIFTRT
3988 || subcode == ROTATE || subcode == ROTATERT
3989 || (subcode == MULT
3990 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3991 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3992 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3993 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3994 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3995 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3996 ? 0 : 4));
3998 return 8;
4000 case MULT:
4001 /* This should have been handled by the CPU specific routines. */
4002 gcc_unreachable ();
4004 case TRUNCATE:
4005 if (arm_arch3m && mode == SImode
4006 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4007 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4008 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4009 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4010 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4011 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4012 return 8;
4013 return 99;
4015 case NEG:
4016 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4017 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4018 /* Fall through */
4019 case NOT:
4020 if (mode == DImode)
4021 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4023 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4025 case IF_THEN_ELSE:
4026 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4027 return 14;
4028 return 2;
4030 case COMPARE:
4031 return 1;
4033 case ABS:
4034 return 4 + (mode == DImode ? 4 : 0);
4036 case SIGN_EXTEND:
4037 if (GET_MODE (XEXP (x, 0)) == QImode)
4038 return (4 + (mode == DImode ? 4 : 0)
4039 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4040 /* Fall through */
4041 case ZERO_EXTEND:
4042 switch (GET_MODE (XEXP (x, 0)))
4044 case QImode:
4045 return (1 + (mode == DImode ? 4 : 0)
4046 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4048 case HImode:
4049 return (4 + (mode == DImode ? 4 : 0)
4050 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4052 case SImode:
4053 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4055 case V8QImode:
4056 case V4HImode:
4057 case V2SImode:
4058 case V4QImode:
4059 case V2HImode:
4060 return 1;
4062 default:
4063 gcc_unreachable ();
4065 gcc_unreachable ();
4067 case CONST_INT:
4068 if (const_ok_for_arm (INTVAL (x)))
4069 return outer == SET ? 2 : -1;
4070 else if (outer == AND
4071 && const_ok_for_arm (~INTVAL (x)))
4072 return -1;
4073 else if ((outer == COMPARE
4074 || outer == PLUS || outer == MINUS)
4075 && const_ok_for_arm (-INTVAL (x)))
4076 return -1;
4077 else
4078 return 5;
4080 case CONST:
4081 case LABEL_REF:
4082 case SYMBOL_REF:
4083 return 6;
4085 case CONST_DOUBLE:
4086 if (arm_const_double_rtx (x))
4087 return outer == SET ? 2 : -1;
4088 else if ((outer == COMPARE || outer == PLUS)
4089 && neg_const_double_rtx_ok_for_fpa (x))
4090 return -1;
4091 return 7;
4093 default:
4094 return 99;
4098 /* RTX costs when optimizing for size. */
4099 static bool
4100 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4102 enum machine_mode mode = GET_MODE (x);
4104 if (TARGET_THUMB)
4106 /* XXX TBD. For now, use the standard costs. */
4107 *total = thumb_rtx_costs (x, code, outer_code);
4108 return true;
4111 switch (code)
4113 case MEM:
4114 /* A memory access costs 1 insn if the mode is small, or the address is
4115 a single register; otherwise it costs one insn per word. */
4116 if (REG_P (XEXP (x, 0)))
4117 *total = COSTS_N_INSNS (1);
4118 else
4119 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4120 return true;
4122 case DIV:
4123 case MOD:
4124 case UDIV:
4125 case UMOD:
4126 /* Needs a libcall, so it costs about this. */
4127 *total = COSTS_N_INSNS (2);
4128 return false;
4130 case ROTATE:
4131 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4133 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4134 return true;
4136 /* Fall through */
4137 case ROTATERT:
4138 case ASHIFT:
4139 case LSHIFTRT:
4140 case ASHIFTRT:
4141 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4143 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4144 return true;
4146 else if (mode == SImode)
4148 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4149 /* Slightly disparage register shifts, but not by much. */
4150 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4151 *total += 1 + rtx_cost (XEXP (x, 1), code);
4152 return true;
4155 /* Needs a libcall. */
4156 *total = COSTS_N_INSNS (2);
4157 return false;
4159 case MINUS:
4160 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4162 *total = COSTS_N_INSNS (1);
4163 return false;
4166 if (mode == SImode)
4168 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4169 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4171 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4172 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4173 || subcode1 == ROTATE || subcode1 == ROTATERT
4174 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4175 || subcode1 == ASHIFTRT)
4177 /* It's just the cost of the two operands. */
4178 *total = 0;
4179 return false;
4182 *total = COSTS_N_INSNS (1);
4183 return false;
4186 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4187 return false;
4189 case PLUS:
4190 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4192 *total = COSTS_N_INSNS (1);
4193 return false;
4196 /* Fall through */
4197 case AND: case XOR: case IOR:
4198 if (mode == SImode)
4200 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4202 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4203 || subcode == LSHIFTRT || subcode == ASHIFTRT
4204 || (code == AND && subcode == NOT))
4206 /* It's just the cost of the two operands. */
4207 *total = 0;
4208 return false;
4212 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4213 return false;
4215 case MULT:
4216 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4217 return false;
4219 case NEG:
4220 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4221 *total = COSTS_N_INSNS (1);
4222 /* Fall through */
4223 case NOT:
4224 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4226 return false;
4228 case IF_THEN_ELSE:
4229 *total = 0;
4230 return false;
4232 case COMPARE:
4233 if (cc_register (XEXP (x, 0), VOIDmode))
4234 *total = 0;
4235 else
4236 *total = COSTS_N_INSNS (1);
4237 return false;
4239 case ABS:
4240 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4241 *total = COSTS_N_INSNS (1);
4242 else
4243 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4244 return false;
4246 case SIGN_EXTEND:
4247 *total = 0;
4248 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4250 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4251 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4253 if (mode == DImode)
4254 *total += COSTS_N_INSNS (1);
4255 return false;
4257 case ZERO_EXTEND:
4258 *total = 0;
4259 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4261 switch (GET_MODE (XEXP (x, 0)))
4263 case QImode:
4264 *total += COSTS_N_INSNS (1);
4265 break;
4267 case HImode:
4268 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4270 case SImode:
4271 break;
4273 default:
4274 *total += COSTS_N_INSNS (2);
4278 if (mode == DImode)
4279 *total += COSTS_N_INSNS (1);
4281 return false;
4283 case CONST_INT:
4284 if (const_ok_for_arm (INTVAL (x)))
4285 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4286 else if (const_ok_for_arm (~INTVAL (x)))
4287 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4288 else if (const_ok_for_arm (-INTVAL (x)))
4290 if (outer_code == COMPARE || outer_code == PLUS
4291 || outer_code == MINUS)
4292 *total = 0;
4293 else
4294 *total = COSTS_N_INSNS (1);
4296 else
4297 *total = COSTS_N_INSNS (2);
4298 return true;
4300 case CONST:
4301 case LABEL_REF:
4302 case SYMBOL_REF:
4303 *total = COSTS_N_INSNS (2);
4304 return true;
4306 case CONST_DOUBLE:
4307 *total = COSTS_N_INSNS (4);
4308 return true;
4310 default:
4311 if (mode != VOIDmode)
4312 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4313 else
4314 *total = COSTS_N_INSNS (4); /* Who knows? */
4315 return false;
4319 /* RTX costs for cores with a slow MUL implementation. */
4321 static bool
4322 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4324 enum machine_mode mode = GET_MODE (x);
4326 if (TARGET_THUMB)
4328 *total = thumb_rtx_costs (x, code, outer_code);
4329 return true;
4332 switch (code)
4334 case MULT:
4335 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4336 || mode == DImode)
4338 *total = 30;
4339 return true;
4342 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4344 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4345 & (unsigned HOST_WIDE_INT) 0xffffffff);
4346 int cost, const_ok = const_ok_for_arm (i);
4347 int j, booth_unit_size;
4349 /* Tune as appropriate. */
4350 cost = const_ok ? 4 : 8;
4351 booth_unit_size = 2;
4352 for (j = 0; i && j < 32; j += booth_unit_size)
4354 i >>= booth_unit_size;
4355 cost += 2;
4358 *total = cost;
4359 return true;
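/* For example, a multiply by 81 (a valid ARM immediate) starts at cost 4
   and the loop above runs four times (81 -> 20 -> 5 -> 1 -> 0), two bits
   per step, giving a total of 12.  */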
4362 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4363 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4364 return true;
4366 default:
4367 *total = arm_rtx_costs_1 (x, code, outer_code);
4368 return true;
4373 /* RTX cost for cores with a fast multiply unit (M variants). */
4375 static bool
4376 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4378 enum machine_mode mode = GET_MODE (x);
4380 if (TARGET_THUMB)
4382 *total = thumb_rtx_costs (x, code, outer_code);
4383 return true;
4386 switch (code)
4388 case MULT:
4389 /* There is no point basing this on the tuning, since it is always the
4390 fast variant if it exists at all. */
4391 if (mode == DImode
4392 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4393 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4394 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4396 *total = 8;
4397 return true;
4401 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4402 || mode == DImode)
4404 *total = 30;
4405 return true;
4408 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4410 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4411 & (unsigned HOST_WIDE_INT) 0xffffffff);
4412 int cost, const_ok = const_ok_for_arm (i);
4413 int j, booth_unit_size;
4415 /* Tune as appropriate. */
4416 cost = const_ok ? 4 : 8;
4417 booth_unit_size = 8;
4418 for (j = 0; i && j < 32; j += booth_unit_size)
4420 i >>= booth_unit_size;
4421 cost += 2;
4424 *total = cost;
4425 return true;
4428 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4429 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4430 return true;
4432 default:
4433 *total = arm_rtx_costs_1 (x, code, outer_code);
4434 return true;
4439 /* RTX cost for XScale CPUs. */
4441 static bool
4442 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4444 enum machine_mode mode = GET_MODE (x);
4446 if (TARGET_THUMB)
4448 *total = thumb_rtx_costs (x, code, outer_code);
4449 return true;
4452 switch (code)
4454 case MULT:
4455 /* There is no point basing this on the tuning, since it is always the
4456 fast variant if it exists at all. */
4457 if (mode == DImode
4458 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4459 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4460 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4462 *total = 8;
4463 return true;
4467 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4468 || mode == DImode)
4470 *total = 30;
4471 return true;
4474 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4476 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4477 & (unsigned HOST_WIDE_INT) 0xffffffff);
4478 int cost, const_ok = const_ok_for_arm (i);
4479 unsigned HOST_WIDE_INT masked_const;
4481 /* The cost will be related to two insns.
4482 First a load of the constant (MOV or LDR), then a multiply. */
4483 cost = 2;
4484 if (! const_ok)
4485 cost += 1; /* LDR is probably more expensive because
4486 of longer result latency. */
4487 masked_const = i & 0xffff8000;
4488 if (masked_const != 0 && masked_const != 0xffff8000)
4490 masked_const = i & 0xf8000000;
4491 if (masked_const == 0 || masked_const == 0xf8000000)
4492 cost += 1;
4493 else
4494 cost += 2;
4496 *total = cost;
4497 return true;
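/* For example, 0x4000 is a valid immediate with no significant bits at or
   above bit 15, so it costs just the base 2; 0x12340000 is not a valid
   immediate and fails both mask tests, costing 2 + 1 + 2 = 5.  */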
4500 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4501 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4502 return true;
4504 case COMPARE:
4505 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4506 will stall until the multiplication is complete. */
4507 if (GET_CODE (XEXP (x, 0)) == MULT)
4508 *total = 4 + rtx_cost (XEXP (x, 0), code);
4509 else
4510 *total = arm_rtx_costs_1 (x, code, outer_code);
4511 return true;
4513 default:
4514 *total = arm_rtx_costs_1 (x, code, outer_code);
4515 return true;
4520 /* RTX costs for 9e (and later) cores. */
4522 static bool
4523 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4525 enum machine_mode mode = GET_MODE (x);
4526 int nonreg_cost;
4527 int cost;
4529 if (TARGET_THUMB)
4531 switch (code)
4533 case MULT:
4534 *total = COSTS_N_INSNS (3);
4535 return true;
4537 default:
4538 *total = thumb_rtx_costs (x, code, outer_code);
4539 return true;
4543 switch (code)
4545 case MULT:
4546 /* There is no point basing this on the tuning, since it is always the
4547 fast variant if it exists at all. */
4548 if (mode == DImode
4549 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4550 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4551 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4553 *total = 3;
4554 return true;
4558 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4560 *total = 30;
4561 return true;
4563 if (mode == DImode)
4565 cost = 7;
4566 nonreg_cost = 8;
4568 else
4570 cost = 2;
4571 nonreg_cost = 4;
4575 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4576 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4577 return true;
4579 default:
4580 *total = arm_rtx_costs_1 (x, code, outer_code);
4581 return true;
4584 /* All address computations that can be done are free, but rtx cost returns
4585 the same for practically all of them. So we weight the different types
4586 of address here in the order (most pref first):
4587 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4588 static inline int
4589 arm_arm_address_cost (rtx x)
4591 enum rtx_code c = GET_CODE (x);
4593 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4594 return 0;
4595 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4596 return 10;
4598 if (c == PLUS || c == MINUS)
4600 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4601 return 2;
4603 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4604 return 3;
4606 return 4;
4609 return 6;
4612 static inline int
4613 arm_thumb_address_cost (rtx x)
4615 enum rtx_code c = GET_CODE (x);
4617 if (c == REG)
4618 return 1;
4619 if (c == PLUS
4620 && GET_CODE (XEXP (x, 0)) == REG
4621 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4622 return 1;
4624 return 2;
4627 static int
4628 arm_address_cost (rtx x)
4630 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4633 static int
4634 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4636 rtx i_pat, d_pat;
4638 /* Some true dependencies can have a higher cost depending
4639 on precisely how certain input operands are used. */
4640 if (arm_tune_xscale
4641 && REG_NOTE_KIND (link) == 0
4642 && recog_memoized (insn) >= 0
4643 && recog_memoized (dep) >= 0)
4645 int shift_opnum = get_attr_shift (insn);
4646 enum attr_type attr_type = get_attr_type (dep);
4648 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4649 operand for INSN. If we have a shifted input operand and the
4650 instruction we depend on is another ALU instruction, then we may
4651 have to account for an additional stall. */
4652 if (shift_opnum != 0
4653 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4655 rtx shifted_operand;
4656 int opno;
4658 /* Get the shifted operand. */
4659 extract_insn (insn);
4660 shifted_operand = recog_data.operand[shift_opnum];
4662 /* Iterate over all the operands in DEP. If we write an operand
4663 that overlaps with SHIFTED_OPERAND, then we have to increase the
4664 cost of this dependency. */
4665 extract_insn (dep);
4666 preprocess_constraints ();
4667 for (opno = 0; opno < recog_data.n_operands; opno++)
4669 /* We can ignore strict inputs. */
4670 if (recog_data.operand_type[opno] == OP_IN)
4671 continue;
4673 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4674 shifted_operand))
4675 return 2;
4680 /* XXX This is not strictly true for the FPA. */
4681 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4682 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4683 return 0;
4685 /* Call insns don't incur a stall, even if they follow a load. */
4686 if (REG_NOTE_KIND (link) == 0
4687 && GET_CODE (insn) == CALL_INSN)
4688 return 1;
4690 if ((i_pat = single_set (insn)) != NULL
4691 && GET_CODE (SET_SRC (i_pat)) == MEM
4692 && (d_pat = single_set (dep)) != NULL
4693 && GET_CODE (SET_DEST (d_pat)) == MEM)
4695 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4696 /* This is a load after a store; there is no conflict if the load reads
4697 from a cached area. Assume that loads from the stack, and from the
4698 constant pool are cached, and that others will miss. This is a
4699 hack. */
4701 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4702 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4703 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4704 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4705 return 1;
4708 return cost;
4711 static int fp_consts_inited = 0;
4713 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4714 static const char * const strings_fp[8] =
4716 "0", "1", "2", "3",
4717 "4", "5", "0.5", "10"
4720 static REAL_VALUE_TYPE values_fp[8];
4722 static void
4723 init_fp_table (void)
4725 int i;
4726 REAL_VALUE_TYPE r;
4728 if (TARGET_VFP)
4729 fp_consts_inited = 1;
4730 else
4731 fp_consts_inited = 8;
4733 for (i = 0; i < fp_consts_inited; i++)
4735 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4736 values_fp[i] = r;
4740 /* Return TRUE if rtx X is a valid immediate FP constant. */
4742 arm_const_double_rtx (rtx x)
4744 REAL_VALUE_TYPE r;
4745 int i;
4747 if (!fp_consts_inited)
4748 init_fp_table ();
4750 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4751 if (REAL_VALUE_MINUS_ZERO (r))
4752 return 0;
4754 for (i = 0; i < fp_consts_inited; i++)
4755 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4756 return 1;
4758 return 0;
4761 /* Return TRUE if the negation of rtx X is a valid immediate FPA constant. */
4763 neg_const_double_rtx_ok_for_fpa (rtx x)
4765 REAL_VALUE_TYPE r;
4766 int i;
4768 if (!fp_consts_inited)
4769 init_fp_table ();
4771 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4772 r = REAL_VALUE_NEGATE (r);
4773 if (REAL_VALUE_MINUS_ZERO (r))
4774 return 0;
4776 for (i = 0; i < 8; i++)
4777 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4778 return 1;
4780 return 0;
4783 /* Predicates for `match_operand' and `match_operator'. */
4785 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4787 cirrus_memory_offset (rtx op)
4789 /* Reject eliminable registers. */
4790 if (! (reload_in_progress || reload_completed)
4791 && ( reg_mentioned_p (frame_pointer_rtx, op)
4792 || reg_mentioned_p (arg_pointer_rtx, op)
4793 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4794 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4795 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4796 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4797 return 0;
4799 if (GET_CODE (op) == MEM)
4801 rtx ind;
4803 ind = XEXP (op, 0);
4805 /* Match: (mem (reg)). */
4806 if (GET_CODE (ind) == REG)
4807 return 1;
4809 /* Match:
4810 (mem (plus (reg)
4811 (const))). */
4812 if (GET_CODE (ind) == PLUS
4813 && GET_CODE (XEXP (ind, 0)) == REG
4814 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4815 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4816 return 1;
4819 return 0;
4822 /* Return TRUE if OP is a valid VFP memory address pattern.
4823 WB is true if writeback address modes are allowed. */
4826 arm_coproc_mem_operand (rtx op, bool wb)
4828 rtx ind;
4830 /* Reject eliminable registers. */
4831 if (! (reload_in_progress || reload_completed)
4832 && ( reg_mentioned_p (frame_pointer_rtx, op)
4833 || reg_mentioned_p (arg_pointer_rtx, op)
4834 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4835 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4836 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4837 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4838 return FALSE;
4840 /* Constants are converted into offsets from labels. */
4841 if (GET_CODE (op) != MEM)
4842 return FALSE;
4844 ind = XEXP (op, 0);
4846 if (reload_completed
4847 && (GET_CODE (ind) == LABEL_REF
4848 || (GET_CODE (ind) == CONST
4849 && GET_CODE (XEXP (ind, 0)) == PLUS
4850 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4851 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4852 return TRUE;
4854 /* Match: (mem (reg)). */
4855 if (GET_CODE (ind) == REG)
4856 return arm_address_register_rtx_p (ind, 0);
4858 /* Autoincrement addressing modes. */
4859 if (wb
4860 && (GET_CODE (ind) == PRE_INC
4861 || GET_CODE (ind) == POST_INC
4862 || GET_CODE (ind) == PRE_DEC
4863 || GET_CODE (ind) == POST_DEC))
4864 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4866 if (wb
4867 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4868 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4869 && GET_CODE (XEXP (ind, 1)) == PLUS
4870 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4871 ind = XEXP (ind, 1);
4873 /* Match:
4874 (plus (reg)
4875 (const)). */
4876 if (GET_CODE (ind) == PLUS
4877 && GET_CODE (XEXP (ind, 0)) == REG
4878 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4879 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4880 && INTVAL (XEXP (ind, 1)) > -1024
4881 && INTVAL (XEXP (ind, 1)) < 1024
4882 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4883 return TRUE;
4885 return FALSE;
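/* For example, the offset test above accepts (plus (reg) (const_int 1020))
   but rejects an offset of 1024 or any offset that is not a multiple of
   four.  */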
4888 /* Return true if X is a register that will be eliminated later on. */
4890 arm_eliminable_register (rtx x)
4892 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4893 || REGNO (x) == ARG_POINTER_REGNUM
4894 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4895 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4898 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4899 VFP registers. Otherwise return NO_REGS. */
4901 enum reg_class
4902 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4904 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4905 return NO_REGS;
4907 return GENERAL_REGS;
4910 /* Values which must be returned in the most-significant end of the return
4911 register. */
4913 static bool
4914 arm_return_in_msb (tree valtype)
4916 return (TARGET_AAPCS_BASED
4917 && BYTES_BIG_ENDIAN
4918 && (AGGREGATE_TYPE_P (valtype)
4919 || TREE_CODE (valtype) == COMPLEX_TYPE));
4922 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4923 Used by the Cirrus Maverick code, which has to work around
4924 a hardware bug triggered by such instructions. */
4925 static bool
4926 arm_memory_load_p (rtx insn)
4928 rtx body, lhs, rhs;
4930 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4931 return false;
4933 body = PATTERN (insn);
4935 if (GET_CODE (body) != SET)
4936 return false;
4938 lhs = XEXP (body, 0);
4939 rhs = XEXP (body, 1);
4941 lhs = REG_OR_SUBREG_RTX (lhs);
4943 /* If the destination is not a general purpose
4944 register we do not have to worry. */
4945 if (GET_CODE (lhs) != REG
4946 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4947 return false;
4949 /* As well as loads from memory we also have to react
4950 to loads of invalid constants which will be turned
4951 into loads from the minipool. */
4952 return (GET_CODE (rhs) == MEM
4953 || GET_CODE (rhs) == SYMBOL_REF
4954 || note_invalid_constants (insn, -1, false));
4957 /* Return TRUE if INSN is a Cirrus instruction. */
4958 static bool
4959 arm_cirrus_insn_p (rtx insn)
4961 enum attr_cirrus attr;
4963 /* get_attr cannot accept USE or CLOBBER. */
4964 if (!insn
4965 || GET_CODE (insn) != INSN
4966 || GET_CODE (PATTERN (insn)) == USE
4967 || GET_CODE (PATTERN (insn)) == CLOBBER)
4968 return 0;
4970 attr = get_attr_cirrus (insn);
4972 return attr != CIRRUS_NOT;
4975 /* Cirrus reorg for invalid instruction combinations. */
4976 static void
4977 cirrus_reorg (rtx first)
4979 enum attr_cirrus attr;
4980 rtx body = PATTERN (first);
4981 rtx t;
4982 int nops;
4984 /* Any branch must be followed by 2 non Cirrus instructions. */
4985 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4987 nops = 0;
4988 t = next_nonnote_insn (first);
4990 if (arm_cirrus_insn_p (t))
4991 ++ nops;
4993 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4994 ++ nops;
4996 while (nops --)
4997 emit_insn_after (gen_nop (), first);
4999 return;
5002 /* (float (blah)) is in parallel with a clobber. */
5003 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5004 body = XVECEXP (body, 0, 0);
5006 if (GET_CODE (body) == SET)
5008 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5010 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5011 be followed by a non Cirrus insn. */
5012 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5014 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5015 emit_insn_after (gen_nop (), first);
5017 return;
5019 else if (arm_memory_load_p (first))
5021 unsigned int arm_regno;
5023 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5024 ldr/cfmv64hr combination where the Rd field is the same
5025 in both instructions must be split with a non Cirrus
5026 insn. Example:
5028 ldr r0, blah
5030 cfmvsr mvf0, r0. */
5032 /* Get Arm register number for ldr insn. */
5033 if (GET_CODE (lhs) == REG)
5034 arm_regno = REGNO (lhs);
5035 else
5037 gcc_assert (GET_CODE (rhs) == REG);
5038 arm_regno = REGNO (rhs);
5041 /* Next insn. */
5042 first = next_nonnote_insn (first);
5044 if (! arm_cirrus_insn_p (first))
5045 return;
5047 body = PATTERN (first);
5049 /* (float (blah)) is in parallel with a clobber. */
5050 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5051 body = XVECEXP (body, 0, 0);
5053 if (GET_CODE (body) == FLOAT)
5054 body = XEXP (body, 0);
5056 if (get_attr_cirrus (first) == CIRRUS_MOVE
5057 && GET_CODE (XEXP (body, 1)) == REG
5058 && arm_regno == REGNO (XEXP (body, 1)))
5059 emit_insn_after (gen_nop (), first);
5061 return;
5065 /* get_attr cannot accept USE or CLOBBER. */
5066 if (!first
5067 || GET_CODE (first) != INSN
5068 || GET_CODE (PATTERN (first)) == USE
5069 || GET_CODE (PATTERN (first)) == CLOBBER)
5070 return;
5072 attr = get_attr_cirrus (first);
5074 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5075 must be followed by a non-coprocessor instruction. */
5076 if (attr == CIRRUS_COMPARE)
5078 nops = 0;
5080 t = next_nonnote_insn (first);
5082 if (arm_cirrus_insn_p (t))
5083 ++ nops;
5085 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5086 ++ nops;
5088 while (nops --)
5089 emit_insn_after (gen_nop (), first);
5091 return;
5095 /* Return TRUE if X references a SYMBOL_REF. */
5097 symbol_mentioned_p (rtx x)
5099 const char * fmt;
5100 int i;
5102 if (GET_CODE (x) == SYMBOL_REF)
5103 return 1;
5105 fmt = GET_RTX_FORMAT (GET_CODE (x));
5107 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5109 if (fmt[i] == 'E')
5111 int j;
5113 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5114 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5115 return 1;
5117 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5118 return 1;
5121 return 0;
5124 /* Return TRUE if X references a LABEL_REF. */
5126 label_mentioned_p (rtx x)
5128 const char * fmt;
5129 int i;
5131 if (GET_CODE (x) == LABEL_REF)
5132 return 1;
5134 fmt = GET_RTX_FORMAT (GET_CODE (x));
5135 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5137 if (fmt[i] == 'E')
5139 int j;
5141 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5142 if (label_mentioned_p (XVECEXP (x, i, j)))
5143 return 1;
5145 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5146 return 1;
5149 return 0;
5152 enum rtx_code
5153 minmax_code (rtx x)
5155 enum rtx_code code = GET_CODE (x);
5157 switch (code)
5159 case SMAX:
5160 return GE;
5161 case SMIN:
5162 return LE;
5163 case UMIN:
5164 return LEU;
5165 case UMAX:
5166 return GEU;
5167 default:
5168 gcc_unreachable ();
5172 /* Return 1 if memory locations are adjacent. */
5174 adjacent_mem_locations (rtx a, rtx b)
5176 /* We don't guarantee to preserve the order of these memory refs. */
5177 if (volatile_refs_p (a) || volatile_refs_p (b))
5178 return 0;
5180 if ((GET_CODE (XEXP (a, 0)) == REG
5181 || (GET_CODE (XEXP (a, 0)) == PLUS
5182 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5183 && (GET_CODE (XEXP (b, 0)) == REG
5184 || (GET_CODE (XEXP (b, 0)) == PLUS
5185 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5187 HOST_WIDE_INT val0 = 0, val1 = 0;
5188 rtx reg0, reg1;
5189 int val_diff;
5191 if (GET_CODE (XEXP (a, 0)) == PLUS)
5193 reg0 = XEXP (XEXP (a, 0), 0);
5194 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5196 else
5197 reg0 = XEXP (a, 0);
5199 if (GET_CODE (XEXP (b, 0)) == PLUS)
5201 reg1 = XEXP (XEXP (b, 0), 0);
5202 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5204 else
5205 reg1 = XEXP (b, 0);
5207 /* Don't accept any offset that will require multiple
5208 instructions to handle, since this would cause the
5209 arith_adjacentmem pattern to output an overlong sequence. */
5210 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5211 return 0;
5213 /* Don't allow an eliminable register: register elimination can make
5214 the offset too large. */
5215 if (arm_eliminable_register (reg0))
5216 return 0;
5218 val_diff = val1 - val0;
5220 if (arm_ld_sched)
5222 /* If the target has load delay slots, then there's no benefit
5223 to using an ldm instruction unless the offset is zero and
5224 we are optimizing for size. */
5225 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5226 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5227 && (val_diff == 4 || val_diff == -4));
5230 return ((REGNO (reg0) == REGNO (reg1))
5231 && (val_diff == 4 || val_diff == -4));
5234 return 0;
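/* For example, on a target without load delay slots, word references at
   [r4, #8] and [r4, #12] share a base register and differ by exactly 4,
   so they are considered adjacent; on arm_ld_sched targets the stricter
   size-optimizing test above applies instead.  */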
5238 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5239 HOST_WIDE_INT *load_offset)
5241 int unsorted_regs[4];
5242 HOST_WIDE_INT unsorted_offsets[4];
5243 int order[4];
5244 int base_reg = -1;
5245 int i;
5247 /* Can only handle 2, 3, or 4 insns at present,
5248 though could be easily extended if required. */
5249 gcc_assert (nops >= 2 && nops <= 4);
5251 /* Loop over the operands and check that the memory references are
5252 suitable (i.e. immediate offsets from the same base register). At
5253 the same time, extract the target register, and the memory
5254 offsets. */
5255 for (i = 0; i < nops; i++)
5257 rtx reg;
5258 rtx offset;
5260 /* Convert a subreg of a mem into the mem itself. */
5261 if (GET_CODE (operands[nops + i]) == SUBREG)
5262 operands[nops + i] = alter_subreg (operands + (nops + i));
5264 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5266 /* Don't reorder volatile memory references; it doesn't seem worth
5267 looking for the case where the order is ok anyway. */
5268 if (MEM_VOLATILE_P (operands[nops + i]))
5269 return 0;
5271 offset = const0_rtx;
5273 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5274 || (GET_CODE (reg) == SUBREG
5275 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5276 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5277 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5278 == REG)
5279 || (GET_CODE (reg) == SUBREG
5280 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5281 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5282 == CONST_INT)))
5284 if (i == 0)
5286 base_reg = REGNO (reg);
5287 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5288 ? REGNO (operands[i])
5289 : REGNO (SUBREG_REG (operands[i])));
5290 order[0] = 0;
5292 else
5294 if (base_reg != (int) REGNO (reg))
5295 /* Not addressed from the same base register. */
5296 return 0;
5298 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5299 ? REGNO (operands[i])
5300 : REGNO (SUBREG_REG (operands[i])));
5301 if (unsorted_regs[i] < unsorted_regs[order[0]])
5302 order[0] = i;
5305 /* If it isn't an integer register, or if it overwrites the
5306 base register but isn't the last insn in the list, then
5307 we can't do this. */
5308 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5309 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5310 return 0;
5312 unsorted_offsets[i] = INTVAL (offset);
5314 else
5315 /* Not a suitable memory address. */
5316 return 0;
5319 /* All the useful information has now been extracted from the
5320 operands into unsorted_regs and unsorted_offsets; additionally,
5321 order[0] has been set to the lowest numbered register in the
5322 list. Sort the registers into order, and check that the memory
5323 offsets are ascending and adjacent. */
5325 for (i = 1; i < nops; i++)
5327 int j;
5329 order[i] = order[i - 1];
5330 for (j = 0; j < nops; j++)
5331 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5332 && (order[i] == order[i - 1]
5333 || unsorted_regs[j] < unsorted_regs[order[i]]))
5334 order[i] = j;
5336 /* Have we found a suitable register? If not, one must be used more
5337 than once. */
5338 if (order[i] == order[i - 1])
5339 return 0;
5341 /* Is the memory address adjacent and ascending? */
5342 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5343 return 0;
5346 if (base)
5348 *base = base_reg;
5350 for (i = 0; i < nops; i++)
5351 regs[i] = unsorted_regs[order[i]];
5353 *load_offset = unsorted_offsets[order[0]];
5356 if (unsorted_offsets[order[0]] == 0)
5357 return 1; /* ldmia */
5359 if (unsorted_offsets[order[0]] == 4)
5360 return 2; /* ldmib */
5362 if (unsorted_offsets[order[nops - 1]] == 0)
5363 return 3; /* ldmda */
5365 if (unsorted_offsets[order[nops - 1]] == -4)
5366 return 4; /* ldmdb */
5368 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5369 if the offset isn't small enough. The reason 2 ldrs are faster
5370 is because these ARMs are able to do more than one cache access
5371 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5372 whilst the ARM8 has a double bandwidth cache. This means that
5373 these cores can do both an instruction fetch and a data fetch in
5374 a single cycle, so the trick of calculating the address into a
5375 scratch register (one of the result regs) and then doing a load
5376 multiple actually becomes slower (and no smaller in code size).
5377 That is the transformation
5379 ldr rd1, [rbase + offset]
5380 ldr rd2, [rbase + offset + 4]
to
5384 add rd1, rbase, offset
5385 ldmia rd1, {rd1, rd2}
5387 produces worse code -- '3 cycles + any stalls on rd2' instead of
5388 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5389 access per cycle, the first sequence could never complete in less
5390 than 6 cycles, whereas the ldm sequence would only take 5 and
5391 would make better use of sequential accesses if not hitting the
5392 cache.
5394 We cheat here and test 'arm_ld_sched' which we currently know to
5395 only be true for the ARM8, ARM9 and StrongARM. If this ever
5396 changes, then the test below needs to be reworked. */
5397 if (nops == 2 && arm_ld_sched)
5398 return 0;
5400 /* Can't do it without setting up the offset; only do this if it takes
5401 no more than one insn. */
5402 return (const_ok_for_arm (unsorted_offsets[order[0]])
5403 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
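/* For example, a pair of loads r0 <- [r5] and r1 <- [r5, #4] yields 1
   (ldmia) with regs = {0, 1} and a load offset of 0; shifting both
   offsets up by 4 would yield 2 (ldmib) instead.  */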
5406 const char *
5407 emit_ldm_seq (rtx *operands, int nops)
5409 int regs[4];
5410 int base_reg;
5411 HOST_WIDE_INT offset;
5412 char buf[100];
5413 int i;
5415 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5417 case 1:
5418 strcpy (buf, "ldm%?ia\t");
5419 break;
5421 case 2:
5422 strcpy (buf, "ldm%?ib\t");
5423 break;
5425 case 3:
5426 strcpy (buf, "ldm%?da\t");
5427 break;
5429 case 4:
5430 strcpy (buf, "ldm%?db\t");
5431 break;
5433 case 5:
5434 if (offset >= 0)
5435 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5436 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5437 (long) offset);
5438 else
5439 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5440 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5441 (long) -offset);
5442 output_asm_insn (buf, operands);
5443 base_reg = regs[0];
5444 strcpy (buf, "ldm%?ia\t");
5445 break;
5447 default:
5448 gcc_unreachable ();
5451 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5452 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5454 for (i = 1; i < nops; i++)
5455 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5456 reg_names[regs[i]]);
5458 strcat (buf, "}\t%@ phole ldm");
5460 output_asm_insn (buf, operands);
5461 return "";
5465 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5466 HOST_WIDE_INT * load_offset)
5468 int unsorted_regs[4];
5469 HOST_WIDE_INT unsorted_offsets[4];
5470 int order[4];
5471 int base_reg = -1;
5472 int i;
5474 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5475 extended if required. */
5476 gcc_assert (nops >= 2 && nops <= 4);
5478 /* Loop over the operands and check that the memory references are
5479 suitable (i.e. immediate offsets from the same base register). At
5480 the same time, extract the target register, and the memory
5481 offsets. */
5482 for (i = 0; i < nops; i++)
5484 rtx reg;
5485 rtx offset;
5487 /* Convert a subreg of a mem into the mem itself. */
5488 if (GET_CODE (operands[nops + i]) == SUBREG)
5489 operands[nops + i] = alter_subreg (operands + (nops + i));
5491 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5493 /* Don't reorder volatile memory references; it doesn't seem worth
5494 looking for the case where the order is ok anyway. */
5495 if (MEM_VOLATILE_P (operands[nops + i]))
5496 return 0;
5498 offset = const0_rtx;
5500 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5501 || (GET_CODE (reg) == SUBREG
5502 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5503 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5504 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5505 == REG)
5506 || (GET_CODE (reg) == SUBREG
5507 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5508 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5509 == CONST_INT)))
5511 if (i == 0)
5513 base_reg = REGNO (reg);
5514 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5515 ? REGNO (operands[i])
5516 : REGNO (SUBREG_REG (operands[i])));
5517 order[0] = 0;
5519 else
5521 if (base_reg != (int) REGNO (reg))
5522 /* Not addressed from the same base register. */
5523 return 0;
5525 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5526 ? REGNO (operands[i])
5527 : REGNO (SUBREG_REG (operands[i])));
5528 if (unsorted_regs[i] < unsorted_regs[order[0]])
5529 order[0] = i;
5532 /* If it isn't an integer register, then we can't do this. */
5533 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5534 return 0;
5536 unsorted_offsets[i] = INTVAL (offset);
5538 else
5539 /* Not a suitable memory address. */
5540 return 0;
5543 /* All the useful information has now been extracted from the
5544 operands into unsorted_regs and unsorted_offsets; additionally,
5545 order[0] has been set to the lowest numbered register in the
5546 list. Sort the registers into order, and check that the memory
5547 offsets are ascending and adjacent. */
5549 for (i = 1; i < nops; i++)
5551 int j;
5553 order[i] = order[i - 1];
5554 for (j = 0; j < nops; j++)
5555 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5556 && (order[i] == order[i - 1]
5557 || unsorted_regs[j] < unsorted_regs[order[i]]))
5558 order[i] = j;
5560 /* Have we found a suitable register? If not, one must be used more
5561 than once. */
5562 if (order[i] == order[i - 1])
5563 return 0;
5565 /* Is the memory address adjacent and ascending? */
5566 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5567 return 0;
5570 if (base)
5572 *base = base_reg;
5574 for (i = 0; i < nops; i++)
5575 regs[i] = unsorted_regs[order[i]];
5577 *load_offset = unsorted_offsets[order[0]];
5580 if (unsorted_offsets[order[0]] == 0)
5581 return 1; /* stmia */
5583 if (unsorted_offsets[order[0]] == 4)
5584 return 2; /* stmib */
5586 if (unsorted_offsets[order[nops - 1]] == 0)
5587 return 3; /* stmda */
5589 if (unsorted_offsets[order[nops - 1]] == -4)
5590 return 4; /* stmdb */
5592 return 0;
5595 const char *
5596 emit_stm_seq (rtx *operands, int nops)
5598 int regs[4];
5599 int base_reg;
5600 HOST_WIDE_INT offset;
5601 char buf[100];
5602 int i;
5604 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5606 case 1:
5607 strcpy (buf, "stm%?ia\t");
5608 break;
5610 case 2:
5611 strcpy (buf, "stm%?ib\t");
5612 break;
5614 case 3:
5615 strcpy (buf, "stm%?da\t");
5616 break;
5618 case 4:
5619 strcpy (buf, "stm%?db\t");
5620 break;
5622 default:
5623 gcc_unreachable ();
5626 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5627 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5629 for (i = 1; i < nops; i++)
5630 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5631 reg_names[regs[i]]);
5633 strcat (buf, "}\t%@ phole stm");
5635 output_asm_insn (buf, operands);
5636 return "";
5640 /* Routines for use in generating RTL. */
5643 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5644 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5646 HOST_WIDE_INT offset = *offsetp;
5647 int i = 0, j;
5648 rtx result;
5649 int sign = up ? 1 : -1;
5650 rtx mem, addr;
5652 /* XScale has load-store double instructions, but they have stricter
5653 alignment requirements than load-store multiple, so we cannot
5654 use them.
5656 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5657 the pipeline until completion.
5659 NREGS CYCLES
1 3
2 4
3 5
4 6
5665 An ldr instruction takes 1-3 cycles, but does not block the
5666 pipeline.
5668 NREGS CYCLES
5669 1 1-3
5670 2 2-6
5671 3 3-9
5672 4 4-12
5674 Best case ldr will always win. However, the more ldr instructions
5675 we issue, the less likely we are to be able to schedule them well.
5676 Using ldr instructions also increases code size.
5678 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5679 for counts of 3 or 4 regs. */
5680 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5682 rtx seq;
5684 start_sequence ();
5686 for (i = 0; i < count; i++)
5688 addr = plus_constant (from, i * 4 * sign);
5689 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5690 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5691 offset += 4 * sign;
5694 if (write_back)
5696 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5697 *offsetp = offset;
5700 seq = get_insns ();
5701 end_sequence ();
5703 return seq;
5706 result = gen_rtx_PARALLEL (VOIDmode,
5707 rtvec_alloc (count + (write_back ? 1 : 0)));
5708 if (write_back)
5710 XVECEXP (result, 0, 0)
5711 = gen_rtx_SET (GET_MODE (from), from,
5712 plus_constant (from, count * 4 * sign));
5713 i = 1;
5714 count++;
5717 for (j = 0; i < count; i++, j++)
5719 addr = plus_constant (from, j * 4 * sign);
5720 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5721 XVECEXP (result, 0, i)
5722 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5723 offset += 4 * sign;
5726 if (write_back)
5727 *offsetp = offset;
5729 return result;
5733 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5734 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5736 HOST_WIDE_INT offset = *offsetp;
5737 int i = 0, j;
5738 rtx result;
5739 int sign = up ? 1 : -1;
5740 rtx mem, addr;
5742 /* See arm_gen_load_multiple for discussion of
5743 the pros/cons of ldm/stm usage for XScale. */
5744 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5746 rtx seq;
5748 start_sequence ();
5750 for (i = 0; i < count; i++)
5752 addr = plus_constant (to, i * 4 * sign);
5753 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5754 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5755 offset += 4 * sign;
5758 if (write_back)
5760 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5761 *offsetp = offset;
5764 seq = get_insns ();
5765 end_sequence ();
5767 return seq;
5770 result = gen_rtx_PARALLEL (VOIDmode,
5771 rtvec_alloc (count + (write_back ? 1 : 0)));
5772 if (write_back)
5774 XVECEXP (result, 0, 0)
5775 = gen_rtx_SET (GET_MODE (to), to,
5776 plus_constant (to, count * 4 * sign));
5777 i = 1;
5778 count++;
5781 for (j = 0; i < count; i++, j++)
5783 addr = plus_constant (to, j * 4 * sign);
5784 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5785 XVECEXP (result, 0, i)
5786 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5787 offset += 4 * sign;
5790 if (write_back)
5791 *offsetp = offset;
5793 return result;
5797 arm_gen_movmemqi (rtx *operands)
5799 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5800 HOST_WIDE_INT srcoffset, dstoffset;
5801 int i;
5802 rtx src, dst, srcbase, dstbase;
5803 rtx part_bytes_reg = NULL;
5804 rtx mem;
5806 if (GET_CODE (operands[2]) != CONST_INT
5807 || GET_CODE (operands[3]) != CONST_INT
5808 || INTVAL (operands[2]) > 64
5809 || INTVAL (operands[3]) & 3)
5810 return 0;
5812 dstbase = operands[0];
5813 srcbase = operands[1];
5815 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5816 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5818 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5819 out_words_to_go = INTVAL (operands[2]) / 4;
5820 last_bytes = INTVAL (operands[2]) & 3;
5821 dstoffset = srcoffset = 0;
5823 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5824 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5826 for (i = 0; in_words_to_go >= 2; i+=4)
5828 if (in_words_to_go > 4)
5829 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5830 srcbase, &srcoffset));
5831 else
5832 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5833 FALSE, srcbase, &srcoffset));
5835 if (out_words_to_go)
5837 if (out_words_to_go > 4)
5838 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5839 dstbase, &dstoffset));
5840 else if (out_words_to_go != 1)
5841 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5842 dst, TRUE,
5843 (last_bytes == 0
5844 ? FALSE : TRUE),
5845 dstbase, &dstoffset));
5846 else
5848 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5849 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5850 if (last_bytes != 0)
5852 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5853 dstoffset += 4;
5858 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5859 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5862 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5863 if (out_words_to_go)
5865 rtx sreg;
5867 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5868 sreg = copy_to_reg (mem);
5870 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5871 emit_move_insn (mem, sreg);
5872 in_words_to_go--;
5874 gcc_assert (!in_words_to_go); /* Sanity check */
5877 if (in_words_to_go)
5879 gcc_assert (in_words_to_go > 0);
5881 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5882 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5885 gcc_assert (!last_bytes || part_bytes_reg);
5887 if (BYTES_BIG_ENDIAN && last_bytes)
5889 rtx tmp = gen_reg_rtx (SImode);
5891 /* The bytes we want are in the top end of the word. */
5892 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5893 GEN_INT (8 * (4 - last_bytes))));
5894 part_bytes_reg = tmp;
5896 while (last_bytes)
5898 mem = adjust_automodify_address (dstbase, QImode,
5899 plus_constant (dst, last_bytes - 1),
5900 dstoffset + last_bytes - 1);
5901 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5903 if (--last_bytes)
5905 tmp = gen_reg_rtx (SImode);
5906 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5907 part_bytes_reg = tmp;
5912 else
5914 if (last_bytes > 1)
5916 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5917 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5918 last_bytes -= 2;
5919 if (last_bytes)
5921 rtx tmp = gen_reg_rtx (SImode);
5922 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5923 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5924 part_bytes_reg = tmp;
5925 dstoffset += 2;
5929 if (last_bytes)
5931 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5932 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5936 return 1;
5939 /* Generate a memory reference for a half word, such that it will be loaded
5940 into the top 16 bits of the word. We can assume that the address is
5941 known to be alignable and of the form reg, or plus (reg, const). */
5944 arm_gen_rotated_half_load (rtx memref)
5946 HOST_WIDE_INT offset = 0;
5947 rtx base = XEXP (memref, 0);
5949 if (GET_CODE (base) == PLUS)
5951 offset = INTVAL (XEXP (base, 1));
5952 base = XEXP (base, 0);
5955 /* If we aren't allowed to generate unaligned addresses, then fail. */
5956 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5957 return NULL;
5959 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5961 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5962 return base;
5964 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5967 /* Select a dominance comparison mode if possible for a test of the general
5968 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5969 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5970 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5971 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5972 In all cases OP will be either EQ or NE, but we don't need to know which
5973 here. If we are unable to support a dominance comparison we return
5974 CC mode. This will then fail to match for the RTL expressions that
5975 generate this call. */
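   An illustrative case (operand names are arbitrary): given X = (eq a b),
   Y = (ge c d) and COND_OR == DOM_CC_X_OR_Y, EQ implies GE, so EQ
   dominates and no swap is needed; the outer switch on EQ and the inner
   switch on GE then yield CC_DGEmode, letting a later branch test the
   whole (a == b) || (c >= d) expression with the "ge" condition.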
5976 enum machine_mode
5977 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5979 enum rtx_code cond1, cond2;
5980 int swapped = 0;
5982 /* Currently we will probably get the wrong result if the individual
5983 comparisons are not simple. This also ensures that it is safe to
5984 reverse a comparison if necessary. */
5985 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5986 != CCmode)
5987 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5988 != CCmode))
5989 return CCmode;
5991 /* The if_then_else variant of this tests the second condition if the
5992 first passes, but is true if the first fails. Reverse the first
5993 condition to get a true "inclusive-or" expression. */
5994 if (cond_or == DOM_CC_NX_OR_Y)
5995 cond1 = reverse_condition (cond1);
5997 /* If the comparisons are not equal, and one doesn't dominate the other,
5998 then we can't do this. */
5999 if (cond1 != cond2
6000 && !comparison_dominates_p (cond1, cond2)
6001 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6002 return CCmode;
6004 if (swapped)
6006 enum rtx_code temp = cond1;
6007 cond1 = cond2;
6008 cond2 = temp;
6011 switch (cond1)
6013 case EQ:
6014 if (cond_or == DOM_CC_X_AND_Y)
6015 return CC_DEQmode;
6017 switch (cond2)
6019 case EQ: return CC_DEQmode;
6020 case LE: return CC_DLEmode;
6021 case LEU: return CC_DLEUmode;
6022 case GE: return CC_DGEmode;
6023 case GEU: return CC_DGEUmode;
6024 default: gcc_unreachable ();
6027 case LT:
6028 if (cond_or == DOM_CC_X_AND_Y)
6029 return CC_DLTmode;
6031 switch (cond2)
6033 case LT:
6034 return CC_DLTmode;
6035 case LE:
6036 return CC_DLEmode;
6037 case NE:
6038 return CC_DNEmode;
6039 default:
6040 gcc_unreachable ();
6043 case GT:
6044 if (cond_or == DOM_CC_X_AND_Y)
6045 return CC_DGTmode;
6047 switch (cond2)
6049 case GT:
6050 return CC_DGTmode;
6051 case GE:
6052 return CC_DGEmode;
6053 case NE:
6054 return CC_DNEmode;
6055 default:
6056 gcc_unreachable ();
6059 case LTU:
6060 if (cond_or == DOM_CC_X_AND_Y)
6061 return CC_DLTUmode;
6063 switch (cond2)
6065 case LTU:
6066 return CC_DLTUmode;
6067 case LEU:
6068 return CC_DLEUmode;
6069 case NE:
6070 return CC_DNEmode;
6071 default:
6072 gcc_unreachable ();
6075 case GTU:
6076 if (cond_or == DOM_CC_X_AND_Y)
6077 return CC_DGTUmode;
6079 switch (cond2)
6081 case GTU:
6082 return CC_DGTUmode;
6083 case GEU:
6084 return CC_DGEUmode;
6085 case NE:
6086 return CC_DNEmode;
6087 default:
6088 gcc_unreachable ();
6091 /* The remaining cases only occur when both comparisons are the
6092 same. */
6093 case NE:
6094 gcc_assert (cond1 == cond2);
6095 return CC_DNEmode;
6097 case LE:
6098 gcc_assert (cond1 == cond2);
6099 return CC_DLEmode;
6101 case GE:
6102 gcc_assert (cond1 == cond2);
6103 return CC_DGEmode;
6105 case LEU:
6106 gcc_assert (cond1 == cond2);
6107 return CC_DLEUmode;
6109 case GEU:
6110 gcc_assert (cond1 == cond2);
6111 return CC_DGEUmode;
6113 default:
6114 gcc_unreachable ();
6118 enum machine_mode
6119 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6121 /* All floating point compares return CCFP if it is an equality
6122 comparison, and CCFPE otherwise. */
6123 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6125 switch (op)
6127 case EQ:
6128 case NE:
6129 case UNORDERED:
6130 case ORDERED:
6131 case UNLT:
6132 case UNLE:
6133 case UNGT:
6134 case UNGE:
6135 case UNEQ:
6136 case LTGT:
6137 return CCFPmode;
6139 case LT:
6140 case LE:
6141 case GT:
6142 case GE:
6143 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6144 return CCFPmode;
6145 return CCFPEmode;
6147 default:
6148 gcc_unreachable ();
6152 /* A compare with a shifted operand. Because of canonicalization, the
6153 comparison will have to be swapped when we emit the assembler. */
6154 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6155 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6156 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6157 || GET_CODE (x) == ROTATERT))
6158 return CC_SWPmode;
6160 /* This operation is performed swapped, but since we only rely on the Z
6161 flag we don't need an additional mode. */
6162 if (GET_MODE (y) == SImode && REG_P (y)
6163 && GET_CODE (x) == NEG
6164 && (op == EQ || op == NE))
6165 return CC_Zmode;
6167 /* This is a special case that is used by combine to allow a
6168 comparison of a shifted byte load to be split into a zero-extend
6169 followed by a comparison of the shifted integer (only valid for
6170 equalities and unsigned inequalities). */
6171 if (GET_MODE (x) == SImode
6172 && GET_CODE (x) == ASHIFT
6173 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6174 && GET_CODE (XEXP (x, 0)) == SUBREG
6175 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6176 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6177 && (op == EQ || op == NE
6178 || op == GEU || op == GTU || op == LTU || op == LEU)
6179 && GET_CODE (y) == CONST_INT)
6180 return CC_Zmode;
6186 /* A construct for a conditional compare: if the false arm contains
6187 0, then both conditions must be true; otherwise either condition
6184 must be true. Not all conditions are possible, so CCmode is
6185 returned if it can't be done. */
6186 if (GET_CODE (x) == IF_THEN_ELSE
6187 && (XEXP (x, 2) == const0_rtx
6188 || XEXP (x, 2) == const1_rtx)
6189 && COMPARISON_P (XEXP (x, 0))
6190 && COMPARISON_P (XEXP (x, 1)))
6191 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6192 INTVAL (XEXP (x, 2)));
6194 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6195 if (GET_CODE (x) == AND
6196 && COMPARISON_P (XEXP (x, 0))
6197 && COMPARISON_P (XEXP (x, 1)))
6198 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6199 DOM_CC_X_AND_Y);
6201 if (GET_CODE (x) == IOR
6202 && COMPARISON_P (XEXP (x, 0))
6203 && COMPARISON_P (XEXP (x, 1)))
6204 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6205 DOM_CC_X_OR_Y);
6207 /* An operation (on Thumb) where we want to test for a single bit.
6208 This is done by shifting that bit up into the top bit of a
6209 scratch register; we can then branch on the sign bit. */
6210 if (TARGET_THUMB
6211 && GET_MODE (x) == SImode
6212 && (op == EQ || op == NE)
6213 && (GET_CODE (x) == ZERO_EXTRACT))
6214 return CC_Nmode;
6216 /* An operation that sets the condition codes as a side-effect; the
6217 V flag is not set correctly, so we can only use comparisons where
6218 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6219 instead.) */
6220 if (GET_MODE (x) == SImode
6221 && y == const0_rtx
6222 && (op == EQ || op == NE || op == LT || op == GE)
6223 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6224 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6225 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6226 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6227 || GET_CODE (x) == LSHIFTRT
6228 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6229 || GET_CODE (x) == ROTATERT
6230 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6231 return CC_NOOVmode;
6233 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6234 return CC_Zmode;
6236 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6237 && GET_CODE (x) == PLUS
6238 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6239 return CC_Cmode;
6241 return CCmode;
6244 /* X and Y are two things to compare using CODE. Emit the compare insn and
6245 return the rtx for register 0 in the proper mode. FP means this is a
6246 floating point compare: I don't think that it is needed on the arm. */
6248 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6250 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6251 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6253 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6254 gen_rtx_COMPARE (mode, x, y)));
6256 return cc_reg;
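/* As a sketch of typical use: for two SImode registers,
   arm_gen_compare_reg (GTU, op0, op1) falls through SELECT_CC_MODE to
   plain CCmode and emits
     (set (reg:CC CC_REGNUM) (compare:CC op0 op1))
   the caller then wraps the returned CC register in a GTU test for its
   branch or conditional-move pattern.  */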
6259 /* Generate a sequence of insns that will generate the correct return
6260 address mask depending on the physical architecture that the program
6261 is running on. */
6263 arm_gen_return_addr_mask (void)
6265 rtx reg = gen_reg_rtx (Pmode);
6267 emit_insn (gen_return_addr_mask (reg));
6268 return reg;
6271 void
6272 arm_reload_in_hi (rtx *operands)
6274 rtx ref = operands[1];
6275 rtx base, scratch;
6276 HOST_WIDE_INT offset = 0;
6278 if (GET_CODE (ref) == SUBREG)
6280 offset = SUBREG_BYTE (ref);
6281 ref = SUBREG_REG (ref);
6284 if (GET_CODE (ref) == REG)
6286 /* We have a pseudo which has been spilt onto the stack; there
6287 are two cases here: the first where there is a simple
6288 stack-slot replacement and a second where the stack-slot is
6289 out of range, or is used as a subreg. */
6290 if (reg_equiv_mem[REGNO (ref)])
6292 ref = reg_equiv_mem[REGNO (ref)];
6293 base = find_replacement (&XEXP (ref, 0));
6295 else
6296 /* The slot is out of range, or was dressed up in a SUBREG. */
6297 base = reg_equiv_address[REGNO (ref)];
6299 else
6300 base = find_replacement (&XEXP (ref, 0));
6302 /* Handle the case where the address is too complex to be offset by 1. */
6303 if (GET_CODE (base) == MINUS
6304 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6306 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6308 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6309 base = base_plus;
6311 else if (GET_CODE (base) == PLUS)
6313 /* The addend must be CONST_INT, or we would have dealt with it above. */
6314 HOST_WIDE_INT hi, lo;
6316 offset += INTVAL (XEXP (base, 1));
6317 base = XEXP (base, 0);
6319 /* Rework the address into a legal sequence of insns. */
6320 /* Valid range for lo is -4095 -> 4095 */
6321 lo = (offset >= 0
6322 ? (offset & 0xfff)
6323 : -((-offset) & 0xfff));
6325 /* Corner case, if lo is the max offset then we would be out of range
6326 once we have added the additional 1 below, so bump the msb into the
6327 pre-loading insn(s). */
6328 if (lo == 4095)
6329 lo &= 0x7ff;
6331 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6332 ^ (HOST_WIDE_INT) 0x80000000)
6333 - (HOST_WIDE_INT) 0x80000000);
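   /* Worked example (purely illustrative): offset == 0x1234 gives
      lo == 0x234 and hi == 0x1000; offset == 4095 would give lo == 0xfff,
      which the "+ 1" access below could push out of range, so lo is
      reduced to 0x7ff and hi becomes 0x800.  Either way hi + lo == offset
      still holds.  */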
6335 gcc_assert (hi + lo == offset);
6337 if (hi != 0)
6339 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6341 /* Get the base address; addsi3 knows how to handle constants
6342 that require more than one insn. */
6343 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6344 base = base_plus;
6345 offset = lo;
6349 /* Operands[2] may overlap operands[0] (though it won't overlap
6350 operands[1]), that's why we asked for a DImode reg -- so we can
6351 use the bit that does not overlap. */
6352 if (REGNO (operands[2]) == REGNO (operands[0]))
6353 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6354 else
6355 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6357 emit_insn (gen_zero_extendqisi2 (scratch,
6358 gen_rtx_MEM (QImode,
6359 plus_constant (base,
6360 offset))));
6361 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6362 gen_rtx_MEM (QImode,
6363 plus_constant (base,
6364 offset + 1))));
6365 if (!BYTES_BIG_ENDIAN)
6366 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6367 gen_rtx_IOR (SImode,
6368 gen_rtx_ASHIFT
6369 (SImode,
6370 gen_rtx_SUBREG (SImode, operands[0], 0),
6371 GEN_INT (8)),
6372 scratch)));
6373 else
6374 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6375 gen_rtx_IOR (SImode,
6376 gen_rtx_ASHIFT (SImode, scratch,
6377 GEN_INT (8)),
6378 gen_rtx_SUBREG (SImode, operands[0],
6379 0))));
6382 /* Handle storing a half-word to memory during reload by synthesizing as two
6383 byte stores. Take care not to clobber the input values until after we
6384 have moved them somewhere safe. This code assumes that if the DImode
6385 scratch in operands[2] overlaps either the input value or output address
6386 in some way, then that value must die in this insn (we absolutely need
6387 two scratch registers for some corner cases). */
6388 void
6389 arm_reload_out_hi (rtx *operands)
6391 rtx ref = operands[0];
6392 rtx outval = operands[1];
6393 rtx base, scratch;
6394 HOST_WIDE_INT offset = 0;
6396 if (GET_CODE (ref) == SUBREG)
6398 offset = SUBREG_BYTE (ref);
6399 ref = SUBREG_REG (ref);
6402 if (GET_CODE (ref) == REG)
6404 /* We have a pseudo which has been spilt onto the stack; there
6405 are two cases here: the first where there is a simple
6406 stack-slot replacement and a second where the stack-slot is
6407 out of range, or is used as a subreg. */
6408 if (reg_equiv_mem[REGNO (ref)])
6410 ref = reg_equiv_mem[REGNO (ref)];
6411 base = find_replacement (&XEXP (ref, 0));
6413 else
6414 /* The slot is out of range, or was dressed up in a SUBREG. */
6415 base = reg_equiv_address[REGNO (ref)];
6417 else
6418 base = find_replacement (&XEXP (ref, 0));
6420 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6422 /* Handle the case where the address is too complex to be offset by 1. */
6423 if (GET_CODE (base) == MINUS
6424 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6426 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6428 /* Be careful not to destroy OUTVAL. */
6429 if (reg_overlap_mentioned_p (base_plus, outval))
6431 /* Updating base_plus might destroy outval, see if we can
6432 swap the scratch and base_plus. */
6433 if (!reg_overlap_mentioned_p (scratch, outval))
6435 rtx tmp = scratch;
6436 scratch = base_plus;
6437 base_plus = tmp;
6439 else
6441 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6443 /* Be conservative and copy OUTVAL into the scratch now;
6444 this should only be necessary if outval is a subreg
6445 of something larger than a word. */
6446 /* XXX Might this clobber base? I can't see how it can,
6447 since scratch is known to overlap with OUTVAL, and
6448 must be wider than a word. */
6449 emit_insn (gen_movhi (scratch_hi, outval));
6450 outval = scratch_hi;
6454 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6455 base = base_plus;
6457 else if (GET_CODE (base) == PLUS)
6459 /* The addend must be CONST_INT, or we would have dealt with it above. */
6460 HOST_WIDE_INT hi, lo;
6462 offset += INTVAL (XEXP (base, 1));
6463 base = XEXP (base, 0);
6465 /* Rework the address into a legal sequence of insns. */
6466 /* Valid range for lo is -4095 -> 4095 */
6467 lo = (offset >= 0
6468 ? (offset & 0xfff)
6469 : -((-offset) & 0xfff));
6471 /* Corner case, if lo is the max offset then we would be out of range
6472 once we have added the additional 1 below, so bump the msb into the
6473 pre-loading insn(s). */
6474 if (lo == 4095)
6475 lo &= 0x7ff;
6477 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6478 ^ (HOST_WIDE_INT) 0x80000000)
6479 - (HOST_WIDE_INT) 0x80000000);
6481 gcc_assert (hi + lo == offset);
6483 if (hi != 0)
6485 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6487 /* Be careful not to destroy OUTVAL. */
6488 if (reg_overlap_mentioned_p (base_plus, outval))
6490 /* Updating base_plus might destroy outval, see if we
6491 can swap the scratch and base_plus. */
6492 if (!reg_overlap_mentioned_p (scratch, outval))
6494 rtx tmp = scratch;
6495 scratch = base_plus;
6496 base_plus = tmp;
6498 else
6500 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6502 /* Be conservative and copy outval into scratch now;
6503 this should only be necessary if outval is a
6504 subreg of something larger than a word. */
6505 /* XXX Might this clobber base? I can't see how it
6506 can, since scratch is known to overlap with
6507 outval. */
6508 emit_insn (gen_movhi (scratch_hi, outval));
6509 outval = scratch_hi;
6513 /* Get the base address; addsi3 knows how to handle constants
6514 that require more than one insn. */
6515 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6516 base = base_plus;
6517 offset = lo;
6521 if (BYTES_BIG_ENDIAN)
6523 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6524 plus_constant (base, offset + 1)),
6525 gen_lowpart (QImode, outval)));
6526 emit_insn (gen_lshrsi3 (scratch,
6527 gen_rtx_SUBREG (SImode, outval, 0),
6528 GEN_INT (8)));
6529 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6530 gen_lowpart (QImode, scratch)));
6532 else
6534 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6535 gen_lowpart (QImode, outval)));
6536 emit_insn (gen_lshrsi3 (scratch,
6537 gen_rtx_SUBREG (SImode, outval, 0),
6538 GEN_INT (8)));
6539 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6540 plus_constant (base, offset + 1)),
6541 gen_lowpart (QImode, scratch)));
6545 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
6546 (padded to the size of a word) should be passed in a register. */
6548 static bool
6549 arm_must_pass_in_stack (enum machine_mode mode, tree type)
6551 if (TARGET_AAPCS_BASED)
6552 return must_pass_in_stack_var_size (mode, type);
6553 else
6554 return must_pass_in_stack_var_size_or_pad (mode, type);
6558 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
6559 Return true if an argument passed on the stack should be padded upwards,
6560 i.e. if the least-significant byte has useful data. */
6562 bool
6563 arm_pad_arg_upward (enum machine_mode mode, tree type)
6565 if (!TARGET_AAPCS_BASED)
6566 return DEFAULT_FUNCTION_ARG_PADDING(mode, type);
6568 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
6569 return false;
6571 return true;
6575 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
6576 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
6577 byte of the register has useful data, and return the opposite if the
6578 most significant byte does.
6579 For AAPCS, small aggregates and small complex types are always padded
6580 upwards. */
6582 bool
6583 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
6584 tree type, int first ATTRIBUTE_UNUSED)
6586 if (TARGET_AAPCS_BASED
6587 && BYTES_BIG_ENDIAN
6588 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
6589 && int_size_in_bytes (type) <= 4)
6590 return true;
6592 /* Otherwise, use default padding. */
6593 return !BYTES_BIG_ENDIAN;
6598 /* Print a symbolic form of X to the debug file, F. */
6599 static void
6600 arm_print_value (FILE *f, rtx x)
6602 switch (GET_CODE (x))
6604 case CONST_INT:
6605 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6606 return;
6608 case CONST_DOUBLE:
6609 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6610 return;
6612 case CONST_VECTOR:
6614 int i;
6616 fprintf (f, "<");
6617 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6619 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6620 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6621 fputc (',', f);
6623 fprintf (f, ">");
6625 return;
6627 case CONST_STRING:
6628 fprintf (f, "\"%s\"", XSTR (x, 0));
6629 return;
6631 case SYMBOL_REF:
6632 fprintf (f, "`%s'", XSTR (x, 0));
6633 return;
6635 case LABEL_REF:
6636 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6637 return;
6639 case CONST:
6640 arm_print_value (f, XEXP (x, 0));
6641 return;
6643 case PLUS:
6644 arm_print_value (f, XEXP (x, 0));
6645 fprintf (f, "+");
6646 arm_print_value (f, XEXP (x, 1));
6647 return;
6649 case PC:
6650 fprintf (f, "pc");
6651 return;
6653 default:
6654 fprintf (f, "????");
6655 return;
6659 /* Routines for manipulation of the constant pool. */
6661 /* Arm instructions cannot load a large constant directly into a
6662 register; they have to come from a pc relative load. The constant
6663 must therefore be placed in the addressable range of the pc
6664 relative load. Depending on the precise pc relative load
6665 instruction the range is somewhere between 256 bytes and 4k. This
6666 means that we often have to dump a constant inside a function, and
6667 generate code to branch around it.
6669 It is important to minimize this, since the branches will slow
6670 things down and make the code larger.
6672 Normally we can hide the table after an existing unconditional
6673 branch so that there is no interruption of the flow, but in the
6674 worst case the code looks like this:
6676 ldr rn, L1
6678 b L2
6679 align
6680 L1: .long value
6684 ldr rn, L3
6686 b L4
6687 align
6688 L3: .long value
6692 We fix this by performing a scan after scheduling, which notices
6693 which instructions need to have their operands fetched from the
6694 constant table and builds the table.
6696 The algorithm starts by building a table of all the constants that
6697 need fixing up and all the natural barriers in the function (places
6698 where a constant table can be dropped without breaking the flow).
6699 For each fixup we note how far the pc-relative replacement will be
6700 able to reach and the offset of the instruction into the function.
6702 Having built the table we then group the fixes together to form
6703 tables that are as large as possible (subject to addressing
6704 constraints) and emit each table of constants after the last
6705 barrier that is within range of all the instructions in the group.
6706 If a group does not contain a barrier, then we forcibly create one
6707 by inserting a jump instruction into the flow. Once the table has
6708 been inserted, the insns are then modified to reference the
6709 relevant entry in the pool.
6711 Possible enhancements to the algorithm (not implemented) are:
6713 1) For some processors and object formats, there may be benefit in
6714 aligning the pools to the start of cache lines; this alignment
6715 would need to be taken into account when calculating addressability
6716 of a pool. */
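   After the scan, the worst case above is rewritten so that each load
   becomes a pc-relative reference into a shared pool placed behind an
   existing (or forcibly inserted) branch; schematically (labels here are
   illustrative only):

   ldr rn, L1   @ assembles to a [pc, #offset] load
   ...
   b L2
   align
   L1: .long value0
       .long value1
   L2:
   ...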
6718 /* These typedefs are located at the start of this file, so that
6719 they can be used in the prototypes there. This comment is to
6720 remind readers of that fact so that the following structures
6721 can be understood more easily.
6723 typedef struct minipool_node Mnode;
6724 typedef struct minipool_fixup Mfix; */
6726 struct minipool_node
6728 /* Doubly linked chain of entries. */
6729 Mnode * next;
6730 Mnode * prev;
6731 /* The maximum offset into the code at which this entry can be placed. While
6732 pushing fixes for forward references, all entries are sorted in order
6733 of increasing max_address. */
6734 HOST_WIDE_INT max_address;
6735 /* Similarly for an entry inserted for a backwards ref. */
6736 HOST_WIDE_INT min_address;
6737 /* The number of fixes referencing this entry. This can become zero
6738 if we "unpush" an entry. In this case we ignore the entry when we
6739 come to emit the code. */
6740 int refcount;
6741 /* The offset from the start of the minipool. */
6742 HOST_WIDE_INT offset;
6743 /* The value in the table. */
6744 rtx value;
6745 /* The mode of value. */
6746 enum machine_mode mode;
6747 /* The size of the value. With iWMMXt enabled
6748 sizes > 4 also imply an alignment of 8 bytes. */
6749 int fix_size;
6752 struct minipool_fixup
6754 Mfix * next;
6755 rtx insn;
6756 HOST_WIDE_INT address;
6757 rtx * loc;
6758 enum machine_mode mode;
6759 int fix_size;
6760 rtx value;
6761 Mnode * minipool;
6762 HOST_WIDE_INT forwards;
6763 HOST_WIDE_INT backwards;
6766 /* Fixes less than a word need padding out to a word boundary. */
6767 #define MINIPOOL_FIX_SIZE(mode) \
6768 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
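/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (HImode)
   are both padded up to 4, while MINIPOOL_FIX_SIZE (DImode) and
   MINIPOOL_FIX_SIZE (DFmode) stay at 8.  */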
6770 static Mnode * minipool_vector_head;
6771 static Mnode * minipool_vector_tail;
6772 static rtx minipool_vector_label;
6774 /* The linked list of all minipool fixes required for this function. */
6775 Mfix * minipool_fix_head;
6776 Mfix * minipool_fix_tail;
6777 /* The fix entry for the current minipool, once it has been placed. */
6778 Mfix * minipool_barrier;
6780 /* Determines if INSN is the start of a jump table. Returns the end
6781 of the TABLE or NULL_RTX. */
6782 static rtx
6783 is_jump_table (rtx insn)
6785 rtx table;
6787 if (GET_CODE (insn) == JUMP_INSN
6788 && JUMP_LABEL (insn) != NULL
6789 && ((table = next_real_insn (JUMP_LABEL (insn)))
6790 == next_real_insn (insn))
6791 && table != NULL
6792 && GET_CODE (table) == JUMP_INSN
6793 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6794 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6795 return table;
6797 return NULL_RTX;
6800 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6801 #define JUMP_TABLES_IN_TEXT_SECTION 0
6802 #endif
6804 static HOST_WIDE_INT
6805 get_jump_table_size (rtx insn)
6807 /* ADDR_VECs only take room if read-only data goes into the text
6808 section. */
6809 if (JUMP_TABLES_IN_TEXT_SECTION
6810 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6811 || 1
6812 #endif
6815 rtx body = PATTERN (insn);
6816 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6818 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6821 return 0;
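/* For instance, a 10-entry ADDR_DIFF_VEC in HImode contributes
   2 * 10 == 20 bytes, but only when jump tables land in the text
   section; otherwise the table takes no space there and the function
   returns 0.  */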
6824 /* Move a minipool fix MP from its current location to before MAX_MP.
6825 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6826 constraints may need updating. */
6827 static Mnode *
6828 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6829 HOST_WIDE_INT max_address)
6831 /* The code below assumes these are different. */
6832 gcc_assert (mp != max_mp);
6834 if (max_mp == NULL)
6836 if (max_address < mp->max_address)
6837 mp->max_address = max_address;
6839 else
6841 if (max_address > max_mp->max_address - mp->fix_size)
6842 mp->max_address = max_mp->max_address - mp->fix_size;
6843 else
6844 mp->max_address = max_address;
6846 /* Unlink MP from its current position. Since max_mp is non-null,
6847 mp->prev must be non-null. */
6848 mp->prev->next = mp->next;
6849 if (mp->next != NULL)
6850 mp->next->prev = mp->prev;
6851 else
6852 minipool_vector_tail = mp->prev;
6854 /* Re-insert it before MAX_MP. */
6855 mp->next = max_mp;
6856 mp->prev = max_mp->prev;
6857 max_mp->prev = mp;
6859 if (mp->prev != NULL)
6860 mp->prev->next = mp;
6861 else
6862 minipool_vector_head = mp;
6865 /* Save the new entry. */
6866 max_mp = mp;
6868 /* Scan over the preceding entries and adjust their addresses as
6869 required. */
6870 while (mp->prev != NULL
6871 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6873 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6874 mp = mp->prev;
6877 return max_mp;
6880 /* Add a constant to the minipool for a forward reference. Returns the
6881 node added or NULL if the constant will not fit in this pool. */
6882 static Mnode *
6883 add_minipool_forward_ref (Mfix *fix)
6885 /* If set, max_mp is the first pool_entry that has a lower
6886 constraint than the one we are trying to add. */
6887 Mnode * max_mp = NULL;
6888 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6889 Mnode * mp;
6891 /* If this fix's address is greater than the address of the first
6892 entry, then we can't put the fix in this pool. We subtract the
6893 size of the current fix to ensure that if the table is fully
6894 packed we still have enough room to insert this value by shuffling
6895 the other fixes forwards. */
6896 if (minipool_vector_head &&
6897 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6898 return NULL;
6900 /* Scan the pool to see if a constant with the same value has
6901 already been added. While we are doing this, also note the
6902 location where we must insert the constant if it doesn't already
6903 exist. */
6904 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6906 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6907 && fix->mode == mp->mode
6908 && (GET_CODE (fix->value) != CODE_LABEL
6909 || (CODE_LABEL_NUMBER (fix->value)
6910 == CODE_LABEL_NUMBER (mp->value)))
6911 && rtx_equal_p (fix->value, mp->value))
6913 /* More than one fix references this entry. */
6914 mp->refcount++;
6915 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6918 /* Note the insertion point if necessary. */
6919 if (max_mp == NULL
6920 && mp->max_address > max_address)
6921 max_mp = mp;
6923 /* If we are inserting an 8-byte aligned quantity and
6924 we have not already found an insertion point, then
6925 make sure that all such 8-byte aligned quantities are
6926 placed at the start of the pool. */
6927 if (ARM_DOUBLEWORD_ALIGN
6928 && max_mp == NULL
6929 && fix->fix_size == 8
6930 && mp->fix_size != 8)
6932 max_mp = mp;
6933 max_address = mp->max_address;
6937 /* The value is not currently in the minipool, so we need to create
6938 a new entry for it. If MAX_MP is NULL, the entry will be put on
6939 the end of the list since the placement is less constrained than
6940 any existing entry. Otherwise, we insert the new fix before
6941 MAX_MP and, if necessary, adjust the constraints on the other
6942 entries. */
6943 mp = xmalloc (sizeof (* mp));
6944 mp->fix_size = fix->fix_size;
6945 mp->mode = fix->mode;
6946 mp->value = fix->value;
6947 mp->refcount = 1;
6948 /* Not yet required for a backwards ref. */
6949 mp->min_address = -65536;
6951 if (max_mp == NULL)
6953 mp->max_address = max_address;
6954 mp->next = NULL;
6955 mp->prev = minipool_vector_tail;
6957 if (mp->prev == NULL)
6959 minipool_vector_head = mp;
6960 minipool_vector_label = gen_label_rtx ();
6962 else
6963 mp->prev->next = mp;
6965 minipool_vector_tail = mp;
6967 else
6969 if (max_address > max_mp->max_address - mp->fix_size)
6970 mp->max_address = max_mp->max_address - mp->fix_size;
6971 else
6972 mp->max_address = max_address;
6974 mp->next = max_mp;
6975 mp->prev = max_mp->prev;
6976 max_mp->prev = mp;
6977 if (mp->prev != NULL)
6978 mp->prev->next = mp;
6979 else
6980 minipool_vector_head = mp;
6983 /* Save the new entry. */
6984 max_mp = mp;
6986 /* Scan over the preceding entries and adjust their addresses as
6987 required. */
6988 while (mp->prev != NULL
6989 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6991 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6992 mp = mp->prev;
6995 return max_mp;
6998 static Mnode *
6999 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7000 HOST_WIDE_INT min_address)
7002 HOST_WIDE_INT offset;
7004 /* The code below assumes these are different. */
7005 gcc_assert (mp != min_mp);
7007 if (min_mp == NULL)
7009 if (min_address > mp->min_address)
7010 mp->min_address = min_address;
7012 else
7014 /* We will adjust this below if it is too loose. */
7015 mp->min_address = min_address;
7017 /* Unlink MP from its current position. Since min_mp is non-null,
7018 mp->next must be non-null. */
7019 mp->next->prev = mp->prev;
7020 if (mp->prev != NULL)
7021 mp->prev->next = mp->next;
7022 else
7023 minipool_vector_head = mp->next;
7025 /* Reinsert it after MIN_MP. */
7026 mp->prev = min_mp;
7027 mp->next = min_mp->next;
7028 min_mp->next = mp;
7029 if (mp->next != NULL)
7030 mp->next->prev = mp;
7031 else
7032 minipool_vector_tail = mp;
7035 min_mp = mp;
7037 offset = 0;
7038 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7040 mp->offset = offset;
7041 if (mp->refcount > 0)
7042 offset += mp->fix_size;
7044 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7045 mp->next->min_address = mp->min_address + mp->fix_size;
7048 return min_mp;
7051 /* Add a constant to the minipool for a backward reference. Returns the
7052 node added or NULL if the constant will not fit in this pool.
7054 Note that the code for insertion for a backwards reference can be
7055 somewhat confusing because the calculated offsets for each fix do
7056 not take into account the size of the pool (which is still under
7057 construction). */
7058 static Mnode *
7059 add_minipool_backward_ref (Mfix *fix)
7061 /* If set, min_mp is the last pool_entry that has a lower constraint
7062 than the one we are trying to add. */
7063 Mnode *min_mp = NULL;
7064 /* This can be negative, since it is only a constraint. */
7065 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7066 Mnode *mp;
7068 /* If we can't reach the current pool from this insn, or if we can't
7069 insert this entry at the end of the pool without pushing other
7070 fixes out of range, then we don't try. This ensures that we
7071 can't fail later on. */
7072 if (min_address >= minipool_barrier->address
7073 || (minipool_vector_tail->min_address + fix->fix_size
7074 >= minipool_barrier->address))
7075 return NULL;
7077 /* Scan the pool to see if a constant with the same value has
7078 already been added. While we are doing this, also note the
7079 location where we must insert the constant if it doesn't already
7080 exist. */
7081 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7083 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7084 && fix->mode == mp->mode
7085 && (GET_CODE (fix->value) != CODE_LABEL
7086 || (CODE_LABEL_NUMBER (fix->value)
7087 == CODE_LABEL_NUMBER (mp->value)))
7088 && rtx_equal_p (fix->value, mp->value)
7089 /* Check that there is enough slack to move this entry to the
7090 end of the table (this is conservative). */
7091 && (mp->max_address
7092 > (minipool_barrier->address
7093 + minipool_vector_tail->offset
7094 + minipool_vector_tail->fix_size)))
7096 mp->refcount++;
7097 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7100 if (min_mp != NULL)
7101 mp->min_address += fix->fix_size;
7102 else
7104 /* Note the insertion point if necessary. */
7105 if (mp->min_address < min_address)
7107 /* For now, we do not allow the insertion of 8-byte alignment
7108 requiring nodes anywhere but at the start of the pool. */
7109 if (ARM_DOUBLEWORD_ALIGN
7110 && fix->fix_size == 8 && mp->fix_size != 8)
7111 return NULL;
7112 else
7113 min_mp = mp;
7115 else if (mp->max_address
7116 < minipool_barrier->address + mp->offset + fix->fix_size)
7118 /* Inserting before this entry would push the fix beyond
7119 its maximum address (which can happen if we have
7120 re-located a forwards fix); force the new fix to come
7121 after it. */
7122 min_mp = mp;
7123 min_address = mp->min_address + fix->fix_size;
7125 /* If we are inserting an 8-byte aligned quantity and
7126 we have not already found an insertion point, then
7127 make sure that all such 8-byte aligned quantities are
7128 placed at the start of the pool. */
7129 else if (ARM_DOUBLEWORD_ALIGN
7130 && min_mp == NULL
7131 && fix->fix_size == 8
7132 && mp->fix_size < 8)
7134 min_mp = mp;
7135 min_address = mp->min_address + fix->fix_size;
7140 /* We need to create a new entry. */
7141 mp = xmalloc (sizeof (* mp));
7142 mp->fix_size = fix->fix_size;
7143 mp->mode = fix->mode;
7144 mp->value = fix->value;
7145 mp->refcount = 1;
7146 mp->max_address = minipool_barrier->address + 65536;
7148 mp->min_address = min_address;
7150 if (min_mp == NULL)
7152 mp->prev = NULL;
7153 mp->next = minipool_vector_head;
7155 if (mp->next == NULL)
7157 minipool_vector_tail = mp;
7158 minipool_vector_label = gen_label_rtx ();
7160 else
7161 mp->next->prev = mp;
7163 minipool_vector_head = mp;
7165 else
7167 mp->next = min_mp->next;
7168 mp->prev = min_mp;
7169 min_mp->next = mp;
7171 if (mp->next != NULL)
7172 mp->next->prev = mp;
7173 else
7174 minipool_vector_tail = mp;
7177 /* Save the new entry. */
7178 min_mp = mp;
7180 if (mp->prev)
7181 mp = mp->prev;
7182 else
7183 mp->offset = 0;
7185 /* Scan over the following entries and adjust their offsets. */
7186 while (mp->next != NULL)
7188 if (mp->next->min_address < mp->min_address + mp->fix_size)
7189 mp->next->min_address = mp->min_address + mp->fix_size;
7191 if (mp->refcount)
7192 mp->next->offset = mp->offset + mp->fix_size;
7193 else
7194 mp->next->offset = mp->offset;
7196 mp = mp->next;
7199 return min_mp;
7202 static void
7203 assign_minipool_offsets (Mfix *barrier)
7205 HOST_WIDE_INT offset = 0;
7206 Mnode *mp;
7208 minipool_barrier = barrier;
7210 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7212 mp->offset = offset;
7214 if (mp->refcount > 0)
7215 offset += mp->fix_size;
7219 /* Output the literal table */
7220 static void
7221 dump_minipool (rtx scan)
7223 Mnode * mp;
7224 Mnode * nmp;
7225 int align64 = 0;
7227 if (ARM_DOUBLEWORD_ALIGN)
7228 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7229 if (mp->refcount > 0 && mp->fix_size == 8)
7231 align64 = 1;
7232 break;
7235 if (dump_file)
7236 fprintf (dump_file,
7237 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7238 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7240 scan = emit_label_after (gen_label_rtx (), scan);
7241 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7242 scan = emit_label_after (minipool_vector_label, scan);
7244 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7246 if (mp->refcount > 0)
7248 if (dump_file)
7250 fprintf (dump_file,
7251 ";; Offset %u, min %ld, max %ld ",
7252 (unsigned) mp->offset, (unsigned long) mp->min_address,
7253 (unsigned long) mp->max_address);
7254 arm_print_value (dump_file, mp->value);
7255 fputc ('\n', dump_file);
7258 switch (mp->fix_size)
7260 #ifdef HAVE_consttable_1
7261 case 1:
7262 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7263 break;
7265 #endif
7266 #ifdef HAVE_consttable_2
7267 case 2:
7268 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7269 break;
7271 #endif
7272 #ifdef HAVE_consttable_4
7273 case 4:
7274 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7275 break;
7277 #endif
7278 #ifdef HAVE_consttable_8
7279 case 8:
7280 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7281 break;
7283 #endif
7284 default:
7285 gcc_unreachable ();
7289 nmp = mp->next;
7290 free (mp);
7293 minipool_vector_head = minipool_vector_tail = NULL;
7294 scan = emit_insn_after (gen_consttable_end (), scan);
7295 scan = emit_barrier_after (scan);
7298 /* Return the cost of forcibly inserting a barrier after INSN. */
7299 static int
7300 arm_barrier_cost (rtx insn)
7302 /* Basing the location of the pool on the loop depth is preferable,
7303 but at the moment, the basic block information seems to be
7304 corrupted by this stage of the compilation. */
7305 int base_cost = 50;
7306 rtx next = next_nonnote_insn (insn);
7308 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7309 base_cost -= 20;
7311 switch (GET_CODE (insn))
7313 case CODE_LABEL:
7314 /* It will always be better to place the table before the label, rather
7315 than after it. */
7316 return 50;
7318 case INSN:
7319 case CALL_INSN:
7320 return base_cost;
7322 case JUMP_INSN:
7323 return base_cost - 10;
7325 default:
7326 return base_cost + 10;
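/* Illustration: an ordinary INSN immediately followed by a CODE_LABEL
   costs 50 - 20 == 30, so such points are preferred over a plain INSN
   (cost 50), while a CODE_LABEL itself always costs 50 because the pool
   should go before it, not after.  */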
7330 /* Find the best place in the insn stream in the range
7331 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7332 Create the barrier by inserting a jump and add a new fix entry for
7333 it. */
7334 static Mfix *
7335 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7337 HOST_WIDE_INT count = 0;
7338 rtx barrier;
7339 rtx from = fix->insn;
7340 rtx selected = from;
7341 int selected_cost;
7342 HOST_WIDE_INT selected_address;
7343 Mfix * new_fix;
7344 HOST_WIDE_INT max_count = max_address - fix->address;
7345 rtx label = gen_label_rtx ();
7347 selected_cost = arm_barrier_cost (from);
7348 selected_address = fix->address;
7350 while (from && count < max_count)
7352 rtx tmp;
7353 int new_cost;
7355 /* This code shouldn't have been called if there was a natural barrier
7356 within range. */
7357 gcc_assert (GET_CODE (from) != BARRIER);
7359 /* Count the length of this insn. */
7360 count += get_attr_length (from);
7362 /* If there is a jump table, add its length. */
7363 tmp = is_jump_table (from);
7364 if (tmp != NULL)
7366 count += get_jump_table_size (tmp);
7368 /* Jump tables aren't in a basic block, so base the cost on
7369 the dispatch insn. If we select this location, we will
7370 still put the pool after the table. */
7371 new_cost = arm_barrier_cost (from);
7373 if (count < max_count && new_cost <= selected_cost)
7375 selected = tmp;
7376 selected_cost = new_cost;
7377 selected_address = fix->address + count;
7380 /* Continue after the dispatch table. */
7381 from = NEXT_INSN (tmp);
7382 continue;
7385 new_cost = arm_barrier_cost (from);
7387 if (count < max_count && new_cost <= selected_cost)
7389 selected = from;
7390 selected_cost = new_cost;
7391 selected_address = fix->address + count;
7394 from = NEXT_INSN (from);
7397 /* Create a new JUMP_INSN that branches around a barrier. */
7398 from = emit_jump_insn_after (gen_jump (label), selected);
7399 JUMP_LABEL (from) = label;
7400 barrier = emit_barrier_after (from);
7401 emit_label_after (label, barrier);
7403 /* Create a minipool barrier entry for the new barrier. */
7404 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7405 new_fix->insn = barrier;
7406 new_fix->address = selected_address;
7407 new_fix->next = fix->next;
7408 fix->next = new_fix;
7410 return new_fix;
7413 /* Record that there is a natural barrier in the insn stream at
7414 ADDRESS. */
7415 static void
7416 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7418 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7420 fix->insn = insn;
7421 fix->address = address;
7423 fix->next = NULL;
7424 if (minipool_fix_head != NULL)
7425 minipool_fix_tail->next = fix;
7426 else
7427 minipool_fix_head = fix;
7429 minipool_fix_tail = fix;
7432 /* Record INSN, which will need fixing up to load a value from the
7433 minipool. ADDRESS is the offset of the insn since the start of the
7434 function; LOC is a pointer to the part of the insn which requires
7435 fixing; VALUE is the constant that must be loaded, which is of type
7436 MODE. */
7437 static void
7438 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7439 enum machine_mode mode, rtx value)
7441 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7443 #ifdef AOF_ASSEMBLER
7444 /* PIC symbol references need to be converted into offsets into the
7445 based area. */
7446 /* XXX This shouldn't be done here. */
7447 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7448 value = aof_pic_entry (value);
7449 #endif /* AOF_ASSEMBLER */
7451 fix->insn = insn;
7452 fix->address = address;
7453 fix->loc = loc;
7454 fix->mode = mode;
7455 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7456 fix->value = value;
7457 fix->forwards = get_attr_pool_range (insn);
7458 fix->backwards = get_attr_neg_pool_range (insn);
7459 fix->minipool = NULL;
7461 /* If an insn doesn't have a range defined for it, then it isn't
7462 expecting to be reworked by this code. Better to stop now than
7463 to generate duff assembly code. */
7464 gcc_assert (fix->forwards || fix->backwards);
7466 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7467 So there might be an empty word before the start of the pool.
7468 Hence we reduce the forward range by 4 to allow for this
7469 possibility. */
7470 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7471 fix->forwards -= 4;
7473 if (dump_file)
7475 fprintf (dump_file,
7476 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7477 GET_MODE_NAME (mode),
7478 INSN_UID (insn), (unsigned long) address,
7479 -1 * (long)fix->backwards, (long)fix->forwards);
7480 arm_print_value (dump_file, fix->value);
7481 fprintf (dump_file, "\n");
7484 /* Add it to the chain of fixes. */
7485 fix->next = NULL;
7487 if (minipool_fix_head != NULL)
7488 minipool_fix_tail->next = fix;
7489 else
7490 minipool_fix_head = fix;
7492 minipool_fix_tail = fix;
7495 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7496 Returns the number of insns needed, or 99 if we don't know how to
7497 do it. */
7499 arm_const_double_inline_cost (rtx val)
7501 rtx lowpart, highpart;
7502 enum machine_mode mode;
7504 mode = GET_MODE (val);
7506 if (mode == VOIDmode)
7507 mode = DImode;
7509 gcc_assert (GET_MODE_SIZE (mode) == 8);
7511 lowpart = gen_lowpart (SImode, val);
7512 highpart = gen_highpart_mode (SImode, mode, val);
7514 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7515 gcc_assert (GET_CODE (highpart) == CONST_INT);
7517 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7518 NULL_RTX, NULL_RTX, 0, 0)
7519 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7520 NULL_RTX, NULL_RTX, 0, 0));
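/* Rough illustration: a DImode value such as 0x100000001 splits into
   lowpart 1 and highpart 1, each a valid ARM immediate, so the cost is
   1 + 1 == 2 insns; halves that need full 32-bit synthesis push the
   total correspondingly higher.  */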
7523 /* Return true if it is worthwhile to split a 64-bit constant into two
7524 32-bit operations. This is the case if optimizing for size, or
7525 if we have load delay slots, or if one 32-bit part can be done with
7526 a single data operation. */
7527 bool
7528 arm_const_double_by_parts (rtx val)
7530 enum machine_mode mode = GET_MODE (val);
7531 rtx part;
7533 if (optimize_size || arm_ld_sched)
7534 return true;
7536 if (mode == VOIDmode)
7537 mode = DImode;
7539 part = gen_highpart_mode (SImode, mode, val);
7541 gcc_assert (GET_CODE (part) == CONST_INT);
7543 if (const_ok_for_arm (INTVAL (part))
7544 || const_ok_for_arm (~INTVAL (part)))
7545 return true;
7547 part = gen_lowpart (SImode, val);
7549 gcc_assert (GET_CODE (part) == CONST_INT);
7551 if (const_ok_for_arm (INTVAL (part))
7552 || const_ok_for_arm (~INTVAL (part)))
7553 return true;
7555 return false;
7558 /* Scan INSN and note any of its operands that need fixing.
7559 If DO_PUSHES is false we do not actually push any of the fixups
7560 needed. The function returns TRUE if any fixups were needed/pushed.
7561 This is used by arm_memory_load_p() which needs to know about loads
7562 of constants that will be converted into minipool loads. */
7563 static bool
7564 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7566 bool result = false;
7567 int opno;
7569 extract_insn (insn);
7571 if (!constrain_operands (1))
7572 fatal_insn_not_found (insn);
7574 if (recog_data.n_alternatives == 0)
7575 return false;
7577 /* Fill in recog_op_alt with information about the constraints of
7578 this insn. */
7579 preprocess_constraints ();
7581 for (opno = 0; opno < recog_data.n_operands; opno++)
7583 /* Things we need to fix can only occur in inputs. */
7584 if (recog_data.operand_type[opno] != OP_IN)
7585 continue;
7587 /* If this alternative is a memory reference, then any mention
7588 of constants in this alternative is really to fool reload
7589 into allowing us to accept one there. We need to fix them up
7590 now so that we output the right code. */
7591 if (recog_op_alt[opno][which_alternative].memory_ok)
7593 rtx op = recog_data.operand[opno];
7595 if (CONSTANT_P (op))
7597 if (do_pushes)
7598 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7599 recog_data.operand_mode[opno], op);
7600 result = true;
7602 else if (GET_CODE (op) == MEM
7603 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7604 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7606 if (do_pushes)
7608 rtx cop = avoid_constant_pool_reference (op);
7610 /* Casting the address of something to a mode narrower
7611 than a word can cause avoid_constant_pool_reference()
7612 to return the pool reference itself. That's no good to
7613 us here. Let's just hope that we can use the
7614 constant pool value directly. */
7615 if (op == cop)
7616 cop = get_pool_constant (XEXP (op, 0));
7618 push_minipool_fix (insn, address,
7619 recog_data.operand_loc[opno],
7620 recog_data.operand_mode[opno], cop);
7623 result = true;
7628 return result;
7631 /* GCC puts the pool in the wrong place for ARM, since we can only
7632 load addresses a limited distance around the pc. We do some
7633 special munging to move the constant pool values to the correct
7634 point in the code. */
7635 static void
7636 arm_reorg (void)
7638 rtx insn;
7639 HOST_WIDE_INT address = 0;
7640 Mfix * fix;
7642 minipool_fix_head = minipool_fix_tail = NULL;
7644 /* The first insn must always be a note, or the code below won't
7645 scan it properly. */
7646 insn = get_insns ();
7647 gcc_assert (GET_CODE (insn) == NOTE);
7649 /* Scan all the insns and record the operands that will need fixing. */
7650 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7652 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7653 && (arm_cirrus_insn_p (insn)
7654 || GET_CODE (insn) == JUMP_INSN
7655 || arm_memory_load_p (insn)))
7656 cirrus_reorg (insn);
7658 if (GET_CODE (insn) == BARRIER)
7659 push_minipool_barrier (insn, address);
7660 else if (INSN_P (insn))
7662 rtx table;
7664 note_invalid_constants (insn, address, true);
7665 address += get_attr_length (insn);
7667 /* If the insn is a vector jump, add the size of the table
7668 and skip the table. */
7669 if ((table = is_jump_table (insn)) != NULL)
7671 address += get_jump_table_size (table);
7672 insn = table;
7677 fix = minipool_fix_head;
7679 /* Now scan the fixups and perform the required changes. */
7680 while (fix)
7682 Mfix * ftmp;
7683 Mfix * fdel;
7684 Mfix * last_added_fix;
7685 Mfix * last_barrier = NULL;
7686 Mfix * this_fix;
7688 /* Skip any further barriers before the next fix. */
7689 while (fix && GET_CODE (fix->insn) == BARRIER)
7690 fix = fix->next;
7692 /* No more fixes. */
7693 if (fix == NULL)
7694 break;
7696 last_added_fix = NULL;
7698 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7700 if (GET_CODE (ftmp->insn) == BARRIER)
7702 if (ftmp->address >= minipool_vector_head->max_address)
7703 break;
7705 last_barrier = ftmp;
7707 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7708 break;
7710 last_added_fix = ftmp; /* Keep track of the last fix added. */
7713 /* If we found a barrier, drop back to that; any fixes that we
7714 could have reached but come after the barrier will now go in
7715 the next mini-pool. */
7716 if (last_barrier != NULL)
7718 /* Reduce the refcount for those fixes that won't go into this
7719 pool after all. */
7720 for (fdel = last_barrier->next;
7721 fdel && fdel != ftmp;
7722 fdel = fdel->next)
7724 fdel->minipool->refcount--;
7725 fdel->minipool = NULL;
7728 ftmp = last_barrier;
7730 else
7732 /* ftmp is the first fix that we can't fit into this pool and
7733 there are no natural barriers that we could use. Insert a
7734 new barrier in the code somewhere between the previous
7735 fix and this one, and arrange to jump around it. */
7736 HOST_WIDE_INT max_address;
7738 /* The last item on the list of fixes must be a barrier, so
7739 we can never run off the end of the list of fixes without
7740 last_barrier being set. */
7741 gcc_assert (ftmp);
7743 max_address = minipool_vector_head->max_address;
7744 /* Check that there isn't another fix that is in range that
7745 we couldn't fit into this pool because the pool was
7746 already too large: we need to put the pool before such an
7747 instruction. */
7748 if (ftmp->address < max_address)
7749 max_address = ftmp->address;
7751 last_barrier = create_fix_barrier (last_added_fix, max_address);
7754 assign_minipool_offsets (last_barrier);
7756 while (ftmp)
7758 if (GET_CODE (ftmp->insn) != BARRIER
7759 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7760 == NULL))
7761 break;
7763 ftmp = ftmp->next;
7766 /* Scan over the fixes we have identified for this pool, fixing them
7767 up and adding the constants to the pool itself. */
7768 for (this_fix = fix; this_fix && ftmp != this_fix;
7769 this_fix = this_fix->next)
7770 if (GET_CODE (this_fix->insn) != BARRIER)
7772 rtx addr
7773 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7774 minipool_vector_label),
7775 this_fix->minipool->offset);
7776 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7779 dump_minipool (last_barrier->insn);
7780 fix = ftmp;
7783 /* From now on we must synthesize any constants that we can't handle
7784 directly. This can happen if the RTL gets split during final
7785 instruction generation. */
7786 after_arm_reorg = 1;
7788 /* Free the minipool memory. */
7789 obstack_free (&minipool_obstack, minipool_startobj);
7792 /* Routines to output assembly language. */
7794 /* If the rtx is the correct value then return the string of the number.
7795 In this way we can ensure that valid double constants are generated even
7796 when cross compiling. */
7797 const char *
7798 fp_immediate_constant (rtx x)
7800 REAL_VALUE_TYPE r;
7801 int i;
7803 if (!fp_consts_inited)
7804 init_fp_table ();
7806 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7807 for (i = 0; i < 8; i++)
7808 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7809 return strings_fp[i];
7811 gcc_unreachable ();
7814 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7815 static const char *
7816 fp_const_from_val (REAL_VALUE_TYPE *r)
7818 int i;
7820 if (!fp_consts_inited)
7821 init_fp_table ();
7823 for (i = 0; i < 8; i++)
7824 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7825 return strings_fp[i];
7827 gcc_unreachable ();
7830 /* Output the operands of a LDM/STM instruction to STREAM.
7831 MASK is the ARM register set mask of which only bits 0-15 are important.
7832 REG is the base register, either the frame pointer or the stack pointer.
7833 INSTR is the possibly suffixed load or store instruction. */
7835 static void
7836 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7837 unsigned long mask)
7839 unsigned i;
7840 bool not_first = FALSE;
7842 fputc ('\t', stream);
7843 asm_fprintf (stream, instr, reg);
7844 fputs (", {", stream);
7846 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7847 if (mask & (1 << i))
7849 if (not_first)
7850 fprintf (stream, ", ");
7852 asm_fprintf (stream, "%r", i);
7853 not_first = TRUE;
7856 fprintf (stream, "}\n");
7860 /* Output a FLDMX instruction to STREAM.
7861 BASE is the register containing the address.
7862 REG and COUNT specify the register range.
7863 Extra registers may be added to avoid hardware bugs. */
7865 static void
7866 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7868 int i;
7870 /* Work around the ARM10 VFPr1 bug. */
7871 if (count == 2 && !arm_arch6)
7873 if (reg == 15)
7874 reg--;
7875 count++;
7878 fputc ('\t', stream);
7879 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7881 for (i = reg; i < reg + count; i++)
7883 if (i > reg)
7884 fputs (", ", stream);
7885 asm_fprintf (stream, "d%d", i);
7887 fputs ("}\n", stream);
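/* With illustrative operands -- base the stack pointer, reg == 8 and
   count == 3 -- this prints something like

   fldmfdx sp!, {d8, d9, d10}

   and the VFPr1 workaround above turns a two-register request into a
   three-register one on affected cores.  */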
7892 /* Output the assembly for a store multiple. */
7894 const char *
7895 vfp_output_fstmx (rtx * operands)
7897 char pattern[100];
7898 int p;
7899 int base;
7900 int i;
7902 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7903 p = strlen (pattern);
7905 gcc_assert (GET_CODE (operands[1]) == REG);
7907 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7908 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7910 p += sprintf (&pattern[p], ", d%d", base + i);
7912 strcpy (&pattern[p], "}");
7914 output_asm_insn (pattern, operands);
7915 return "";
7919 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
7920 number of bytes pushed. */
7922 static int
7923 vfp_emit_fstmx (int base_reg, int count)
7925 rtx par;
7926 rtx dwarf;
7927 rtx tmp, reg;
7928 int i;
7930 /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
7931 register pairs are stored by a store multiple insn. We avoid this
7932 by pushing an extra pair. */
7933 if (count == 2 && !arm_arch6)
7935 if (base_reg == LAST_VFP_REGNUM - 3)
7936 base_reg -= 2;
7937 count++;
7940 /* ??? The frame layout is implementation defined. We describe
7941 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7942 We really need some way of representing the whole block so that the
7943 unwinder can figure it out at runtime. */
7944 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7945 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7947 reg = gen_rtx_REG (DFmode, base_reg);
7948 base_reg += 2;
7950 XVECEXP (par, 0, 0)
7951 = gen_rtx_SET (VOIDmode,
7952 gen_rtx_MEM (BLKmode,
7953 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7954 gen_rtx_UNSPEC (BLKmode,
7955 gen_rtvec (1, reg),
7956 UNSPEC_PUSH_MULT));
7958 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7959 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7960 GEN_INT (-(count * 8 + 4))));
7961 RTX_FRAME_RELATED_P (tmp) = 1;
7962 XVECEXP (dwarf, 0, 0) = tmp;
7964 tmp = gen_rtx_SET (VOIDmode,
7965 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7966 reg);
7967 RTX_FRAME_RELATED_P (tmp) = 1;
7968 XVECEXP (dwarf, 0, 1) = tmp;
7970 for (i = 1; i < count; i++)
7972 reg = gen_rtx_REG (DFmode, base_reg);
7973 base_reg += 2;
7974 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7976 tmp = gen_rtx_SET (VOIDmode,
7977 gen_rtx_MEM (DFmode,
7978 gen_rtx_PLUS (SImode,
7979 stack_pointer_rtx,
7980 GEN_INT (i * 8))),
7981 reg);
7982 RTX_FRAME_RELATED_P (tmp) = 1;
7983 XVECEXP (dwarf, 0, i + 1) = tmp;
7986 par = emit_insn (par);
7987 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7988 REG_NOTES (par));
7989 RTX_FRAME_RELATED_P (par) = 1;
7991 return count * 8 + 4;
7995 /* Output a 'call' insn. */
7996 const char *
7997 output_call (rtx *operands)
7999 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8001 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8002 if (REGNO (operands[0]) == LR_REGNUM)
8004 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8005 output_asm_insn ("mov%?\t%0, %|lr", operands);
8008 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8010 if (TARGET_INTERWORK || arm_arch4t)
8011 output_asm_insn ("bx%?\t%0", operands);
8012 else
8013 output_asm_insn ("mov%?\t%|pc, %0", operands);
8015 return "";
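/* Illustrative expansion (assumed pre-v4t target without interworking):
   a call through r4 emitted by output_call becomes
	mov	lr, pc
	mov	pc, r4
   whereas a v4t or interworking target uses "bx r4" for the final step;
   on v5 the patterns emit blx directly and never reach this routine.  */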
8018 /* Output a 'call' insn that is a reference in memory. */
8019 const char *
8020 output_call_mem (rtx *operands)
8022 if (TARGET_INTERWORK && !arm_arch5)
8024 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8025 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8026 output_asm_insn ("bx%?\t%|ip", operands);
8028 else if (regno_use_in (LR_REGNUM, operands[0]))
8030 /* LR is used in the memory address. We load the address in the
8031 first instruction. It's safe to use IP as the target of the
8032 load since the call will kill it anyway. */
8033 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8034 if (arm_arch5)
8035 output_asm_insn ("blx%?\t%|ip", operands);
8036 else
8038 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8039 if (arm_arch4t)
8040 output_asm_insn ("bx%?\t%|ip", operands);
8041 else
8042 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8045 else
8047 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8048 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8051 return "";
8055 /* Output a move from arm registers to an fpa register.
8056 OPERANDS[0] is an fpa register.
8057 OPERANDS[1] is the first register of an arm register pair. */
8058 const char *
8059 output_mov_long_double_fpa_from_arm (rtx *operands)
8061 int arm_reg0 = REGNO (operands[1]);
8062 rtx ops[3];
8064 gcc_assert (arm_reg0 != IP_REGNUM);
8066 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8067 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8068 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8070 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8071 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8073 return "";
8076 /* Output a move from an fpa register to arm registers.
8077 OPERANDS[0] is the first register of an arm register pair.
8078 OPERANDS[1] is an fpa register. */
8079 const char *
8080 output_mov_long_double_arm_from_fpa (rtx *operands)
8082 int arm_reg0 = REGNO (operands[0]);
8083 rtx ops[3];
8085 gcc_assert (arm_reg0 != IP_REGNUM);
8087 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8088 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8089 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8091 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8092 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8093 return "";
8096 /* Output a move from arm registers to arm registers of a long double.
8097 OPERANDS[0] is the destination.
8098 OPERANDS[1] is the source. */
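/* Illustrative example (not from the original sources): copying r2-r4 into
   r1-r3 is done in ascending order (mov r1,r2; mov r2,r3; mov r3,r4), while
   copying r1-r3 into r2-r4 is done in descending order so that no source
   register is overwritten before it has been read.  */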
8099 const char *
8100 output_mov_long_double_arm_from_arm (rtx *operands)
8102 /* We have to be careful here because the two might overlap. */
8103 int dest_start = REGNO (operands[0]);
8104 int src_start = REGNO (operands[1]);
8105 rtx ops[2];
8106 int i;
8108 if (dest_start < src_start)
8110 for (i = 0; i < 3; i++)
8112 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8113 ops[1] = gen_rtx_REG (SImode, src_start + i);
8114 output_asm_insn ("mov%?\t%0, %1", ops);
8117 else
8119 for (i = 2; i >= 0; i--)
8121 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8122 ops[1] = gen_rtx_REG (SImode, src_start + i);
8123 output_asm_insn ("mov%?\t%0, %1", ops);
8127 return "";
8131 /* Output a move from arm registers to an fpa register.
8132 OPERANDS[0] is an fpa register.
8133 OPERANDS[1] is the first register of an arm register pair. */
8134 const char *
8135 output_mov_double_fpa_from_arm (rtx *operands)
8137 int arm_reg0 = REGNO (operands[1]);
8138 rtx ops[2];
8140 gcc_assert (arm_reg0 != IP_REGNUM);
8142 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8143 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8144 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8145 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8146 return "";
8149 /* Output a move from an fpa register to arm registers.
8150 OPERANDS[0] is the first register of an arm register pair.
8151 OPERANDS[1] is an fpa register. */
8152 const char *
8153 output_mov_double_arm_from_fpa (rtx *operands)
8155 int arm_reg0 = REGNO (operands[0]);
8156 rtx ops[2];
8158 gcc_assert (arm_reg0 != IP_REGNUM);
8160 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8161 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8162 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8163 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8164 return "";
8167 /* Output a move between double words.
8168 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8169 or MEM<-REG and all MEMs must be offsettable addresses. */
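/* A couple of illustrative expansions (assumed operands, not from the
   original comments): a DImode load of r4 from (mem (reg r1)) emits
   "ldmia r1, {r4, r5}", and a store of r4 to (mem (pre_dec sp)) emits
   "stmdb sp!, {r4, r5}".  */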
8170 const char *
8171 output_move_double (rtx *operands)
8173 enum rtx_code code0 = GET_CODE (operands[0]);
8174 enum rtx_code code1 = GET_CODE (operands[1]);
8175 rtx otherops[3];
8177 if (code0 == REG)
8179 int reg0 = REGNO (operands[0]);
8181 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8183 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8185 switch (GET_CODE (XEXP (operands[1], 0)))
8187 case REG:
8188 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8189 break;
8191 case PRE_INC:
8192 gcc_assert (TARGET_LDRD);
8193 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8194 break;
8196 case PRE_DEC:
8197 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8198 break;
8200 case POST_INC:
8201 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8202 break;
8204 case POST_DEC:
8205 gcc_assert (TARGET_LDRD);
8206 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8207 break;
8209 case PRE_MODIFY:
8210 case POST_MODIFY:
8211 otherops[0] = operands[0];
8212 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8213 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8215 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8217 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8219 /* Registers overlap so split out the increment. */
8220 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8221 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8223 else
8224 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8226 else
8228 /* We only allow constant increments, so this is safe. */
8229 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8231 break;
8233 case LABEL_REF:
8234 case CONST:
8235 output_asm_insn ("adr%?\t%0, %1", operands);
8236 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8237 break;
8239 default:
8240 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8241 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8243 otherops[0] = operands[0];
8244 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8245 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8247 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8249 if (GET_CODE (otherops[2]) == CONST_INT)
8251 switch ((int) INTVAL (otherops[2]))
8253 case -8:
8254 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8255 return "";
8256 case -4:
8257 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8258 return "";
8259 case 4:
8260 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8261 return "";
8264 if (TARGET_LDRD
8265 && (GET_CODE (otherops[2]) == REG
8266 || (GET_CODE (otherops[2]) == CONST_INT
8267 && INTVAL (otherops[2]) > -256
8268 && INTVAL (otherops[2]) < 256)))
8270 if (reg_overlap_mentioned_p (otherops[0],
8271 otherops[2]))
8273 /* Swap base and index registers over to
8274 avoid a conflict. */
8275 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8276 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8279 /* If both registers conflict, it will usually
8280 have been fixed by a splitter. */
8281 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8283 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8284 output_asm_insn ("ldr%?d\t%0, [%1]",
8285 otherops);
8287 else
8288 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8289 return "";
8292 if (GET_CODE (otherops[2]) == CONST_INT)
8294 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8295 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8296 else
8297 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8299 else
8300 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8302 else
8303 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8305 return "ldm%?ia\t%0, %M0";
8307 else
8309 otherops[1] = adjust_address (operands[1], SImode, 4);
8310 /* Take care of overlapping base/data reg. */
8311 if (reg_mentioned_p (operands[0], operands[1]))
8313 output_asm_insn ("ldr%?\t%0, %1", otherops);
8314 output_asm_insn ("ldr%?\t%0, %1", operands);
8316 else
8318 output_asm_insn ("ldr%?\t%0, %1", operands);
8319 output_asm_insn ("ldr%?\t%0, %1", otherops);
8324 else
8326 /* Constraints should ensure this. */
8327 gcc_assert (code0 == MEM && code1 == REG);
8328 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8330 switch (GET_CODE (XEXP (operands[0], 0)))
8332 case REG:
8333 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8334 break;
8336 case PRE_INC:
8337 gcc_assert (TARGET_LDRD);
8338 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8339 break;
8341 case PRE_DEC:
8342 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8343 break;
8345 case POST_INC:
8346 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8347 break;
8349 case POST_DEC:
8350 gcc_assert (TARGET_LDRD);
8351 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8352 break;
8354 case PRE_MODIFY:
8355 case POST_MODIFY:
8356 otherops[0] = operands[1];
8357 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8358 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8360 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8361 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8362 else
8363 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8364 break;
8366 case PLUS:
8367 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8368 if (GET_CODE (otherops[2]) == CONST_INT)
8370 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8372 case -8:
8373 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8374 return "";
8376 case -4:
8377 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8378 return "";
8380 case 4:
8381 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8382 return "";
8385 if (TARGET_LDRD
8386 && (GET_CODE (otherops[2]) == REG
8387 || (GET_CODE (otherops[2]) == CONST_INT
8388 && INTVAL (otherops[2]) > -256
8389 && INTVAL (otherops[2]) < 256)))
8391 otherops[0] = operands[1];
8392 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8393 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8394 return "";
8396 /* Fall through */
8398 default:
8399 otherops[0] = adjust_address (operands[0], SImode, 4);
8400 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8401 output_asm_insn ("str%?\t%1, %0", operands);
8402 output_asm_insn ("str%?\t%1, %0", otherops);
8406 return "";
8409 /* Output an ADD r, s, #n where n may be too big for one instruction.
8410 If adding zero to one register, output nothing. */
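/* Illustrative example (hypothetical operands): adding #65540 to r1 with r0
   as the destination emits two instructions, "add r0, r1, #4" followed by
   "add r0, r0, #65536", because 65540 cannot be encoded as a single ARM
   immediate.  */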
8411 const char *
8412 output_add_immediate (rtx *operands)
8414 HOST_WIDE_INT n = INTVAL (operands[2]);
8416 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8418 if (n < 0)
8419 output_multi_immediate (operands,
8420 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8421 -n);
8422 else
8423 output_multi_immediate (operands,
8424 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8428 return "";
8431 /* Output a multiple immediate operation.
8432 OPERANDS is the vector of operands referred to in the output patterns.
8433 INSTR1 is the output pattern to use for the first constant.
8434 INSTR2 is the output pattern to use for subsequent constants.
8435 IMMED_OP is the index of the constant slot in OPERANDS.
8436 N is the constant value. */
8437 static const char *
8438 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8439 int immed_op, HOST_WIDE_INT n)
8441 #if HOST_BITS_PER_WIDE_INT > 32
8442 n &= 0xffffffff;
8443 #endif
8445 if (n == 0)
8447 /* Quick and easy output. */
8448 operands[immed_op] = const0_rtx;
8449 output_asm_insn (instr1, operands);
8451 else
8453 int i;
8454 const char * instr = instr1;
8456 /* Note that n is never zero here (which would give no output). */
8457 for (i = 0; i < 32; i += 2)
8459 if (n & (3 << i))
8461 operands[immed_op] = GEN_INT (n & (255 << i));
8462 output_asm_insn (instr, operands);
8463 instr = instr2;
8464 i += 6;
8469 return "";
8472 /* Return the appropriate ARM instruction for the operation code.
8473 The returned result should not be overwritten. OP is the rtx of the
8474 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8475 was shifted. */
8476 const char *
8477 arithmetic_instr (rtx op, int shift_first_arg)
8479 switch (GET_CODE (op))
8481 case PLUS:
8482 return "add";
8484 case MINUS:
8485 return shift_first_arg ? "rsb" : "sub";
8487 case IOR:
8488 return "orr";
8490 case XOR:
8491 return "eor";
8493 case AND:
8494 return "and";
8496 default:
8497 gcc_unreachable ();
8501 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8502 for the operation code. The returned result should not be overwritten.
8503 OP is the rtx code of the shift.
8504 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8505 constant shift amount otherwise. */
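/* Illustrative examples (not from the original comments): for
   (ashift:SI (reg) (const_int 3)) this returns "asl" with *AMOUNTP set to 3;
   a MULT by 8 is handled the same way, since multiplying by a power of two
   is just a left shift.  */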
8506 static const char *
8507 shift_op (rtx op, HOST_WIDE_INT *amountp)
8509 const char * mnem;
8510 enum rtx_code code = GET_CODE (op);
8512 switch (GET_CODE (XEXP (op, 1)))
8514 case REG:
8515 case SUBREG:
8516 *amountp = -1;
8517 break;
8519 case CONST_INT:
8520 *amountp = INTVAL (XEXP (op, 1));
8521 break;
8523 default:
8524 gcc_unreachable ();
8527 switch (code)
8529 case ASHIFT:
8530 mnem = "asl";
8531 break;
8533 case ASHIFTRT:
8534 mnem = "asr";
8535 break;
8537 case LSHIFTRT:
8538 mnem = "lsr";
8539 break;
8541 case ROTATE:
8542 gcc_assert (*amountp != -1);
8543 *amountp = 32 - *amountp;
8545 /* Fall through. */
8547 case ROTATERT:
8548 mnem = "ror";
8549 break;
8551 case MULT:
8552 /* We never have to worry about the amount being other than a
8553 power of 2, since this case can never be reloaded from a reg. */
8554 gcc_assert (*amountp != -1);
8555 *amountp = int_log2 (*amountp);
8556 return "asl";
8558 default:
8559 gcc_unreachable ();
8562 if (*amountp != -1)
8564 /* This is not 100% correct, but follows from the desire to merge
8565 multiplication by a power of 2 with the recognizer for a
8566 shift. >=32 is not a valid shift for "asl", so we must try and
8567 output a shift that produces the correct arithmetical result.
8568 Using lsr #32 is identical except for the fact that the carry bit
8569 is not set correctly if we set the flags; but we never use the
8570 carry bit from such an operation, so we can ignore that. */
8571 if (code == ROTATERT)
8572 /* Rotate is just modulo 32. */
8573 *amountp &= 31;
8574 else if (*amountp != (*amountp & 31))
8576 if (code == ASHIFT)
8577 mnem = "lsr";
8578 *amountp = 32;
8581 /* Shifts of 0 are no-ops. */
8582 if (*amountp == 0)
8583 return NULL;
8586 return mnem;
8589 /* Obtain the shift count from the POWER of two. */
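/* For example, int_log2 (8) returns 3.  (Illustrative note, not part of the
   original comment.)  */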
8591 static HOST_WIDE_INT
8592 int_log2 (HOST_WIDE_INT power)
8594 HOST_WIDE_INT shift = 0;
8596 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8598 gcc_assert (shift <= 31);
8599 shift++;
8602 return shift;
8605 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8606 because /bin/as is horribly restrictive. The judgement about
8607 whether or not each character is 'printable' (and can be output as
8608 is) or not (and must be printed with an octal escape) must be made
8609 with reference to the *host* character set -- the situation is
8610 similar to that discussed in the comments above pp_c_char in
8611 c-pretty-print.c. */
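/* Illustrative output (hypothetical input, not from the original comments):
   the three bytes 'O', 'K', 0 are emitted as
	.ascii	"OK\000"
   with quotes and backslashes escaped and any non-printable byte printed as
   a three-digit octal escape.  */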
8613 #define MAX_ASCII_LEN 51
8615 void
8616 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8618 int i;
8619 int len_so_far = 0;
8621 fputs ("\t.ascii\t\"", stream);
8623 for (i = 0; i < len; i++)
8625 int c = p[i];
8627 if (len_so_far >= MAX_ASCII_LEN)
8629 fputs ("\"\n\t.ascii\t\"", stream);
8630 len_so_far = 0;
8633 if (ISPRINT (c))
8635 if (c == '\\' || c == '\"')
8637 putc ('\\', stream);
8638 len_so_far++;
8640 putc (c, stream);
8641 len_so_far++;
8643 else
8645 fprintf (stream, "\\%03o", c);
8646 len_so_far += 4;
8650 fputs ("\"\n", stream);
8653 /* Compute the register save mask for registers 0 through 12
8654 inclusive. This code is used by arm_compute_save_reg_mask. */
8656 static unsigned long
8657 arm_compute_save_reg0_reg12_mask (void)
8659 unsigned long func_type = arm_current_func_type ();
8660 unsigned long save_reg_mask = 0;
8661 unsigned int reg;
8663 if (IS_INTERRUPT (func_type))
8665 unsigned int max_reg;
8666 /* Interrupt functions must not corrupt any registers,
8667 even call clobbered ones. If this is a leaf function
8668 we can just examine the registers used by the RTL, but
8669 otherwise we have to assume that whatever function is
8670 called might clobber anything, and so we have to save
8671 all the call-clobbered registers as well. */
8672 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8673 /* FIQ handlers have registers r8 - r12 banked, so
8674 we only need to check r0 - r7; normal ISRs only
8675 bank r14 and r15, so we must check up to r12.
8676 r13 is the stack pointer, which is always preserved,
8677 so we do not need to consider it here. */
8678 max_reg = 7;
8679 else
8680 max_reg = 12;
8682 for (reg = 0; reg <= max_reg; reg++)
8683 if (regs_ever_live[reg]
8684 || (! current_function_is_leaf && call_used_regs [reg]))
8685 save_reg_mask |= (1 << reg);
8687 /* Also save the pic base register if necessary. */
8688 if (flag_pic
8689 && !TARGET_SINGLE_PIC_BASE
8690 && current_function_uses_pic_offset_table)
8691 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8693 else
8695 /* In the normal case we only need to save those registers
8696 which are call saved and which are used by this function. */
8697 for (reg = 0; reg <= 10; reg++)
8698 if (regs_ever_live[reg] && ! call_used_regs [reg])
8699 save_reg_mask |= (1 << reg);
8701 /* Handle the frame pointer as a special case. */
8702 if (! TARGET_APCS_FRAME
8703 && ! frame_pointer_needed
8704 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8705 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8706 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8708 /* If we aren't loading the PIC register,
8709 don't stack it even though it may be live. */
8710 if (flag_pic
8711 && !TARGET_SINGLE_PIC_BASE
8712 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8713 || current_function_uses_pic_offset_table))
8714 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8717 /* Save registers so the exception handler can modify them. */
8718 if (current_function_calls_eh_return)
8720 unsigned int i;
8722 for (i = 0; ; i++)
8724 reg = EH_RETURN_DATA_REGNO (i);
8725 if (reg == INVALID_REGNUM)
8726 break;
8727 save_reg_mask |= 1 << reg;
8731 return save_reg_mask;
8734 /* Compute a bit mask of which registers need to be
8735 saved on the stack for the current function. */
8737 static unsigned long
8738 arm_compute_save_reg_mask (void)
8740 unsigned int save_reg_mask = 0;
8741 unsigned long func_type = arm_current_func_type ();
8743 if (IS_NAKED (func_type))
8744 /* This should never really happen. */
8745 return 0;
8747 /* If we are creating a stack frame, then we must save the frame pointer,
8748 IP (which will hold the old stack pointer), LR and the PC. */
8749 if (frame_pointer_needed)
8750 save_reg_mask |=
8751 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8752 | (1 << IP_REGNUM)
8753 | (1 << LR_REGNUM)
8754 | (1 << PC_REGNUM);
8756 /* Volatile functions do not return, so there
8757 is no need to save any other registers. */
8758 if (IS_VOLATILE (func_type))
8759 return save_reg_mask;
8761 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8763 /* Decide if we need to save the link register.
8764 Interrupt routines have their own banked link register,
8765 so they never need to save it.
8766 Otherwise if we do not use the link register we do not need to save
8767 it. If we are pushing other registers onto the stack however, we
8768 can save an instruction in the epilogue by pushing the link register
8769 now and then popping it back into the PC. This incurs extra memory
8770 accesses though, so we only do it when optimizing for size, and only
8771 if we know that we will not need a fancy return sequence. */
8772 if (regs_ever_live [LR_REGNUM]
8773 || (save_reg_mask
8774 && optimize_size
8775 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8776 && !current_function_calls_eh_return))
8777 save_reg_mask |= 1 << LR_REGNUM;
8779 if (cfun->machine->lr_save_eliminated)
8780 save_reg_mask &= ~ (1 << LR_REGNUM);
8782 if (TARGET_REALLY_IWMMXT
8783 && ((bit_count (save_reg_mask)
8784 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8786 unsigned int reg;
8788 /* The total number of registers that are going to be pushed
8789 onto the stack is odd. We need to ensure that the stack
8790 is 64-bit aligned before we start to save iWMMXt registers,
8791 and also before we start to create locals. (A local variable
8792 might be a double or long long which we will load/store using
8793 an iWMMXt instruction). Therefore we need to push another
8794 ARM register, so that the stack will be 64-bit aligned. We
8795 try to avoid using the arg registers (r0 - r3) as they might be
8796 used to pass values in a tail call. */
8797 for (reg = 4; reg <= 12; reg++)
8798 if ((save_reg_mask & (1 << reg)) == 0)
8799 break;
8801 if (reg <= 12)
8802 save_reg_mask |= (1 << reg);
8803 else
8805 cfun->machine->sibcall_blocked = 1;
8806 save_reg_mask |= (1 << 3);
8810 return save_reg_mask;
8814 /* Compute a bit mask of which registers need to be
8815 saved on the stack for the current function. */
8816 static unsigned long
8817 thumb_compute_save_reg_mask (void)
8819 unsigned long mask;
8820 unsigned reg;
8822 mask = 0;
8823 for (reg = 0; reg < 12; reg ++)
8824 if (regs_ever_live[reg] && !call_used_regs[reg])
8825 mask |= 1 << reg;
8827 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8828 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8830 if (TARGET_SINGLE_PIC_BASE)
8831 mask &= ~(1 << arm_pic_register);
8833 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8834 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8835 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8837 /* LR will also be pushed if any lo regs are pushed. */
8838 if (mask & 0xff || thumb_force_lr_save ())
8839 mask |= (1 << LR_REGNUM);
8841 /* Make sure we have a low work register if we need one.
8842 We will need one if we are going to push a high register,
8843 but we are not currently intending to push a low register. */
8844 if ((mask & 0xff) == 0
8845 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8847 /* Use thumb_find_work_register to choose which register
8848 we will use. If the register is live then we will
8849 have to push it. Use LAST_LO_REGNUM as our fallback
8850 choice for the register to select. */
8851 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8853 if (! call_used_regs[reg])
8854 mask |= 1 << reg;
8857 return mask;
8861 /* Return the number of bytes required to save VFP registers. */
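/* Illustrative example (assumed register usage): if d8-d11 are live and
   call-saved, that contiguous run of four double registers costs
   4 * 8 + 4 = 36 bytes; on pre-ARMv6 cores a run of exactly two registers
   is padded to three to avoid the ARM10 VFPr1 erratum, as in
   vfp_emit_fstmx above.  */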
8862 static int
8863 arm_get_vfp_saved_size (void)
8865 unsigned int regno;
8866 int count;
8867 int saved;
8869 saved = 0;
8870 /* Space for saved VFP registers. */
8871 if (TARGET_HARD_FLOAT && TARGET_VFP)
8873 count = 0;
8874 for (regno = FIRST_VFP_REGNUM;
8875 regno < LAST_VFP_REGNUM;
8876 regno += 2)
8878 if ((!regs_ever_live[regno] || call_used_regs[regno])
8879 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8881 if (count > 0)
8883 /* Workaround ARM10 VFPr1 bug. */
8884 if (count == 2 && !arm_arch6)
8885 count++;
8886 saved += count * 8 + 4;
8888 count = 0;
8890 else
8891 count++;
8893 if (count > 0)
8895 if (count == 2 && !arm_arch6)
8896 count++;
8897 saved += count * 8 + 4;
8900 return saved;
8904 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8905 everything bar the final return instruction. */
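/* Illustrative example (assumed a normal, non-interworking function that
   pushed {r4, lr}): the exit sequence produced here is a single
	ldmfd	sp!, {r4, pc}
   which restores r4 and returns in one instruction.  */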
8906 const char *
8907 output_return_instruction (rtx operand, int really_return, int reverse)
8909 char conditional[10];
8910 char instr[100];
8911 unsigned reg;
8912 unsigned long live_regs_mask;
8913 unsigned long func_type;
8914 arm_stack_offsets *offsets;
8916 func_type = arm_current_func_type ();
8918 if (IS_NAKED (func_type))
8919 return "";
8921 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8923 /* If this function was declared non-returning, and we have
8924 found a tail call, then we have to trust that the called
8925 function won't return. */
8926 if (really_return)
8928 rtx ops[2];
8930 /* Otherwise, trap an attempted return by aborting. */
8931 ops[0] = operand;
8932 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8933 : "abort");
8934 assemble_external_libcall (ops[1]);
8935 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8938 return "";
8941 gcc_assert (!current_function_calls_alloca || really_return);
8943 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8945 return_used_this_function = 1;
8947 live_regs_mask = arm_compute_save_reg_mask ();
8949 if (live_regs_mask)
8951 const char * return_reg;
8953 /* If we do not have any special requirements for function exit
8954 (e.g. interworking, or ISR) then we can load the return address
8955 directly into the PC. Otherwise we must load it into LR. */
8956 if (really_return
8957 && ! TARGET_INTERWORK)
8958 return_reg = reg_names[PC_REGNUM];
8959 else
8960 return_reg = reg_names[LR_REGNUM];
8962 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8964 /* There are three possible reasons for the IP register
8965 being saved: 1) a stack frame was created, in which case
8966 IP contains the old stack pointer, or 2) an ISR routine
8967 corrupted it, or 3) it was saved to align the stack on
8968 iWMMXt. In case 1, restore IP into SP, otherwise just
8969 restore IP. */
8970 if (frame_pointer_needed)
8972 live_regs_mask &= ~ (1 << IP_REGNUM);
8973 live_regs_mask |= (1 << SP_REGNUM);
8975 else
8976 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
8979 /* On some ARM architectures it is faster to use LDR rather than
8980 LDM to load a single register. On other architectures, the
8981 cost is the same. In 26 bit mode, or for exception handlers,
8982 we have to use LDM to load the PC so that the CPSR is also
8983 restored. */
8984 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8985 if (live_regs_mask == (1U << reg))
8986 break;
8988 if (reg <= LAST_ARM_REGNUM
8989 && (reg != LR_REGNUM
8990 || ! really_return
8991 || ! IS_INTERRUPT (func_type)))
8993 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
8994 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
8996 else
8998 char *p;
8999 int first = 1;
9001 /* Generate the load multiple instruction to restore the
9002 registers. Note we can get here, even if
9003 frame_pointer_needed is true, but only if sp already
9004 points to the base of the saved core registers. */
9005 if (live_regs_mask & (1 << SP_REGNUM))
9007 unsigned HOST_WIDE_INT stack_adjust;
9009 offsets = arm_get_frame_offsets ();
9010 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9011 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9013 if (stack_adjust && arm_arch5)
9014 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9015 else
9017 /* If we can't use ldmib (SA110 bug),
9018 then try to pop r3 instead. */
9019 if (stack_adjust)
9020 live_regs_mask |= 1 << 3;
9021 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9024 else
9025 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9027 p = instr + strlen (instr);
9029 for (reg = 0; reg <= SP_REGNUM; reg++)
9030 if (live_regs_mask & (1 << reg))
9032 int l = strlen (reg_names[reg]);
9034 if (first)
9035 first = 0;
9036 else
9038 memcpy (p, ", ", 2);
9039 p += 2;
9042 memcpy (p, "%|", 2);
9043 memcpy (p + 2, reg_names[reg], l);
9044 p += l + 2;
9047 if (live_regs_mask & (1 << LR_REGNUM))
9049 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9050 /* If returning from an interrupt, restore the CPSR. */
9051 if (IS_INTERRUPT (func_type))
9052 strcat (p, "^");
9054 else
9055 strcpy (p, "}");
9058 output_asm_insn (instr, & operand);
9060 /* See if we need to generate an extra instruction to
9061 perform the actual function return. */
9062 if (really_return
9063 && func_type != ARM_FT_INTERWORKED
9064 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9066 /* The return has already been handled
9067 by loading the LR into the PC. */
9068 really_return = 0;
9072 if (really_return)
9074 switch ((int) ARM_FUNC_TYPE (func_type))
9076 case ARM_FT_ISR:
9077 case ARM_FT_FIQ:
9078 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9079 break;
9081 case ARM_FT_INTERWORKED:
9082 sprintf (instr, "bx%s\t%%|lr", conditional);
9083 break;
9085 case ARM_FT_EXCEPTION:
9086 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9087 break;
9089 default:
9090 /* Use bx if it's available. */
9091 if (arm_arch5 || arm_arch4t)
9092 sprintf (instr, "bx%s\t%%|lr", conditional);
9093 else
9094 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9095 break;
9098 output_asm_insn (instr, & operand);
9101 return "";
9104 /* Write the function name into the code section, directly preceding
9105 the function prologue.
9107 Code will be output similar to this:
9109 .ascii "arm_poke_function_name", 0
9110 .align
9112 .word 0xff000000 + (t1 - t0)
9113 arm_poke_function_name
9114 mov ip, sp
9115 stmfd sp!, {fp, ip, lr, pc}
9116 sub fp, ip, #4
9118 When performing a stack backtrace, code can inspect the value
9119 of 'pc' stored at 'fp' + 0. If the trace function then looks
9120 at location pc - 12 and the top 8 bits are set, then we know
9121 that there is a function name embedded immediately preceding this
9122 location, whose length is given by (pc[-3] & ~0xff000000).
9124 We assume that pc is declared as a pointer to an unsigned long.
9126 It is of no benefit to output the function name if we are assembling
9127 a leaf function. These function types will not contain a stack
9128 backtrace structure, therefore it is not possible to determine the
9129 function name. */
9130 void
9131 arm_poke_function_name (FILE *stream, const char *name)
9133 unsigned long alignlength;
9134 unsigned long length;
9135 rtx x;
9137 length = strlen (name) + 1;
9138 alignlength = ROUND_UP_WORD (length);
9140 ASM_OUTPUT_ASCII (stream, name, length);
9141 ASM_OUTPUT_ALIGN (stream, 2);
9142 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9143 assemble_aligned_integer (UNITS_PER_WORD, x);
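/* Worked example (hypothetical name): for NAME "foo" the length including
   the terminating NUL is 4, which is already word aligned, so the marker
   word emitted after the string is 0xff000004.  */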
9146 /* Place some comments into the assembler stream
9147 describing the current function. */
9148 static void
9149 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9151 unsigned long func_type;
9153 if (!TARGET_ARM)
9155 thumb_output_function_prologue (f, frame_size);
9156 return;
9159 /* Sanity check. */
9160 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9162 func_type = arm_current_func_type ();
9164 switch ((int) ARM_FUNC_TYPE (func_type))
9166 default:
9167 case ARM_FT_NORMAL:
9168 break;
9169 case ARM_FT_INTERWORKED:
9170 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9171 break;
9172 case ARM_FT_ISR:
9173 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9174 break;
9175 case ARM_FT_FIQ:
9176 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9177 break;
9178 case ARM_FT_EXCEPTION:
9179 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9180 break;
9183 if (IS_NAKED (func_type))
9184 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9186 if (IS_VOLATILE (func_type))
9187 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9189 if (IS_NESTED (func_type))
9190 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9192 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9193 current_function_args_size,
9194 current_function_pretend_args_size, frame_size);
9196 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9197 frame_pointer_needed,
9198 cfun->machine->uses_anonymous_args);
9200 if (cfun->machine->lr_save_eliminated)
9201 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9203 if (current_function_calls_eh_return)
9204 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9206 #ifdef AOF_ASSEMBLER
9207 if (flag_pic)
9208 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9209 #endif
9211 return_used_this_function = 0;
9214 const char *
9215 arm_output_epilogue (rtx sibling)
9217 int reg;
9218 unsigned long saved_regs_mask;
9219 unsigned long func_type;
9220 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9221 frame that is $fp + 4 for a non-variadic function. */
9222 int floats_offset = 0;
9223 rtx operands[3];
9224 FILE * f = asm_out_file;
9225 unsigned int lrm_count = 0;
9226 int really_return = (sibling == NULL);
9227 int start_reg;
9228 arm_stack_offsets *offsets;
9230 /* If we have already generated the return instruction
9231 then it is futile to generate anything else. */
9232 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9233 return "";
9235 func_type = arm_current_func_type ();
9237 if (IS_NAKED (func_type))
9238 /* Naked functions don't have epilogues. */
9239 return "";
9241 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9243 rtx op;
9245 /* A volatile function should never return. Call abort. */
9246 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9247 assemble_external_libcall (op);
9248 output_asm_insn ("bl\t%a0", &op);
9250 return "";
9253 /* If we are throwing an exception, then we really must be doing a
9254 return, so we can't tail-call. */
9255 gcc_assert (!current_function_calls_eh_return || really_return);
9257 offsets = arm_get_frame_offsets ();
9258 saved_regs_mask = arm_compute_save_reg_mask ();
9260 if (TARGET_IWMMXT)
9261 lrm_count = bit_count (saved_regs_mask);
9263 floats_offset = offsets->saved_args;
9264 /* Compute how far away the floats will be. */
9265 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9266 if (saved_regs_mask & (1 << reg))
9267 floats_offset += 4;
9269 if (frame_pointer_needed)
9271 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9272 int vfp_offset = offsets->frame;
9274 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9276 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9277 if (regs_ever_live[reg] && !call_used_regs[reg])
9279 floats_offset += 12;
9280 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9281 reg, FP_REGNUM, floats_offset - vfp_offset);
9284 else
9286 start_reg = LAST_FPA_REGNUM;
9288 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9290 if (regs_ever_live[reg] && !call_used_regs[reg])
9292 floats_offset += 12;
9294 /* We can't unstack more than four registers at once. */
9295 if (start_reg - reg == 3)
9297 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9298 reg, FP_REGNUM, floats_offset - vfp_offset);
9299 start_reg = reg - 1;
9302 else
9304 if (reg != start_reg)
9305 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9306 reg + 1, start_reg - reg,
9307 FP_REGNUM, floats_offset - vfp_offset);
9308 start_reg = reg - 1;
9312 /* Just in case the last register checked also needs unstacking. */
9313 if (reg != start_reg)
9314 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9315 reg + 1, start_reg - reg,
9316 FP_REGNUM, floats_offset - vfp_offset);
9319 if (TARGET_HARD_FLOAT && TARGET_VFP)
9321 int saved_size;
9323 /* The fldmx insn does not have base+offset addressing modes,
9324 so we use IP to hold the address. */
9325 saved_size = arm_get_vfp_saved_size ();
9327 if (saved_size > 0)
9329 floats_offset += saved_size;
9330 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9331 FP_REGNUM, floats_offset - vfp_offset);
9333 start_reg = FIRST_VFP_REGNUM;
9334 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9336 if ((!regs_ever_live[reg] || call_used_regs[reg])
9337 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9339 if (start_reg != reg)
9340 arm_output_fldmx (f, IP_REGNUM,
9341 (start_reg - FIRST_VFP_REGNUM) / 2,
9342 (reg - start_reg) / 2);
9343 start_reg = reg + 2;
9346 if (start_reg != reg)
9347 arm_output_fldmx (f, IP_REGNUM,
9348 (start_reg - FIRST_VFP_REGNUM) / 2,
9349 (reg - start_reg) / 2);
9352 if (TARGET_IWMMXT)
9354 /* The frame pointer is guaranteed to be non-double-word aligned.
9355 This is because it is set to (old_stack_pointer - 4) and the
9356 old_stack_pointer was double word aligned. Thus the offset to
9357 the iWMMXt registers to be loaded must also be non-double-word
9358 sized, so that the resultant address *is* double-word aligned.
9359 We can ignore floats_offset since that was already included in
9360 the live_regs_mask. */
9361 lrm_count += (lrm_count % 2 ? 2 : 1);
9363 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9364 if (regs_ever_live[reg] && !call_used_regs[reg])
9366 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9367 reg, FP_REGNUM, lrm_count * 4);
9368 lrm_count += 2;
9372 /* saved_regs_mask should contain the IP, which at the time of stack
9373 frame generation actually contains the old stack pointer. So a
9374 quick way to unwind the stack is just to pop the IP register directly
9375 into the stack pointer. */
9376 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9377 saved_regs_mask &= ~ (1 << IP_REGNUM);
9378 saved_regs_mask |= (1 << SP_REGNUM);
9380 /* There are two registers left in saved_regs_mask - LR and PC. We
9381 only need to restore the LR register (the return address), but to
9382 save time we can load it directly into the PC, unless we need a
9383 special function exit sequence, or we are not really returning. */
9384 if (really_return
9385 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9386 && !current_function_calls_eh_return)
9387 /* Delete the LR from the register mask, so that the LR on
9388 the stack is loaded into the PC in the register mask. */
9389 saved_regs_mask &= ~ (1 << LR_REGNUM);
9390 else
9391 saved_regs_mask &= ~ (1 << PC_REGNUM);
9393 /* We must use SP as the base register, because SP is one of the
9394 registers being restored. If an interrupt or page fault
9395 happens in the ldm instruction, the SP might or might not
9396 have been restored. That would be bad, as then SP will no
9397 longer indicate the safe area of stack, and we can get stack
9398 corruption. Using SP as the base register means that it will
9399 be reset correctly to the original value, should an interrupt
9400 occur. If the stack pointer already points at the right
9401 place, then omit the subtraction. */
9402 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9403 || current_function_calls_alloca)
9404 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9405 4 * bit_count (saved_regs_mask));
9406 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9408 if (IS_INTERRUPT (func_type))
9409 /* Interrupt handlers will have pushed the
9410 IP onto the stack, so restore it now. */
9411 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9413 else
9415 /* Restore stack pointer if necessary. */
9416 if (offsets->outgoing_args != offsets->saved_regs)
9418 operands[0] = operands[1] = stack_pointer_rtx;
9419 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9420 output_add_immediate (operands);
9423 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9425 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9426 if (regs_ever_live[reg] && !call_used_regs[reg])
9427 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9428 reg, SP_REGNUM);
9430 else
9432 start_reg = FIRST_FPA_REGNUM;
9434 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9436 if (regs_ever_live[reg] && !call_used_regs[reg])
9438 if (reg - start_reg == 3)
9440 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9441 start_reg, SP_REGNUM);
9442 start_reg = reg + 1;
9445 else
9447 if (reg != start_reg)
9448 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9449 start_reg, reg - start_reg,
9450 SP_REGNUM);
9452 start_reg = reg + 1;
9456 /* Just in case the last register checked also needs unstacking. */
9457 if (reg != start_reg)
9458 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9459 start_reg, reg - start_reg, SP_REGNUM);
9462 if (TARGET_HARD_FLOAT && TARGET_VFP)
9464 start_reg = FIRST_VFP_REGNUM;
9465 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9467 if ((!regs_ever_live[reg] || call_used_regs[reg])
9468 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9470 if (start_reg != reg)
9471 arm_output_fldmx (f, SP_REGNUM,
9472 (start_reg - FIRST_VFP_REGNUM) / 2,
9473 (reg - start_reg) / 2);
9474 start_reg = reg + 2;
9477 if (start_reg != reg)
9478 arm_output_fldmx (f, SP_REGNUM,
9479 (start_reg - FIRST_VFP_REGNUM) / 2,
9480 (reg - start_reg) / 2);
9482 if (TARGET_IWMMXT)
9483 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9484 if (regs_ever_live[reg] && !call_used_regs[reg])
9485 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9487 /* If we can, restore the LR into the PC. */
9488 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9489 && really_return
9490 && current_function_pretend_args_size == 0
9491 && saved_regs_mask & (1 << LR_REGNUM)
9492 && !current_function_calls_eh_return)
9494 saved_regs_mask &= ~ (1 << LR_REGNUM);
9495 saved_regs_mask |= (1 << PC_REGNUM);
9498 /* Load the registers off the stack. If we only have one register
9499 to load use the LDR instruction - it is faster. */
9500 if (saved_regs_mask == (1 << LR_REGNUM))
9502 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9504 else if (saved_regs_mask)
9506 if (saved_regs_mask & (1 << SP_REGNUM))
9507 /* Note - write back to the stack register is not enabled
9508 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9509 in the list of registers and if we add writeback the
9510 instruction becomes UNPREDICTABLE. */
9511 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9512 else
9513 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9516 if (current_function_pretend_args_size)
9518 /* Unwind the pre-pushed regs. */
9519 operands[0] = operands[1] = stack_pointer_rtx;
9520 operands[2] = GEN_INT (current_function_pretend_args_size);
9521 output_add_immediate (operands);
9525 /* We may have already restored PC directly from the stack. */
9526 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9527 return "";
9529 /* Stack adjustment for exception handler. */
9530 if (current_function_calls_eh_return)
9531 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9532 ARM_EH_STACKADJ_REGNUM);
9534 /* Generate the return instruction. */
9535 switch ((int) ARM_FUNC_TYPE (func_type))
9537 case ARM_FT_ISR:
9538 case ARM_FT_FIQ:
9539 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9540 break;
9542 case ARM_FT_EXCEPTION:
9543 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9544 break;
9546 case ARM_FT_INTERWORKED:
9547 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9548 break;
9550 default:
9551 if (arm_arch5 || arm_arch4t)
9552 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9553 else
9554 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9555 break;
9558 return "";
9561 static void
9562 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9563 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9565 arm_stack_offsets *offsets;
9567 if (TARGET_THUMB)
9569 int regno;
9571 /* Emit any call-via-reg trampolines that are needed for v4t support
9572 of call_reg and call_value_reg type insns. */
9573 for (regno = 0; regno < LR_REGNUM; regno++)
9575 rtx label = cfun->machine->call_via[regno];
9577 if (label != NULL)
9579 function_section (current_function_decl);
9580 targetm.asm_out.internal_label (asm_out_file, "L",
9581 CODE_LABEL_NUMBER (label));
9582 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9586 /* ??? Probably not safe to set this here, since it assumes that a
9587 function will be emitted as assembly immediately after we generate
9588 RTL for it. This does not happen for inline functions. */
9589 return_used_this_function = 0;
9591 else
9593 /* We need to take into account any stack-frame rounding. */
9594 offsets = arm_get_frame_offsets ();
9596 gcc_assert (!use_return_insn (FALSE, NULL)
9597 || !return_used_this_function
9598 || offsets->saved_regs == offsets->outgoing_args
9599 || frame_pointer_needed);
9601 /* Reset the ARM-specific per-function variables. */
9602 after_arm_reorg = 0;
9606 /* Generate and emit an insn that we will recognize as a push_multi.
9607 Unfortunately, since this insn does not reflect very well the actual
9608 semantics of the operation, we need to annotate the insn for the benefit
9609 of DWARF2 frame unwind information. */
9610 static rtx
9611 emit_multi_reg_push (unsigned long mask)
9613 int num_regs = 0;
9614 int num_dwarf_regs;
9615 int i, j;
9616 rtx par;
9617 rtx dwarf;
9618 int dwarf_par_index;
9619 rtx tmp, reg;
9621 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9622 if (mask & (1 << i))
9623 num_regs++;
9625 gcc_assert (num_regs && num_regs <= 16);
9627 /* We don't record the PC in the dwarf frame information. */
9628 num_dwarf_regs = num_regs;
9629 if (mask & (1 << PC_REGNUM))
9630 num_dwarf_regs--;
9632 /* For the body of the insn we are going to generate an UNSPEC in
9633 parallel with several USEs. This allows the insn to be recognized
9634 by the push_multi pattern in the arm.md file. The insn looks
9635 something like this:
9637 (parallel [
9638 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9639 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9640 (use (reg:SI 11 fp))
9641 (use (reg:SI 12 ip))
9642 (use (reg:SI 14 lr))
9643 (use (reg:SI 15 pc))
9646 For the frame note however, we try to be more explicit and actually
9647 show each register being stored into the stack frame, plus a (single)
9648 decrement of the stack pointer. We do it this way in order to be
9649 friendly to the stack unwinding code, which only wants to see a single
9650 stack decrement per instruction. The RTL we generate for the note looks
9651 something like this:
9653 (sequence [
9654 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9655 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9656 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9657 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9658 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9661 This sequence is used both by the code to support stack unwinding for
9662 exceptions handlers and the code to generate dwarf2 frame debugging. */
9664 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9665 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9666 dwarf_par_index = 1;
9668 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9670 if (mask & (1 << i))
9672 reg = gen_rtx_REG (SImode, i);
9674 XVECEXP (par, 0, 0)
9675 = gen_rtx_SET (VOIDmode,
9676 gen_rtx_MEM (BLKmode,
9677 gen_rtx_PRE_DEC (BLKmode,
9678 stack_pointer_rtx)),
9679 gen_rtx_UNSPEC (BLKmode,
9680 gen_rtvec (1, reg),
9681 UNSPEC_PUSH_MULT));
9683 if (i != PC_REGNUM)
9685 tmp = gen_rtx_SET (VOIDmode,
9686 gen_rtx_MEM (SImode, stack_pointer_rtx),
9687 reg);
9688 RTX_FRAME_RELATED_P (tmp) = 1;
9689 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9690 dwarf_par_index++;
9693 break;
9697 for (j = 1, i++; j < num_regs; i++)
9699 if (mask & (1 << i))
9701 reg = gen_rtx_REG (SImode, i);
9703 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9705 if (i != PC_REGNUM)
9707 tmp = gen_rtx_SET (VOIDmode,
9708 gen_rtx_MEM (SImode,
9709 plus_constant (stack_pointer_rtx,
9710 4 * j)),
9711 reg);
9712 RTX_FRAME_RELATED_P (tmp) = 1;
9713 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9716 j++;
9720 par = emit_insn (par);
9722 tmp = gen_rtx_SET (SImode,
9723 stack_pointer_rtx,
9724 gen_rtx_PLUS (SImode,
9725 stack_pointer_rtx,
9726 GEN_INT (-4 * num_regs)));
9727 RTX_FRAME_RELATED_P (tmp) = 1;
9728 XVECEXP (dwarf, 0, 0) = tmp;
9730 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9731 REG_NOTES (par));
9732 return par;
9735 static rtx
9736 emit_sfm (int base_reg, int count)
9738 rtx par;
9739 rtx dwarf;
9740 rtx tmp, reg;
9741 int i;
9743 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9744 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9746 reg = gen_rtx_REG (XFmode, base_reg++);
9748 XVECEXP (par, 0, 0)
9749 = gen_rtx_SET (VOIDmode,
9750 gen_rtx_MEM (BLKmode,
9751 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9752 gen_rtx_UNSPEC (BLKmode,
9753 gen_rtvec (1, reg),
9754 UNSPEC_PUSH_MULT));
9755 tmp = gen_rtx_SET (VOIDmode,
9756 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9757 RTX_FRAME_RELATED_P (tmp) = 1;
9758 XVECEXP (dwarf, 0, 1) = tmp;
9760 for (i = 1; i < count; i++)
9762 reg = gen_rtx_REG (XFmode, base_reg++);
9763 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9765 tmp = gen_rtx_SET (VOIDmode,
9766 gen_rtx_MEM (XFmode,
9767 plus_constant (stack_pointer_rtx,
9768 i * 12)),
9769 reg);
9770 RTX_FRAME_RELATED_P (tmp) = 1;
9771 XVECEXP (dwarf, 0, i + 1) = tmp;
9774 tmp = gen_rtx_SET (VOIDmode,
9775 stack_pointer_rtx,
9776 gen_rtx_PLUS (SImode,
9777 stack_pointer_rtx,
9778 GEN_INT (-12 * count)));
9779 RTX_FRAME_RELATED_P (tmp) = 1;
9780 XVECEXP (dwarf, 0, 0) = tmp;
9782 par = emit_insn (par);
9783 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9784 REG_NOTES (par));
9785 return par;
9789 /* Return true if the current function needs to save/restore LR. */
9791 static bool
9792 thumb_force_lr_save (void)
9794 return !cfun->machine->lr_save_eliminated
9795 && (!leaf_function_p ()
9796 || thumb_far_jump_used_p ()
9797 || regs_ever_live [LR_REGNUM]);
9801 /* Compute the distance from register FROM to register TO.
9802 These can be the arg pointer (26), the soft frame pointer (25),
9803 the stack pointer (13) or the hard frame pointer (11).
9804 In thumb mode r7 is used as the soft frame pointer, if needed.
9805 Typical stack layout looks like this:
9807 old stack pointer -> | |
9808 ----
9809 | | \
9810 | | saved arguments for
9811 | | vararg functions
9812 | | /
9814 hard FP & arg pointer -> | | \
9815 | | stack
9816 | | frame
9817 | | /
9819 | | \
9820 | | call saved
9821 | | registers
9822 soft frame pointer -> | | /
9824 | | \
9825 | | local
9826 | | variables
9827 | | /
9829 | | \
9830 | | outgoing
9831 | | arguments
9832 current stack pointer -> | | /
9835 For a given function some or all of these stack components
9836 may not be needed, giving rise to the possibility of
9837 eliminating some of the registers.
9839 The values returned by this function must reflect the behavior
9840 of arm_expand_prologue() and arm_compute_save_reg_mask().
9842 The sign of the number returned reflects the direction of stack
9843 growth, so the values are positive for all eliminations except
9844 from the soft frame pointer to the hard frame pointer.
9846 SFP may point just inside the local variables block to ensure correct
9847 alignment. */
9850 /* Calculate stack offsets. These are used to calculate register elimination
9851 offsets and in prologue/epilogue code. */
9853 static arm_stack_offsets *
9854 arm_get_frame_offsets (void)
9856 struct arm_stack_offsets *offsets;
9857 unsigned long func_type;
9858 int leaf;
9859 int saved;
9860 HOST_WIDE_INT frame_size;
9862 offsets = &cfun->machine->stack_offsets;
9864 /* We need to know if we are a leaf function. Unfortunately, it
9865 is possible to be called after start_sequence has been called,
9866 which causes get_insns to return the insns for the sequence,
9867 not the function, which will cause leaf_function_p to return
9868 the incorrect result.
9870 We do not need to know about leaf functions once reload has completed, and the
9871 frame size cannot be changed after that time, so we can safely
9872 use the cached value. */
9874 if (reload_completed)
9875 return offsets;
9877 /* Initially this is the size of the local variables. It will be translated
9878 into an offset once we have determined the size of preceding data. */
9879 frame_size = ROUND_UP_WORD (get_frame_size ());
9881 leaf = leaf_function_p ();
9883 /* Space for variadic functions. */
9884 offsets->saved_args = current_function_pretend_args_size;
9886 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9888 if (TARGET_ARM)
9890 unsigned int regno;
9892 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9894 /* We know that SP will be doubleword aligned on entry, and we must
9895 preserve that condition at any subroutine call. We also require the
9896 soft frame pointer to be doubleword aligned. */
9898 if (TARGET_REALLY_IWMMXT)
9900 /* Check for the call-saved iWMMXt registers. */
9901 for (regno = FIRST_IWMMXT_REGNUM;
9902 regno <= LAST_IWMMXT_REGNUM;
9903 regno++)
9904 if (regs_ever_live [regno] && ! call_used_regs [regno])
9905 saved += 8;
9908 func_type = arm_current_func_type ();
9909 if (! IS_VOLATILE (func_type))
9911 /* Space for saved FPA registers. */
9912 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9913 if (regs_ever_live[regno] && ! call_used_regs[regno])
9914 saved += 12;
9916 /* Space for saved VFP registers. */
9917 if (TARGET_HARD_FLOAT && TARGET_VFP)
9918 saved += arm_get_vfp_saved_size ();
9921 else /* TARGET_THUMB */
9923 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
9924 if (TARGET_BACKTRACE)
9925 saved += 16;
9928 /* Saved registers include the stack frame. */
9929 offsets->saved_regs = offsets->saved_args + saved;
9930 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
9931 /* A leaf function does not need any stack alignment if it has nothing
9932 on the stack. */
9933 if (leaf && frame_size == 0)
9935 offsets->outgoing_args = offsets->soft_frame;
9936 return offsets;
9939 /* Ensure SFP has the correct alignment. */
9940 if (ARM_DOUBLEWORD_ALIGN
9941 && (offsets->soft_frame & 7))
9942 offsets->soft_frame += 4;
9944 offsets->outgoing_args = offsets->soft_frame + frame_size
9945 + current_function_outgoing_args_size;
9947 if (ARM_DOUBLEWORD_ALIGN)
9949 /* Ensure SP remains doubleword aligned. */
9950 if (offsets->outgoing_args & 7)
9951 offsets->outgoing_args += 4;
9952 gcc_assert (!(offsets->outgoing_args & 7));
9955 return offsets;
9959 /* Calculate the relative offsets for the different stack pointers. Positive
9960 offsets are in the direction of stack growth. */
9962 HOST_WIDE_INT
9963 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9965 arm_stack_offsets *offsets;
9967 offsets = arm_get_frame_offsets ();
9969 /* OK, now we have enough information to compute the distances.
9970 There must be an entry in these switch tables for each pair
9971 of registers in ELIMINABLE_REGS, even if some of the entries
9972 seem to be redundant or useless. */
9973 switch (from)
9975 case ARG_POINTER_REGNUM:
9976 switch (to)
9978 case THUMB_HARD_FRAME_POINTER_REGNUM:
9979 return 0;
9981 case FRAME_POINTER_REGNUM:
9982 /* This is the reverse of the soft frame pointer
9983 to hard frame pointer elimination below. */
9984 return offsets->soft_frame - offsets->saved_args;
9986 case ARM_HARD_FRAME_POINTER_REGNUM:
9987 /* If there is no stack frame then the hard
9988 frame pointer and the arg pointer coincide. */
9989 if (offsets->frame == offsets->saved_regs)
9990 return 0;
9991 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
9992 return (frame_pointer_needed
9993 && cfun->static_chain_decl != NULL
9994 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
9996 case STACK_POINTER_REGNUM:
9997 /* If nothing has been pushed on the stack at all
9998 then this will return -4. This *is* correct! */
9999 return offsets->outgoing_args - (offsets->saved_args + 4);
10001 default:
10002 gcc_unreachable ();
10004 gcc_unreachable ();
10006 case FRAME_POINTER_REGNUM:
10007 switch (to)
10009 case THUMB_HARD_FRAME_POINTER_REGNUM:
10010 return 0;
10012 case ARM_HARD_FRAME_POINTER_REGNUM:
10013 /* The hard frame pointer points to the top entry in the
10014 stack frame. The soft frame pointer to the bottom entry
10015 in the stack frame. If there is no stack frame at all,
10016 then they are identical. */
10018 return offsets->frame - offsets->soft_frame;
10020 case STACK_POINTER_REGNUM:
10021 return offsets->outgoing_args - offsets->soft_frame;
10023 default:
10024 gcc_unreachable ();
10026 gcc_unreachable ();
10028 default:
10029 /* You cannot eliminate from the stack pointer.
10030 In theory you could eliminate from the hard frame
10031 pointer to the stack pointer, but this will never
10032 happen, since if a stack frame is not needed the
10033 hard frame pointer will never be used. */
10034 gcc_unreachable ();
10039 /* Generate the prologue instructions for entry into an ARM function. */
10040 void
10041 arm_expand_prologue (void)
10043 int reg;
10044 rtx amount;
10045 rtx insn;
10046 rtx ip_rtx;
10047 unsigned long live_regs_mask;
10048 unsigned long func_type;
10049 int fp_offset = 0;
10050 int saved_pretend_args = 0;
10051 int saved_regs = 0;
10052 unsigned HOST_WIDE_INT args_to_push;
10053 arm_stack_offsets *offsets;
10055 func_type = arm_current_func_type ();
10057 /* Naked functions don't have prologues. */
10058 if (IS_NAKED (func_type))
10059 return;
10061 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10062 args_to_push = current_function_pretend_args_size;
10064 /* Compute which registers we will have to save onto the stack. */
10065 live_regs_mask = arm_compute_save_reg_mask ();
10067 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10069 if (frame_pointer_needed)
10071 if (IS_INTERRUPT (func_type))
10073 /* Interrupt functions must not corrupt any registers.
10074 Creating a frame pointer, however, corrupts the IP
10075 register, so we must push it first. */
10076 insn = emit_multi_reg_push (1 << IP_REGNUM);
10078 /* Do not set RTX_FRAME_RELATED_P on this insn.
10079 The dwarf stack unwinding code only wants to see one
10080 stack decrement per function, and this is not it. If
10081 this instruction is labeled as being part of the frame
10082 creation sequence then dwarf2out_frame_debug_expr will
10083 die when it encounters the assignment of IP to FP
10084 later on, since the use of SP here establishes SP as
10085 the CFA register and not IP.
10087 Anyway this instruction is not really part of the stack
10088 frame creation although it is part of the prologue. */
10090 else if (IS_NESTED (func_type))
10092 /* The static chain register is the same as the IP register
10093 used as a scratch register during stack frame creation.
10094 To get around this we need to find somewhere to store IP
10095 whilst the frame is being created. We try the following
10096 places in order:
10098 1. The last argument register.
10099 2. A slot on the stack above the frame. (This only
10100 works if the function is not a varargs function).
10101 3. Register r3, after pushing the argument registers
10102 onto the stack.
10104 Note - we only need to tell the dwarf2 backend about the SP
10105 adjustment in the second variant; the static chain register
10106 doesn't need to be unwound, as it doesn't contain a value
10107 inherited from the caller. */
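/* As an informal sketch (the exact RTL is generated below), the three
   variants correspond roughly to:
     1.  mov r3, ip                  ; r3 happens to be free
     2.  str ip, [sp, #-4]!          ; no pretend args, spill above the frame
     3.  push/reserve the argument registers, then  mov r3, ip
   Only variant 2 changes SP, which is why it is the only one that needs
   a REG_FRAME_RELATED_EXPR note for the unwinder.  */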
10109 if (regs_ever_live[3] == 0)
10111 insn = gen_rtx_REG (SImode, 3);
10112 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10113 insn = emit_insn (insn);
10115 else if (args_to_push == 0)
10117 rtx dwarf;
10118 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10119 insn = gen_rtx_MEM (SImode, insn);
10120 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10121 insn = emit_insn (insn);
10123 fp_offset = 4;
10125 /* Just tell the dwarf backend that we adjusted SP. */
10126 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10127 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10128 GEN_INT (-fp_offset)));
10129 RTX_FRAME_RELATED_P (insn) = 1;
10130 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10131 dwarf, REG_NOTES (insn));
10133 else
10135 /* Store the args on the stack. */
10136 if (cfun->machine->uses_anonymous_args)
10137 insn = emit_multi_reg_push
10138 ((0xf0 >> (args_to_push / 4)) & 0xf);
10139 else
10140 insn = emit_insn
10141 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10142 GEN_INT (- args_to_push)));
10144 RTX_FRAME_RELATED_P (insn) = 1;
10146 saved_pretend_args = 1;
10147 fp_offset = args_to_push;
10148 args_to_push = 0;
10150 /* Now reuse r3 to preserve IP. */
10151 insn = gen_rtx_REG (SImode, 3);
10152 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10153 (void) emit_insn (insn);
10157 if (fp_offset)
10159 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10160 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10162 else
10163 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10165 insn = emit_insn (insn);
10166 RTX_FRAME_RELATED_P (insn) = 1;
10169 if (args_to_push)
10171 /* Push the argument registers, or reserve space for them. */
10172 if (cfun->machine->uses_anonymous_args)
10173 insn = emit_multi_reg_push
10174 ((0xf0 >> (args_to_push / 4)) & 0xf);
10175 else
10176 insn = emit_insn
10177 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10178 GEN_INT (- args_to_push)));
10179 RTX_FRAME_RELATED_P (insn) = 1;
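/* A worked example of the register mask used above: with
   args_to_push == 8 (two anonymous argument words), 0xf0 >> 2 == 0x3c
   and 0x3c & 0xf == 0xc, i.e. the push covers r2 and r3, the two
   highest argument registers.  */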
10182 /* If this is an interrupt service routine, and the link register
10183 is going to be pushed, and we are not creating a stack frame,
10184 (which would involve an extra push of IP and a pop in the epilogue)
10185 subtracting four from LR now will mean that the function return
10186 can be done with a single instruction. */
10187 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10188 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10189 && ! frame_pointer_needed)
10190 emit_insn (gen_rtx_SET (SImode,
10191 gen_rtx_REG (SImode, LR_REGNUM),
10192 gen_rtx_PLUS (SImode,
10193 gen_rtx_REG (SImode, LR_REGNUM),
10194 GEN_INT (-4))));
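/* Illustrative effect: once LR has been biased by -4 and pushed, the
   epilogue can simply pop it straight back into the PC (for example
   "ldmfd sp!, {..., pc}^"), instead of popping LR, subtracting 4 and
   then returning as separate steps.  */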
10196 if (live_regs_mask)
10198 insn = emit_multi_reg_push (live_regs_mask);
10199 saved_regs += bit_count (live_regs_mask) * 4;
10200 RTX_FRAME_RELATED_P (insn) = 1;
10203 if (TARGET_IWMMXT)
10204 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10205 if (regs_ever_live[reg] && ! call_used_regs [reg])
10207 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10208 insn = gen_rtx_MEM (V2SImode, insn);
10209 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10210 gen_rtx_REG (V2SImode, reg)));
10211 RTX_FRAME_RELATED_P (insn) = 1;
10212 saved_regs += 8;
10215 if (! IS_VOLATILE (func_type))
10217 int start_reg;
10219 /* Save any floating point call-saved registers used by this
10220 function. */
10221 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10223 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10224 if (regs_ever_live[reg] && !call_used_regs[reg])
10226 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10227 insn = gen_rtx_MEM (XFmode, insn);
10228 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10229 gen_rtx_REG (XFmode, reg)));
10230 RTX_FRAME_RELATED_P (insn) = 1;
10231 saved_regs += 12;
10234 else
10236 start_reg = LAST_FPA_REGNUM;
10238 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10240 if (regs_ever_live[reg] && !call_used_regs[reg])
10242 if (start_reg - reg == 3)
10244 insn = emit_sfm (reg, 4);
10245 RTX_FRAME_RELATED_P (insn) = 1;
10246 saved_regs += 48;
10247 start_reg = reg - 1;
10250 else
10252 if (start_reg != reg)
10254 insn = emit_sfm (reg + 1, start_reg - reg);
10255 RTX_FRAME_RELATED_P (insn) = 1;
10256 saved_regs += (start_reg - reg) * 12;
10258 start_reg = reg - 1;
10262 if (start_reg != reg)
10264 insn = emit_sfm (reg + 1, start_reg - reg);
10265 saved_regs += (start_reg - reg) * 12;
10266 RTX_FRAME_RELATED_P (insn) = 1;
10269 if (TARGET_HARD_FLOAT && TARGET_VFP)
10271 start_reg = FIRST_VFP_REGNUM;
10273 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10275 if ((!regs_ever_live[reg] || call_used_regs[reg])
10276 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10278 if (start_reg != reg)
10279 saved_regs += vfp_emit_fstmx (start_reg,
10280 (reg - start_reg) / 2);
10281 start_reg = reg + 2;
10284 if (start_reg != reg)
10285 saved_regs += vfp_emit_fstmx (start_reg,
10286 (reg - start_reg) / 2);
10290 if (frame_pointer_needed)
10292 /* Create the new frame pointer. */
10293 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10294 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10295 RTX_FRAME_RELATED_P (insn) = 1;
10297 if (IS_NESTED (func_type))
10299 /* Recover the static chain register. */
10300 if (regs_ever_live [3] == 0
10301 || saved_pretend_args)
10302 insn = gen_rtx_REG (SImode, 3);
10303 else /* if (current_function_pretend_args_size == 0) */
10305 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10306 GEN_INT (4));
10307 insn = gen_rtx_MEM (SImode, insn);
10310 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10311 /* Add a USE to stop propagate_one_insn() from barfing. */
10312 emit_insn (gen_prologue_use (ip_rtx));
10316 offsets = arm_get_frame_offsets ();
10317 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10319 /* This add can produce multiple insns for a large constant, so we
10320 need to get tricky. */
10321 rtx last = get_last_insn ();
10323 amount = GEN_INT (offsets->saved_args + saved_regs
10324 - offsets->outgoing_args);
10326 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10327 amount));
10330 last = last ? NEXT_INSN (last) : get_insns ();
10331 RTX_FRAME_RELATED_P (last) = 1;
10333 while (last != insn);
10335 /* If the frame pointer is needed, emit a special barrier that
10336 will prevent the scheduler from moving stores to the frame
10337 before the stack adjustment. */
10338 if (frame_pointer_needed)
10339 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10340 hard_frame_pointer_rtx));
10344 if (flag_pic)
10345 arm_load_pic_register (INVALID_REGNUM);
10347 /* If we are profiling, make sure no instructions are scheduled before
10348 the call to mcount. Similarly if the user has requested no
10349 scheduling in the prolog. */
10350 if (current_function_profile || TARGET_NO_SCHED_PRO)
10351 emit_insn (gen_blockage ());
10353 /* If the link register is being kept alive, with the return address in it,
10354 then make sure that it does not get reused by the ce2 pass. */
10355 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10357 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10358 cfun->machine->lr_save_eliminated = 1;
10362 /* If CODE is 'd', then X is a condition operand and the instruction
10363 should only be executed if the condition is true.
10364 If CODE is 'D', then X is a condition operand and the instruction
10365 should only be executed if the condition is false: however, if the mode
10366 of the comparison is CCFPEmode, then always execute the instruction -- we
10367 do this because in these circumstances !GE does not necessarily imply LT;
10368 in these cases the instruction pattern will take care to make sure that
10369 an instruction containing %d will follow, thereby undoing the effects of
10370 doing this instruction unconditionally.
10371 If CODE is 'N' then X is a floating point operand that must be negated
10372 before output.
10373 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10374 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
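/* Some informal examples of the codes above (assuming a comparison
   whose condition is EQ and an SImode value held in r4):
     %d  -> "eq",  %D -> "ne"
     %B  on (const_int 5)  -> -6 (the bitwise inverse)
     %M  on r4 in DImode   -> "{r4-r5}"
   The CCFPEmode caveat for %D exists because with IEEE NaNs !GE is not
   the same as LT; an unordered comparison makes both GE and LT false.  */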
10375 void
10376 arm_print_operand (FILE *stream, rtx x, int code)
10378 switch (code)
10380 case '@':
10381 fputs (ASM_COMMENT_START, stream);
10382 return;
10384 case '_':
10385 fputs (user_label_prefix, stream);
10386 return;
10388 case '|':
10389 fputs (REGISTER_PREFIX, stream);
10390 return;
10392 case '?':
10393 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10395 if (TARGET_THUMB)
10397 output_operand_lossage ("predicated Thumb instruction");
10398 break;
10400 if (current_insn_predicate != NULL)
10402 output_operand_lossage
10403 ("predicated instruction in conditional sequence");
10404 break;
10407 fputs (arm_condition_codes[arm_current_cc], stream);
10409 else if (current_insn_predicate)
10411 enum arm_cond_code code;
10413 if (TARGET_THUMB)
10415 output_operand_lossage ("predicated Thumb instruction");
10416 break;
10419 code = get_arm_condition_code (current_insn_predicate);
10420 fputs (arm_condition_codes[code], stream);
10422 return;
10424 case 'N':
10426 REAL_VALUE_TYPE r;
10427 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10428 r = REAL_VALUE_NEGATE (r);
10429 fprintf (stream, "%s", fp_const_from_val (&r));
10431 return;
10433 case 'B':
10434 if (GET_CODE (x) == CONST_INT)
10436 HOST_WIDE_INT val;
10437 val = ARM_SIGN_EXTEND (~INTVAL (x));
10438 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10440 else
10442 putc ('~', stream);
10443 output_addr_const (stream, x);
10445 return;
10447 case 'i':
10448 fprintf (stream, "%s", arithmetic_instr (x, 1));
10449 return;
10451 /* Truncate Cirrus shift counts. */
10452 case 's':
10453 if (GET_CODE (x) == CONST_INT)
10455 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10456 return;
10458 arm_print_operand (stream, x, 0);
10459 return;
10461 case 'I':
10462 fprintf (stream, "%s", arithmetic_instr (x, 0));
10463 return;
10465 case 'S':
10467 HOST_WIDE_INT val;
10468 const char * shift = shift_op (x, &val);
10470 if (shift)
10472 fprintf (stream, ", %s ", shift_op (x, &val));
10473 if (val == -1)
10474 arm_print_operand (stream, XEXP (x, 1), 0);
10475 else
10476 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10479 return;
10481 /* An explanation of the 'Q', 'R' and 'H' register operands:
10483 In a pair of registers containing a DI or DF value the 'Q'
10484 operand returns the register number of the register containing
10485 the least significant part of the value. The 'R' operand returns
10486 the register number of the register containing the most
10487 significant part of the value.
10489 The 'H' operand returns the higher of the two register numbers.
10490 On a target where WORDS_BIG_ENDIAN is true the 'H' operand is the
10491 same as the 'Q' operand, since the most significant part of the
10492 value is held in the lower-numbered register. The reverse is true
10493 on systems where WORDS_BIG_ENDIAN is false.
10495 The purpose of these operands is to distinguish between cases
10496 where the endian-ness of the values is important (for example
10497 when they are added together), and cases where the endian-ness
10498 is irrelevant, but the order of register operations is important.
10499 For example when loading a value from memory into a register
10500 pair, the endian-ness does not matter. Provided that the value
10501 from the lower memory address is put into the lower numbered
10502 register, and the value from the higher address is put into the
10503 higher numbered register, the load will work regardless of whether
10504 the value being loaded is big-wordian or little-wordian. The
10505 order of the two register loads can matter however, if the address
10506 of the memory location is actually held in one of the registers
10507 being overwritten by the load. */
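/* A concrete illustration, assuming a little-endian target with a
   DImode value held in the pair {r0, r1}: %Q prints r0 (the least
   significant word), %R prints r1 (the most significant word) and %H
   prints r1 (simply the higher register number).  On a word-big-endian
   target %Q and %R swap, while %H still prints r1.  */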
10508 case 'Q':
10509 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10511 output_operand_lossage ("invalid operand for code '%c'", code);
10512 return;
10515 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10516 return;
10518 case 'R':
10519 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10521 output_operand_lossage ("invalid operand for code '%c'", code);
10522 return;
10525 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10526 return;
10528 case 'H':
10529 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10531 output_operand_lossage ("invalid operand for code '%c'", code);
10532 return;
10535 asm_fprintf (stream, "%r", REGNO (x) + 1);
10536 return;
10538 case 'm':
10539 asm_fprintf (stream, "%r",
10540 GET_CODE (XEXP (x, 0)) == REG
10541 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10542 return;
10544 case 'M':
10545 asm_fprintf (stream, "{%r-%r}",
10546 REGNO (x),
10547 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10548 return;
10550 case 'd':
10551 /* CONST_TRUE_RTX means always -- that's the default. */
10552 if (x == const_true_rtx)
10553 return;
10555 if (!COMPARISON_P (x))
10557 output_operand_lossage ("invalid operand for code '%c'", code);
10558 return;
10561 fputs (arm_condition_codes[get_arm_condition_code (x)],
10562 stream);
10563 return;
10565 case 'D':
10566 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10567 want to do that. */
10568 if (x == const_true_rtx)
10570 output_operand_lossage ("instruction never executed");
10571 return;
10573 if (!COMPARISON_P (x))
10575 output_operand_lossage ("invalid operand for code '%c'", code);
10576 return;
10579 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10580 (get_arm_condition_code (x))],
10581 stream);
10582 return;
10584 /* Cirrus registers can be accessed in a variety of ways:
10585 single floating point (f)
10586 double floating point (d)
10587 32bit integer (fx)
10588 64bit integer (dx). */
10589 case 'W': /* Cirrus register in F mode. */
10590 case 'X': /* Cirrus register in D mode. */
10591 case 'Y': /* Cirrus register in FX mode. */
10592 case 'Z': /* Cirrus register in DX mode. */
10593 gcc_assert (GET_CODE (x) == REG
10594 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
10596 fprintf (stream, "mv%s%s",
10597 code == 'W' ? "f"
10598 : code == 'X' ? "d"
10599 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10601 return;
10603 /* Print cirrus register in the mode specified by the register's mode. */
10604 case 'V':
10606 int mode = GET_MODE (x);
10608 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10610 output_operand_lossage ("invalid operand for code '%c'", code);
10611 return;
10614 fprintf (stream, "mv%s%s",
10615 mode == DFmode ? "d"
10616 : mode == SImode ? "fx"
10617 : mode == DImode ? "dx"
10618 : "f", reg_names[REGNO (x)] + 2);
10620 return;
10623 case 'U':
10624 if (GET_CODE (x) != REG
10625 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10626 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10627 /* Bad value for wCG register number. */
10629 output_operand_lossage ("invalid operand for code '%c'", code);
10630 return;
10633 else
10634 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10635 return;
10637 /* Print an iWMMXt control register name. */
10638 case 'w':
10639 if (GET_CODE (x) != CONST_INT
10640 || INTVAL (x) < 0
10641 || INTVAL (x) >= 16)
10642 /* Bad value for wC register number. */
10644 output_operand_lossage ("invalid operand for code '%c'", code);
10645 return;
10648 else
10650 static const char * wc_reg_names [16] =
10652 "wCID", "wCon", "wCSSF", "wCASF",
10653 "wC4", "wC5", "wC6", "wC7",
10654 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10655 "wC12", "wC13", "wC14", "wC15"
10658 fprintf (stream, wc_reg_names [INTVAL (x)]);
10660 return;
10662 /* Print a VFP double precision register name. */
10663 case 'P':
10665 int mode = GET_MODE (x);
10666 int num;
10668 if (mode != DImode && mode != DFmode)
10670 output_operand_lossage ("invalid operand for code '%c'", code);
10671 return;
10674 if (GET_CODE (x) != REG
10675 || !IS_VFP_REGNUM (REGNO (x)))
10677 output_operand_lossage ("invalid operand for code '%c'", code);
10678 return;
10681 num = REGNO(x) - FIRST_VFP_REGNUM;
10682 if (num & 1)
10684 output_operand_lossage ("invalid operand for code '%c'", code);
10685 return;
10688 fprintf (stream, "d%d", num >> 1);
10690 return;
10692 default:
10693 if (x == 0)
10695 output_operand_lossage ("missing operand");
10696 return;
10699 switch (GET_CODE (x))
10701 case REG:
10702 asm_fprintf (stream, "%r", REGNO (x));
10703 break;
10705 case MEM:
10706 output_memory_reference_mode = GET_MODE (x);
10707 output_address (XEXP (x, 0));
10708 break;
10710 case CONST_DOUBLE:
10711 fprintf (stream, "#%s", fp_immediate_constant (x));
10712 break;
10714 default:
10715 gcc_assert (GET_CODE (x) != NEG);
10716 fputc ('#', stream);
10717 output_addr_const (stream, x);
10718 break;
10723 #ifndef AOF_ASSEMBLER
10724 /* Target hook for assembling integer objects. The ARM version needs to
10725 handle word-sized values specially. */
10726 static bool
10727 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10729 if (size == UNITS_PER_WORD && aligned_p)
10731 fputs ("\t.word\t", asm_out_file);
10732 output_addr_const (asm_out_file, x);
10734 /* Mark symbols as position independent. We only do this in the
10735 .text segment, not in the .data segment. */
10736 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10737 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10739 if (GET_CODE (x) == SYMBOL_REF
10740 && (CONSTANT_POOL_ADDRESS_P (x)
10741 || SYMBOL_REF_LOCAL_P (x)))
10742 fputs ("(GOTOFF)", asm_out_file);
10743 else if (GET_CODE (x) == LABEL_REF)
10744 fputs ("(GOTOFF)", asm_out_file);
10745 else
10746 fputs ("(GOT)", asm_out_file);
10748 fputc ('\n', asm_out_file);
10749 return true;
10752 if (arm_vector_mode_supported_p (GET_MODE (x)))
10754 int i, units;
10756 gcc_assert (GET_CODE (x) == CONST_VECTOR);
10758 units = CONST_VECTOR_NUNITS (x);
10760 switch (GET_MODE (x))
10762 case V2SImode: size = 4; break;
10763 case V4HImode: size = 2; break;
10764 case V8QImode: size = 1; break;
10765 default:
10766 gcc_unreachable ();
10769 for (i = 0; i < units; i++)
10771 rtx elt;
10773 elt = CONST_VECTOR_ELT (x, i);
10774 assemble_integer
10775 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10778 return true;
10781 return default_assemble_integer (x, size, aligned_p);
10783 #endif
10785 /* A finite state machine takes care of noticing whether or not instructions
10786 can be conditionally executed, and thus decrease execution time and code
10787 size by deleting branch instructions. The fsm is controlled by
10788 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10790 /* The states of the fsm controlling condition codes are:
10791 0: normal, do nothing special
10792 1: make ASM_OUTPUT_OPCODE not output this instruction
10793 2: make ASM_OUTPUT_OPCODE not output this instruction
10794 3: make instructions conditional
10795 4: make instructions conditional
10797 State transitions (state->state by whom under condition):
10798 0 -> 1 final_prescan_insn if the `target' is a label
10799 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10800 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10801 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10802 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10803 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10804 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10805 (the target insn is arm_target_insn).
10807 If the jump clobbers the conditions then we use states 2 and 4.
10809 A similar thing can be done with conditional return insns.
10811 XXX In case the `target' is an unconditional branch, this conditionalising
10812 of the instructions always reduces code size, but not always execution
10813 time. But then, I want to reduce the code size to somewhere near what
10814 /bin/cc produces. */
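/* As an informal example of what the fsm achieves, a fragment such as

        cmp     r0, #0
        beq     .L1
        add     r1, r1, #1
     .L1:

   can be emitted instead as

        cmp     r0, #0
        addne   r1, r1, #1

   i.e. the branch is deleted and the skipped instruction is predicated
   with the inverse condition.  */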
10816 /* Returns the index of the ARM condition code string in
10817 `arm_condition_codes'. COMPARISON should be an rtx like
10818 `(eq (...) (...))'. */
10819 static enum arm_cond_code
10820 get_arm_condition_code (rtx comparison)
10822 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10823 int code;
10824 enum rtx_code comp_code = GET_CODE (comparison);
10826 if (GET_MODE_CLASS (mode) != MODE_CC)
10827 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10828 XEXP (comparison, 1));
10830 switch (mode)
10832 case CC_DNEmode: code = ARM_NE; goto dominance;
10833 case CC_DEQmode: code = ARM_EQ; goto dominance;
10834 case CC_DGEmode: code = ARM_GE; goto dominance;
10835 case CC_DGTmode: code = ARM_GT; goto dominance;
10836 case CC_DLEmode: code = ARM_LE; goto dominance;
10837 case CC_DLTmode: code = ARM_LT; goto dominance;
10838 case CC_DGEUmode: code = ARM_CS; goto dominance;
10839 case CC_DGTUmode: code = ARM_HI; goto dominance;
10840 case CC_DLEUmode: code = ARM_LS; goto dominance;
10841 case CC_DLTUmode: code = ARM_CC;
10843 dominance:
10844 gcc_assert (comp_code == EQ || comp_code == NE);
10846 if (comp_code == EQ)
10847 return ARM_INVERSE_CONDITION_CODE (code);
10848 return code;
10850 case CC_NOOVmode:
10851 switch (comp_code)
10853 case NE: return ARM_NE;
10854 case EQ: return ARM_EQ;
10855 case GE: return ARM_PL;
10856 case LT: return ARM_MI;
10857 default: gcc_unreachable ();
10860 case CC_Zmode:
10861 switch (comp_code)
10863 case NE: return ARM_NE;
10864 case EQ: return ARM_EQ;
10865 default: gcc_unreachable ();
10868 case CC_Nmode:
10869 switch (comp_code)
10871 case NE: return ARM_MI;
10872 case EQ: return ARM_PL;
10873 default: gcc_unreachable ();
10876 case CCFPEmode:
10877 case CCFPmode:
10878 /* These encodings assume that AC=1 in the FPA system control
10879 byte. This allows us to handle all cases except UNEQ and
10880 LTGT. */
10881 switch (comp_code)
10883 case GE: return ARM_GE;
10884 case GT: return ARM_GT;
10885 case LE: return ARM_LS;
10886 case LT: return ARM_MI;
10887 case NE: return ARM_NE;
10888 case EQ: return ARM_EQ;
10889 case ORDERED: return ARM_VC;
10890 case UNORDERED: return ARM_VS;
10891 case UNLT: return ARM_LT;
10892 case UNLE: return ARM_LE;
10893 case UNGT: return ARM_HI;
10894 case UNGE: return ARM_PL;
10895 /* UNEQ and LTGT do not have a representation. */
10896 case UNEQ: /* Fall through. */
10897 case LTGT: /* Fall through. */
10898 default: gcc_unreachable ();
10901 case CC_SWPmode:
10902 switch (comp_code)
10904 case NE: return ARM_NE;
10905 case EQ: return ARM_EQ;
10906 case GE: return ARM_LE;
10907 case GT: return ARM_LT;
10908 case LE: return ARM_GE;
10909 case LT: return ARM_GT;
10910 case GEU: return ARM_LS;
10911 case GTU: return ARM_CC;
10912 case LEU: return ARM_CS;
10913 case LTU: return ARM_HI;
10914 default: gcc_unreachable ();
10917 case CC_Cmode:
10918 switch (comp_code)
10920 case LTU: return ARM_CS;
10921 case GEU: return ARM_CC;
10922 default: gcc_unreachable ();
10925 case CCmode:
10926 switch (comp_code)
10928 case NE: return ARM_NE;
10929 case EQ: return ARM_EQ;
10930 case GE: return ARM_GE;
10931 case GT: return ARM_GT;
10932 case LE: return ARM_LE;
10933 case LT: return ARM_LT;
10934 case GEU: return ARM_CS;
10935 case GTU: return ARM_HI;
10936 case LEU: return ARM_LS;
10937 case LTU: return ARM_CC;
10938 default: gcc_unreachable ();
10941 default: gcc_unreachable ();
10945 void
10946 arm_final_prescan_insn (rtx insn)
10948 /* BODY will hold the body of INSN. */
10949 rtx body = PATTERN (insn);
10951 /* This will be 1 if trying to repeat the trick, and things need to be
10952 reversed if it appears to fail. */
10953 int reverse = 0;
10955 /* JUMP_CLOBBERS will be one if the condition codes are clobbered when a
10956 branch is taken, even if the rtl suggests otherwise. It also
10957 means that we have to grub around within the jump expression to find
10958 out what the conditions are when the jump isn't taken. */
10959 int jump_clobbers = 0;
10961 /* If we start with a return insn, we only succeed if we find another one. */
10962 int seeking_return = 0;
10964 /* START_INSN will hold the insn from where we start looking. This is the
10965 first insn after the following code_label if REVERSE is true. */
10966 rtx start_insn = insn;
10968 /* If in state 4, check if the target branch is reached, in order to
10969 change back to state 0. */
10970 if (arm_ccfsm_state == 4)
10972 if (insn == arm_target_insn)
10974 arm_target_insn = NULL;
10975 arm_ccfsm_state = 0;
10977 return;
10980 /* If in state 3, it is possible to repeat the trick, if this insn is an
10981 unconditional branch to a label, and immediately following this branch
10982 is the previous target label which is only used once, and the label this
10983 branch jumps to is not too far off. */
10984 if (arm_ccfsm_state == 3)
10986 if (simplejump_p (insn))
10988 start_insn = next_nonnote_insn (start_insn);
10989 if (GET_CODE (start_insn) == BARRIER)
10991 /* XXX Isn't this always a barrier? */
10992 start_insn = next_nonnote_insn (start_insn);
10994 if (GET_CODE (start_insn) == CODE_LABEL
10995 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10996 && LABEL_NUSES (start_insn) == 1)
10997 reverse = TRUE;
10998 else
10999 return;
11001 else if (GET_CODE (body) == RETURN)
11003 start_insn = next_nonnote_insn (start_insn);
11004 if (GET_CODE (start_insn) == BARRIER)
11005 start_insn = next_nonnote_insn (start_insn);
11006 if (GET_CODE (start_insn) == CODE_LABEL
11007 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11008 && LABEL_NUSES (start_insn) == 1)
11010 reverse = TRUE;
11011 seeking_return = 1;
11013 else
11014 return;
11016 else
11017 return;
11020 gcc_assert (!arm_ccfsm_state || reverse);
11021 if (GET_CODE (insn) != JUMP_INSN)
11022 return;
11024 /* This jump might be paralleled with a clobber of the condition codes;
11025 the jump should always come first. */
11026 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11027 body = XVECEXP (body, 0, 0);
11029 if (reverse
11030 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11031 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11033 int insns_skipped;
11034 int fail = FALSE, succeed = FALSE;
11035 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11036 int then_not_else = TRUE;
11037 rtx this_insn = start_insn, label = 0;
11039 /* If the jump cannot be done with one instruction, we cannot
11040 conditionally execute the instruction in the inverse case. */
11041 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11043 jump_clobbers = 1;
11044 return;
11047 /* Register the insn jumped to. */
11048 if (reverse)
11050 if (!seeking_return)
11051 label = XEXP (SET_SRC (body), 0);
11053 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11054 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11055 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11057 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11058 then_not_else = FALSE;
11060 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11061 seeking_return = 1;
11062 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11064 seeking_return = 1;
11065 then_not_else = FALSE;
11067 else
11068 gcc_unreachable ();
11070 /* See how many insns this branch skips, and what kind of insns. If all
11071 insns are okay, and the label or unconditional branch to the same
11072 label is not too far away, succeed. */
11073 for (insns_skipped = 0;
11074 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11076 rtx scanbody;
11078 this_insn = next_nonnote_insn (this_insn);
11079 if (!this_insn)
11080 break;
11082 switch (GET_CODE (this_insn))
11084 case CODE_LABEL:
11085 /* Succeed if it is the target label, otherwise fail since
11086 control falls in from somewhere else. */
11087 if (this_insn == label)
11089 if (jump_clobbers)
11091 arm_ccfsm_state = 2;
11092 this_insn = next_nonnote_insn (this_insn);
11094 else
11095 arm_ccfsm_state = 1;
11096 succeed = TRUE;
11098 else
11099 fail = TRUE;
11100 break;
11102 case BARRIER:
11103 /* Succeed if the following insn is the target label.
11104 Otherwise fail.
11105 If return insns are used then the last insn in a function
11106 will be a barrier. */
11107 this_insn = next_nonnote_insn (this_insn);
11108 if (this_insn && this_insn == label)
11110 if (jump_clobbers)
11112 arm_ccfsm_state = 2;
11113 this_insn = next_nonnote_insn (this_insn);
11115 else
11116 arm_ccfsm_state = 1;
11117 succeed = TRUE;
11119 else
11120 fail = TRUE;
11121 break;
11123 case CALL_INSN:
11124 /* The AAPCS says that conditional calls should not be
11125 used since they make interworking inefficient (the
11126 linker can't transform BL<cond> into BLX). That's
11127 only a problem if the machine has BLX. */
11128 if (arm_arch5)
11130 fail = TRUE;
11131 break;
11134 /* Succeed if the following insn is the target label, or
11135 if the following two insns are a barrier and the
11136 target label. */
11137 this_insn = next_nonnote_insn (this_insn);
11138 if (this_insn && GET_CODE (this_insn) == BARRIER)
11139 this_insn = next_nonnote_insn (this_insn);
11141 if (this_insn && this_insn == label
11142 && insns_skipped < max_insns_skipped)
11144 if (jump_clobbers)
11146 arm_ccfsm_state = 2;
11147 this_insn = next_nonnote_insn (this_insn);
11149 else
11150 arm_ccfsm_state = 1;
11151 succeed = TRUE;
11153 else
11154 fail = TRUE;
11155 break;
11157 case JUMP_INSN:
11158 /* If this is an unconditional branch to the same label, succeed.
11159 If it is to another label, do nothing. If it is conditional,
11160 fail. */
11161 /* XXX Probably, the tests for SET and the PC are
11162 unnecessary. */
11164 scanbody = PATTERN (this_insn);
11165 if (GET_CODE (scanbody) == SET
11166 && GET_CODE (SET_DEST (scanbody)) == PC)
11168 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11169 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11171 arm_ccfsm_state = 2;
11172 succeed = TRUE;
11174 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11175 fail = TRUE;
11177 /* Fail if a conditional return is undesirable (e.g. on a
11178 StrongARM), but still allow this if optimizing for size. */
11179 else if (GET_CODE (scanbody) == RETURN
11180 && !use_return_insn (TRUE, NULL)
11181 && !optimize_size)
11182 fail = TRUE;
11183 else if (GET_CODE (scanbody) == RETURN
11184 && seeking_return)
11186 arm_ccfsm_state = 2;
11187 succeed = TRUE;
11189 else if (GET_CODE (scanbody) == PARALLEL)
11191 switch (get_attr_conds (this_insn))
11193 case CONDS_NOCOND:
11194 break;
11195 default:
11196 fail = TRUE;
11197 break;
11200 else
11201 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11203 break;
11205 case INSN:
11206 /* Instructions using or affecting the condition codes make it
11207 fail. */
11208 scanbody = PATTERN (this_insn);
11209 if (!(GET_CODE (scanbody) == SET
11210 || GET_CODE (scanbody) == PARALLEL)
11211 || get_attr_conds (this_insn) != CONDS_NOCOND)
11212 fail = TRUE;
11214 /* A conditional cirrus instruction must be followed by
11215 a non Cirrus instruction. However, since we
11216 conditionalize instructions in this function and by
11217 the time we get here we can't add instructions
11218 (nops), because shorten_branches() has already been
11219 called, we will disable conditionalizing Cirrus
11220 instructions to be safe. */
11221 if (GET_CODE (scanbody) != USE
11222 && GET_CODE (scanbody) != CLOBBER
11223 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11224 fail = TRUE;
11225 break;
11227 default:
11228 break;
11231 if (succeed)
11233 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11234 arm_target_label = CODE_LABEL_NUMBER (label);
11235 else
11237 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11239 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11241 this_insn = next_nonnote_insn (this_insn);
11242 gcc_assert (!this_insn
11243 || (GET_CODE (this_insn) != BARRIER
11244 && GET_CODE (this_insn) != CODE_LABEL));
11246 if (!this_insn)
11248 /* Oh, dear! We ran off the end; give up. */
11249 recog (PATTERN (insn), insn, NULL);
11250 arm_ccfsm_state = 0;
11251 arm_target_insn = NULL;
11252 return;
11254 arm_target_insn = this_insn;
11256 if (jump_clobbers)
11258 gcc_assert (!reverse);
11259 arm_current_cc =
11260 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11261 0), 0), 1));
11262 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11263 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11264 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11265 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11267 else
11269 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11270 what it was. */
11271 if (!reverse)
11272 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11273 0));
11276 if (reverse || then_not_else)
11277 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11280 /* Restore recog_data (getting the attributes of other insns can
11281 destroy this array, but final.c assumes that it remains intact
11282 across this call; since the insn has been recognized already we
11283 call recog direct). */
11284 recog (PATTERN (insn), insn, NULL);
11288 /* Returns true if REGNO is a valid register
11289 for holding a quantity of type MODE. */
11291 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11293 if (GET_MODE_CLASS (mode) == MODE_CC)
11294 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11296 if (TARGET_THUMB)
11297 /* For the Thumb we only allow values bigger than SImode in
11298 registers 0 - 6, so that there is always a second low
11299 register available to hold the upper part of the value.
11300 We probably ought to ensure that the register is the
11301 start of an even numbered register pair. */
11302 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11304 if (IS_CIRRUS_REGNUM (regno))
11305 /* We have outlawed SI values in Cirrus registers because they
11306 reside in the lower 32 bits, but SF values reside in the
11307 upper 32 bits. This causes gcc all sorts of grief. We can't
11308 even split the registers into pairs because Cirrus SI values
11309 get sign extended to 64bits-- aldyh. */
11310 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11312 if (IS_VFP_REGNUM (regno))
11314 if (mode == SFmode || mode == SImode)
11315 return TRUE;
11317 /* DFmode values are only valid in even register pairs. */
11318 if (mode == DFmode)
11319 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11320 return FALSE;
11323 if (IS_IWMMXT_GR_REGNUM (regno))
11324 return mode == SImode;
11326 if (IS_IWMMXT_REGNUM (regno))
11327 return VALID_IWMMXT_REG_MODE (mode);
11329 /* We allow any value to be stored in the general registers.
11330 Restrict doubleword quantities to even register pairs so that we can
11331 use ldrd. */
11332 if (regno <= LAST_ARM_REGNUM)
11333 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11335 if ( regno == FRAME_POINTER_REGNUM
11336 || regno == ARG_POINTER_REGNUM)
11337 /* We only allow integers in the fake hard registers. */
11338 return GET_MODE_CLASS (mode) == MODE_INT;
11340 /* The only registers left are the FPA registers
11341 which we only allow to hold FP values. */
11342 return GET_MODE_CLASS (mode) == MODE_FLOAT
11343 && regno >= FIRST_FPA_REGNUM
11344 && regno <= LAST_FPA_REGNUM;
11348 arm_regno_class (int regno)
11350 if (TARGET_THUMB)
11352 if (regno == STACK_POINTER_REGNUM)
11353 return STACK_REG;
11354 if (regno == CC_REGNUM)
11355 return CC_REG;
11356 if (regno < 8)
11357 return LO_REGS;
11358 return HI_REGS;
11361 if ( regno <= LAST_ARM_REGNUM
11362 || regno == FRAME_POINTER_REGNUM
11363 || regno == ARG_POINTER_REGNUM)
11364 return GENERAL_REGS;
11366 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11367 return NO_REGS;
11369 if (IS_CIRRUS_REGNUM (regno))
11370 return CIRRUS_REGS;
11372 if (IS_VFP_REGNUM (regno))
11373 return VFP_REGS;
11375 if (IS_IWMMXT_REGNUM (regno))
11376 return IWMMXT_REGS;
11378 if (IS_IWMMXT_GR_REGNUM (regno))
11379 return IWMMXT_GR_REGS;
11381 return FPA_REGS;
11384 /* Handle a special case when computing the offset
11385 of an argument from the frame pointer. */
11387 arm_debugger_arg_offset (int value, rtx addr)
11389 rtx insn;
11391 /* We are only interested if dbxout_parms() failed to compute the offset. */
11392 if (value != 0)
11393 return 0;
11395 /* We can only cope with the case where the address is held in a register. */
11396 if (GET_CODE (addr) != REG)
11397 return 0;
11399 /* If we are using the frame pointer to point at the argument, then
11400 an offset of 0 is correct. */
11401 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11402 return 0;
11404 /* If we are using the stack pointer to point at the
11405 argument, then an offset of 0 is correct. */
11406 if ((TARGET_THUMB || !frame_pointer_needed)
11407 && REGNO (addr) == SP_REGNUM)
11408 return 0;
11410 /* Oh dear. The argument is pointed to by a register rather
11411 than being held in a register, or being stored at a known
11412 offset from the frame pointer. Since GDB only understands
11413 those two kinds of argument we must translate the address
11414 held in the register into an offset from the frame pointer.
11415 We do this by searching through the insns for the function
11416 looking to see where this register gets its value. If the
11417 register is initialized from the frame pointer plus an offset
11418 then we are in luck and we can continue, otherwise we give up.
11420 This code is exercised by producing debugging information
11421 for a function with arguments like this:
11423 double func (double a, double b, int c, double d) {return d;}
11425 Without this code the stab for parameter 'd' will be set to
11426 an offset of 0 from the frame pointer, rather than 8. */
11428 /* The if() statement says:
11430 If the insn is a normal instruction
11431 and if the insn is setting the value in a register
11432 and if the register being set is the register holding the address of the argument
11433 and if the address is computed by an addition
11434 that involves adding to a register
11435 which is the frame pointer
11436 a constant integer
11438 then... */
11440 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11442 if ( GET_CODE (insn) == INSN
11443 && GET_CODE (PATTERN (insn)) == SET
11444 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11445 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11446 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11447 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11448 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11451 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11453 break;
11457 if (value == 0)
11459 debug_rtx (addr);
11460 warning (0, "unable to compute real location of stacked parameter");
11461 value = 8; /* XXX magic hack */
11464 return value;
11467 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11468 do \
11470 if ((MASK) & insn_flags) \
11471 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11472 BUILT_IN_MD, NULL, NULL_TREE); \
11474 while (0)
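/* Each entry in the tables below describes one builtin.  For
   illustration, the first IWMMXT_BUILTIN entry expands to a
   builtin_description row mapping "__builtin_arm_waddb" onto
   CODE_FOR_addv8qi3; arm_init_iwmmxt_builtins later walks these tables
   and calls def_mbuiltin, so each builtin is only created when the
   selected CPU's insn_flags include FL_IWMMXT.  */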
11476 struct builtin_description
11478 const unsigned int mask;
11479 const enum insn_code icode;
11480 const char * const name;
11481 const enum arm_builtins code;
11482 const enum rtx_code comparison;
11483 const unsigned int flag;
11486 static const struct builtin_description bdesc_2arg[] =
11488 #define IWMMXT_BUILTIN(code, string, builtin) \
11489 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11490 ARM_BUILTIN_##builtin, 0, 0 },
11492 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11493 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11494 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11495 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11496 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11497 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11498 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11499 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11500 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11501 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11502 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11503 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11504 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11505 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11506 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11507 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11508 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11509 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11510 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11511 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11512 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11513 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11514 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11515 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11516 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11517 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11518 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11519 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11520 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11521 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11522 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11523 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11524 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11525 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11526 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11527 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11528 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11529 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11530 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11531 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11532 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11533 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11534 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11535 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11536 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11537 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11538 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11539 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11540 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11541 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11542 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11543 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11544 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11545 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11546 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11547 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11548 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11549 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11551 #define IWMMXT_BUILTIN2(code, builtin) \
11552 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11554 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11555 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11556 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11557 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11558 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11559 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11560 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11561 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11562 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11563 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11564 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11565 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11566 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11567 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11568 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11569 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11570 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11571 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11572 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11573 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11574 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11575 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11576 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11577 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11578 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11579 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11580 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11581 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11582 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11583 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11584 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11585 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11588 static const struct builtin_description bdesc_1arg[] =
11590 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11591 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11592 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11593 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11594 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11595 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11596 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11597 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11598 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11599 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11600 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11601 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11602 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11603 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11604 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11605 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11606 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11607 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11610 /* Set up all the iWMMXt builtins. This is
11611 not called if TARGET_IWMMXT is zero. */
11613 static void
11614 arm_init_iwmmxt_builtins (void)
11616 const struct builtin_description * d;
11617 size_t i;
11618 tree endlink = void_list_node;
11620 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11621 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11622 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11624 tree int_ftype_int
11625 = build_function_type (integer_type_node,
11626 tree_cons (NULL_TREE, integer_type_node, endlink));
11627 tree v8qi_ftype_v8qi_v8qi_int
11628 = build_function_type (V8QI_type_node,
11629 tree_cons (NULL_TREE, V8QI_type_node,
11630 tree_cons (NULL_TREE, V8QI_type_node,
11631 tree_cons (NULL_TREE,
11632 integer_type_node,
11633 endlink))));
11634 tree v4hi_ftype_v4hi_int
11635 = build_function_type (V4HI_type_node,
11636 tree_cons (NULL_TREE, V4HI_type_node,
11637 tree_cons (NULL_TREE, integer_type_node,
11638 endlink)));
11639 tree v2si_ftype_v2si_int
11640 = build_function_type (V2SI_type_node,
11641 tree_cons (NULL_TREE, V2SI_type_node,
11642 tree_cons (NULL_TREE, integer_type_node,
11643 endlink)));
11644 tree v2si_ftype_di_di
11645 = build_function_type (V2SI_type_node,
11646 tree_cons (NULL_TREE, long_long_integer_type_node,
11647 tree_cons (NULL_TREE, long_long_integer_type_node,
11648 endlink)));
11649 tree di_ftype_di_int
11650 = build_function_type (long_long_integer_type_node,
11651 tree_cons (NULL_TREE, long_long_integer_type_node,
11652 tree_cons (NULL_TREE, integer_type_node,
11653 endlink)));
11654 tree di_ftype_di_int_int
11655 = build_function_type (long_long_integer_type_node,
11656 tree_cons (NULL_TREE, long_long_integer_type_node,
11657 tree_cons (NULL_TREE, integer_type_node,
11658 tree_cons (NULL_TREE,
11659 integer_type_node,
11660 endlink))));
11661 tree int_ftype_v8qi
11662 = build_function_type (integer_type_node,
11663 tree_cons (NULL_TREE, V8QI_type_node,
11664 endlink));
11665 tree int_ftype_v4hi
11666 = build_function_type (integer_type_node,
11667 tree_cons (NULL_TREE, V4HI_type_node,
11668 endlink));
11669 tree int_ftype_v2si
11670 = build_function_type (integer_type_node,
11671 tree_cons (NULL_TREE, V2SI_type_node,
11672 endlink));
11673 tree int_ftype_v8qi_int
11674 = build_function_type (integer_type_node,
11675 tree_cons (NULL_TREE, V8QI_type_node,
11676 tree_cons (NULL_TREE, integer_type_node,
11677 endlink)));
11678 tree int_ftype_v4hi_int
11679 = build_function_type (integer_type_node,
11680 tree_cons (NULL_TREE, V4HI_type_node,
11681 tree_cons (NULL_TREE, integer_type_node,
11682 endlink)));
11683 tree int_ftype_v2si_int
11684 = build_function_type (integer_type_node,
11685 tree_cons (NULL_TREE, V2SI_type_node,
11686 tree_cons (NULL_TREE, integer_type_node,
11687 endlink)));
11688 tree v8qi_ftype_v8qi_int_int
11689 = build_function_type (V8QI_type_node,
11690 tree_cons (NULL_TREE, V8QI_type_node,
11691 tree_cons (NULL_TREE, integer_type_node,
11692 tree_cons (NULL_TREE,
11693 integer_type_node,
11694 endlink))));
11695 tree v4hi_ftype_v4hi_int_int
11696 = build_function_type (V4HI_type_node,
11697 tree_cons (NULL_TREE, V4HI_type_node,
11698 tree_cons (NULL_TREE, integer_type_node,
11699 tree_cons (NULL_TREE,
11700 integer_type_node,
11701 endlink))));
11702 tree v2si_ftype_v2si_int_int
11703 = build_function_type (V2SI_type_node,
11704 tree_cons (NULL_TREE, V2SI_type_node,
11705 tree_cons (NULL_TREE, integer_type_node,
11706 tree_cons (NULL_TREE,
11707 integer_type_node,
11708 endlink))));
11709 /* Miscellaneous. */
11710 tree v8qi_ftype_v4hi_v4hi
11711 = build_function_type (V8QI_type_node,
11712 tree_cons (NULL_TREE, V4HI_type_node,
11713 tree_cons (NULL_TREE, V4HI_type_node,
11714 endlink)));
11715 tree v4hi_ftype_v2si_v2si
11716 = build_function_type (V4HI_type_node,
11717 tree_cons (NULL_TREE, V2SI_type_node,
11718 tree_cons (NULL_TREE, V2SI_type_node,
11719 endlink)));
11720 tree v2si_ftype_v4hi_v4hi
11721 = build_function_type (V2SI_type_node,
11722 tree_cons (NULL_TREE, V4HI_type_node,
11723 tree_cons (NULL_TREE, V4HI_type_node,
11724 endlink)));
11725 tree v2si_ftype_v8qi_v8qi
11726 = build_function_type (V2SI_type_node,
11727 tree_cons (NULL_TREE, V8QI_type_node,
11728 tree_cons (NULL_TREE, V8QI_type_node,
11729 endlink)));
11730 tree v4hi_ftype_v4hi_di
11731 = build_function_type (V4HI_type_node,
11732 tree_cons (NULL_TREE, V4HI_type_node,
11733 tree_cons (NULL_TREE,
11734 long_long_integer_type_node,
11735 endlink)));
11736 tree v2si_ftype_v2si_di
11737 = build_function_type (V2SI_type_node,
11738 tree_cons (NULL_TREE, V2SI_type_node,
11739 tree_cons (NULL_TREE,
11740 long_long_integer_type_node,
11741 endlink)));
11742 tree void_ftype_int_int
11743 = build_function_type (void_type_node,
11744 tree_cons (NULL_TREE, integer_type_node,
11745 tree_cons (NULL_TREE, integer_type_node,
11746 endlink)));
11747 tree di_ftype_void
11748 = build_function_type (long_long_unsigned_type_node, endlink);
11749 tree di_ftype_v8qi
11750 = build_function_type (long_long_integer_type_node,
11751 tree_cons (NULL_TREE, V8QI_type_node,
11752 endlink));
11753 tree di_ftype_v4hi
11754 = build_function_type (long_long_integer_type_node,
11755 tree_cons (NULL_TREE, V4HI_type_node,
11756 endlink));
11757 tree di_ftype_v2si
11758 = build_function_type (long_long_integer_type_node,
11759 tree_cons (NULL_TREE, V2SI_type_node,
11760 endlink));
11761 tree v2si_ftype_v4hi
11762 = build_function_type (V2SI_type_node,
11763 tree_cons (NULL_TREE, V4HI_type_node,
11764 endlink));
11765 tree v4hi_ftype_v8qi
11766 = build_function_type (V4HI_type_node,
11767 tree_cons (NULL_TREE, V8QI_type_node,
11768 endlink));
11770 tree di_ftype_di_v4hi_v4hi
11771 = build_function_type (long_long_unsigned_type_node,
11772 tree_cons (NULL_TREE,
11773 long_long_unsigned_type_node,
11774 tree_cons (NULL_TREE, V4HI_type_node,
11775 tree_cons (NULL_TREE,
11776 V4HI_type_node,
11777 endlink))));
11779 tree di_ftype_v4hi_v4hi
11780 = build_function_type (long_long_unsigned_type_node,
11781 tree_cons (NULL_TREE, V4HI_type_node,
11782 tree_cons (NULL_TREE, V4HI_type_node,
11783 endlink)));
11785 /* Normal vector binops. */
11786 tree v8qi_ftype_v8qi_v8qi
11787 = build_function_type (V8QI_type_node,
11788 tree_cons (NULL_TREE, V8QI_type_node,
11789 tree_cons (NULL_TREE, V8QI_type_node,
11790 endlink)));
11791 tree v4hi_ftype_v4hi_v4hi
11792 = build_function_type (V4HI_type_node,
11793 tree_cons (NULL_TREE, V4HI_type_node,
11794 tree_cons (NULL_TREE, V4HI_type_node,
11795 endlink)));
11796 tree v2si_ftype_v2si_v2si
11797 = build_function_type (V2SI_type_node,
11798 tree_cons (NULL_TREE, V2SI_type_node,
11799 tree_cons (NULL_TREE, V2SI_type_node,
11800 endlink)));
11801 tree di_ftype_di_di
11802 = build_function_type (long_long_unsigned_type_node,
11803 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11804 tree_cons (NULL_TREE,
11805 long_long_unsigned_type_node,
11806 endlink)));
11808 /* Add all builtins that are more or less simple operations on two
11809 operands. */
11810 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11812 /* Use one of the operands; the target can have a different mode for
11813 mask-generating compares. */
11814 enum machine_mode mode;
11815 tree type;
11817 if (d->name == 0)
11818 continue;
11820 mode = insn_data[d->icode].operand[1].mode;
11822 switch (mode)
11824 case V8QImode:
11825 type = v8qi_ftype_v8qi_v8qi;
11826 break;
11827 case V4HImode:
11828 type = v4hi_ftype_v4hi_v4hi;
11829 break;
11830 case V2SImode:
11831 type = v2si_ftype_v2si_v2si;
11832 break;
11833 case DImode:
11834 type = di_ftype_di_di;
11835 break;
11837 default:
11838 gcc_unreachable ();
11841 def_mbuiltin (d->mask, d->name, type, d->code);
11844 /* Add the remaining MMX insns with somewhat more complicated types. */
11845 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11846 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11847 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11849 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11850 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11851 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11852 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11853 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11854 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11856 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11857 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11858 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11859 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11860 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11861 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11863 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11864 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11865 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11866 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11867 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11868 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11870 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11871 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11872 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11873 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11874 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11875 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11877 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11879 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11880 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11881 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11882 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11884 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11885 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11886 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11887 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11888 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11889 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11890 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11891 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11892 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11894 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11895 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11896 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11898 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11899 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11900 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11902 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11903 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11904 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11905 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11906 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11907 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11909 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11910 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11911 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11912 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11913 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11914 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11915 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11916 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11917 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11918 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11919 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11920 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11922 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11923 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11924 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11925 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11927 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11928 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11929 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11930 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11931 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11932 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11933 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
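/* For illustration only: code compiled for an iWMMXt target (e.g. with
   -mcpu=iwmmxt) can call the builtins registered above directly.  A
   minimal, hypothetical user-level sketch -- the function name is an
   invention, the builtins are the ones defined above -- might be:

       long long
       shift_and_accumulate (long long acc, long long x)
       {
         long long zero = __builtin_arm_wzero ();
         return __builtin_arm_wslld (x, 8) + acc + zero;
       }

   __builtin_arm_wzero maps to the WZERO instruction and
   __builtin_arm_wslld to WSLLD; their argument shapes are exactly the
   di_ftype_void and di_ftype_di_di types passed to def_mbuiltin.  */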
11936 static void
11937 arm_init_builtins (void)
11939 if (TARGET_REALLY_IWMMXT)
11940 arm_init_iwmmxt_builtins ();
11943 /* Errors in the source file can cause expand_expr to return const0_rtx
11944 where we expect a vector. To avoid crashing, use one of the vector
11945 clear instructions. */
11947 static rtx
11948 safe_vector_operand (rtx x, enum machine_mode mode)
11950 if (x != const0_rtx)
11951 return x;
11952 x = gen_reg_rtx (mode);
11954 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11955 : gen_rtx_SUBREG (DImode, x, 0)));
11956 return x;
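/* In other words, a fresh cleared register is substituted for the bogus
   operand.  For vector modes other than DImode the register is cleared
   through a DImode SUBREG, so that the single iwmmxt_clrdi pattern can
   be reused for every mode.  */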
11959 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11961 static rtx
11962 arm_expand_binop_builtin (enum insn_code icode,
11963 tree arglist, rtx target)
11965 rtx pat;
11966 tree arg0 = TREE_VALUE (arglist);
11967 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11968 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11969 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11970 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11971 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11972 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11974 if (VECTOR_MODE_P (mode0))
11975 op0 = safe_vector_operand (op0, mode0);
11976 if (VECTOR_MODE_P (mode1))
11977 op1 = safe_vector_operand (op1, mode1);
11979 if (! target
11980 || GET_MODE (target) != tmode
11981 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11982 target = gen_reg_rtx (tmode);
11984 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
11986 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11987 op0 = copy_to_mode_reg (mode0, op0);
11988 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11989 op1 = copy_to_mode_reg (mode1, op1);
11991 pat = GEN_FCN (icode) (target, op0, op1);
11992 if (! pat)
11993 return 0;
11994 emit_insn (pat);
11995 return target;
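/* The unop expander below and the hand-expanded cases in
   arm_expand_builtin follow the same pattern as above: validate each
   operand against the insn's predicate, copy it into a register of the
   required mode if necessary, then build the insn with GEN_FCN and
   emit it.  */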
11998 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12000 static rtx
12001 arm_expand_unop_builtin (enum insn_code icode,
12002 tree arglist, rtx target, int do_load)
12004 rtx pat;
12005 tree arg0 = TREE_VALUE (arglist);
12006 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12007 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12008 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12010 if (! target
12011 || GET_MODE (target) != tmode
12012 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12013 target = gen_reg_rtx (tmode);
12014 if (do_load)
12015 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12016 else
12018 if (VECTOR_MODE_P (mode0))
12019 op0 = safe_vector_operand (op0, mode0);
12021 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12022 op0 = copy_to_mode_reg (mode0, op0);
12025 pat = GEN_FCN (icode) (target, op0);
12026 if (! pat)
12027 return 0;
12028 emit_insn (pat);
12029 return target;
12032 /* Expand an expression EXP that calls a built-in function,
12033 with result going to TARGET if that's convenient
12034 (and in mode MODE if that's convenient).
12035 SUBTARGET may be used as the target for computing one of EXP's operands.
12036 IGNORE is nonzero if the value is to be ignored. */
12038 static rtx
12039 arm_expand_builtin (tree exp,
12040 rtx target,
12041 rtx subtarget ATTRIBUTE_UNUSED,
12042 enum machine_mode mode ATTRIBUTE_UNUSED,
12043 int ignore ATTRIBUTE_UNUSED)
12045 const struct builtin_description * d;
12046 enum insn_code icode;
12047 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12048 tree arglist = TREE_OPERAND (exp, 1);
12049 tree arg0;
12050 tree arg1;
12051 tree arg2;
12052 rtx op0;
12053 rtx op1;
12054 rtx op2;
12055 rtx pat;
12056 int fcode = DECL_FUNCTION_CODE (fndecl);
12057 size_t i;
12058 enum machine_mode tmode;
12059 enum machine_mode mode0;
12060 enum machine_mode mode1;
12061 enum machine_mode mode2;
12063 switch (fcode)
12065 case ARM_BUILTIN_TEXTRMSB:
12066 case ARM_BUILTIN_TEXTRMUB:
12067 case ARM_BUILTIN_TEXTRMSH:
12068 case ARM_BUILTIN_TEXTRMUH:
12069 case ARM_BUILTIN_TEXTRMSW:
12070 case ARM_BUILTIN_TEXTRMUW:
12071 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12072 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12073 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12074 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12075 : CODE_FOR_iwmmxt_textrmw);
12077 arg0 = TREE_VALUE (arglist);
12078 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12079 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12080 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12081 tmode = insn_data[icode].operand[0].mode;
12082 mode0 = insn_data[icode].operand[1].mode;
12083 mode1 = insn_data[icode].operand[2].mode;
12085 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12086 op0 = copy_to_mode_reg (mode0, op0);
12087 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12089 /* @@@ better error message */
12090 error ("selector must be an immediate");
12091 return gen_reg_rtx (tmode);
12093 if (target == 0
12094 || GET_MODE (target) != tmode
12095 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12096 target = gen_reg_rtx (tmode);
12097 pat = GEN_FCN (icode) (target, op0, op1);
12098 if (! pat)
12099 return 0;
12100 emit_insn (pat);
12101 return target;
12103 case ARM_BUILTIN_TINSRB:
12104 case ARM_BUILTIN_TINSRH:
12105 case ARM_BUILTIN_TINSRW:
12106 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12107 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12108 : CODE_FOR_iwmmxt_tinsrw);
12109 arg0 = TREE_VALUE (arglist);
12110 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12111 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12112 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12113 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12114 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12115 tmode = insn_data[icode].operand[0].mode;
12116 mode0 = insn_data[icode].operand[1].mode;
12117 mode1 = insn_data[icode].operand[2].mode;
12118 mode2 = insn_data[icode].operand[3].mode;
12120 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12121 op0 = copy_to_mode_reg (mode0, op0);
12122 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12123 op1 = copy_to_mode_reg (mode1, op1);
12124 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12126 /* @@@ better error message */
12127 error ("selector must be an immediate");
12128 return const0_rtx;
12130 if (target == 0
12131 || GET_MODE (target) != tmode
12132 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12133 target = gen_reg_rtx (tmode);
12134 pat = GEN_FCN (icode) (target, op0, op1, op2);
12135 if (! pat)
12136 return 0;
12137 emit_insn (pat);
12138 return target;
12140 case ARM_BUILTIN_SETWCX:
12141 arg0 = TREE_VALUE (arglist);
12142 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12143 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12144 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12145 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12146 return 0;
12148 case ARM_BUILTIN_GETWCX:
12149 arg0 = TREE_VALUE (arglist);
12150 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12151 target = gen_reg_rtx (SImode);
12152 emit_insn (gen_iwmmxt_tmrc (target, op0));
12153 return target;
12155 case ARM_BUILTIN_WSHUFH:
12156 icode = CODE_FOR_iwmmxt_wshufh;
12157 arg0 = TREE_VALUE (arglist);
12158 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12159 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12160 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12161 tmode = insn_data[icode].operand[0].mode;
12162 mode1 = insn_data[icode].operand[1].mode;
12163 mode2 = insn_data[icode].operand[2].mode;
12165 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12166 op0 = copy_to_mode_reg (mode1, op0);
12167 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12169 /* @@@ better error message */
12170 error ("mask must be an immediate");
12171 return const0_rtx;
12173 if (target == 0
12174 || GET_MODE (target) != tmode
12175 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12176 target = gen_reg_rtx (tmode);
12177 pat = GEN_FCN (icode) (target, op0, op1);
12178 if (! pat)
12179 return 0;
12180 emit_insn (pat);
12181 return target;
12183 case ARM_BUILTIN_WSADB:
12184 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12185 case ARM_BUILTIN_WSADH:
12186 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12187 case ARM_BUILTIN_WSADBZ:
12188 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12189 case ARM_BUILTIN_WSADHZ:
12190 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12192 /* Several three-argument builtins. */
12193 case ARM_BUILTIN_WMACS:
12194 case ARM_BUILTIN_WMACU:
12195 case ARM_BUILTIN_WALIGN:
12196 case ARM_BUILTIN_TMIA:
12197 case ARM_BUILTIN_TMIAPH:
12198 case ARM_BUILTIN_TMIATT:
12199 case ARM_BUILTIN_TMIATB:
12200 case ARM_BUILTIN_TMIABT:
12201 case ARM_BUILTIN_TMIABB:
12202 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12203 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12204 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12205 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12206 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12207 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12208 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12209 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12210 : CODE_FOR_iwmmxt_walign);
12211 arg0 = TREE_VALUE (arglist);
12212 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12213 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12214 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12215 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12216 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12217 tmode = insn_data[icode].operand[0].mode;
12218 mode0 = insn_data[icode].operand[1].mode;
12219 mode1 = insn_data[icode].operand[2].mode;
12220 mode2 = insn_data[icode].operand[3].mode;
12222 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12223 op0 = copy_to_mode_reg (mode0, op0);
12224 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12225 op1 = copy_to_mode_reg (mode1, op1);
12226 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12227 op2 = copy_to_mode_reg (mode2, op2);
12228 if (target == 0
12229 || GET_MODE (target) != tmode
12230 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12231 target = gen_reg_rtx (tmode);
12232 pat = GEN_FCN (icode) (target, op0, op1, op2);
12233 if (! pat)
12234 return 0;
12235 emit_insn (pat);
12236 return target;
12238 case ARM_BUILTIN_WZERO:
12239 target = gen_reg_rtx (DImode);
12240 emit_insn (gen_iwmmxt_clrdi (target));
12241 return target;
12243 default:
12244 break;
12247 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12248 if (d->code == (const enum arm_builtins) fcode)
12249 return arm_expand_binop_builtin (d->icode, arglist, target);
12251 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12252 if (d->code == (const enum arm_builtins) fcode)
12253 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12255 /* @@@ Should really do something sensible here. */
12256 return NULL_RTX;
12259 /* Return the number (counting from 0) of
12260 the least significant set bit in MASK. */
12262 inline static int
12263 number_of_first_bit_set (unsigned mask)
12265 int bit;
12267 for (bit = 0;
12268 (mask & (1 << bit)) == 0;
12269 ++bit)
12270 continue;
12272 return bit;
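/* For example, number_of_first_bit_set (0x28) returns 3, since bit 3 is
   the lowest bit set in 0b101000.  MASK must be nonzero, otherwise the
   loop above would never terminate.  */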
12275 /* Emit code to push or pop registers to or from the stack. F is the
12276 assembly file. MASK is the registers to push or pop. PUSH is
12277 nonzero if we should push, and zero if we should pop. For debugging
12278 output, if pushing, adjust CFA_OFFSET by the amount of space added
12279 to the stack. REAL_REGS should have the same number of bits set as
12280 MASK, and will be used instead (in the same order) to describe which
12281 registers were saved - this is used to mark the save slots when we
12282 push high registers after moving them to low registers. */
12283 static void
12284 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12285 unsigned long real_regs)
12287 int regno;
12288 int lo_mask = mask & 0xFF;
12289 int pushed_words = 0;
12291 gcc_assert (mask);
12293 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12295 /* Special case. Do not generate a POP PC statement here, do it in
12296 thumb_exit().  */
12297 thumb_exit (f, -1);
12298 return;
12301 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12303 /* Look at the low registers first. */
12304 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12306 if (lo_mask & 1)
12308 asm_fprintf (f, "%r", regno);
12310 if ((lo_mask & ~1) != 0)
12311 fprintf (f, ", ");
12313 pushed_words++;
12317 if (push && (mask & (1 << LR_REGNUM)))
12319 /* Catch pushing the LR. */
12320 if (mask & 0xFF)
12321 fprintf (f, ", ");
12323 asm_fprintf (f, "%r", LR_REGNUM);
12325 pushed_words++;
12327 else if (!push && (mask & (1 << PC_REGNUM)))
12329 /* Catch popping the PC. */
12330 if (TARGET_INTERWORK || TARGET_BACKTRACE
12331 || current_function_calls_eh_return)
12333 /* The PC is never popped directly; instead
12334 it is popped into r3 and then BX is used. */
12335 fprintf (f, "}\n");
12337 thumb_exit (f, -1);
12339 return;
12341 else
12343 if (mask & 0xFF)
12344 fprintf (f, ", ");
12346 asm_fprintf (f, "%r", PC_REGNUM);
12350 fprintf (f, "}\n");
12352 if (push && pushed_words && dwarf2out_do_frame ())
12354 char *l = dwarf2out_cfi_label ();
12355 int pushed_mask = real_regs;
12357 *cfa_offset += pushed_words * 4;
12358 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12360 pushed_words = 0;
12361 pushed_mask = real_regs;
12362 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12364 if (pushed_mask & 1)
12365 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
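/* For example, with a typical prologue mask of r4-r7 plus LR, a call
   such as

       thumb_pushpop (f, 0x40f0, 1, &offset, 0x40f0)

   emits "push {r4, r5, r6, r7, lr}", and when DWARF frame information
   is being generated it advances *OFFSET (the CFA offset) by 20 bytes
   and records a save slot for each of the five registers.  */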
12370 /* Generate code to return from a thumb function.
12371 If 'reg_containing_return_addr' is -1, then the return address is
12372 actually on the stack, at the stack pointer. */
12373 static void
12374 thumb_exit (FILE *f, int reg_containing_return_addr)
12376 unsigned regs_available_for_popping;
12377 unsigned regs_to_pop;
12378 int pops_needed;
12379 unsigned available;
12380 unsigned required;
12381 int mode;
12382 int size;
12383 int restore_a4 = FALSE;
12385 /* Compute the registers we need to pop. */
12386 regs_to_pop = 0;
12387 pops_needed = 0;
12389 if (reg_containing_return_addr == -1)
12391 regs_to_pop |= 1 << LR_REGNUM;
12392 ++pops_needed;
12395 if (TARGET_BACKTRACE)
12397 /* Restore the (ARM) frame pointer and stack pointer. */
12398 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12399 pops_needed += 2;
12402 /* If there is nothing to pop then just emit the BX instruction and
12403 return. */
12404 if (pops_needed == 0)
12406 if (current_function_calls_eh_return)
12407 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12409 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12410 return;
12412 /* Otherwise if we are not supporting interworking and we have not created
12413 a backtrace structure and the function was not entered in ARM mode then
12414 just pop the return address straight into the PC. */
12415 else if (!TARGET_INTERWORK
12416 && !TARGET_BACKTRACE
12417 && !is_called_in_ARM_mode (current_function_decl)
12418 && !current_function_calls_eh_return)
12420 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12421 return;
12424 /* Find out how many of the (return) argument registers we can corrupt. */
12425 regs_available_for_popping = 0;
12427 /* If returning via __builtin_eh_return, the bottom three registers
12428 all contain information needed for the return. */
12429 if (current_function_calls_eh_return)
12430 size = 12;
12431 else
12433 /* Deduce the registers used from the function's return
12434 value. This is more reliable than examining
12435 regs_ever_live[] because that will be set if the register is
12436 ever used in the function, not just if the register is used
12437 to hold a return value. */
12439 if (current_function_return_rtx != 0)
12440 mode = GET_MODE (current_function_return_rtx);
12441 else
12442 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12444 size = GET_MODE_SIZE (mode);
12446 if (size == 0)
12448 /* In a void function we can use any argument register.
12449 In a function that returns a structure on the stack
12450 we can use the second and third argument registers. */
12451 if (mode == VOIDmode)
12452 regs_available_for_popping =
12453 (1 << ARG_REGISTER (1))
12454 | (1 << ARG_REGISTER (2))
12455 | (1 << ARG_REGISTER (3));
12456 else
12457 regs_available_for_popping =
12458 (1 << ARG_REGISTER (2))
12459 | (1 << ARG_REGISTER (3));
12461 else if (size <= 4)
12462 regs_available_for_popping =
12463 (1 << ARG_REGISTER (2))
12464 | (1 << ARG_REGISTER (3));
12465 else if (size <= 8)
12466 regs_available_for_popping =
12467 (1 << ARG_REGISTER (3));
12470 /* Match registers to be popped with registers into which we pop them. */
12471 for (available = regs_available_for_popping,
12472 required = regs_to_pop;
12473 required != 0 && available != 0;
12474 available &= ~(available & - available),
12475 required &= ~(required & - required))
12476 -- pops_needed;
12478 /* If we have any popping registers left over, remove them. */
12479 if (available > 0)
12480 regs_available_for_popping &= ~available;
12482 /* Otherwise if we need another popping register we can use
12483 the fourth argument register. */
12484 else if (pops_needed)
12486 /* If we have not found any free argument registers and
12487 reg a4 contains the return address, we must move it. */
12488 if (regs_available_for_popping == 0
12489 && reg_containing_return_addr == LAST_ARG_REGNUM)
12491 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12492 reg_containing_return_addr = LR_REGNUM;
12494 else if (size > 12)
12496 /* Register a4 is being used to hold part of the return value,
12497 but we have dire need of a free, low register. */
12498 restore_a4 = TRUE;
12500 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12503 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12505 /* The fourth argument register is available. */
12506 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12508 --pops_needed;
12512 /* Pop as many registers as we can. */
12513 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12514 regs_available_for_popping);
12516 /* Process the registers we popped. */
12517 if (reg_containing_return_addr == -1)
12519 /* The return address was popped into the lowest numbered register. */
12520 regs_to_pop &= ~(1 << LR_REGNUM);
12522 reg_containing_return_addr =
12523 number_of_first_bit_set (regs_available_for_popping);
12525 /* Remove this register from the mask of available registers, so that
12526 the return address will not be corrupted by further pops. */
12527 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12530 /* If we popped other registers then handle them here. */
12531 if (regs_available_for_popping)
12533 int frame_pointer;
12535 /* Work out which register currently contains the frame pointer. */
12536 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12538 /* Move it into the correct place. */
12539 asm_fprintf (f, "\tmov\t%r, %r\n",
12540 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12542 /* (Temporarily) remove it from the mask of popped registers. */
12543 regs_available_for_popping &= ~(1 << frame_pointer);
12544 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12546 if (regs_available_for_popping)
12548 int stack_pointer;
12550 /* We popped the stack pointer as well,
12551 find the register that contains it. */
12552 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12554 /* Move it into the stack register. */
12555 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12557 /* At this point we have popped all necessary registers, so
12558 do not worry about restoring regs_available_for_popping
12559 to its correct value:
12561 assert (pops_needed == 0)
12562 assert (regs_available_for_popping == (1 << frame_pointer))
12563 assert (regs_to_pop == (1 << STACK_POINTER)) */
12565 else
12567 /* Since we have just moved the popped value into the frame
12568 pointer, the popping register is available for reuse, and
12569 we know that we still have the stack pointer left to pop. */
12570 regs_available_for_popping |= (1 << frame_pointer);
12574 /* If we still have registers left on the stack, but we no longer have
12575 any registers into which we can pop them, then we must move the return
12576 address into the link register and make available the register that
12577 contained it. */
12578 if (regs_available_for_popping == 0 && pops_needed > 0)
12580 regs_available_for_popping |= 1 << reg_containing_return_addr;
12582 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12583 reg_containing_return_addr);
12585 reg_containing_return_addr = LR_REGNUM;
12588 /* If we have registers left on the stack then pop some more.
12589 We know that at most we will want to pop FP and SP. */
12590 if (pops_needed > 0)
12592 int popped_into;
12593 int move_to;
12595 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12596 regs_available_for_popping);
12598 /* We have popped either FP or SP.
12599 Move whichever one it is into the correct register. */
12600 popped_into = number_of_first_bit_set (regs_available_for_popping);
12601 move_to = number_of_first_bit_set (regs_to_pop);
12603 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12605 regs_to_pop &= ~(1 << move_to);
12607 --pops_needed;
12610 /* If we still have not popped everything then we must have only
12611 had one register available to us and we are now popping the SP. */
12612 if (pops_needed > 0)
12614 int popped_into;
12616 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12617 regs_available_for_popping);
12619 popped_into = number_of_first_bit_set (regs_available_for_popping);
12621 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12622 /*
12623 assert (regs_to_pop == (1 << STACK_POINTER))
12624 assert (pops_needed == 1)
12625 */
12628 /* If necessary restore the a4 register. */
12629 if (restore_a4)
12631 if (reg_containing_return_addr != LR_REGNUM)
12633 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12634 reg_containing_return_addr = LR_REGNUM;
12637 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12640 if (current_function_calls_eh_return)
12641 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12643 /* Return to caller. */
12644 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12648 void
12649 thumb_final_prescan_insn (rtx insn)
12651 if (flag_print_asm_name)
12652 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12653 INSN_ADDRESSES (INSN_UID (insn)));
12656 int
12657 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12659 unsigned HOST_WIDE_INT mask = 0xff;
12660 int i;
12662 if (val == 0) /* XXX */
12663 return 0;
12665 for (i = 0; i < 25; i++)
12666 if ((val & (mask << i)) == val)
12667 return 1;
12669 return 0;
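/* For example, 0x000ff000 (0xff << 12) is accepted, whereas 0x00100100
   is rejected: its set bits span more than eight positions, so no
   single shifted 8-bit mask can cover them both.  */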
12672 /* Returns nonzero if the current function contains,
12673 or might contain, a far jump. */
12674 static int
12675 thumb_far_jump_used_p (void)
12677 rtx insn;
12679 /* This test is only important for leaf functions. */
12680 /* assert (!leaf_function_p ()); */
12682 /* If we have already decided that far jumps may be used,
12683 do not bother checking again, and always return true even if
12684 it turns out that they are not being used. Once we have made
12685 the decision that far jumps are present (and that hence the link
12686 register will be pushed onto the stack) we cannot go back on it. */
12687 if (cfun->machine->far_jump_used)
12688 return 1;
12690 /* If this function is not being called from the prologue/epilogue
12691 generation code then it must be being called from the
12692 INITIAL_ELIMINATION_OFFSET macro. */
12693 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12695 /* In this case we know that we are being asked about the elimination
12696 of the arg pointer register. If that register is not being used,
12697 then there are no arguments on the stack, and we do not have to
12698 worry that a far jump might force the prologue to push the link
12699 register, changing the stack offsets. In this case we can just
12700 return false, since the presence of far jumps in the function will
12701 not affect stack offsets.
12703 If the arg pointer is live (or if it was live, but has now been
12704 eliminated and so set to dead) then we do have to test to see if
12705 the function might contain a far jump. This test can lead to some
12706 false negatives, since before reload is completed the length of
12707 branch instructions is not known, so gcc defaults to returning their
12708 longest length, which in turn sets the far jump attribute to true.
12710 A false negative will not result in bad code being generated, but it
12711 will result in a needless push and pop of the link register. We
12712 hope that this does not occur too often.
12714 If we need doubleword stack alignment this could affect the other
12715 elimination offsets so we can't risk getting it wrong. */
12716 if (regs_ever_live [ARG_POINTER_REGNUM])
12717 cfun->machine->arg_pointer_live = 1;
12718 else if (!cfun->machine->arg_pointer_live)
12719 return 0;
12722 /* Check to see if the function contains a branch
12723 insn with the far jump attribute set. */
12724 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12726 if (GET_CODE (insn) == JUMP_INSN
12727 /* Ignore tablejump patterns. */
12728 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12729 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12730 && get_attr_far_jump (insn) == FAR_JUMP_YES
12733 /* Record the fact that we have decided that
12734 the function does use far jumps. */
12735 cfun->machine->far_jump_used = 1;
12736 return 1;
12740 return 0;
12743 /* Return nonzero if FUNC must be entered in ARM mode. */
12744 int
12745 is_called_in_ARM_mode (tree func)
12747 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
12749 /* Ignore the problem about functions whose address is taken. */
12750 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12751 return TRUE;
12753 #ifdef ARM_PE
12754 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12755 #else
12756 return FALSE;
12757 #endif
12760 /* The bits which aren't usefully expanded as rtl. */
12761 const char *
12762 thumb_unexpanded_epilogue (void)
12764 int regno;
12765 unsigned long live_regs_mask = 0;
12766 int high_regs_pushed = 0;
12767 int had_to_push_lr;
12768 int size;
12769 int mode;
12771 if (return_used_this_function)
12772 return "";
12774 if (IS_NAKED (arm_current_func_type ()))
12775 return "";
12777 live_regs_mask = thumb_compute_save_reg_mask ();
12778 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12780 /* Deduce the registers used from the function's return value.
12781 This is more reliable than examining regs_ever_live[] because that
12782 will be set if the register is ever used in the function, not just if
12783 the register is used to hold a return value. */
12785 if (current_function_return_rtx != 0)
12786 mode = GET_MODE (current_function_return_rtx);
12787 else
12788 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12790 size = GET_MODE_SIZE (mode);
12792 /* The prologue may have pushed some high registers to use as
12793 work registers, e.g. the testsuite file:
12794 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12795 compiles to produce:
12796 push {r4, r5, r6, r7, lr}
12797 mov r7, r9
12798 mov r6, r8
12799 push {r6, r7}
12800 as part of the prologue. We have to undo that pushing here. */
12802 if (high_regs_pushed)
12804 unsigned long mask = live_regs_mask & 0xff;
12805 int next_hi_reg;
12807 /* The available low registers depend on the size of the value we are
12808 returning. */
12809 if (size <= 12)
12810 mask |= 1 << 3;
12811 if (size <= 8)
12812 mask |= 1 << 2;
12814 if (mask == 0)
12815 /* Oh dear! We have no low registers into which we can pop
12816 high registers! */
12817 internal_error
12818 ("no low registers available for popping high registers");
12820 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12821 if (live_regs_mask & (1 << next_hi_reg))
12822 break;
12824 while (high_regs_pushed)
12826 /* Find lo register(s) into which the high register(s) can
12827 be popped. */
12828 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12830 if (mask & (1 << regno))
12831 high_regs_pushed--;
12832 if (high_regs_pushed == 0)
12833 break;
12836 mask &= (2 << regno) - 1; /* A no-op if regno == 8.  */
12838 /* Pop the values into the low register(s). */
12839 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12841 /* Move the value(s) into the high registers. */
12842 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12844 if (mask & (1 << regno))
12846 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12847 regno);
12849 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12850 if (live_regs_mask & (1 << next_hi_reg))
12851 break;
12855 live_regs_mask &= ~0x0f00;
12858 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12859 live_regs_mask &= 0xff;
12861 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12863 /* Pop the return address into the PC. */
12864 if (had_to_push_lr)
12865 live_regs_mask |= 1 << PC_REGNUM;
12867 /* Either no argument registers were pushed or a backtrace
12868 structure was created which includes an adjusted stack
12869 pointer, so just pop everything. */
12870 if (live_regs_mask)
12871 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12872 live_regs_mask);
12874 /* We have either just popped the return address into the
12875 PC or it was kept in LR for the entire function. */
12876 if (!had_to_push_lr)
12877 thumb_exit (asm_out_file, LR_REGNUM);
12879 else
12881 /* Pop everything but the return address. */
12882 if (live_regs_mask)
12883 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12884 live_regs_mask);
12886 if (had_to_push_lr)
12888 if (size > 12)
12890 /* We have no free low regs, so save one. */
12891 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
12892 LAST_ARG_REGNUM);
12895 /* Get the return address into a temporary register. */
12896 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12897 1 << LAST_ARG_REGNUM);
12899 if (size > 12)
12901 /* Move the return address to lr. */
12902 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
12903 LAST_ARG_REGNUM);
12904 /* Restore the low register. */
12905 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
12906 IP_REGNUM);
12907 regno = LR_REGNUM;
12909 else
12910 regno = LAST_ARG_REGNUM;
12912 else
12913 regno = LR_REGNUM;
12915 /* Remove the argument registers that were pushed onto the stack. */
12916 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12917 SP_REGNUM, SP_REGNUM,
12918 current_function_pretend_args_size);
12920 thumb_exit (asm_out_file, regno);
12923 return "";
12926 /* Functions to save and restore machine-specific function data. */
12927 static struct machine_function *
12928 arm_init_machine_status (void)
12930 struct machine_function *machine;
12931 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12933 #if ARM_FT_UNKNOWN != 0
12934 machine->func_type = ARM_FT_UNKNOWN;
12935 #endif
12936 return machine;
12939 /* Return an RTX indicating where the return address to the
12940 calling function can be found. */
12941 rtx
12942 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12944 if (count != 0)
12945 return NULL_RTX;
12947 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
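/* So __builtin_return_address (0) yields the pseudo holding LR's value
   on entry to the function; any deeper frame count is unsupported and
   simply yields NULL_RTX.  */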
12950 /* Do anything needed before RTL is emitted for each function. */
12951 void
12952 arm_init_expanders (void)
12954 /* Arrange to initialize and mark the machine per-function status. */
12955 init_machine_status = arm_init_machine_status;
12957 /* This is to stop the combine pass optimizing away the alignment
12958 adjustment of va_arg. */
12959 /* ??? It is claimed that this should not be necessary. */
12960 if (cfun)
12961 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
12965 /* Like arm_compute_initial_elimination_offset. Simpler because
12966 THUMB_HARD_FRAME_POINTER isn't actually the ABI-specified frame pointer. */
12968 HOST_WIDE_INT
12969 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
12971 arm_stack_offsets *offsets;
12973 offsets = arm_get_frame_offsets ();
12975 switch (from)
12977 case ARG_POINTER_REGNUM:
12978 switch (to)
12980 case STACK_POINTER_REGNUM:
12981 return offsets->outgoing_args - offsets->saved_args;
12983 case FRAME_POINTER_REGNUM:
12984 return offsets->soft_frame - offsets->saved_args;
12986 case THUMB_HARD_FRAME_POINTER_REGNUM:
12987 case ARM_HARD_FRAME_POINTER_REGNUM:
12988 return offsets->saved_regs - offsets->saved_args;
12990 default:
12991 gcc_unreachable ();
12993 break;
12995 case FRAME_POINTER_REGNUM:
12996 switch (to)
12998 case STACK_POINTER_REGNUM:
12999 return offsets->outgoing_args - offsets->soft_frame;
13001 case THUMB_HARD_FRAME_POINTER_REGNUM:
13002 case ARM_HARD_FRAME_POINTER_REGNUM:
13003 return offsets->saved_regs - offsets->soft_frame;
13005 default:
13006 gcc_unreachable ();
13008 break;
13010 default:
13011 gcc_unreachable ();
13016 /* Generate the rest of a function's prologue. */
13017 void
13018 thumb_expand_prologue (void)
13020 rtx insn, dwarf;
13022 HOST_WIDE_INT amount;
13023 arm_stack_offsets *offsets;
13024 unsigned long func_type;
13025 int regno;
13026 unsigned long live_regs_mask;
13028 func_type = arm_current_func_type ();
13030 /* Naked functions don't have prologues. */
13031 if (IS_NAKED (func_type))
13032 return;
13034 if (IS_INTERRUPT (func_type))
13036 error ("interrupt Service Routines cannot be coded in Thumb mode");
13037 return;
13040 live_regs_mask = thumb_compute_save_reg_mask ();
13041 /* Load the pic register before setting the frame pointer,
13042 so we can use r7 as a temporary work register. */
13043 if (flag_pic)
13044 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13046 offsets = arm_get_frame_offsets ();
13048 if (frame_pointer_needed)
13050 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13051 stack_pointer_rtx));
13052 RTX_FRAME_RELATED_P (insn) = 1;
13054 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13055 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13056 stack_pointer_rtx);
13058 amount = offsets->outgoing_args - offsets->saved_regs;
13059 if (amount)
13061 if (amount < 512)
13063 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13064 GEN_INT (- amount)));
13065 RTX_FRAME_RELATED_P (insn) = 1;
13067 else
13069 rtx reg;
13071 /* The stack decrement is too big for an immediate value in a single
13072 insn. In theory we could issue multiple subtracts, but after
13073 three of them it becomes more space efficient to place the full
13074 value in the constant pool and load into a register. (Also the
13075 ARM debugger really likes to see only one stack decrement per
13076 function). So instead we look for a scratch register into which
13077 we can load the decrement, and then we subtract this from the
13078 stack pointer. Unfortunately on the thumb the only available
13079 scratch registers are the argument registers, and we cannot use
13080 these as they may hold arguments to the function. Instead we
13081 attempt to locate a call preserved register which is used by this
13082 function. If we can find one, then we know that it will have
13083 been pushed at the start of the prologue and so we can corrupt
13084 it now. */
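/* For example, if r4 is call-saved and live in this function, a
   1024-byte stack decrement is emitted roughly as

       ldr     r4, .Lc         (.Lc holding the constant -1024)
       add     sp, r4

   Corrupting r4 here is safe because it was pushed at the start of the
   prologue and will be restored by the epilogue.  */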
13085 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13086 if (live_regs_mask & (1 << regno)
13087 && !(frame_pointer_needed
13088 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13089 break;
13091 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13093 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13095 /* Choose an arbitrary, non-argument low register. */
13096 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13098 /* Save it by copying it into a high, scratch register. */
13099 emit_insn (gen_movsi (spare, reg));
13100 /* Add a USE to stop propagate_one_insn() from barfing. */
13101 emit_insn (gen_prologue_use (spare));
13103 /* Decrement the stack. */
13104 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13105 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13106 stack_pointer_rtx, reg));
13107 RTX_FRAME_RELATED_P (insn) = 1;
13108 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13109 plus_constant (stack_pointer_rtx,
13110 -amount));
13111 RTX_FRAME_RELATED_P (dwarf) = 1;
13112 REG_NOTES (insn)
13113 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13114 REG_NOTES (insn));
13116 /* Restore the low register's original value. */
13117 emit_insn (gen_movsi (reg, spare));
13119 /* Emit a USE of the restored scratch register, so that flow
13120 analysis will not consider the restore redundant. The
13121 register won't be used again in this function and isn't
13122 restored by the epilogue. */
13123 emit_insn (gen_prologue_use (reg));
13125 else
13127 reg = gen_rtx_REG (SImode, regno);
13129 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13131 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13132 stack_pointer_rtx, reg));
13133 RTX_FRAME_RELATED_P (insn) = 1;
13134 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13135 plus_constant (stack_pointer_rtx,
13136 -amount));
13137 RTX_FRAME_RELATED_P (dwarf) = 1;
13138 REG_NOTES (insn)
13139 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13140 REG_NOTES (insn));
13143 /* If the frame pointer is needed, emit a special barrier that
13144 will prevent the scheduler from moving stores to the frame
13145 before the stack adjustment. */
13146 if (frame_pointer_needed)
13147 emit_insn (gen_stack_tie (stack_pointer_rtx,
13148 hard_frame_pointer_rtx));
13151 if (current_function_profile || TARGET_NO_SCHED_PRO)
13152 emit_insn (gen_blockage ());
13154 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13155 if (live_regs_mask & 0xff)
13156 cfun->machine->lr_save_eliminated = 0;
13158 /* If the link register is being kept alive, with the return address in it,
13159 then make sure that it does not get reused by the ce2 pass. */
13160 if (cfun->machine->lr_save_eliminated)
13161 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13165 void
13166 thumb_expand_epilogue (void)
13168 HOST_WIDE_INT amount;
13169 arm_stack_offsets *offsets;
13170 int regno;
13172 /* Naked functions don't have epilogues. */
13173 if (IS_NAKED (arm_current_func_type ()))
13174 return;
13176 offsets = arm_get_frame_offsets ();
13177 amount = offsets->outgoing_args - offsets->saved_regs;
13179 if (frame_pointer_needed)
13180 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13181 else if (amount)
13183 if (amount < 512)
13184 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13185 GEN_INT (amount)));
13186 else
13188 /* r3 is always free in the epilogue. */
13189 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13191 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13192 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13196 /* Emit a USE (stack_pointer_rtx), so that
13197 the stack adjustment will not be deleted. */
13198 emit_insn (gen_prologue_use (stack_pointer_rtx));
13200 if (current_function_profile || TARGET_NO_SCHED_PRO)
13201 emit_insn (gen_blockage ());
13203 /* Emit a clobber for each insn that will be restored in the epilogue,
13204 so that flow2 will get register lifetimes correct. */
13205 for (regno = 0; regno < 13; regno++)
13206 if (regs_ever_live[regno] && !call_used_regs[regno])
13207 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13209 if (! regs_ever_live[LR_REGNUM])
13210 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13213 static void
13214 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13216 unsigned long live_regs_mask = 0;
13217 unsigned long l_mask;
13218 unsigned high_regs_pushed = 0;
13219 int cfa_offset = 0;
13220 int regno;
13222 if (IS_NAKED (arm_current_func_type ()))
13223 return;
13225 if (is_called_in_ARM_mode (current_function_decl))
13227 const char * name;
13229 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13230 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13231 == SYMBOL_REF);
13232 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13234 /* Generate code sequence to switch us into Thumb mode. */
13235 /* The .code 32 directive has already been emitted by
13236 ASM_DECLARE_FUNCTION_NAME. */
13237 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13238 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13240 /* Generate a label, so that the debugger will notice the
13241 change in instruction sets. This label is also used by
13242 the assembler to bypass the ARM code when this function
13243 is called from a Thumb encoded function elsewhere in the
13244 same file. Hence the definition of STUB_NAME here must
13245 agree with the definition in gas/config/tc-arm.c. */
13247 #define STUB_NAME ".real_start_of"
13249 fprintf (f, "\t.code\t16\n");
13250 #ifdef ARM_PE
13251 if (arm_dllexport_name_p (name))
13252 name = arm_strip_name_encoding (name);
13253 #endif
13254 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13255 fprintf (f, "\t.thumb_func\n");
13256 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13259 if (current_function_pretend_args_size)
13261 if (cfun->machine->uses_anonymous_args)
13263 int num_pushes;
13265 fprintf (f, "\tpush\t{");
13267 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13269 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13270 regno <= LAST_ARG_REGNUM;
13271 regno++)
13272 asm_fprintf (f, "%r%s", regno,
13273 regno == LAST_ARG_REGNUM ? "" : ", ");
13275 fprintf (f, "}\n");
13277 else
13278 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13279 SP_REGNUM, SP_REGNUM,
13280 current_function_pretend_args_size);
13282 /* We don't need to record the stores for unwinding (would it
13283 help the debugger any if we did?), but record the change in
13284 the stack pointer. */
13285 if (dwarf2out_do_frame ())
13287 char *l = dwarf2out_cfi_label ();
13289 cfa_offset = cfa_offset + current_function_pretend_args_size;
13290 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13294 /* Get the registers we are going to push. */
13295 live_regs_mask = thumb_compute_save_reg_mask ();
13296 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13297 l_mask = live_regs_mask & 0x40ff;
13298 /* Then count how many other high registers will need to be pushed. */
13299 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13301 if (TARGET_BACKTRACE)
13303 unsigned offset;
13304 unsigned work_register;
13306 /* We have been asked to create a stack backtrace structure.
13307 The code looks like this:
13309 0 .align 2
13310 0 func:
13311 0 sub SP, #16 Reserve space for 4 registers.
13312 2 push {R7} Push low registers.
13313 4 add R7, SP, #20 Get the stack pointer before the push.
13314 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13315 8 mov R7, PC Get hold of the start of this code plus 12.
13316 10 str R7, [SP, #16] Store it.
13317 12 mov R7, FP Get hold of the current frame pointer.
13318 14 str R7, [SP, #4] Store it.
13319 16 mov R7, LR Get hold of the current return address.
13320 18 str R7, [SP, #12] Store it.
13321 20 add R7, SP, #16 Point at the start of the backtrace structure.
13322 22 mov FP, R7 Put this value into the frame pointer. */
13324 work_register = thumb_find_work_register (live_regs_mask);
13326 asm_fprintf
13327 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13328 SP_REGNUM, SP_REGNUM);
13330 if (dwarf2out_do_frame ())
13332 char *l = dwarf2out_cfi_label ();
13334 cfa_offset = cfa_offset + 16;
13335 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13338 if (l_mask)
13340 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13341 offset = bit_count (l_mask);
13343 else
13344 offset = 0;
13346 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13347 offset + 16 + current_function_pretend_args_size);
13349 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13350 offset + 4);
13352 /* Make sure that the instruction fetching the PC is in the right place
13353 to calculate "start of backtrace creation code + 12". */
13354 if (l_mask)
13356 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13357 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13358 offset + 12);
13359 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13360 ARM_HARD_FRAME_POINTER_REGNUM);
13361 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13362 offset);
13364 else
13366 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13367 ARM_HARD_FRAME_POINTER_REGNUM);
13368 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13369 offset);
13370 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13371 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13372 offset + 12);
13375 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13376 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13377 offset + 8);
13378 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13379 offset + 12);
13380 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13381 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13383 /* Optimisation: If we are not pushing any low registers but we are going
13384 to push some high registers then delay our first push. This will just
13385 be a push of LR and we can combine it with the push of the first high
13386 register. */
13387 else if ((l_mask & 0xff) != 0
13388 || (high_regs_pushed == 0 && l_mask))
13389 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13391 if (high_regs_pushed)
13393 unsigned pushable_regs;
13394 unsigned next_hi_reg;
13396 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13397 if (live_regs_mask & (1 << next_hi_reg))
13398 break;
13400 pushable_regs = l_mask & 0xff;
13402 if (pushable_regs == 0)
13403 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13405 while (high_regs_pushed > 0)
13407 unsigned long real_regs_mask = 0;
13409 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13411 if (pushable_regs & (1 << regno))
13413 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13415 high_regs_pushed --;
13416 real_regs_mask |= (1 << next_hi_reg);
13418 if (high_regs_pushed)
13420 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13421 next_hi_reg --)
13422 if (live_regs_mask & (1 << next_hi_reg))
13423 break;
13425 else
13427 pushable_regs &= ~((1 << regno) - 1);
13428 break;
13433 /* If we had to find a work register and we have not yet
13434 saved the LR then add it to the list of regs to push. */
13435 if (l_mask == (1 << LR_REGNUM))
13437 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13438 1, &cfa_offset,
13439 real_regs_mask | (1 << LR_REGNUM));
13440 l_mask = 0;
13442 else
13443 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13448 /* Handle the case of a double word load into a low register from
13449 a computed memory address. The computed address may involve a
13450 register which is overwritten by the load. */
13451 const char *
13452 thumb_load_double_from_address (rtx *operands)
13454 rtx addr;
13455 rtx base;
13456 rtx offset;
13457 rtx arg1;
13458 rtx arg2;
13460 gcc_assert (GET_CODE (operands[0]) == REG);
13461 gcc_assert (GET_CODE (operands[1]) == MEM);
13463 /* Get the memory address. */
13464 addr = XEXP (operands[1], 0);
13466 /* Work out how the memory address is computed. */
13467 switch (GET_CODE (addr))
13469 case REG:
13470 operands[2] = gen_rtx_MEM (SImode,
13471 plus_constant (XEXP (operands[1], 0), 4));
13473 if (REGNO (operands[0]) == REGNO (addr))
13475 output_asm_insn ("ldr\t%H0, %2", operands);
13476 output_asm_insn ("ldr\t%0, %1", operands);
13478 else
13480 output_asm_insn ("ldr\t%0, %1", operands);
13481 output_asm_insn ("ldr\t%H0, %2", operands);
13483 break;
13485 case CONST:
13486 /* Compute <address> + 4 for the high order load. */
13487 operands[2] = gen_rtx_MEM (SImode,
13488 plus_constant (XEXP (operands[1], 0), 4));
13490 output_asm_insn ("ldr\t%0, %1", operands);
13491 output_asm_insn ("ldr\t%H0, %2", operands);
13492 break;
13494 case PLUS:
13495 arg1 = XEXP (addr, 0);
13496 arg2 = XEXP (addr, 1);
13498 if (CONSTANT_P (arg1))
13499 base = arg2, offset = arg1;
13500 else
13501 base = arg1, offset = arg2;
13503 gcc_assert (GET_CODE (base) == REG);
13505 /* Catch the case of <address> = <reg> + <reg> */
13506 if (GET_CODE (offset) == REG)
13508 int reg_offset = REGNO (offset);
13509 int reg_base = REGNO (base);
13510 int reg_dest = REGNO (operands[0]);
13512 /* Add the base and offset registers together into the
13513 higher destination register. */
13514 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13515 reg_dest + 1, reg_base, reg_offset);
13517 /* Load the lower destination register from the address in
13518 the higher destination register. */
13519 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13520 reg_dest, reg_dest + 1);
13522 /* Load the higher destination register from its own address
13523 plus 4. */
13524 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13525 reg_dest + 1, reg_dest + 1);
13527 else
13529 /* Compute <address> + 4 for the high order load. */
13530 operands[2] = gen_rtx_MEM (SImode,
13531 plus_constant (XEXP (operands[1], 0), 4));
13533 /* If the computed address is held in the low order register
13534 then load the high order register first, otherwise always
13535 load the low order register first. */
13536 if (REGNO (operands[0]) == REGNO (base))
13538 output_asm_insn ("ldr\t%H0, %2", operands);
13539 output_asm_insn ("ldr\t%0, %1", operands);
13541 else
13543 output_asm_insn ("ldr\t%0, %1", operands);
13544 output_asm_insn ("ldr\t%H0, %2", operands);
13547 break;
13549 case LABEL_REF:
13550 /* With no registers to worry about we can just load the value
13551 directly. */
13552 operands[2] = gen_rtx_MEM (SImode,
13553 plus_constant (XEXP (operands[1], 0), 4));
13555 output_asm_insn ("ldr\t%H0, %2", operands);
13556 output_asm_insn ("ldr\t%0, %1", operands);
13557 break;
13559 default:
13560 gcc_unreachable ();
13563 return "";
13566 const char *
13567 thumb_output_move_mem_multiple (int n, rtx *operands)
13569 rtx tmp;
13571 switch (n)
13573 case 2:
13574 if (REGNO (operands[4]) > REGNO (operands[5]))
13576 tmp = operands[4];
13577 operands[4] = operands[5];
13578 operands[5] = tmp;
13580 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13581 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13582 break;
13584 case 3:
13585 if (REGNO (operands[4]) > REGNO (operands[5]))
13587 tmp = operands[4];
13588 operands[4] = operands[5];
13589 operands[5] = tmp;
13591 if (REGNO (operands[5]) > REGNO (operands[6]))
13593 tmp = operands[5];
13594 operands[5] = operands[6];
13595 operands[6] = tmp;
13597 if (REGNO (operands[4]) > REGNO (operands[5]))
13599 tmp = operands[4];
13600 operands[4] = operands[5];
13601 operands[5] = tmp;
13604 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13605 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13606 break;
13608 default:
13609 gcc_unreachable ();
13612 return "";
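/* Illustrative sketch (hypothetical register numbers): with n == 3 and
   scratch registers r5, r3, r4 in operands[4..6], the swaps above sort
   them into ascending order, as ldmia/stmia register lists require, and
   the routine then emits

	ldmia	r1!, {r3, r4, r5}
	stmia	r0!, {r3, r4, r5}

   copying 12 bytes and post-incrementing both pointers.  */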
13615 /* Output a call-via instruction for thumb state. */
13616 const char *
13617 thumb_call_via_reg (rtx reg)
13619 int regno = REGNO (reg);
13620 rtx *labelp;
13622 gcc_assert (regno < LR_REGNUM);
13624 /* If we are in the normal text section we can use a single instance
13625 per compilation unit. If we are doing function sections, then we need
13626 an entry per section, since we can't rely on reachability. */
13627 if (in_text_section ())
13629 thumb_call_reg_needed = 1;
13631 if (thumb_call_via_label[regno] == NULL)
13632 thumb_call_via_label[regno] = gen_label_rtx ();
13633 labelp = thumb_call_via_label + regno;
13635 else
13637 if (cfun->machine->call_via[regno] == NULL)
13638 cfun->machine->call_via[regno] = gen_label_rtx ();
13639 labelp = cfun->machine->call_via + regno;
13642 output_asm_insn ("bl\t%a0", labelp);
13643 return "";
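/* Illustrative sketch: an indirect Thumb call through, say, r4 is emitted
   as a "bl" to a shared per-register trampoline rather than a direct
   "bx r4", so that the return address lands in lr with the Thumb bit set:

	bl	<call_via_r4 label>
	...
   <call_via_r4 label>:
	bx	r4

   The trampoline itself is emitted later, by arm_file_end when we are in
   the normal text section, or per section when using function sections.
   The label shown is schematic; the real one is an internal label.  */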
13646 /* Routines for generating rtl. */
13647 void
13648 thumb_expand_movmemqi (rtx *operands)
13650 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13651 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13652 HOST_WIDE_INT len = INTVAL (operands[2]);
13653 HOST_WIDE_INT offset = 0;
13655 while (len >= 12)
13657 emit_insn (gen_movmem12b (out, in, out, in));
13658 len -= 12;
13661 if (len >= 8)
13663 emit_insn (gen_movmem8b (out, in, out, in));
13664 len -= 8;
13667 if (len >= 4)
13669 rtx reg = gen_reg_rtx (SImode);
13670 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13671 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13672 len -= 4;
13673 offset += 4;
13676 if (len >= 2)
13678 rtx reg = gen_reg_rtx (HImode);
13679 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13680 plus_constant (in, offset))));
13681 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13682 reg));
13683 len -= 2;
13684 offset += 2;
13687 if (len)
13689 rtx reg = gen_reg_rtx (QImode);
13690 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13691 plus_constant (in, offset))));
13692 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13693 reg));
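/* Worked example (sketch): for a 23-byte copy the expansion above emits
   one 12-byte block move, one 8-byte block move, and then a 2-byte and a
   1-byte tail:

	23 = 12 + 8 + 2 + 1

   i.e. gen_movmem12b, gen_movmem8b, one HImode move at offset 0 and one
   QImode move at offset 2, the offsets being relative to the pointers
   left behind by the block moves, which post-increment OUT and IN.  */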
13697 void
13698 thumb_reload_out_hi (rtx *operands)
13700 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13703 /* Handle reading a half-word from memory during reload. */
13704 void
13705 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13707 gcc_unreachable ();
13710 /* Return the length of a function name prefix
13711 that starts with the character 'c'. */
13712 static int
13713 arm_get_strip_length (int c)
13715 switch (c)
13717 ARM_NAME_ENCODING_LENGTHS
13718 default: return 0;
13722 /* Return a pointer to a function's name with any
13723 and all prefix encodings stripped from it. */
13724 const char *
13725 arm_strip_name_encoding (const char *name)
13727 int skip;
13729 while ((skip = arm_get_strip_length (* name)))
13730 name += skip;
13732 return name;
13735 /* If there is a '*' anywhere in the name's prefix, then
13736 emit the stripped name verbatim, otherwise prepend an
13737 underscore if leading underscores are being used. */
13738 void
13739 arm_asm_output_labelref (FILE *stream, const char *name)
13741 int skip;
13742 int verbatim = 0;
13744 while ((skip = arm_get_strip_length (* name)))
13746 verbatim |= (*name == '*');
13747 name += skip;
13750 if (verbatim)
13751 fputs (name, stream);
13752 else
13753 asm_fprintf (stream, "%U%s", name);
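/* Illustrative sketch: assuming the encoding characters defined by
   ARM_NAME_ENCODING_LENGTHS include '*' (emit verbatim) and one-character
   call-type flags, arm_strip_name_encoding turns an encoded name such as
   "*foo" back into "foo", and arm_asm_output_labelref prints it verbatim;
   a name carrying only a call-type flag is printed with the flag stripped
   and the user label prefix prepended (the "%U" above).  The exact prefix
   characters are whatever arm.h defines.  */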
13756 static void
13757 arm_file_end (void)
13759 int regno;
13761 if (! thumb_call_reg_needed)
13762 return;
13764 text_section ();
13765 asm_fprintf (asm_out_file, "\t.code 16\n");
13766 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13768 for (regno = 0; regno < LR_REGNUM; regno++)
13770 rtx label = thumb_call_via_label[regno];
13772 if (label != 0)
13774 targetm.asm_out.internal_label (asm_out_file, "L",
13775 CODE_LABEL_NUMBER (label));
13776 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
13781 rtx aof_pic_label;
13783 #ifdef AOF_ASSEMBLER
13784 /* Special functions only needed when producing AOF syntax assembler. */
13786 struct pic_chain
13788 struct pic_chain * next;
13789 const char * symname;
13792 static struct pic_chain * aof_pic_chain = NULL;
13795 aof_pic_entry (rtx x)
13797 struct pic_chain ** chainp;
13798 int offset;
13800 if (aof_pic_label == NULL_RTX)
13802 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13805 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13806 offset += 4, chainp = &(*chainp)->next)
13807 if ((*chainp)->symname == XSTR (x, 0))
13808 return plus_constant (aof_pic_label, offset);
13810 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13811 (*chainp)->next = NULL;
13812 (*chainp)->symname = XSTR (x, 0);
13813 return plus_constant (aof_pic_label, offset);
13816 void
13817 aof_dump_pic_table (FILE *f)
13819 struct pic_chain * chain;
13821 if (aof_pic_chain == NULL)
13822 return;
13824 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13825 PIC_OFFSET_TABLE_REGNUM,
13826 PIC_OFFSET_TABLE_REGNUM);
13827 fputs ("|x$adcons|\n", f);
13829 for (chain = aof_pic_chain; chain; chain = chain->next)
13831 fputs ("\tDCD\t", f);
13832 assemble_name (f, chain->symname);
13833 fputs ("\n", f);
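/* Illustrative sketch of the table dumped above: if aof_pic_entry has
   been called for symbols "a" and "b" (in that order, so at offsets 0
   and 4), the output looks roughly like

	AREA |r9$$adcons|, BASED r9
   |x$adcons|
	DCD	a
	DCD	b

   where r9 stands in for whatever register PIC_OFFSET_TABLE_REGNUM
   names.  */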
13837 int arm_text_section_count = 1;
13839 char *
13840 aof_text_section (void)
13842 static char buf[100];
13843 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13844 arm_text_section_count++);
13845 if (flag_pic)
13846 strcat (buf, ", PIC, REENTRANT");
13847 return buf;
13850 static int arm_data_section_count = 1;
13852 char *
13853 aof_data_section (void)
13855 static char buf[100];
13856 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13857 return buf;
13860 /* The AOF assembler is religiously strict about declarations of
13861 imported and exported symbols, so that it is impossible to declare
13862 a function as imported near the beginning of the file, and then to
13863 export it later on. It is, however, possible to delay the decision
13864 until all the functions in the file have been compiled. To get
13865 around this, we maintain a list of the imports and exports, and
13866 delete from it any that are subsequently defined. At the end of
13867 compilation we spit the remainder of the list out before the END
13868 directive. */
13870 struct import
13872 struct import * next;
13873 const char * name;
13876 static struct import * imports_list = NULL;
13878 void
13879 aof_add_import (const char *name)
13881 struct import * new;
13883 for (new = imports_list; new; new = new->next)
13884 if (new->name == name)
13885 return;
13887 new = (struct import *) xmalloc (sizeof (struct import));
13888 new->next = imports_list;
13889 imports_list = new;
13890 new->name = name;
13893 void
13894 aof_delete_import (const char *name)
13896 struct import ** old;
13898 for (old = &imports_list; *old; old = & (*old)->next)
13900 if ((*old)->name == name)
13902 *old = (*old)->next;
13903 return;
13908 int arm_main_function = 0;
13910 static void
13911 aof_dump_imports (FILE *f)
13913 /* The AOF assembler needs this to cause the startup code to be extracted
13914 from the library. Bringing in __main causes the whole thing to work
13915 automagically. */
13916 if (arm_main_function)
13918 text_section ();
13919 fputs ("\tIMPORT __main\n", f);
13920 fputs ("\tDCD __main\n", f);
13923 /* Now dump the remaining imports. */
13924 while (imports_list)
13926 fprintf (f, "\tIMPORT\t");
13927 assemble_name (f, imports_list->name);
13928 fputc ('\n', f);
13929 imports_list = imports_list->next;
13933 static void
13934 aof_globalize_label (FILE *stream, const char *name)
13936 default_globalize_label (stream, name);
13937 if (! strcmp (name, "main"))
13938 arm_main_function = 1;
13941 static void
13942 aof_file_start (void)
13944 fputs ("__r0\tRN\t0\n", asm_out_file);
13945 fputs ("__a1\tRN\t0\n", asm_out_file);
13946 fputs ("__a2\tRN\t1\n", asm_out_file);
13947 fputs ("__a3\tRN\t2\n", asm_out_file);
13948 fputs ("__a4\tRN\t3\n", asm_out_file);
13949 fputs ("__v1\tRN\t4\n", asm_out_file);
13950 fputs ("__v2\tRN\t5\n", asm_out_file);
13951 fputs ("__v3\tRN\t6\n", asm_out_file);
13952 fputs ("__v4\tRN\t7\n", asm_out_file);
13953 fputs ("__v5\tRN\t8\n", asm_out_file);
13954 fputs ("__v6\tRN\t9\n", asm_out_file);
13955 fputs ("__sl\tRN\t10\n", asm_out_file);
13956 fputs ("__fp\tRN\t11\n", asm_out_file);
13957 fputs ("__ip\tRN\t12\n", asm_out_file);
13958 fputs ("__sp\tRN\t13\n", asm_out_file);
13959 fputs ("__lr\tRN\t14\n", asm_out_file);
13960 fputs ("__pc\tRN\t15\n", asm_out_file);
13961 fputs ("__f0\tFN\t0\n", asm_out_file);
13962 fputs ("__f1\tFN\t1\n", asm_out_file);
13963 fputs ("__f2\tFN\t2\n", asm_out_file);
13964 fputs ("__f3\tFN\t3\n", asm_out_file);
13965 fputs ("__f4\tFN\t4\n", asm_out_file);
13966 fputs ("__f5\tFN\t5\n", asm_out_file);
13967 fputs ("__f6\tFN\t6\n", asm_out_file);
13968 fputs ("__f7\tFN\t7\n", asm_out_file);
13969 text_section ();
13972 static void
13973 aof_file_end (void)
13975 if (flag_pic)
13976 aof_dump_pic_table (asm_out_file);
13977 arm_file_end ();
13978 aof_dump_imports (asm_out_file);
13979 fputs ("\tEND\n", asm_out_file);
13981 #endif /* AOF_ASSEMBLER */
13983 #ifndef ARM_PE
13984 /* Symbols in the text segment can be accessed without indirecting via the
13985 constant pool; it may take an extra binary operation, but this is still
13986 faster than indirecting via memory. Don't do this when not optimizing,
13987 since we won't be calculating all of the offsets necessary to do this
13988 simplification. */
13990 static void
13991 arm_encode_section_info (tree decl, rtx rtl, int first)
13993 /* This doesn't work with AOF syntax, since the string table may be in
13994 a different AREA. */
13995 #ifndef AOF_ASSEMBLER
13996 if (optimize > 0 && TREE_CONSTANT (decl))
13997 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
13998 #endif
14000 /* If we are referencing a function that is weak then encode a long call
14001 flag in the function name, otherwise if the function is static or
14002 known to be defined in this file then encode a short call flag. */
14003 if (first && DECL_P (decl))
14005 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14006 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14007 else if (! TREE_PUBLIC (decl))
14008 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14011 #endif /* !ARM_PE */
14013 static void
14014 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14016 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14017 && !strcmp (prefix, "L"))
14019 arm_ccfsm_state = 0;
14020 arm_target_insn = NULL;
14022 default_internal_label (stream, prefix, labelno);
14025 /* Output code to add DELTA to the first argument, and then jump
14026 to FUNCTION. Used for C++ multiple inheritance. */
14027 static void
14028 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14029 HOST_WIDE_INT delta,
14030 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14031 tree function)
14033 static int thunk_label = 0;
14034 char label[256];
14035 int mi_delta = delta;
14036 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14037 int shift = 0;
14038 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14039 ? 1 : 0);
14040 if (mi_delta < 0)
14041 mi_delta = - mi_delta;
14042 if (TARGET_THUMB)
14044 int labelno = thunk_label++;
14045 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14046 fputs ("\tldr\tr12, ", file);
14047 assemble_name (file, label);
14048 fputc ('\n', file);
14050 while (mi_delta != 0)
14052 if ((mi_delta & (3 << shift)) == 0)
14053 shift += 2;
14054 else
14056 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14057 mi_op, this_regno, this_regno,
14058 mi_delta & (0xff << shift));
14059 mi_delta &= ~(0xff << shift);
14060 shift += 8;
14063 if (TARGET_THUMB)
14065 fprintf (file, "\tbx\tr12\n");
14066 ASM_OUTPUT_ALIGN (file, 2);
14067 assemble_name (file, label);
14068 fputs (":\n", file);
14069 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14071 else
14073 fputs ("\tb\t", file);
14074 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14075 if (NEED_PLT_RELOC)
14076 fputs ("(PLT)", file);
14077 fputc ('\n', file);
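/* Worked example (sketch): the loop above splits DELTA into add/sub
   immediates of at most eight significant bits at an even shift, so each
   one fits the ARM rotated-immediate form.  For delta == 0x1234 with the
   `this' pointer in r0 it emits, in order:

	add	r0, r0, #564	@ 0x234
	add	r0, r0, #4096	@ 0x1000

   A negative delta is decomposed the same way with "sub".  r1 is used
   instead of r0 when the return value is passed by reference
   (this_regno above).  */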
14082 arm_emit_vector_const (FILE *file, rtx x)
14084 int i;
14085 const char * pattern;
14087 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14089 switch (GET_MODE (x))
14091 case V2SImode: pattern = "%08x"; break;
14092 case V4HImode: pattern = "%04x"; break;
14093 case V8QImode: pattern = "%02x"; break;
14094 default: gcc_unreachable ();
14097 fprintf (file, "0x");
14098 for (i = CONST_VECTOR_NUNITS (x); i--;)
14100 rtx element;
14102 element = CONST_VECTOR_ELT (x, i);
14103 fprintf (file, pattern, INTVAL (element));
14106 return 1;
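/* Illustrative sketch: the loop prints the elements from the highest
   numbered down, so a V4HImode constant {1, 2, 3, 4} comes out as the
   single literal 0x0004000300020001, with element 0 in the least
   significant halfword.  */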
14109 const char *
14110 arm_output_load_gr (rtx *operands)
14112 rtx reg;
14113 rtx offset;
14114 rtx wcgr;
14115 rtx sum;
14117 if (GET_CODE (operands [1]) != MEM
14118 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14119 || GET_CODE (reg = XEXP (sum, 0)) != REG
14120 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14121 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14122 return "wldrw%?\t%0, %1";
14124 /* Fix up an out-of-range load of a GR register. */
14125 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14126 wcgr = operands[0];
14127 operands[0] = reg;
14128 output_asm_insn ("ldr%?\t%0, %1", operands);
14130 operands[0] = wcgr;
14131 operands[1] = reg;
14132 output_asm_insn ("tmcr%?\t%0, %1", operands);
14133 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14135 return "";
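/* Illustrative sketch of the expansion above, for a wCGR load whose
   offset is outside the wldrw range (hypothetical registers):

	str	r1, [sp, #-4]!	@ Start of GR load expansion
	ldr	r1, [r1, #2048]	@ fetch the value via the base register
	tmcr	wcgr0, r1	@ move it into the GR register
	ldr	r1, [sp], #4	@ End of GR load expansion

   i.e. the base register is spilled, reused to load the value, copied
   across with tmcr, and then restored.  */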
14138 static rtx
14139 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14140 int incoming ATTRIBUTE_UNUSED)
14142 #if 0
14143 /* FIXME: The ARM backend has special code to handle structure
14144 returns, and will reserve its own hidden first argument. So
14145 if this macro is enabled a *second* hidden argument will be
14146 reserved, which will break binary compatibility with old
14147 toolchains and also thunk handling. One day this should be
14148 fixed. */
14149 return 0;
14150 #else
14151 /* Register in which address to store a structure value
14152 is passed to a function. */
14153 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14154 #endif
14157 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14159 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14160 named arg and all anonymous args onto the stack.
14161 XXX I know the prologue shouldn't be pushing registers, but it is faster
14162 that way. */
14164 static void
14165 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14166 enum machine_mode mode ATTRIBUTE_UNUSED,
14167 tree type ATTRIBUTE_UNUSED,
14168 int *pretend_size,
14169 int second_time ATTRIBUTE_UNUSED)
14171 cfun->machine->uses_anonymous_args = 1;
14172 if (cum->nregs < NUM_ARG_REGS)
14173 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
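/* Example (sketch): if the named arguments of a varargs function occupy
   only r0, cum->nregs is 1 here and *pretend_size becomes
   (4 - 1) * 4 = 12, so the prologue pushes r1-r3 and the anonymous
   arguments end up contiguous with any that were passed on the stack.
   This assumes NUM_ARG_REGS == 4 and UNITS_PER_WORD == 4, as on the
   standard ARM ABIs.  */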
14176 /* Return nonzero if the CONSUMER instruction (a store) does not need
14177 PRODUCER's value to calculate the address. */
14180 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14182 rtx value = PATTERN (producer);
14183 rtx addr = PATTERN (consumer);
14185 if (GET_CODE (value) == COND_EXEC)
14186 value = COND_EXEC_CODE (value);
14187 if (GET_CODE (value) == PARALLEL)
14188 value = XVECEXP (value, 0, 0);
14189 value = XEXP (value, 0);
14190 if (GET_CODE (addr) == COND_EXEC)
14191 addr = COND_EXEC_CODE (addr);
14192 if (GET_CODE (addr) == PARALLEL)
14193 addr = XVECEXP (addr, 0, 0);
14194 addr = XEXP (addr, 0);
14196 return !reg_overlap_mentioned_p (value, addr);
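/* Example (sketch): with a producer "mul r1, r2, r3" and a consumer
   "str r1, [r4]", the stored value depends on the producer but the
   address does not, so this returns nonzero and the scheduler may apply
   the shorter store-address latency.  For a consumer "str r5, [r1]" the
   produced register is part of the address and 0 is returned.  */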
14199 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14200 have an early register shift value or amount dependency on the
14201 result of PRODUCER. */
14204 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14206 rtx value = PATTERN (producer);
14207 rtx op = PATTERN (consumer);
14208 rtx early_op;
14210 if (GET_CODE (value) == COND_EXEC)
14211 value = COND_EXEC_CODE (value);
14212 if (GET_CODE (value) == PARALLEL)
14213 value = XVECEXP (value, 0, 0);
14214 value = XEXP (value, 0);
14215 if (GET_CODE (op) == COND_EXEC)
14216 op = COND_EXEC_CODE (op);
14217 if (GET_CODE (op) == PARALLEL)
14218 op = XVECEXP (op, 0, 0);
14219 op = XEXP (op, 1);
14221 early_op = XEXP (op, 0);
14222 /* This is either an actual independent shift, or a shift applied to
14223 the first operand of another operation. We want the whole shift
14224 operation. */
14225 if (GET_CODE (early_op) == REG)
14226 early_op = op;
14228 return !reg_overlap_mentioned_p (value, early_op);
14231 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14232 have an early register shift value dependency on the result of
14233 PRODUCER. */
14236 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14238 rtx value = PATTERN (producer);
14239 rtx op = PATTERN (consumer);
14240 rtx early_op;
14242 if (GET_CODE (value) == COND_EXEC)
14243 value = COND_EXEC_CODE (value);
14244 if (GET_CODE (value) == PARALLEL)
14245 value = XVECEXP (value, 0, 0);
14246 value = XEXP (value, 0);
14247 if (GET_CODE (op) == COND_EXEC)
14248 op = COND_EXEC_CODE (op);
14249 if (GET_CODE (op) == PARALLEL)
14250 op = XVECEXP (op, 0, 0);
14251 op = XEXP (op, 1);
14253 early_op = XEXP (op, 0);
14255 /* This is either an actual independent shift, or a shift applied to
14256 the first operand of another operation. We want the value being
14257 shifted, in either case. */
14258 if (GET_CODE (early_op) != REG)
14259 early_op = XEXP (early_op, 0);
14261 return !reg_overlap_mentioned_p (value, early_op);
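/* Example (sketch) for the two shift-dependency tests above: for a
   consumer such as "add r4, r5, r1, lsl r6" the early operand is the
   shifted pair (r1 shifted by r6).  arm_no_early_alu_shift_dep returns 0
   if the producer writes either r1 or r6, while
   arm_no_early_alu_shift_value_dep only cares about r1, the value being
   shifted; a producer that writes only r5 leaves both returning nonzero.
   Register numbers are hypothetical.  */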
14264 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14265 have an early register mult dependency on the result of
14266 PRODUCER. */
14269 arm_no_early_mul_dep (rtx producer, rtx consumer)
14271 rtx value = PATTERN (producer);
14272 rtx op = PATTERN (consumer);
14274 if (GET_CODE (value) == COND_EXEC)
14275 value = COND_EXEC_CODE (value);
14276 if (GET_CODE (value) == PARALLEL)
14277 value = XVECEXP (value, 0, 0);
14278 value = XEXP (value, 0);
14279 if (GET_CODE (op) == COND_EXEC)
14280 op = COND_EXEC_CODE (op);
14281 if (GET_CODE (op) == PARALLEL)
14282 op = XVECEXP (op, 0, 0);
14283 op = XEXP (op, 1);
14285 return (GET_CODE (op) == PLUS
14286 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
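/* Example (sketch): for a multiply-accumulate consumer such as
   "mla r4, r5, r6, r1" this returns nonzero when the producer writes only
   the accumulator input r1, since that value is not needed early; if the
   producer feeds one of the multiply operands (r5 or r6), or the consumer
   is a plain multiply rather than a PLUS, 0 is returned.  Register
   numbers are hypothetical.  */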
14290 /* We can't rely on the caller doing the proper promotion when
14291 using APCS or ATPCS. */
14293 static bool
14294 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14296 return !TARGET_AAPCS_BASED;
14300 /* AAPCS based ABIs use short enums by default. */
14302 static bool
14303 arm_default_short_enums (void)
14305 return TARGET_AAPCS_BASED;
14309 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14311 static bool
14312 arm_align_anon_bitfield (void)
14314 return TARGET_AAPCS_BASED;
14318 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14320 static tree
14321 arm_cxx_guard_type (void)
14323 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14327 /* The EABI says test the least significant bit of a guard variable. */
14329 static bool
14330 arm_cxx_guard_mask_bit (void)
14332 return TARGET_AAPCS_BASED;
14336 /* The EABI specifies that all array cookies are 8 bytes long. */
14338 static tree
14339 arm_get_cookie_size (tree type)
14341 tree size;
14343 if (!TARGET_AAPCS_BASED)
14344 return default_cxx_get_cookie_size (type);
14346 size = build_int_cst (sizetype, 8);
14347 return size;
14351 /* The EABI says that array cookies should also contain the element size. */
14353 static bool
14354 arm_cookie_has_size (void)
14356 return TARGET_AAPCS_BASED;
14360 /* The EABI says constructors and destructors should return a pointer to
14361 the object constructed/destroyed. */
14363 static bool
14364 arm_cxx_cdtor_returns_this (void)
14366 return TARGET_AAPCS_BASED;
14369 /* The EABI says that an inline function may never be the key
14370 method. */
14372 static bool
14373 arm_cxx_key_method_may_be_inline (void)
14375 return !TARGET_AAPCS_BASED;
14378 static void
14379 arm_cxx_determine_class_data_visibility (tree decl)
14381 if (!TARGET_AAPCS_BASED)
14382 return;
14384 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
14385 is exported. However, on systems without dynamic vague linkage,
14386 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
14387 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
14388 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
14389 else
14390 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
14391 DECL_VISIBILITY_SPECIFIED (decl) = 1;
14394 static bool
14395 arm_cxx_class_data_always_comdat (void)
14397 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
14398 vague linkage if the class has no key function. */
14399 return !TARGET_AAPCS_BASED;
14403 /* The EABI says __aeabi_atexit should be used to register static
14404 destructors. */
14406 static bool
14407 arm_cxx_use_aeabi_atexit (void)
14409 return TARGET_AAPCS_BASED;
14413 void
14414 arm_set_return_address (rtx source, rtx scratch)
14416 arm_stack_offsets *offsets;
14417 HOST_WIDE_INT delta;
14418 rtx addr;
14419 unsigned long saved_regs;
14421 saved_regs = arm_compute_save_reg_mask ();
14423 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14424 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14425 else
14427 if (frame_pointer_needed)
14428 addr = plus_constant(hard_frame_pointer_rtx, -4);
14429 else
14431 /* LR will be the first saved register. */
14432 offsets = arm_get_frame_offsets ();
14433 delta = offsets->outgoing_args - (offsets->frame + 4);
14436 if (delta >= 4096)
14438 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14439 GEN_INT (delta & ~4095)));
14440 addr = scratch;
14441 delta &= 4095;
14443 else
14444 addr = stack_pointer_rtx;
14446 addr = plus_constant (addr, delta);
14448 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
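/* Example (sketch): if the saved lr slot is 5000 bytes above sp, the
   split above first adds the 4096-aligned part into SCRATCH and then
   stores through it with the remainder as the immediate offset:

	add	scratch, sp, #4096
	str	source, [scratch, #904]

   keeping the offset within the +/- 4095 range of a word store.  */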
14453 void
14454 thumb_set_return_address (rtx source, rtx scratch)
14456 arm_stack_offsets *offsets;
14457 HOST_WIDE_INT delta;
14458 int reg;
14459 rtx addr;
14460 unsigned long mask;
14462 emit_insn (gen_rtx_USE (VOIDmode, source));
14464 mask = thumb_compute_save_reg_mask ();
14465 if (mask & (1 << LR_REGNUM))
14467 offsets = arm_get_frame_offsets ();
14469 /* Find the saved regs. */
14470 if (frame_pointer_needed)
14472 delta = offsets->soft_frame - offsets->saved_args;
14473 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14475 else
14477 delta = offsets->outgoing_args - offsets->saved_args;
14478 reg = SP_REGNUM;
14480 /* Allow for the stack frame. */
14481 if (TARGET_BACKTRACE)
14482 delta -= 16;
14483 /* The link register is always the first saved register. */
14484 delta -= 4;
14486 /* Construct the address. */
14487 addr = gen_rtx_REG (SImode, reg);
14488 if ((reg != SP_REGNUM && delta >= 128)
14489 || delta >= 1024)
14491 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14492 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14493 addr = scratch;
14495 else
14496 addr = plus_constant (addr, delta);
14498 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14500 else
14501 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14504 /* Implements target hook vector_mode_supported_p. */
14505 bool
14506 arm_vector_mode_supported_p (enum machine_mode mode)
14508 if ((mode == V2SImode)
14509 || (mode == V4HImode)
14510 || (mode == V8QImode))
14511 return true;
14513 return false;
14516 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14517 ARM insns and therefore guarantee that the shift count is modulo 256.
14518 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14519 guarantee no particular behavior for out-of-range counts. */
14521 static unsigned HOST_WIDE_INT
14522 arm_shift_truncation_mask (enum machine_mode mode)
14524 return mode == SImode ? 255 : 0;
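/* Example (sketch): since the mask for SImode is 255, the middle end may
   treat "x << (n & 255)" and "x << n" as the same SImode shift; the mask
   of 0 for other modes (in particular DImode) promises nothing, so no
   such folding is done there.  */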
14528 /* Map internal gcc register numbers to DWARF2 register numbers. */
14530 unsigned int
14531 arm_dbx_register_number (unsigned int regno)
14533 if (regno < 16)
14534 return regno;
14536 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14537 compatibility. The EABI defines them as registers 96-103. */
14538 if (IS_FPA_REGNUM (regno))
14539 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14541 if (IS_VFP_REGNUM (regno))
14542 return 64 + regno - FIRST_VFP_REGNUM;
14544 if (IS_IWMMXT_GR_REGNUM (regno))
14545 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14547 if (IS_IWMMXT_REGNUM (regno))
14548 return 112 + regno - FIRST_IWMMXT_REGNUM;
14550 gcc_unreachable ();
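/* Examples (sketch): core registers map to themselves (r0 -> 0 ... pc ->
   15); the first FPA register maps to 96 on AAPCS-based targets and to
   the legacy 16 otherwise; the first VFP register maps to 64; and the
   iWMMXt control and data registers start at 104 and 112 respectively,
   exactly as computed above.  */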