/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef  TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef  TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef  TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef  TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;
/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   FL_FOR_ARCH6
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6
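/* Illustrative sketch (editor's addition, not part of the original source):
   because each FL_FOR_ARCH macro is built from its predecessor, capability
   bits accumulate monotonically; FL_FOR_ARCH5TE, for instance, expands to
   FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB.
   A hypothetical self-check of that property, compiled out with #if 0:  */
#if 0
static void
fl_for_arch_accumulation_example (void)
{
  /* Everything ARMv4 requires is also present in ARMv5TE ...  */
  gcc_assert ((FL_FOR_ARCH5TE & FL_FOR_ARCH4) == FL_FOR_ARCH4);
  /* ... and the Thumb bit first appears at the 4T/5T levels.  */
  gcc_assert ((FL_FOR_ARCH5TE & FL_THUMB) != 0);
  gcc_assert ((FL_FOR_ARCH4 & FL_THUMB) == 0);
}
#endif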
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)
/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};
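/* Editor's illustration (not part of the original source): the block above
   is the classic "x-macro" idiom.  arm-cores.def holds one
   ARM_CORE (NAME, IDENT, ARCH, FLAGS, COSTS) invocation per CPU, and each
   includer supplies its own expansion.  As a hypothetical example (consult
   arm-cores.def for the real entries), an entry written as

     ARM_CORE("arm7tdmi", arm7tdmi, 4T, FL_CO_PROC, slowmul)

   would expand under the definition above to roughly

     {"arm7tdmi", arm_none, "4T", FL_CO_PROC | FL_FOR_ARCH4T,
      arm_slowmul_rtx_costs},

   so the ARCH token is both stringized and token-pasted into an FL_FOR_ARCH
   macro, and the COSTS token selects one of the arm_*_rtx_costs functions
   declared earlier.  */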
static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};
struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2
/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";
struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};


/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};


/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};


struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};


struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};


/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",    ARM_ABI_APCS},
  {"atpcs",       ARM_ABI_ATPCS},
  {"aapcs",       ARM_ABI_AAPCS},
  {"iwmmxt",      ARM_ABI_IWMMXT}
};
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
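/* Illustrative sketch (editor's addition, not part of the original source):
   the value &= value - 1 step clears the least-significant set bit on every
   iteration, so the loop runs once per set bit rather than once per bit
   position (Kernighan's trick).  Compiled out with #if 0:  */
#if 0
static void
bit_count_example (void)
{
  /* 0x29 is binary 101001: 101001 -> 101000 -> 100000 -> 0.  */
  gcc_assert (bit_count (0x29) == 3);
  /* Zero never enters the loop.  */
  gcc_assert (bit_count (0) == 0);
}
#endif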
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
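/* Illustrative sketch (editor's addition, not part of the original source)
   of the effect of the registrations above on a BPABI target.  Ordinary C
   division and modulus lower to the __aeabi_* routines; per the Run-Time
   ABI the quotient comes back in r0 and the remainder in r1, which is why
   the smod/umod optabs can simply be cleared and serviced by the divmod
   entry points.  Compiled out with #if 0:  */
#if 0
int
quotient_example (int a, int b)
{
  return a / b;   /* emitted as a call to __aeabi_idivmod; result in r0 */
}

unsigned
remainder_example (unsigned a, unsigned b)
{
  return a % b;   /* same call to __aeabi_uidivmod; remainder taken from r1 */
}
#endif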
/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              gcc_assert (best_fit);
              sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking");
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}
static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
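/* Illustrative sketch (editor's addition, not part of the original source)
   of how the table and lookup above are reached from user code.  The
   strings are the arguments accepted by the ARM "interrupt" (alias "isr")
   function attribute; omitting the argument falls back to ARM_FT_ISR as
   coded in arm_isr_value.  Compiled out with #if 0:  */
#if 0
/* Entered from the IRQ vector; prologue/epilogue preserve the
   interrupted context.  */
void irq_handler (void) __attribute__ ((interrupt ("IRQ")));

/* No argument: treated as a plain ISR (ARM_FT_ISR).  */
void generic_handler (void) __attribute__ ((interrupt));

/* An unknown string makes arm_isr_value return ARM_FT_UNKNOWN, which the
   attribute-checking code can then diagnose.  */
#endif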
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}
/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ... */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
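/* Editor's illustration (not part of the original source): the single
   instruction use_return_insn is gating is a load-multiple straight into
   the program counter, e.g. for a frame that saved {r4, lr}:

       ldmia sp!, {r4, pc}

   Any of the conditions above that would force extra fix-up work (a stack
   adjustment, an unsaved LR, FPA/VFP/iWMMXt register restores) means the
   epilogue cannot collapse into that one insn, so the function returns 0.  */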
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
           && ((i & ~0xc000003f) == 0
               || (i & ~0xf000000f) == 0
               || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}
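/* Illustrative sketch (editor's addition, not part of the original source):
   an ARM data-processing immediate is an 8-bit value rotated right by an
   even amount, which is exactly what the masks above test; the lowbit <= 4
   cases catch values whose 8 bits wrap around the top of the word.
   Compiled out with #if 0:  */
#if 0
static void
const_ok_for_arm_examples (void)
{
  gcc_assert (const_ok_for_arm (0x000000ff));   /* 8 bits, no rotation */
  gcc_assert (const_ok_for_arm (0x0003fc00));   /* 0xff << 10          */
  gcc_assert (const_ok_for_arm (0xc000003f));   /* 0xff rotated by 2   */
  gcc_assert (!const_ok_for_arm (0x00000101));  /* spans nine bits     */
}
#endif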
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
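/* Illustrative sketch (editor's addition, not part of the original source):
   PLUS may accept the negated constant because an ADD can always be emitted
   as a SUB instead, and AND may accept the inverted constant because it can
   be emitted as BIC.  Compiled out with #if 0:  */
#if 0
static void
const_ok_for_op_examples (void)
{
  /* -1 is not itself encodable ...  */
  gcc_assert (!const_ok_for_arm (-1));
  /* ... but x + (-1) can be emitted as  sub rd, rn, #1.  */
  gcc_assert (const_ok_for_op (-1, PLUS));
  /* Likewise x & ~0xff can be emitted as  bic rd, rn, #0xff.  */
  gcc_assert (const_ok_for_op (~0xff, AND));
}
#endif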
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c  */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are dyadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source,
                                                        temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
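/* Editor's illustration (not part of the original source): a value that
   fails const_ok_for_arm is either synthesized in-line or left for the
   literal pool.  For SET of 0x12345678, no single MOV immediate works, so
   arm_gen_constant would build it bytewise, roughly:

       mov  rd, #0x12000000
       orr  rd, rd, #0x00340000
       orr  rd, rd, #0x00005600
       orr  rd, rd, #0x00000078

   That is four insns, above the default arm_constant_limit of 3, so before
   arm_reorg the value would normally be spilled to a minipool load instead.
   (Sketch only; the exact sequence depends on the target.)  */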
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}
1663 /* As above, but extra parameter GENERATE which, if clear, suppresses
1664 RTL generation. */
1666 static int
1667 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1668 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1669 int generate)
1671 int can_invert = 0;
1672 int can_negate = 0;
1673 int can_negate_initial = 0;
1674 int can_shift = 0;
1675 int i;
1676 int num_bits_set = 0;
1677 int set_sign_bit_copies = 0;
1678 int clear_sign_bit_copies = 0;
1679 int clear_zero_bit_copies = 0;
1680 int set_zero_bit_copies = 0;
1681 int insns = 0;
1682 unsigned HOST_WIDE_INT temp1, temp2;
1683 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1685 /* Find out which operations are safe for a given CODE. Also do a quick
1686 check for degenerate cases; these can occur when DImode operations
1687 are split. */
1688 switch (code)
1690 case SET:
1691 can_invert = 1;
1692 can_shift = 1;
1693 can_negate = 1;
1694 break;
1696 case PLUS:
1697 can_negate = 1;
1698 can_negate_initial = 1;
1699 break;
1701 case IOR:
1702 if (remainder == 0xffffffff)
1704 if (generate)
1705 emit_constant_insn (cond,
1706 gen_rtx_SET (VOIDmode, target,
1707 GEN_INT (ARM_SIGN_EXTEND (val))));
1708 return 1;
1710 if (remainder == 0)
1712 if (reload_completed && rtx_equal_p (target, source))
1713 return 0;
1714 if (generate)
1715 emit_constant_insn (cond,
1716 gen_rtx_SET (VOIDmode, target, source));
1717 return 1;
1719 break;
1721 case AND:
1722 if (remainder == 0)
1724 if (generate)
1725 emit_constant_insn (cond,
1726 gen_rtx_SET (VOIDmode, target, const0_rtx));
1727 return 1;
1729 if (remainder == 0xffffffff)
1731 if (reload_completed && rtx_equal_p (target, source))
1732 return 0;
1733 if (generate)
1734 emit_constant_insn (cond,
1735 gen_rtx_SET (VOIDmode, target, source));
1736 return 1;
1738 can_invert = 1;
1739 break;
1741 case XOR:
1742 if (remainder == 0)
1744 if (reload_completed && rtx_equal_p (target, source))
1745 return 0;
1746 if (generate)
1747 emit_constant_insn (cond,
1748 gen_rtx_SET (VOIDmode, target, source));
1749 return 1;
1752 /* We don't know how to handle other cases yet. */
1753 gcc_assert (remainder == 0xffffffff);
1755 if (generate)
1756 emit_constant_insn (cond,
1757 gen_rtx_SET (VOIDmode, target,
1758 gen_rtx_NOT (mode, source)));
1759 return 1;
1761 case MINUS:
1762 /* We treat MINUS as (val - source), since (source - val) is always
1763 passed as (source + (-val)). */
1764 if (remainder == 0)
1766 if (generate)
1767 emit_constant_insn (cond,
1768 gen_rtx_SET (VOIDmode, target,
1769 gen_rtx_NEG (mode, source)));
1770 return 1;
1772 if (const_ok_for_arm (val))
1774 if (generate)
1775 emit_constant_insn (cond,
1776 gen_rtx_SET (VOIDmode, target,
1777 gen_rtx_MINUS (mode, GEN_INT (val),
1778 source)));
1779 return 1;
1781 can_negate = 1;
1783 break;
1785 default:
1786 gcc_unreachable ();
1789 /* If we can do it in one insn get out quickly. */
1790 if (const_ok_for_arm (val)
1791 || (can_negate_initial && const_ok_for_arm (-val))
1792 || (can_invert && const_ok_for_arm (~val)))
1794 if (generate)
1795 emit_constant_insn (cond,
1796 gen_rtx_SET (VOIDmode, target,
1797 (source
1798 ? gen_rtx_fmt_ee (code, mode, source,
1799 GEN_INT (val))
1800 : GEN_INT (val))));
1801 return 1;
1804 /* Calculate a few attributes that may be useful for specific
1805 optimizations. */
1806 for (i = 31; i >= 0; i--)
1808 if ((remainder & (1 << i)) == 0)
1809 clear_sign_bit_copies++;
1810 else
1811 break;
1814 for (i = 31; i >= 0; i--)
1816 if ((remainder & (1 << i)) != 0)
1817 set_sign_bit_copies++;
1818 else
1819 break;
1822 for (i = 0; i <= 31; i++)
1824 if ((remainder & (1 << i)) == 0)
1825 clear_zero_bit_copies++;
1826 else
1827 break;
1830 for (i = 0; i <= 31; i++)
1832 if ((remainder & (1 << i)) != 0)
1833 set_zero_bit_copies++;
1834 else
1835 break;
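/* For illustration: remainder == 0x0000ff00 yields
   clear_sign_bit_copies == 16, set_sign_bit_copies == 0,
   clear_zero_bit_copies == 8 and set_zero_bit_copies == 0.  */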
1838 switch (code)
1840 case SET:
1841 /* See if we can do this by sign_extending a constant that is known
1842 to be negative. This is a good way of doing it, since the shift
1843 may well merge into a subsequent insn. */
1844 if (set_sign_bit_copies > 1)
1846 if (const_ok_for_arm
1847 (temp1 = ARM_SIGN_EXTEND (remainder
1848 << (set_sign_bit_copies - 1))))
1850 if (generate)
1852 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1853 emit_constant_insn (cond,
1854 gen_rtx_SET (VOIDmode, new_src,
1855 GEN_INT (temp1)));
1856 emit_constant_insn (cond,
1857 gen_ashrsi3 (target, new_src,
1858 GEN_INT (set_sign_bit_copies - 1)));
1860 return 2;
1862 /* For an inverted constant, we will need to set the low bits,
1863 these will be shifted out of harm's way. */
1864 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1865 if (const_ok_for_arm (~temp1))
1867 if (generate)
1869 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1870 emit_constant_insn (cond,
1871 gen_rtx_SET (VOIDmode, new_src,
1872 GEN_INT (temp1)));
1873 emit_constant_insn (cond,
1874 gen_ashrsi3 (target, new_src,
1875 GEN_INT (set_sign_bit_copies - 1)));
1877 return 2;
1881 /* See if we can calculate the value as the difference between two
1882 valid immediates. */
1883 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1885 int topshift = clear_sign_bit_copies & ~1;
1887 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1888 & (0xff000000 >> topshift));
1890 /* If temp1 is zero, then that means the 9 most significant
1891 bits of remainder were 1 and we've caused it to overflow.
1892 When topshift is 0 we don't need to do anything since we
1893 can borrow from 'bit 32'. */
1894 if (temp1 == 0 && topshift != 0)
1895 temp1 = 0x80000000 >> (topshift - 1);
1897 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
1899 if (const_ok_for_arm (temp2))
1901 if (generate)
1903 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1904 emit_constant_insn (cond,
1905 gen_rtx_SET (VOIDmode, new_src,
1906 GEN_INT (temp1)));
1907 emit_constant_insn (cond,
1908 gen_addsi3 (target, new_src,
1909 GEN_INT (-temp2)));
1912 return 2;
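/* For illustration: 0x00ffff00 is not a valid immediate, but here
   temp1 == 0x01000000 and temp2 == 0x100 both are, so we get

	mov	rD, #0x01000000
	sub	rD, rD, #0x00000100

   (emitted as an add of -temp2) instead of a longer ORR chain.  */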
1916 /* See if we can generate this by setting the bottom (or the top)
1917 16 bits, and then shifting these into the other half of the
1918 word. We only look for the simplest cases; to do more would cost
1919 too much. Be careful, however, not to generate this when the
1920 alternative would take fewer insns. */
1921 if (val & 0xffff0000)
1923 temp1 = remainder & 0xffff0000;
1924 temp2 = remainder & 0x0000ffff;
1926 /* Overlaps outside this range are best done using other methods. */
1927 for (i = 9; i < 24; i++)
1929 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1930 && !const_ok_for_arm (temp2))
1932 rtx new_src = (subtargets
1933 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1934 : target);
1935 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1936 source, subtargets, generate);
1937 source = new_src;
1938 if (generate)
1939 emit_constant_insn
1940 (cond,
1941 gen_rtx_SET
1942 (VOIDmode, target,
1943 gen_rtx_IOR (mode,
1944 gen_rtx_ASHIFT (mode, source,
1945 GEN_INT (i)),
1946 source)));
1947 return insns + 1;
1951 /* Don't duplicate cases already considered. */
1952 for (i = 17; i < 24; i++)
1954 if (((temp1 | (temp1 >> i)) == remainder)
1955 && !const_ok_for_arm (temp1))
1957 rtx new_src = (subtargets
1958 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1959 : target);
1960 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1961 source, subtargets, generate);
1962 source = new_src;
1963 if (generate)
1964 emit_constant_insn
1965 (cond,
1966 gen_rtx_SET (VOIDmode, target,
1967 gen_rtx_IOR
1968 (mode,
1969 gen_rtx_LSHIFTRT (mode, source,
1970 GEN_INT (i)),
1971 source)));
1972 return insns + 1;
1976 break;
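/* For illustration of the 16-bit replication case above: 0x01010101 is
   handled by first building 0x0101 (two insns, since 0x0101 is itself
   not a valid immediate) and then merging it into the top half with
   "orr rD, rD, rD, lsl #16" -- three instructions instead of four.  */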
1978 case IOR:
1979 case XOR:
1980 /* If we have IOR or XOR, and the constant can be loaded in a
1981 single instruction, and we can find a temporary to put it in,
1982 then this can be done in two instructions instead of 3-4. */
1983 if (subtargets
1984 /* TARGET can't be NULL if SUBTARGETS is 0. */
1985 || (reload_completed && !reg_mentioned_p (target, source)))
1987 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1989 if (generate)
1991 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1993 emit_constant_insn (cond,
1994 gen_rtx_SET (VOIDmode, sub,
1995 GEN_INT (val)));
1996 emit_constant_insn (cond,
1997 gen_rtx_SET (VOIDmode, target,
1998 gen_rtx_fmt_ee (code, mode,
1999 source, sub)));
2001 return 2;
2005 if (code == XOR)
2006 break;
2008 if (set_sign_bit_copies > 8
2009 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2011 if (generate)
2013 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2014 rtx shift = GEN_INT (set_sign_bit_copies);
2016 emit_constant_insn
2017 (cond,
2018 gen_rtx_SET (VOIDmode, sub,
2019 gen_rtx_NOT (mode,
2020 gen_rtx_ASHIFT (mode,
2021 source,
2022 shift))));
2023 emit_constant_insn
2024 (cond,
2025 gen_rtx_SET (VOIDmode, target,
2026 gen_rtx_NOT (mode,
2027 gen_rtx_LSHIFTRT (mode, sub,
2028 shift))));
2030 return 2;
2033 if (set_zero_bit_copies > 8
2034 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2036 if (generate)
2038 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2039 rtx shift = GEN_INT (set_zero_bit_copies);
2041 emit_constant_insn
2042 (cond,
2043 gen_rtx_SET (VOIDmode, sub,
2044 gen_rtx_NOT (mode,
2045 gen_rtx_LSHIFTRT (mode,
2046 source,
2047 shift))));
2048 emit_constant_insn
2049 (cond,
2050 gen_rtx_SET (VOIDmode, target,
2051 gen_rtx_NOT (mode,
2052 gen_rtx_ASHIFT (mode, sub,
2053 shift))));
2055 return 2;
2058 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2060 if (generate)
2062 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2063 emit_constant_insn (cond,
2064 gen_rtx_SET (VOIDmode, sub,
2065 gen_rtx_NOT (mode, source)));
2066 source = sub;
2067 if (subtargets)
2068 sub = gen_reg_rtx (mode);
2069 emit_constant_insn (cond,
2070 gen_rtx_SET (VOIDmode, sub,
2071 gen_rtx_AND (mode, source,
2072 GEN_INT (temp1))));
2073 emit_constant_insn (cond,
2074 gen_rtx_SET (VOIDmode, target,
2075 gen_rtx_NOT (mode, sub)));
2077 return 3;
2079 break;
2081 case AND:
2082 /* See if two shifts will do two or more insns' worth of work. */
2083 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2085 HOST_WIDE_INT shift_mask = ((0xffffffff
2086 << (32 - clear_sign_bit_copies))
2087 & 0xffffffff);
2089 if ((remainder | shift_mask) != 0xffffffff)
2091 if (generate)
2093 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2094 insns = arm_gen_constant (AND, mode, cond,
2095 remainder | shift_mask,
2096 new_src, source, subtargets, 1);
2097 source = new_src;
2099 else
2101 rtx targ = subtargets ? NULL_RTX : target;
2102 insns = arm_gen_constant (AND, mode, cond,
2103 remainder | shift_mask,
2104 targ, source, subtargets, 0);
2108 if (generate)
2110 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2111 rtx shift = GEN_INT (clear_sign_bit_copies);
2113 emit_insn (gen_ashlsi3 (new_src, source, shift));
2114 emit_insn (gen_lshrsi3 (target, new_src, shift));
2117 return insns + 2;
2120 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2122 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2124 if ((remainder | shift_mask) != 0xffffffff)
2126 if (generate)
2128 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2130 insns = arm_gen_constant (AND, mode, cond,
2131 remainder | shift_mask,
2132 new_src, source, subtargets, 1);
2133 source = new_src;
2135 else
2137 rtx targ = subtargets ? NULL_RTX : target;
2139 insns = arm_gen_constant (AND, mode, cond,
2140 remainder | shift_mask,
2141 targ, source, subtargets, 0);
2145 if (generate)
2147 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2148 rtx shift = GEN_INT (clear_zero_bit_copies);
2150 emit_insn (gen_lshrsi3 (new_src, source, shift));
2151 emit_insn (gen_ashlsi3 (target, new_src, shift));
2154 return insns + 2;
2157 break;
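/* For illustration: "and rD, rS, #0x0000ffff" has no valid immediate
   encoding (nor does BIC with ~0xffff), but the two-shift form above
   works:

	mov	rD, rS, lsl #16
	mov	rD, rD, lsr #16
*/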
2159 default:
2160 break;
2163 for (i = 0; i < 32; i++)
2164 if (remainder & (1 << i))
2165 num_bits_set++;
2167 if (code == AND || (can_invert && num_bits_set > 16))
2168 remainder = (~remainder) & 0xffffffff;
2169 else if (code == PLUS && num_bits_set > 16)
2170 remainder = (-remainder) & 0xffffffff;
2171 else
2173 can_invert = 0;
2174 can_negate = 0;
2177 /* Now try and find a way of doing the job in either two or three
2178 instructions.
2179 We start by looking for the largest block of zeros that are aligned on
2180 a 2-bit boundary; we then fill up the temps, wrapping around to the
2181 top of the word when we drop off the bottom.
2182 In the worst case this code should produce no more than four insns. */
2184 int best_start = 0;
2185 int best_consecutive_zeros = 0;
2187 for (i = 0; i < 32; i += 2)
2189 int consecutive_zeros = 0;
2191 if (!(remainder & (3 << i)))
2193 while ((i < 32) && !(remainder & (3 << i)))
2195 consecutive_zeros += 2;
2196 i += 2;
2198 if (consecutive_zeros > best_consecutive_zeros)
2200 best_consecutive_zeros = consecutive_zeros;
2201 best_start = i - consecutive_zeros;
2203 i -= 2;
2207 /* So long as it won't require any more insns to do so, it's
2208 desirable to emit a small constant (in bits 0...9) in the last
2209 insn. This way there is more chance that it can be combined with
2210 a later addressing insn to form a pre-indexed load or store
2211 operation. Consider:
2213 *((volatile int *)0xe0000100) = 1;
2214 *((volatile int *)0xe0000110) = 2;
2216 We want this to wind up as:
2218 mov rA, #0xe0000000
2219 mov rB, #1
2220 str rB, [rA, #0x100]
2221 mov rB, #2
2222 str rB, [rA, #0x110]
2224 rather than having to synthesize both large constants from scratch.
2226 Therefore, we calculate how many insns would be required to emit
2227 the constant starting from `best_start', and also starting from
2228 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2229 yield a shorter sequence, we may as well use zero. */
2230 if (best_start != 0
2231 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2232 && (count_insns_for_constant (remainder, 0) <=
2233 count_insns_for_constant (remainder, best_start)))
2234 best_start = 0;
2236 /* Now start emitting the insns. */
2237 i = best_start;
2238 do
2240 int end;
2242 if (i <= 0)
2243 i += 32;
2244 if (remainder & (3 << (i - 2)))
2246 end = i - 8;
2247 if (end < 0)
2248 end += 32;
2249 temp1 = remainder & ((0x0ff << end)
2250 | ((i < end) ? (0xff >> (32 - end)) : 0));
2251 remainder &= ~temp1;
2253 if (generate)
2255 rtx new_src, temp1_rtx;
2257 if (code == SET || code == MINUS)
2259 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2260 if (can_invert && code != MINUS)
2261 temp1 = ~temp1;
2263 else
2265 if (remainder && subtargets)
2266 new_src = gen_reg_rtx (mode);
2267 else
2268 new_src = target;
2269 if (can_invert)
2270 temp1 = ~temp1;
2271 else if (can_negate)
2272 temp1 = -temp1;
2275 temp1 = trunc_int_for_mode (temp1, mode);
2276 temp1_rtx = GEN_INT (temp1);
2278 if (code == SET)
2280 else if (code == MINUS)
2281 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2282 else
2283 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2285 emit_constant_insn (cond,
2286 gen_rtx_SET (VOIDmode, new_src,
2287 temp1_rtx));
2288 source = new_src;
2291 if (code == SET)
2293 can_invert = 0;
2294 code = PLUS;
2296 else if (code == MINUS)
2297 code = PLUS;
2299 insns++;
2300 i -= 6;
2302 i -= 2;
2304 while (remainder);
2307 return insns;
2310 /* Canonicalize a comparison so that we are more likely to recognize it.
2311 This can be done for a few constant compares, where we can make the
2312 immediate value easier to load. */
2314 enum rtx_code
2315 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2317 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2319 switch (code)
2321 case EQ:
2322 case NE:
2323 return code;
2325 case GT:
2326 case LE:
2327 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2328 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2330 *op1 = GEN_INT (i + 1);
2331 return code == GT ? GE : LT;
2333 break;
2335 case GE:
2336 case LT:
2337 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2338 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2340 *op1 = GEN_INT (i - 1);
2341 return code == GE ? GT : LE;
2343 break;
2345 case GTU:
2346 case LEU:
2347 if (i != ~((unsigned HOST_WIDE_INT) 0)
2348 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2350 *op1 = GEN_INT (i + 1);
2351 return code == GTU ? GEU : LTU;
2353 break;
2355 case GEU:
2356 case LTU:
2357 if (i != 0
2358 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2360 *op1 = GEN_INT (i - 1);
2361 return code == GEU ? GTU : LEU;
2363 break;
2365 default:
2366 gcc_unreachable ();
2369 return code;
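/* For illustration: "x > 0xfff" is rewritten as "x >= 0x1000", since
   0xfff is not a valid ARM immediate but 0x1000 is.  */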
2373 /* Define how to find the value returned by a function. */
2375 rtx
2376 arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2378 enum machine_mode mode;
2379 int unsignedp ATTRIBUTE_UNUSED;
2380 rtx r ATTRIBUTE_UNUSED;
2382 mode = TYPE_MODE (type);
2383 /* Promote integer types. */
2384 if (INTEGRAL_TYPE_P (type))
2385 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2387 /* Promote small structs returned in a register to full-word size
2388 for big-endian AAPCS. */
2389 if (arm_return_in_msb (type))
2391 HOST_WIDE_INT size = int_size_in_bytes (type);
2392 if (size % UNITS_PER_WORD != 0)
2394 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2395 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2399 return LIBCALL_VALUE (mode);
2402 /* Determine the amount of memory needed to store the possible return
2403 registers of an untyped call. */
2404 int
2405 arm_apply_result_size (void)
2407 int size = 16;
2409 if (TARGET_ARM)
2411 if (TARGET_HARD_FLOAT_ABI)
2413 if (TARGET_FPA)
2414 size += 12;
2415 if (TARGET_MAVERICK)
2416 size += 8;
2418 if (TARGET_IWMMXT_ABI)
2419 size += 8;
2422 return size;
2425 /* Decide whether a type should be returned in memory (true)
2426 or in a register (false). This is called by the macro
2427 RETURN_IN_MEMORY. */
2428 int
2429 arm_return_in_memory (tree type)
2431 HOST_WIDE_INT size;
2433 if (!AGGREGATE_TYPE_P (type)
2434 && (TREE_CODE (type) != VECTOR_TYPE)
2435 && !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2436 /* All simple types are returned in registers.
2437 For AAPCS, complex types are treated the same as aggregates. */
2438 return 0;
2440 size = int_size_in_bytes (type);
2442 if (arm_abi != ARM_ABI_APCS)
2444 /* ATPCS and later return aggregate types in memory only if they are
2445 larger than a word (or are variable size). */
2446 return (size < 0 || size > UNITS_PER_WORD);
2449 /* To maximize backwards compatibility with previous versions of gcc,
2450 return vectors up to 4 words in registers. */
2451 if (TREE_CODE (type) == VECTOR_TYPE)
2452 return (size < 0 || size > (4 * UNITS_PER_WORD));
2454 /* For the arm-wince targets we choose to be compatible with Microsoft's
2455 ARM and Thumb compilers, which always return aggregates in memory. */
2456 #ifndef ARM_WINCE
2457 /* All structures/unions bigger than one word are returned in memory.
2458 Also catch the case where int_size_in_bytes returns -1. In this case
2459 the aggregate is either huge or of variable size, and in either case
2460 we will want to return it via memory and not in a register. */
2461 if (size < 0 || size > UNITS_PER_WORD)
2462 return 1;
2464 if (TREE_CODE (type) == RECORD_TYPE)
2466 tree field;
2468 /* For a struct the APCS says that we only return in a register
2469 if the type is 'integer like' and every addressable element
2470 has an offset of zero. For practical purposes this means
2471 that the structure can have at most one non-bit-field element
2472 and that this element must be the first one in the structure. */
2474 /* Find the first field, ignoring non-FIELD_DECL things which will
2475 have been created by C++. */
2476 for (field = TYPE_FIELDS (type);
2477 field && TREE_CODE (field) != FIELD_DECL;
2478 field = TREE_CHAIN (field))
2479 continue;
2481 if (field == NULL)
2482 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2484 /* Check that the first field is valid for returning in a register. */
2486 /* ... Floats are not allowed. */
2487 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2488 return 1;
2490 /* ... Aggregates that are not themselves valid for returning in
2491 a register are not allowed. */
2492 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2493 return 1;
2495 /* Now check the remaining fields, if any. Only bitfields are allowed,
2496 since they are not addressable. */
2497 for (field = TREE_CHAIN (field);
2498 field;
2499 field = TREE_CHAIN (field))
2501 if (TREE_CODE (field) != FIELD_DECL)
2502 continue;
2504 if (!DECL_BIT_FIELD_TYPE (field))
2505 return 1;
2508 return 0;
2511 if (TREE_CODE (type) == UNION_TYPE)
2513 tree field;
2515 /* Unions can be returned in registers if every element is
2516 integral, or can be returned in an integer register. */
2517 for (field = TYPE_FIELDS (type);
2518 field;
2519 field = TREE_CHAIN (field))
2521 if (TREE_CODE (field) != FIELD_DECL)
2522 continue;
2524 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2525 return 1;
2527 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2528 return 1;
2531 return 0;
2533 #endif /* not ARM_WINCE */
2535 /* Return all other types in memory. */
2536 return 1;
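/* For illustration, under the APCS rules above: "struct { int i; }" is
   integer-like and is returned in r0, while "struct { int i, j; }"
   (larger than a word) and "struct { float f; }" (first field a float)
   are both returned in memory.  */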
2539 /* Indicate whether or not words of a double are in big-endian order. */
2541 int
2542 arm_float_words_big_endian (void)
2544 if (TARGET_MAVERICK)
2545 return 0;
2547 /* For FPA, float words are always big-endian. For VFP, float words
2548 follow the memory system mode. */
2550 if (TARGET_FPA)
2552 return 1;
2555 if (TARGET_VFP)
2556 return (TARGET_BIG_END ? 1 : 0);
2558 return 1;
2561 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2562 for a call to a function whose data type is FNTYPE.
2563 For a library call, FNTYPE is NULL. */
2564 void
2565 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2566 rtx libname ATTRIBUTE_UNUSED,
2567 tree fndecl ATTRIBUTE_UNUSED)
2569 /* On the ARM, the offset starts at 0. */
2570 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2571 pcum->iwmmxt_nregs = 0;
2572 pcum->can_split = true;
2574 pcum->call_cookie = CALL_NORMAL;
2576 if (TARGET_LONG_CALLS)
2577 pcum->call_cookie = CALL_LONG;
2579 /* Check for long call/short call attributes. The attributes
2580 override any command line option. */
2581 if (fntype)
2583 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2584 pcum->call_cookie = CALL_SHORT;
2585 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2586 pcum->call_cookie = CALL_LONG;
2589 /* Varargs vectors are treated the same as long long.
2590 named_count avoids having to change the way arm handles 'named'. */
2591 pcum->named_count = 0;
2592 pcum->nargs = 0;
2594 if (TARGET_REALLY_IWMMXT && fntype)
2596 tree fn_arg;
2598 for (fn_arg = TYPE_ARG_TYPES (fntype);
2599 fn_arg;
2600 fn_arg = TREE_CHAIN (fn_arg))
2601 pcum->named_count += 1;
2603 if (! pcum->named_count)
2604 pcum->named_count = INT_MAX;
2609 /* Return true if mode/type need doubleword alignment. */
2610 bool
2611 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2613 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2614 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2618 /* Determine where to put an argument to a function.
2619 Value is zero to push the argument on the stack,
2620 or a hard register in which to store the argument.
2622 MODE is the argument's machine mode.
2623 TYPE is the data type of the argument (as a tree).
2624 This is null for libcalls where that information may
2625 not be available.
2626 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2627 the preceding args and about the function being called.
2628 NAMED is nonzero if this argument is a named parameter
2629 (otherwise it is an extra parameter matching an ellipsis). */
2631 rtx
2632 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2633 tree type, int named)
2635 int nregs;
2637 /* Varargs vectors are treated the same as long long.
2638 named_count avoids having to change the way arm handles 'named'. */
2639 if (TARGET_IWMMXT_ABI
2640 && arm_vector_mode_supported_p (mode)
2641 && pcum->named_count > pcum->nargs + 1)
2643 if (pcum->iwmmxt_nregs <= 9)
2644 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2645 else
2647 pcum->can_split = false;
2648 return NULL_RTX;
2652 /* Put doubleword aligned quantities in even register pairs. */
2653 if (pcum->nregs & 1
2654 && ARM_DOUBLEWORD_ALIGN
2655 && arm_needs_doubleword_align (mode, type))
2656 pcum->nregs++;
2658 if (mode == VOIDmode)
2659 /* Compute operand 2 of the call insn. */
2660 return GEN_INT (pcum->call_cookie);
2662 /* Only allow splitting an arg between regs and memory if all preceding
2663 args were allocated to regs. For args passed by reference we only count
2664 the reference pointer. */
2665 if (pcum->can_split)
2666 nregs = 1;
2667 else
2668 nregs = ARM_NUM_REGS2 (mode, type);
2670 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2671 return NULL_RTX;
2673 return gen_rtx_REG (mode, pcum->nregs);
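/* For illustration: when ARM_DOUBLEWORD_ALIGN holds (e.g. AAPCS), in
   "f (int a, long long b)" the DImode argument needs doubleword
   alignment, so it goes in the even pair r2/r3 and r1 is left
   unused.  */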
2676 static int
2677 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2678 tree type, bool named ATTRIBUTE_UNUSED)
2680 int nregs = pcum->nregs;
2682 if (arm_vector_mode_supported_p (mode))
2683 return 0;
2685 if (NUM_ARG_REGS > nregs
2686 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2687 && pcum->can_split)
2688 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2690 return 0;
2693 /* Variable sized types are passed by reference. This is a GCC
2694 extension to the ARM ABI. */
2696 static bool
2697 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2698 enum machine_mode mode ATTRIBUTE_UNUSED,
2699 tree type, bool named ATTRIBUTE_UNUSED)
2701 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2704 /* Encode the current state of the #pragma [no_]long_calls. */
2705 typedef enum
2707 OFF, /* No #pragma [no_]long_calls is in effect. */
2708 LONG, /* #pragma long_calls is in effect. */
2709 SHORT /* #pragma no_long_calls is in effect. */
2710 } arm_pragma_enum;
2712 static arm_pragma_enum arm_pragma_long_calls = OFF;
2714 void
2715 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2717 arm_pragma_long_calls = LONG;
2720 void
2721 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2723 arm_pragma_long_calls = SHORT;
2726 void
2727 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2729 arm_pragma_long_calls = OFF;
2732 /* Table of machine attributes. */
2733 const struct attribute_spec arm_attribute_table[] =
2735 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2736 /* Function calls made to this symbol must be done indirectly, because
2737 it may lie outside of the 26-bit addressing range of a normal function
2738 call. */
2739 { "long_call", 0, 0, false, true, true, NULL },
2740 /* Whereas these functions are always known to reside within the 26-bit
2741 addressing range. */
2742 { "short_call", 0, 0, false, true, true, NULL },
2743 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2744 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2745 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2746 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2747 #ifdef ARM_PE
2748 /* ARM/PE has three new attributes:
2749 interfacearm - ?
2750 dllexport - for exporting a function/variable that will live in a dll
2751 dllimport - for importing a function/variable from a dll
2753 Microsoft allows multiple declspecs in one __declspec, separating
2754 them with spaces. We do NOT support this. Instead, use __declspec
2755 multiple times.
2756 */
2757 { "dllimport", 0, 0, true, false, false, NULL },
2758 { "dllexport", 0, 0, true, false, false, NULL },
2759 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2760 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2761 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2762 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2763 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2764 #endif
2765 { NULL, 0, 0, false, false, false, NULL }
2768 /* Handle an attribute requiring a FUNCTION_DECL;
2769 arguments as in struct attribute_spec.handler. */
2770 static tree
2771 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2772 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2774 if (TREE_CODE (*node) != FUNCTION_DECL)
2776 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2777 IDENTIFIER_POINTER (name));
2778 *no_add_attrs = true;
2781 return NULL_TREE;
2784 /* Handle an "interrupt" or "isr" attribute;
2785 arguments as in struct attribute_spec.handler. */
2786 static tree
2787 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2788 bool *no_add_attrs)
2790 if (DECL_P (*node))
2792 if (TREE_CODE (*node) != FUNCTION_DECL)
2794 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2795 IDENTIFIER_POINTER (name));
2796 *no_add_attrs = true;
2798 /* FIXME: the argument, if any, is checked for type attributes;
2799 should it be checked for decl ones? */
2801 else
2803 if (TREE_CODE (*node) == FUNCTION_TYPE
2804 || TREE_CODE (*node) == METHOD_TYPE)
2806 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2808 warning (OPT_Wattributes, "%qs attribute ignored",
2809 IDENTIFIER_POINTER (name));
2810 *no_add_attrs = true;
2813 else if (TREE_CODE (*node) == POINTER_TYPE
2814 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2815 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2816 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2818 *node = build_variant_type_copy (*node);
2819 TREE_TYPE (*node) = build_type_attribute_variant
2820 (TREE_TYPE (*node),
2821 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2822 *no_add_attrs = true;
2824 else
2826 /* Possibly pass this attribute on from the type to a decl. */
2827 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2828 | (int) ATTR_FLAG_FUNCTION_NEXT
2829 | (int) ATTR_FLAG_ARRAY_NEXT))
2831 *no_add_attrs = true;
2832 return tree_cons (name, args, NULL_TREE);
2834 else
2836 warning (OPT_Wattributes, "%qs attribute ignored",
2837 IDENTIFIER_POINTER (name));
2842 return NULL_TREE;
2845 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2846 /* Handle the "notshared" attribute. This attribute is another way of
2847 requesting hidden visibility. ARM's compiler supports
2848 "__declspec(notshared)"; we support the same thing via an
2849 attribute. */
2851 static tree
2852 arm_handle_notshared_attribute (tree *node,
2853 tree name ATTRIBUTE_UNUSED,
2854 tree args ATTRIBUTE_UNUSED,
2855 int flags ATTRIBUTE_UNUSED,
2856 bool *no_add_attrs)
2858 tree decl = TYPE_NAME (*node);
2860 if (decl)
2862 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2863 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2864 *no_add_attrs = false;
2866 return NULL_TREE;
2868 #endif
2870 /* Return 0 if the attributes for two types are incompatible, 1 if they
2871 are compatible, and 2 if they are nearly compatible (which causes a
2872 warning to be generated). */
2873 static int
2874 arm_comp_type_attributes (tree type1, tree type2)
2876 int l1, l2, s1, s2;
2878 /* Check for mismatch of non-default calling convention. */
2879 if (TREE_CODE (type1) != FUNCTION_TYPE)
2880 return 1;
2882 /* Check for mismatched call attributes. */
2883 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2884 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2885 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2886 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2888 /* Only bother to check if an attribute is defined. */
2889 if (l1 | l2 | s1 | s2)
2891 /* If one type has an attribute, the other must have the same attribute. */
2892 if ((l1 != l2) || (s1 != s2))
2893 return 0;
2895 /* Disallow mixed attributes. */
2896 if ((l1 & s2) || (l2 & s1))
2897 return 0;
2900 /* Check for mismatched ISR attribute. */
2901 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2902 if (! l1)
2903 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2904 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2905 if (! l2)
2906 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2907 if (l1 != l2)
2908 return 0;
2910 return 1;
2913 /* Encode long_call or short_call attribute by prefixing
2914 symbol name in DECL with a special character FLAG. */
2915 void
2916 arm_encode_call_attribute (tree decl, int flag)
2918 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2919 int len = strlen (str);
2920 char * newstr;
2922 /* Do not allow weak functions to be treated as short call. */
2923 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2924 return;
2926 newstr = alloca (len + 2);
2927 newstr[0] = flag;
2928 strcpy (newstr + 1, str);
2930 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2931 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2934 /* Assigns default attributes to newly defined type. This is used to
2935 set short_call/long_call attributes for function types of
2936 functions defined inside corresponding #pragma scopes. */
2937 static void
2938 arm_set_default_type_attributes (tree type)
2940 /* Add __attribute__ ((long_call)) to all functions, when
2941 inside #pragma long_calls or __attribute__ ((short_call)),
2942 when inside #pragma no_long_calls. */
2943 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2945 tree type_attr_list, attr_name;
2946 type_attr_list = TYPE_ATTRIBUTES (type);
2948 if (arm_pragma_long_calls == LONG)
2949 attr_name = get_identifier ("long_call");
2950 else if (arm_pragma_long_calls == SHORT)
2951 attr_name = get_identifier ("short_call");
2952 else
2953 return;
2955 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2956 TYPE_ATTRIBUTES (type) = type_attr_list;
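/* For illustration (pragma names as registered for this port):

	#pragma long_calls
	void far_away (void);	   -- type gets the long_call attribute
	#pragma long_calls_off
*/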
2960 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2961 defined within the current compilation unit. If this cannot be
2962 determined, then 0 is returned. */
2963 static int
2964 current_file_function_operand (rtx sym_ref)
2966 /* This is a bit of a fib. A function will have a short call flag
2967 applied to its name if it has the short call attribute, or it has
2968 already been defined within the current compilation unit. */
2969 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2970 return 1;
2972 /* The current function is always defined within the current compilation
2973 unit. If it is a weak definition, however, then this may not be the real
2974 definition of the function, and so we have to say no. */
2975 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2976 && !DECL_WEAK (current_function_decl))
2977 return 1;
2979 /* We cannot make the determination - default to returning 0. */
2980 return 0;
2983 /* Return nonzero if a 32-bit "long_call" should be generated for
2984 this call. We generate a long_call if the function:
2986 a. has an __attribute__ ((long_call))
2987 or b. is within the scope of a #pragma long_calls
2988 or c. the -mlong-calls command line switch has been specified,
2989 and either:
2990 1. -ffunction-sections is in effect
2991 or 2. the current function has __attribute__ ((section))
2992 or 3. the target function has __attribute__ ((section))
2994 However we do not generate a long call if the function:
2996 d. has an __attribute__ ((short_call))
2997 or e. is inside the scope of a #pragma no_long_calls
2998 or f. is defined within the current compilation unit.
3000 This function will be called by C fragments contained in the machine
3001 description file. SYM_REF and CALL_COOKIE correspond to the matched
3002 rtl operands. CALL_SYMBOL is used to distinguish between
3003 two different callers of the function. It is set to 1 in the
3004 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3005 and "call_value" patterns. This is because of the difference in the
3006 SYM_REFs passed by these patterns. */
3007 int
3008 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3010 if (!call_symbol)
3012 if (GET_CODE (sym_ref) != MEM)
3013 return 0;
3015 sym_ref = XEXP (sym_ref, 0);
3018 if (GET_CODE (sym_ref) != SYMBOL_REF)
3019 return 0;
3021 if (call_cookie & CALL_SHORT)
3022 return 0;
3024 if (TARGET_LONG_CALLS)
3026 if (flag_function_sections
3027 || DECL_SECTION_NAME (current_function_decl))
3028 /* c.3 is handled by the definition of the
3029 ARM_DECLARE_FUNCTION_SIZE macro. */
3030 return 1;
3033 if (current_file_function_operand (sym_ref))
3034 return 0;
3036 return (call_cookie & CALL_LONG)
3037 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3038 || TARGET_LONG_CALLS;
3041 /* Return nonzero if it is ok to make a tail-call to DECL. */
3042 static bool
3043 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3045 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3047 if (cfun->machine->sibcall_blocked)
3048 return false;
3050 /* Never tailcall something for which we have no decl, or if we
3051 are in Thumb mode. */
3052 if (decl == NULL || TARGET_THUMB)
3053 return false;
3055 /* Get the calling method. */
3056 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3057 call_type = CALL_SHORT;
3058 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3059 call_type = CALL_LONG;
3061 /* Cannot tail-call to long calls, since these are out of range of
3062 a branch instruction. However, if not compiling PIC, we know
3063 we can reach the symbol if it is in this compilation unit. */
3064 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3065 return false;
3067 /* If we are interworking and the function is not declared static
3068 then we can't tail-call it unless we know that it exists in this
3069 compilation unit (since it might be a Thumb routine). */
3070 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3071 return false;
3073 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3074 if (IS_INTERRUPT (arm_current_func_type ()))
3075 return false;
3077 /* Everything else is ok. */
3078 return true;
3082 /* Addressing mode support functions. */
3084 /* Return nonzero if X is a legitimate immediate operand when compiling
3085 for PIC. */
3086 int
3087 legitimate_pic_operand_p (rtx x)
3089 if (CONSTANT_P (x)
3090 && flag_pic
3091 && (GET_CODE (x) == SYMBOL_REF
3092 || (GET_CODE (x) == CONST
3093 && GET_CODE (XEXP (x, 0)) == PLUS
3094 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
3095 return 0;
3097 return 1;
3100 rtx
3101 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3103 if (GET_CODE (orig) == SYMBOL_REF
3104 || GET_CODE (orig) == LABEL_REF)
3106 #ifndef AOF_ASSEMBLER
3107 rtx pic_ref, address;
3108 #endif
3109 rtx insn;
3110 int subregs = 0;
3112 if (reg == 0)
3114 gcc_assert (!no_new_pseudos);
3115 reg = gen_reg_rtx (Pmode);
3117 subregs = 1;
3120 #ifdef AOF_ASSEMBLER
3121 /* The AOF assembler can generate relocations for these directly, and
3122 understands that the PIC register has to be added into the offset. */
3123 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3124 #else
3125 if (subregs)
3126 address = gen_reg_rtx (Pmode);
3127 else
3128 address = reg;
3130 if (TARGET_ARM)
3131 emit_insn (gen_pic_load_addr_arm (address, orig));
3132 else
3133 emit_insn (gen_pic_load_addr_thumb (address, orig));
3135 if ((GET_CODE (orig) == LABEL_REF
3136 || (GET_CODE (orig) == SYMBOL_REF
3137 && SYMBOL_REF_LOCAL_P (orig)))
3138 && NEED_GOT_RELOC)
3139 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3140 else
3142 pic_ref = gen_const_mem (Pmode,
3143 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3144 address));
3147 insn = emit_move_insn (reg, pic_ref);
3148 #endif
3149 current_function_uses_pic_offset_table = 1;
3150 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3151 by loop. */
3152 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3153 REG_NOTES (insn));
3154 return reg;
3156 else if (GET_CODE (orig) == CONST)
3158 rtx base, offset;
3160 if (GET_CODE (XEXP (orig, 0)) == PLUS
3161 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3162 return orig;
3164 if (reg == 0)
3166 gcc_assert (!no_new_pseudos);
3167 reg = gen_reg_rtx (Pmode);
3170 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3172 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3173 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3174 base == reg ? 0 : reg);
3176 if (GET_CODE (offset) == CONST_INT)
3178 /* The base register doesn't really matter; we only want to
3179 test the index for the appropriate mode. */
3180 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3182 gcc_assert (!no_new_pseudos);
3183 offset = force_reg (Pmode, offset);
3186 if (GET_CODE (offset) == CONST_INT)
3187 return plus_constant (base, INTVAL (offset));
3190 if (GET_MODE_SIZE (mode) > 4
3191 && (GET_MODE_CLASS (mode) == MODE_INT
3192 || TARGET_SOFT_FLOAT))
3194 emit_insn (gen_addsi3 (reg, base, offset));
3195 return reg;
3198 return gen_rtx_PLUS (Pmode, base, offset);
3201 return orig;
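/* For illustration, a reference to a global symbol typically becomes
   roughly

	ldr	rT, .LCn	   -- .LCn holds sym(GOT)
	ldr	rD, [rPIC, rT]	   -- fetch the address from the GOT

   whereas a local symbol or label adds its GOT-relative offset to the
   PIC base register directly.  */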
3205 /* Find a spare low register to use during the prolog of a function. */
3207 static int
3208 thumb_find_work_register (unsigned long pushed_regs_mask)
3210 int reg;
3212 /* Check the argument registers first as these are call-used. The
3213 register allocation order means that sometimes r3 might be used
3214 but earlier argument registers might not, so check them all. */
3215 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3216 if (!regs_ever_live[reg])
3217 return reg;
3219 /* Before going on to check the call-saved registers we can try a couple
3220 more ways of deducing that r3 is available. The first is when we are
3221 pushing anonymous arguments onto the stack and we have fewer than 4
3222 registers' worth of fixed arguments (*). In this case r3 will be part of
3223 the variable argument list and so we can be sure that it will be
3224 pushed right at the start of the function. Hence it will be available
3225 for the rest of the prologue.
3226 (*): i.e. current_function_pretend_args_size is greater than 0. */
3227 if (cfun->machine->uses_anonymous_args
3228 && current_function_pretend_args_size > 0)
3229 return LAST_ARG_REGNUM;
3231 /* The other case is when we have fixed arguments but fewer than 4 registers'
3232 worth. In this case r3 might be used in the body of the function, but
3233 it is not being used to convey an argument into the function. In theory
3234 we could just check current_function_args_size to see how many bytes are
3235 being passed in argument registers, but it seems that it is unreliable.
3236 Sometimes it will have the value 0 when in fact arguments are being
3237 passed. (See testcase execute/20021111-1.c for an example.) So we
3238 check the args_info.nregs field as well. The problem with this field is
3239 that it makes no allowances for arguments that are passed to the
3240 function but which are not used. Hence we could miss an opportunity
3241 when a function has an unused argument in r3. But it is better to be
3242 safe than to be sorry. */
3243 if (! cfun->machine->uses_anonymous_args
3244 && current_function_args_size >= 0
3245 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3246 && cfun->args_info.nregs < 4)
3247 return LAST_ARG_REGNUM;
3249 /* Otherwise look for a call-saved register that is going to be pushed. */
3250 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3251 if (pushed_regs_mask & (1 << reg))
3252 return reg;
3254 /* Something went wrong - thumb_compute_save_reg_mask()
3255 should have arranged for a suitable register to be pushed. */
3256 gcc_unreachable ();
3260 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3261 low register. */
3263 void
3264 arm_load_pic_register (unsigned int scratch)
3266 #ifndef AOF_ASSEMBLER
3267 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3268 rtx global_offset_table;
3270 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3271 return;
3273 gcc_assert (flag_pic);
3275 l1 = gen_label_rtx ();
3277 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3278 /* On the ARM the PC register contains 'dot + 8' at the time of the
3279 addition; on the Thumb it is 'dot + 4'. */
3280 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3281 if (GOT_PCREL)
3282 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3283 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3284 else
3285 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3287 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3289 if (TARGET_ARM)
3291 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3292 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3294 else
3296 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3298 /* We will have pushed the pic register, so should always be
3299 able to find a work register. */
3300 pic_tmp = gen_rtx_REG (SImode, scratch);
3301 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3302 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3304 else
3305 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3306 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3309 /* Need to emit this whether or not we obey regdecls,
3310 since setjmp/longjmp can cause life info to screw up. */
3311 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3312 #endif /* AOF_ASSEMBLER */
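/* For illustration, on ARM (ignoring the GOT_PCREL variation) the code
   above amounts to

	ldr	rPIC, .Ln	-- .Ln: .word _GLOBAL_OFFSET_TABLE_ - (.L1 + 8)
   .L1:	add	rPIC, pc, rPIC

   the -8 bias cancelling the 'dot + 8' that reading the pc yields.  */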
3316 /* Return nonzero if X is valid as an ARM state addressing register. */
3317 static int
3318 arm_address_register_rtx_p (rtx x, int strict_p)
3320 int regno;
3322 if (GET_CODE (x) != REG)
3323 return 0;
3325 regno = REGNO (x);
3327 if (strict_p)
3328 return ARM_REGNO_OK_FOR_BASE_P (regno);
3330 return (regno <= LAST_ARM_REGNUM
3331 || regno >= FIRST_PSEUDO_REGISTER
3332 || regno == FRAME_POINTER_REGNUM
3333 || regno == ARG_POINTER_REGNUM);
3336 /* Return nonzero if X is a valid ARM state address operand. */
3337 int
3338 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3339 int strict_p)
3341 bool use_ldrd;
3342 enum rtx_code code = GET_CODE (x);
3344 if (arm_address_register_rtx_p (x, strict_p))
3345 return 1;
3347 use_ldrd = (TARGET_LDRD
3348 && (mode == DImode
3349 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3351 if (code == POST_INC || code == PRE_DEC
3352 || ((code == PRE_INC || code == POST_DEC)
3353 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3354 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3356 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3357 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3358 && GET_CODE (XEXP (x, 1)) == PLUS
3359 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3361 rtx addend = XEXP (XEXP (x, 1), 1);
3363 /* Don't allow ldrd post increment by register because it's hard
3364 to fix up invalid register choices. */
3365 if (use_ldrd
3366 && GET_CODE (x) == POST_MODIFY
3367 && GET_CODE (addend) == REG)
3368 return 0;
3370 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3371 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3374 /* After reload constants split into minipools will have addresses
3375 from a LABEL_REF. */
3376 else if (reload_completed
3377 && (code == LABEL_REF
3378 || (code == CONST
3379 && GET_CODE (XEXP (x, 0)) == PLUS
3380 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3381 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3382 return 1;
3384 else if (mode == TImode)
3385 return 0;
3387 else if (code == PLUS)
3389 rtx xop0 = XEXP (x, 0);
3390 rtx xop1 = XEXP (x, 1);
3392 return ((arm_address_register_rtx_p (xop0, strict_p)
3393 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3394 || (arm_address_register_rtx_p (xop1, strict_p)
3395 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3398 #if 0
3399 /* Reload currently can't handle MINUS, so disable this for now */
3400 else if (GET_CODE (x) == MINUS)
3402 rtx xop0 = XEXP (x, 0);
3403 rtx xop1 = XEXP (x, 1);
3405 return (arm_address_register_rtx_p (xop0, strict_p)
3406 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3408 #endif
3410 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3411 && code == SYMBOL_REF
3412 && CONSTANT_POOL_ADDRESS_P (x)
3413 && ! (flag_pic
3414 && symbol_mentioned_p (get_pool_constant (x))))
3415 return 1;
3417 return 0;
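/* So, for SImode, [r0], [r1, #-4], [r2, r3] and [r2, r3, lsl #2] are
   all legitimate ARM addresses, while [r0, #4096] is not (the immediate
   offset must be less than 4096 in magnitude).  */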
3420 /* Return nonzero if INDEX is valid for an address index operand in
3421 ARM state. */
3422 static int
3423 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3424 int strict_p)
3426 HOST_WIDE_INT range;
3427 enum rtx_code code = GET_CODE (index);
3429 /* Standard coprocessor addressing modes. */
3430 if (TARGET_HARD_FLOAT
3431 && (TARGET_FPA || TARGET_MAVERICK)
3432 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3433 || (TARGET_MAVERICK && mode == DImode)))
3434 return (code == CONST_INT && INTVAL (index) < 1024
3435 && INTVAL (index) > -1024
3436 && (INTVAL (index) & 3) == 0);
3438 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3439 return (code == CONST_INT
3440 && INTVAL (index) < 1024
3441 && INTVAL (index) > -1024
3442 && (INTVAL (index) & 3) == 0);
3444 if (arm_address_register_rtx_p (index, strict_p)
3445 && (GET_MODE_SIZE (mode) <= 4))
3446 return 1;
3448 if (mode == DImode || mode == DFmode)
3450 if (code == CONST_INT)
3452 HOST_WIDE_INT val = INTVAL (index);
3454 if (TARGET_LDRD)
3455 return val > -256 && val < 256;
3456 else
3457 return val > -4096 && val < 4092;
3460 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3463 if (GET_MODE_SIZE (mode) <= 4
3464 && ! (arm_arch4
3465 && (mode == HImode
3466 || (mode == QImode && outer == SIGN_EXTEND))))
3468 if (code == MULT)
3470 rtx xiop0 = XEXP (index, 0);
3471 rtx xiop1 = XEXP (index, 1);
3473 return ((arm_address_register_rtx_p (xiop0, strict_p)
3474 && power_of_two_operand (xiop1, SImode))
3475 || (arm_address_register_rtx_p (xiop1, strict_p)
3476 && power_of_two_operand (xiop0, SImode)));
3478 else if (code == LSHIFTRT || code == ASHIFTRT
3479 || code == ASHIFT || code == ROTATERT)
3481 rtx op = XEXP (index, 1);
3483 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3484 && GET_CODE (op) == CONST_INT
3485 && INTVAL (op) > 0
3486 && INTVAL (op) <= 31);
3490 /* For ARM v4 we may be doing a sign-extend operation during the
3491 load. */
3492 if (arm_arch4)
3494 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3495 range = 256;
3496 else
3497 range = 4096;
3499 else
3500 range = (mode == HImode) ? 4095 : 4096;
3502 return (code == CONST_INT
3503 && INTVAL (index) < range
3504 && INTVAL (index) > -range);
3507 /* Return nonzero if X is valid as a Thumb state base register. */
3508 static int
3509 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3511 int regno;
3513 if (GET_CODE (x) != REG)
3514 return 0;
3516 regno = REGNO (x);
3518 if (strict_p)
3519 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3521 return (regno <= LAST_LO_REGNUM
3522 || regno > LAST_VIRTUAL_REGISTER
3523 || regno == FRAME_POINTER_REGNUM
3524 || (GET_MODE_SIZE (mode) >= 4
3525 && (regno == STACK_POINTER_REGNUM
3526 || regno >= FIRST_PSEUDO_REGISTER
3527 || x == hard_frame_pointer_rtx
3528 || x == arg_pointer_rtx)));
3531 /* Return nonzero if x is a legitimate index register. This is the case
3532 for any base register that can access a QImode object. */
3533 inline static int
3534 thumb_index_register_rtx_p (rtx x, int strict_p)
3536 return thumb_base_register_rtx_p (x, QImode, strict_p);
3539 /* Return nonzero if x is a legitimate Thumb-state address.
3541 The AP may be eliminated to either the SP or the FP, so we use the
3542 least common denominator, e.g. SImode, and offsets from 0 to 64.
3544 ??? Verify whether the above is the right approach.
3546 ??? Also, the FP may be eliminated to the SP, so perhaps that
3547 needs special handling also.
3549 ??? Look at how the mips16 port solves this problem. It probably uses
3550 better ways to solve some of these problems.
3552 Although it is not incorrect, we don't accept QImode and HImode
3553 addresses based on the frame pointer or arg pointer until the
3554 reload pass starts. This is so that eliminating such addresses
3555 into stack-based ones won't produce impossible code. */
3556 int
3557 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3559 /* ??? Not clear if this is right. Experiment. */
3560 if (GET_MODE_SIZE (mode) < 4
3561 && !(reload_in_progress || reload_completed)
3562 && (reg_mentioned_p (frame_pointer_rtx, x)
3563 || reg_mentioned_p (arg_pointer_rtx, x)
3564 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3565 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3566 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3567 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3568 return 0;
3570 /* Accept any base register. SP only in SImode or larger. */
3571 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3572 return 1;
3574 /* This is PC relative data before arm_reorg runs. */
3575 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3576 && GET_CODE (x) == SYMBOL_REF
3577 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3578 return 1;
3580 /* This is PC relative data after arm_reorg runs. */
3581 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3582 && (GET_CODE (x) == LABEL_REF
3583 || (GET_CODE (x) == CONST
3584 && GET_CODE (XEXP (x, 0)) == PLUS
3585 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3586 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3587 return 1;
3589 /* Post-inc indexing only supported for SImode and larger. */
3590 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3591 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3592 return 1;
3594 else if (GET_CODE (x) == PLUS)
3596 /* REG+REG address can be any two index registers. */
3597 /* We disallow FRAME+REG addressing since we know that FRAME
3598 will be replaced with STACK, and SP relative addressing only
3599 permits SP+OFFSET. */
3600 if (GET_MODE_SIZE (mode) <= 4
3601 && XEXP (x, 0) != frame_pointer_rtx
3602 && XEXP (x, 1) != frame_pointer_rtx
3603 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3604 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3605 return 1;
3607 /* REG+const has a 5- to 7-bit offset for non-SP registers. */
3608 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3609 || XEXP (x, 0) == arg_pointer_rtx)
3610 && GET_CODE (XEXP (x, 1)) == CONST_INT
3611 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3612 return 1;
3614 /* REG+const has a 10-bit offset for SP, but only SImode and
3615 larger are supported. */
3616 /* ??? Should probably check for DI/DFmode overflow here
3617 just like GO_IF_LEGITIMATE_OFFSET does. */
3618 else if (GET_CODE (XEXP (x, 0)) == REG
3619 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3620 && GET_MODE_SIZE (mode) >= 4
3621 && GET_CODE (XEXP (x, 1)) == CONST_INT
3622 && INTVAL (XEXP (x, 1)) >= 0
3623 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3624 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3625 return 1;
3627 else if (GET_CODE (XEXP (x, 0)) == REG
3628 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3629 && GET_MODE_SIZE (mode) >= 4
3630 && GET_CODE (XEXP (x, 1)) == CONST_INT
3631 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3632 return 1;
3635 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3636 && GET_MODE_SIZE (mode) == 4
3637 && GET_CODE (x) == SYMBOL_REF
3638 && CONSTANT_POOL_ADDRESS_P (x)
3639 && !(flag_pic
3640 && symbol_mentioned_p (get_pool_constant (x))))
3641 return 1;
3643 return 0;
3646 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3647 instruction of mode MODE. */
3648 int
3649 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3651 switch (GET_MODE_SIZE (mode))
3653 case 1:
3654 return val >= 0 && val < 32;
3656 case 2:
3657 return val >= 0 && val < 64 && (val & 1) == 0;
3659 default:
3660 return (val >= 0
3661 && (val + GET_MODE_SIZE (mode)) <= 128
3662 && (val & 3) == 0);
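/* These ranges mirror the Thumb-1 immediate-offset load/store
   encodings: a 5-bit offset scaled by the access size, i.e. 0-31 for
   ldrb/strb, 0-62 (even) for ldrh/strh, and 0-124 (word-aligned) for
   ldr/str.  */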
3666 /* Try machine-dependent ways of modifying an illegitimate address
3667 to be legitimate. If we find one, return the new, valid address. */
3668 rtx
3669 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3671 if (GET_CODE (x) == PLUS)
3673 rtx xop0 = XEXP (x, 0);
3674 rtx xop1 = XEXP (x, 1);
3676 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3677 xop0 = force_reg (SImode, xop0);
3679 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3680 xop1 = force_reg (SImode, xop1);
3682 if (ARM_BASE_REGISTER_RTX_P (xop0)
3683 && GET_CODE (xop1) == CONST_INT)
3685 HOST_WIDE_INT n, low_n;
3686 rtx base_reg, val;
3687 n = INTVAL (xop1);
3689 /* VFP addressing modes actually allow greater offsets, but for
3690 now we just stick with the lowest common denominator. */
3691 if (mode == DImode
3692 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3694 low_n = n & 0x0f;
3695 n &= ~0x0f;
3696 if (low_n > 4)
3698 n += 16;
3699 low_n -= 16;
3702 else
3704 low_n = ((mode) == TImode ? 0
3705 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3706 n -= low_n;
3709 base_reg = gen_reg_rtx (SImode);
3710 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3711 GEN_INT (n)), NULL_RTX);
3712 emit_move_insn (base_reg, val);
3713 x = (low_n == 0 ? base_reg
3714 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3716 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3717 x = gen_rtx_PLUS (SImode, xop0, xop1);
3720 /* XXX We don't allow MINUS any more -- see comment in
3721 arm_legitimate_address_p (). */
3722 else if (GET_CODE (x) == MINUS)
3724 rtx xop0 = XEXP (x, 0);
3725 rtx xop1 = XEXP (x, 1);
3727 if (CONSTANT_P (xop0))
3728 xop0 = force_reg (SImode, xop0);
3730 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3731 xop1 = force_reg (SImode, xop1);
3733 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3734 x = gen_rtx_MINUS (SImode, xop0, xop1);
3737 if (flag_pic)
3739 /* We need to find and carefully transform any SYMBOL and LABEL
3740 references, so go back to the original address expression. */
3741 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3743 if (new_x != orig_x)
3744 x = new_x;
3747 return x;
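/* Worked example of the PLUS splitting above (an illustrative sketch):
   legitimizing (plus (reg r4) (const_int 0x1234)) in SImode takes the
   else branch, so low_n = 0x1234 & 0xfff = 0x234 and n = 0x1000.
   Since 0x1000 is a valid data-processing immediate, this expands to
   roughly

     add rX, r4, #0x1000     @ base_reg
     ... [rX, #0x234]        @ offset now fits the addressing mode  */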
3751 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3752 to be legitimate. If we find one, return the new, valid address. */
3754 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3756 if (GET_CODE (x) == PLUS
3757 && GET_CODE (XEXP (x, 1)) == CONST_INT
3758 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3759 || INTVAL (XEXP (x, 1)) < 0))
3761 rtx xop0 = XEXP (x, 0);
3762 rtx xop1 = XEXP (x, 1);
3763 HOST_WIDE_INT offset = INTVAL (xop1);
3765 /* Try and fold the offset into a biasing of the base register and
3766 then offsetting that. Only do this when optimizing for space;
3767 otherwise the constant is better forced into a register, where it can be CSEd. */
3768 if (optimize_size && offset >= 0
3769 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3771 HOST_WIDE_INT delta;
3773 if (offset >= 256)
3774 delta = offset - (256 - GET_MODE_SIZE (mode));
3775 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3776 delta = 31 * GET_MODE_SIZE (mode);
3777 else
3778 delta = offset & (~31 * GET_MODE_SIZE (mode));
3780 xop0 = force_operand (plus_constant (xop0, offset - delta),
3781 NULL_RTX);
3782 x = plus_constant (xop0, delta);
3784 else if (offset < 0 && offset > -256)
3785 /* Small negative offsets are best done with a subtract before the
3786 dereference, forcing these into a register normally takes two
3787 instructions. */
3788 x = force_operand (x, NULL_RTX);
3789 else
3791 /* For the remaining cases, force the constant into a register. */
3792 xop1 = force_reg (SImode, xop1);
3793 x = gen_rtx_PLUS (SImode, xop0, xop1);
3796 else if (GET_CODE (x) == PLUS
3797 && s_register_operand (XEXP (x, 1), SImode)
3798 && !s_register_operand (XEXP (x, 0), SImode))
3800 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3802 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3805 if (flag_pic)
3807 /* We need to find and carefully transform any SYMBOL and LABEL
3808 references, so go back to the original address expression. */
3809 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3811 if (new_x != orig_x)
3812 x = new_x;
3815 return x;
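/* Worked example of the rebiasing above (illustrative, assuming
   optimize_size): a QImode access at (plus (reg) (const_int 260)) is
   out of range (260 >= 32 * 1).  As 260 >= 256, delta = 260 - (256 - 1)
   = 5, so the base is biased by 255 (however many insns that takes)
   and the access becomes [rX, #5], a legitimate 5-bit byte offset.  */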
3820 #define REG_OR_SUBREG_REG(X) \
3821 (GET_CODE (X) == REG \
3822 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3824 #define REG_OR_SUBREG_RTX(X) \
3825 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3827 #ifndef COSTS_N_INSNS
3828 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3829 #endif
3830 static inline int
3831 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3833 enum machine_mode mode = GET_MODE (x);
3835 switch (code)
3837 case ASHIFT:
3838 case ASHIFTRT:
3839 case LSHIFTRT:
3840 case ROTATERT:
3841 case PLUS:
3842 case MINUS:
3843 case COMPARE:
3844 case NEG:
3845 case NOT:
3846 return COSTS_N_INSNS (1);
3848 case MULT:
3849 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3851 int cycles = 0;
3852 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3854 while (i)
3856 i >>= 2;
3857 cycles++;
3859 return COSTS_N_INSNS (2) + cycles;
3861 return COSTS_N_INSNS (1) + 16;
3863 case SET:
3864 return (COSTS_N_INSNS (1)
3865 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3866 + (GET_CODE (SET_DEST (x)) == MEM)));
3868 case CONST_INT:
3869 if (outer == SET)
3871 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3872 return 0;
3873 if (thumb_shiftable_const (INTVAL (x)))
3874 return COSTS_N_INSNS (2);
3875 return COSTS_N_INSNS (3);
3877 else if ((outer == PLUS || outer == COMPARE)
3878 && INTVAL (x) < 256 && INTVAL (x) > -256)
3879 return 0;
3880 else if (outer == AND
3881 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3882 return COSTS_N_INSNS (1);
3883 else if (outer == ASHIFT || outer == ASHIFTRT
3884 || outer == LSHIFTRT)
3885 return 0;
3886 return COSTS_N_INSNS (2);
3888 case CONST:
3889 case CONST_DOUBLE:
3890 case LABEL_REF:
3891 case SYMBOL_REF:
3892 return COSTS_N_INSNS (3);
3894 case UDIV:
3895 case UMOD:
3896 case DIV:
3897 case MOD:
3898 return 100;
3900 case TRUNCATE:
3901 return 99;
3903 case AND:
3904 case XOR:
3905 case IOR:
3906 /* XXX guess. */
3907 return 8;
3909 case MEM:
3910 /* XXX another guess. */
3911 /* Memory costs quite a lot for the first word, but subsequent words
3912 load at the equivalent of a single insn each. */
3913 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3914 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3915 ? 4 : 0));
3917 case IF_THEN_ELSE:
3918 /* XXX a guess. */
3919 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3920 return 14;
3921 return 2;
3923 case ZERO_EXTEND:
3924 /* XXX still guessing. */
3925 switch (GET_MODE (XEXP (x, 0)))
3927 case QImode:
3928 return (1 + (mode == DImode ? 4 : 0)
3929 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3931 case HImode:
3932 return (4 + (mode == DImode ? 4 : 0)
3933 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3935 case SImode:
3936 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3938 default:
3939 return 99;
3942 default:
3943 return 99;
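/* Sketch of the Thumb MULT cost above: for (mult r0 (const_int 100))
   the loop shifts i two bits at a time, 100 -> 25 -> 6 -> 1 -> 0,
   i.e. four iterations, giving COSTS_N_INSNS (2) + 4.  Wider
   constants thus cost more, roughly modelling an early-terminating
   multiplier.  */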
3948 /* Worker routine for arm_rtx_costs. */
3949 static inline int
3950 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3952 enum machine_mode mode = GET_MODE (x);
3953 enum rtx_code subcode;
3954 int extra_cost;
3956 switch (code)
3958 case MEM:
3959 /* Memory costs quite a lot for the first word, but subsequent words
3960 load at the equivalent of a single insn each. */
3961 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3962 + (GET_CODE (x) == SYMBOL_REF
3963 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3965 case DIV:
3966 case MOD:
3967 case UDIV:
3968 case UMOD:
3969 return optimize_size ? COSTS_N_INSNS (2) : 100;
3971 case ROTATE:
3972 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3973 return 4;
3974 /* Fall through */
3975 case ROTATERT:
3976 if (mode != SImode)
3977 return 8;
3978 /* Fall through */
3979 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3980 if (mode == DImode)
3981 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3982 + ((GET_CODE (XEXP (x, 0)) == REG
3983 || (GET_CODE (XEXP (x, 0)) == SUBREG
3984 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3985 ? 0 : 8));
3986 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3987 || (GET_CODE (XEXP (x, 0)) == SUBREG
3988 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3989 ? 0 : 4)
3990 + ((GET_CODE (XEXP (x, 1)) == REG
3991 || (GET_CODE (XEXP (x, 1)) == SUBREG
3992 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3993 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3994 ? 0 : 4));
3996 case MINUS:
3997 if (mode == DImode)
3998 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3999 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4000 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4001 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4002 ? 0 : 8));
4004 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4005 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4006 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4007 && arm_const_double_rtx (XEXP (x, 1))))
4008 ? 0 : 8)
4009 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4010 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4011 && arm_const_double_rtx (XEXP (x, 0))))
4012 ? 0 : 8));
4014 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4015 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4016 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4017 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4018 || subcode == ASHIFTRT || subcode == LSHIFTRT
4019 || subcode == ROTATE || subcode == ROTATERT
4020 || (subcode == MULT
4021 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4022 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4023 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4024 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4025 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4026 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4027 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4028 return 1;
4029 /* Fall through */
4031 case PLUS:
4032 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4033 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4034 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4035 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4036 && arm_const_double_rtx (XEXP (x, 1))))
4037 ? 0 : 8));
4039 /* Fall through */
4040 case AND: case XOR: case IOR:
4041 extra_cost = 0;
4043 /* Normally the frame registers will be spilt into reg+const during
4044 reload, so it is a bad idea to combine them with other instructions,
4045 since then they might not be moved outside of loops. As a compromise
4046 we allow integration with ops that have a constant as their second
4047 operand. */
4048 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4049 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4050 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4051 || (REG_OR_SUBREG_REG (XEXP (x, 0))
4052 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
4053 extra_cost = 4;
4055 if (mode == DImode)
4056 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4057 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4058 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4059 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4060 ? 0 : 8));
4062 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4063 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4064 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4065 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4066 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4067 ? 0 : 4));
4069 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4070 return (1 + extra_cost
4071 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4072 || subcode == LSHIFTRT || subcode == ASHIFTRT
4073 || subcode == ROTATE || subcode == ROTATERT
4074 || (subcode == MULT
4075 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4076 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4077 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4078 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4079 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4080 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4081 ? 0 : 4));
4083 return 8;
4085 case MULT:
4086 /* This should have been handled by the CPU specific routines. */
4087 gcc_unreachable ();
4089 case TRUNCATE:
4090 if (arm_arch3m && mode == SImode
4091 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4092 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4093 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4094 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4095 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4096 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4097 return 8;
4098 return 99;
4100 case NEG:
4101 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4102 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4103 /* Fall through */
4104 case NOT:
4105 if (mode == DImode)
4106 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4108 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4110 case IF_THEN_ELSE:
4111 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4112 return 14;
4113 return 2;
4115 case COMPARE:
4116 return 1;
4118 case ABS:
4119 return 4 + (mode == DImode ? 4 : 0);
4121 case SIGN_EXTEND:
4122 if (GET_MODE (XEXP (x, 0)) == QImode)
4123 return (4 + (mode == DImode ? 4 : 0)
4124 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4125 /* Fall through */
4126 case ZERO_EXTEND:
4127 switch (GET_MODE (XEXP (x, 0)))
4129 case QImode:
4130 return (1 + (mode == DImode ? 4 : 0)
4131 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4133 case HImode:
4134 return (4 + (mode == DImode ? 4 : 0)
4135 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4137 case SImode:
4138 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4140 case V8QImode:
4141 case V4HImode:
4142 case V2SImode:
4143 case V4QImode:
4144 case V2HImode:
4145 return 1;
4147 default:
4148 gcc_unreachable ();
4150 gcc_unreachable ();
4152 case CONST_INT:
4153 if (const_ok_for_arm (INTVAL (x)))
4154 return outer == SET ? 2 : -1;
4155 else if (outer == AND
4156 && const_ok_for_arm (~INTVAL (x)))
4157 return -1;
4158 else if ((outer == COMPARE
4159 || outer == PLUS || outer == MINUS)
4160 && const_ok_for_arm (-INTVAL (x)))
4161 return -1;
4162 else
4163 return 5;
4165 case CONST:
4166 case LABEL_REF:
4167 case SYMBOL_REF:
4168 return 6;
4170 case CONST_DOUBLE:
4171 if (arm_const_double_rtx (x))
4172 return outer == SET ? 2 : -1;
4173 else if ((outer == COMPARE || outer == PLUS)
4174 && neg_const_double_rtx_ok_for_fpa (x))
4175 return -1;
4176 return 7;
4178 default:
4179 return 99;
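/* Note on the CONST_INT case above: const_ok_for_arm accepts the ARM
   data-processing immediates, an 8-bit value rotated right by an even
   amount.  So 0xff, 0xff00 and 0xf000000f are cheap (2 as a SET, -1
   inside another operation), while 0x101 or 0xffff must be
   synthesized and cost 5.  */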
4183 /* RTX costs when optimizing for size. */
4184 static bool
4185 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4187 enum machine_mode mode = GET_MODE (x);
4189 if (TARGET_THUMB)
4191 /* XXX TBD. For now, use the standard costs. */
4192 *total = thumb_rtx_costs (x, code, outer_code);
4193 return true;
4196 switch (code)
4198 case MEM:
4199 /* A memory access costs 1 insn if the mode is small, or the address is
4200 a single register; otherwise it costs one insn per word. */
4201 if (REG_P (XEXP (x, 0)))
4202 *total = COSTS_N_INSNS (1);
4203 else
4204 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4205 return true;
4207 case DIV:
4208 case MOD:
4209 case UDIV:
4210 case UMOD:
4211 /* Needs a libcall, so it costs about this. */
4212 *total = COSTS_N_INSNS (2);
4213 return false;
4215 case ROTATE:
4216 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4218 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4219 return true;
4221 /* Fall through */
4222 case ROTATERT:
4223 case ASHIFT:
4224 case LSHIFTRT:
4225 case ASHIFTRT:
4226 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4228 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4229 return true;
4231 else if (mode == SImode)
4233 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4234 /* Slightly disparage register shifts, but not by much. */
4235 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4236 *total += 1 + rtx_cost (XEXP (x, 1), code);
4237 return true;
4240 /* Needs a libcall. */
4241 *total = COSTS_N_INSNS (2);
4242 return false;
4244 case MINUS:
4245 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4247 *total = COSTS_N_INSNS (1);
4248 return false;
4251 if (mode == SImode)
4253 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4254 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4256 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4257 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4258 || subcode1 == ROTATE || subcode1 == ROTATERT
4259 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4260 || subcode1 == ASHIFTRT)
4262 /* It's just the cost of the two operands. */
4263 *total = 0;
4264 return false;
4267 *total = COSTS_N_INSNS (1);
4268 return false;
4271 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4272 return false;
4274 case PLUS:
4275 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4277 *total = COSTS_N_INSNS (1);
4278 return false;
4281 /* Fall through */
4282 case AND: case XOR: case IOR:
4283 if (mode == SImode)
4285 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4287 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4288 || subcode == LSHIFTRT || subcode == ASHIFTRT
4289 || (code == AND && subcode == NOT))
4291 /* It's just the cost of the two operands. */
4292 *total = 0;
4293 return false;
4297 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4298 return false;
4300 case MULT:
4301 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4302 return false;
4304 case NEG:
4305 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4306 { *total = COSTS_N_INSNS (1); return false; }
4307 /* Fall through */
4308 case NOT:
4309 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4311 return false;
4313 case IF_THEN_ELSE:
4314 *total = 0;
4315 return false;
4317 case COMPARE:
4318 if (cc_register (XEXP (x, 0), VOIDmode))
4319 *total = 0;
4320 else
4321 *total = COSTS_N_INSNS (1);
4322 return false;
4324 case ABS:
4325 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4326 *total = COSTS_N_INSNS (1);
4327 else
4328 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4329 return false;
4331 case SIGN_EXTEND:
4332 *total = 0;
4333 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4335 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4336 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4338 if (mode == DImode)
4339 *total += COSTS_N_INSNS (1);
4340 return false;
4342 case ZERO_EXTEND:
4343 *total = 0;
4344 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4346 switch (GET_MODE (XEXP (x, 0)))
4348 case QImode:
4349 *total += COSTS_N_INSNS (1);
4350 break;
4352 case HImode:
4353 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4355 case SImode:
4356 break;
4358 default:
4359 *total += COSTS_N_INSNS (2);
4363 if (mode == DImode)
4364 *total += COSTS_N_INSNS (1);
4366 return false;
4368 case CONST_INT:
4369 if (const_ok_for_arm (INTVAL (x)))
4370 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4371 else if (const_ok_for_arm (~INTVAL (x)))
4372 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4373 else if (const_ok_for_arm (-INTVAL (x)))
4375 if (outer_code == COMPARE || outer_code == PLUS
4376 || outer_code == MINUS)
4377 *total = 0;
4378 else
4379 *total = COSTS_N_INSNS (1);
4381 else
4382 *total = COSTS_N_INSNS (2);
4383 return true;
4385 case CONST:
4386 case LABEL_REF:
4387 case SYMBOL_REF:
4388 *total = COSTS_N_INSNS (2);
4389 return true;
4391 case CONST_DOUBLE:
4392 *total = COSTS_N_INSNS (4);
4393 return true;
4395 default:
4396 if (mode != VOIDmode)
4397 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4398 else
4399 *total = COSTS_N_INSNS (4); /* Who knows? */
4400 return false;
4404 /* RTX costs for cores with a slow MUL implementation. */
4406 static bool
4407 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4409 enum machine_mode mode = GET_MODE (x);
4411 if (TARGET_THUMB)
4413 *total = thumb_rtx_costs (x, code, outer_code);
4414 return true;
4417 switch (code)
4419 case MULT:
4420 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4421 || mode == DImode)
4423 *total = 30;
4424 return true;
4427 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4429 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4430 & (unsigned HOST_WIDE_INT) 0xffffffff);
4431 int cost, const_ok = const_ok_for_arm (i);
4432 int j, booth_unit_size;
4434 /* Tune as appropriate. */
4435 cost = const_ok ? 4 : 8;
4436 booth_unit_size = 2;
4437 for (j = 0; i && j < 32; j += booth_unit_size)
4439 i >>= booth_unit_size;
4440 cost += 2;
4443 *total = cost;
4444 return true;
4447 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4448 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4449 return true;
4451 default:
4452 *total = arm_rtx_costs_1 (x, code, outer_code);
4453 return true;
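/* Worked example of the Booth loop above (booth_unit_size == 2):
   for a multiply by 0x55, i steps 0x55 -> 0x15 -> 0x5 -> 0x1 -> 0,
   four two-bit steps, so *total = 4 (const_ok) + 4 * 2 = 12.  Each
   iteration stands for one step of the iterative multiplier.  */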
4458 /* RTX cost for cores with a fast multiply unit (M variants). */
4460 static bool
4461 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4463 enum machine_mode mode = GET_MODE (x);
4465 if (TARGET_THUMB)
4467 *total = thumb_rtx_costs (x, code, outer_code);
4468 return true;
4471 switch (code)
4473 case MULT:
4474 /* There is no point basing this on the tuning, since it is always the
4475 fast variant if it exists at all. */
4476 if (mode == DImode
4477 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4478 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4479 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4481 *total = 8;
4482 return true;
4486 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4487 || mode == DImode)
4489 *total = 30;
4490 return true;
4493 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4495 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4496 & (unsigned HOST_WIDE_INT) 0xffffffff);
4497 int cost, const_ok = const_ok_for_arm (i);
4498 int j, booth_unit_size;
4500 /* Tune as appropriate. */
4501 cost = const_ok ? 4 : 8;
4502 booth_unit_size = 8;
4503 for (j = 0; i && j < 32; j += booth_unit_size)
4505 i >>= booth_unit_size;
4506 cost += 2;
4509 *total = cost;
4510 return true;
4513 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4514 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4515 return true;
4517 default:
4518 *total = arm_rtx_costs_1 (x, code, outer_code);
4519 return true;
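/* The same constant for contrast: with booth_unit_size == 8,
   i = 0x55 is consumed in a single step, so *total = 4 + 1 * 2 = 6
   against the 12 computed by the slow-multiply variant above.  */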
4524 /* RTX cost for XScale CPUs. */
4526 static bool
4527 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4529 enum machine_mode mode = GET_MODE (x);
4531 if (TARGET_THUMB)
4533 *total = thumb_rtx_costs (x, code, outer_code);
4534 return true;
4537 switch (code)
4539 case MULT:
4540 /* There is no point basing this on the tuning, since it is always the
4541 fast variant if it exists at all. */
4542 if (mode == DImode
4543 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4544 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4545 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4547 *total = 8;
4548 return true;
4552 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4553 || mode == DImode)
4555 *total = 30;
4556 return true;
4559 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4561 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4562 & (unsigned HOST_WIDE_INT) 0xffffffff);
4563 int cost, const_ok = const_ok_for_arm (i);
4564 unsigned HOST_WIDE_INT masked_const;
4566 /* The cost will be related to two insns.
4567 First a load of the constant (MOV or LDR), then a multiply. */
4568 cost = 2;
4569 if (! const_ok)
4570 cost += 1; /* LDR is probably more expensive because
4571 of longer result latency. */
4572 masked_const = i & 0xffff8000;
4573 if (masked_const != 0 && masked_const != 0xffff8000)
4575 masked_const = i & 0xf8000000;
4576 if (masked_const == 0 || masked_const == 0xf8000000)
4577 cost += 1;
4578 else
4579 cost += 2;
4581 *total = cost;
4582 return true;
4585 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4586 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4587 return true;
4589 case COMPARE:
4590 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4591 will stall until the multiplication is complete. */
4592 if (GET_CODE (XEXP (x, 0)) == MULT)
4593 *total = 4 + rtx_cost (XEXP (x, 0), code);
4594 else
4595 *total = arm_rtx_costs_1 (x, code, outer_code);
4596 return true;
4598 default:
4599 *total = arm_rtx_costs_1 (x, code, outer_code);
4600 return true;
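/* Illustration of the masked_const tests above: for i = 0x12345678,
   i & 0xffff8000 = 0x12340000 (neither 0 nor 0xffff8000), and
   i & 0xf8000000 = 0x10000000 (neither 0 nor 0xf8000000), so
   cost += 2.  A narrow multiplier such as 0x1234 takes neither
   penalty; the assumption is that the XScale multiplier terminates
   early when the significant bits fit in the low half.  */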
4605 /* RTX costs for 9e (and later) cores. */
4607 static bool
4608 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4610 enum machine_mode mode = GET_MODE (x);
4611 int nonreg_cost;
4612 int cost;
4614 if (TARGET_THUMB)
4616 switch (code)
4618 case MULT:
4619 *total = COSTS_N_INSNS (3);
4620 return true;
4622 default:
4623 *total = thumb_rtx_costs (x, code, outer_code);
4624 return true;
4628 switch (code)
4630 case MULT:
4631 /* There is no point basing this on the tuning, since it is always the
4632 fast variant if it exists at all. */
4633 if (mode == DImode
4634 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4635 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4636 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4638 *total = 3;
4639 return true;
4643 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4645 *total = 30;
4646 return true;
4648 if (mode == DImode)
4650 cost = 7;
4651 nonreg_cost = 8;
4653 else
4655 cost = 2;
4656 nonreg_cost = 4;
4660 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4661 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4662 return true;
4664 default:
4665 *total = arm_rtx_costs_1 (x, code, outer_code);
4666 return true;
4669 /* All address computations that can be done are free, but rtx cost returns
4670 the same for practically all of them. So we weight the different types
4671 of address here in the order (most pref first):
4672 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4673 static inline int
4674 arm_arm_address_cost (rtx x)
4676 enum rtx_code c = GET_CODE (x);
4678 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4679 return 0;
4680 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4681 return 10;
4683 if (c == PLUS || c == MINUS)
4685 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4686 return 2;
4688 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4689 return 3;
4691 return 4;
4694 return 6;
4697 static inline int
4698 arm_thumb_address_cost (rtx x)
4700 enum rtx_code c = GET_CODE (x);
4702 if (c == REG)
4703 return 1;
4704 if (c == PLUS
4705 && GET_CODE (XEXP (x, 0)) == REG
4706 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4707 return 1;
4709 return 2;
4712 static int
4713 arm_address_cost (rtx x)
4715 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4718 static int
4719 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4721 rtx i_pat, d_pat;
4723 /* Some true dependencies can have a higher cost depending
4724 on precisely how certain input operands are used. */
4725 if (arm_tune_xscale
4726 && REG_NOTE_KIND (link) == 0
4727 && recog_memoized (insn) >= 0
4728 && recog_memoized (dep) >= 0)
4730 int shift_opnum = get_attr_shift (insn);
4731 enum attr_type attr_type = get_attr_type (dep);
4733 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4734 operand for INSN. If we have a shifted input operand and the
4735 instruction we depend on is another ALU instruction, then we may
4736 have to account for an additional stall. */
4737 if (shift_opnum != 0
4738 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4740 rtx shifted_operand;
4741 int opno;
4743 /* Get the shifted operand. */
4744 extract_insn (insn);
4745 shifted_operand = recog_data.operand[shift_opnum];
4747 /* Iterate over all the operands in DEP. If we write an operand
4748 that overlaps with SHIFTED_OPERAND, then we have to increase the
4749 cost of this dependency. */
4750 extract_insn (dep);
4751 preprocess_constraints ();
4752 for (opno = 0; opno < recog_data.n_operands; opno++)
4754 /* We can ignore strict inputs. */
4755 if (recog_data.operand_type[opno] == OP_IN)
4756 continue;
4758 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4759 shifted_operand))
4760 return 2;
4765 /* XXX This is not strictly true for the FPA. */
4766 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4767 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4768 return 0;
4770 /* Call insns don't incur a stall, even if they follow a load. */
4771 if (REG_NOTE_KIND (link) == 0
4772 && GET_CODE (insn) == CALL_INSN)
4773 return 1;
4775 if ((i_pat = single_set (insn)) != NULL
4776 && GET_CODE (SET_SRC (i_pat)) == MEM
4777 && (d_pat = single_set (dep)) != NULL
4778 && GET_CODE (SET_DEST (d_pat)) == MEM)
4780 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4781 /* This is a load after a store; there is no conflict if the load reads
4782 from a cached area. Assume that loads from the stack, and from the
4783 constant pool are cached, and that others will miss. This is a
4784 hack. */
4786 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4787 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4788 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4789 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4790 return 1;
4793 return cost;
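/* Example of the XScale shifted-operand penalty above (a sketch;
   the exact insn types depend on the patterns involved):

     mov  r1, r2, lsl #2      @ ALU insn writing r1
     add  r0, r0, r1, asr #1  @ r1 is the shifted operand

   The add's shifted input is produced by the preceding ALU insn, so
   the dependence is costed at 2 to account for the extra stall.  */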
4796 static int fp_consts_inited = 0;
4798 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4799 static const char * const strings_fp[8] =
4801 "0", "1", "2", "3",
4802 "4", "5", "0.5", "10"
4805 static REAL_VALUE_TYPE values_fp[8];
4807 static void
4808 init_fp_table (void)
4810 int i;
4811 REAL_VALUE_TYPE r;
4813 if (TARGET_VFP)
4814 fp_consts_inited = 1;
4815 else
4816 fp_consts_inited = 8;
4818 for (i = 0; i < fp_consts_inited; i++)
4820 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4821 values_fp[i] = r;
4825 /* Return TRUE if rtx X is a valid immediate FP constant. */
4827 arm_const_double_rtx (rtx x)
4829 REAL_VALUE_TYPE r;
4830 int i;
4832 if (!fp_consts_inited)
4833 init_fp_table ();
4835 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4836 if (REAL_VALUE_MINUS_ZERO (r))
4837 return 0;
4839 for (i = 0; i < fp_consts_inited; i++)
4840 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4841 return 1;
4843 return 0;
4846 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4848 neg_const_double_rtx_ok_for_fpa (rtx x)
4850 REAL_VALUE_TYPE r;
4851 int i;
4853 if (!fp_consts_inited)
4854 init_fp_table ();
4856 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4857 r = REAL_VALUE_NEGATE (r);
4858 if (REAL_VALUE_MINUS_ZERO (r))
4859 return 0;
4861 for (i = 0; i < 8; i++)
4862 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4863 return 1;
4865 return 0;
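/* For illustration: the table above holds 0, 1, 2, 3, 4, 5, 0.5 and
   10, so arm_const_double_rtx accepts (const_double 2.0) but not 2.5,
   and neg_const_double_rtx_ok_for_fpa accepts -2.0 since its negation
   is in the table (letting, e.g., an add of -2.0 be costed as a sub
   of 2.0).  */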
4868 /* Predicates for `match_operand' and `match_operator'. */
4870 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4872 cirrus_memory_offset (rtx op)
4874 /* Reject eliminable registers. */
4875 if (! (reload_in_progress || reload_completed)
4876 && ( reg_mentioned_p (frame_pointer_rtx, op)
4877 || reg_mentioned_p (arg_pointer_rtx, op)
4878 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4879 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4880 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4881 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4882 return 0;
4884 if (GET_CODE (op) == MEM)
4886 rtx ind;
4888 ind = XEXP (op, 0);
4890 /* Match: (mem (reg)). */
4891 if (GET_CODE (ind) == REG)
4892 return 1;
4894 /* Match:
4895 (mem (plus (reg)
4896 (const))). */
4897 if (GET_CODE (ind) == PLUS
4898 && GET_CODE (XEXP (ind, 0)) == REG
4899 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4900 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4901 return 1;
4904 return 0;
4907 /* Return TRUE if OP is a valid VFP memory address pattern.
4908 WB is true if writeback address modes are allowed. */
4911 arm_coproc_mem_operand (rtx op, bool wb)
4913 rtx ind;
4915 /* Reject eliminable registers. */
4916 if (! (reload_in_progress || reload_completed)
4917 && ( reg_mentioned_p (frame_pointer_rtx, op)
4918 || reg_mentioned_p (arg_pointer_rtx, op)
4919 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4920 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4921 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4922 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4923 return FALSE;
4925 /* Constants are converted into offsets from labels. */
4926 if (GET_CODE (op) != MEM)
4927 return FALSE;
4929 ind = XEXP (op, 0);
4931 if (reload_completed
4932 && (GET_CODE (ind) == LABEL_REF
4933 || (GET_CODE (ind) == CONST
4934 && GET_CODE (XEXP (ind, 0)) == PLUS
4935 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4936 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4937 return TRUE;
4939 /* Match: (mem (reg)). */
4940 if (GET_CODE (ind) == REG)
4941 return arm_address_register_rtx_p (ind, 0);
4943 /* Autoincrement addressing modes. */
4944 if (wb
4945 && (GET_CODE (ind) == PRE_INC
4946 || GET_CODE (ind) == POST_INC
4947 || GET_CODE (ind) == PRE_DEC
4948 || GET_CODE (ind) == POST_DEC))
4949 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4951 if (wb
4952 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4953 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4954 && GET_CODE (XEXP (ind, 1)) == PLUS
4955 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4956 ind = XEXP (ind, 1);
4958 /* Match:
4959 (plus (reg)
4960 (const)). */
4961 if (GET_CODE (ind) == PLUS
4962 && GET_CODE (XEXP (ind, 0)) == REG
4963 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4964 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4965 && INTVAL (XEXP (ind, 1)) > -1024
4966 && INTVAL (XEXP (ind, 1)) < 1024
4967 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4968 return TRUE;
4970 return FALSE;
4973 /* Return true if X is a register that will be eliminated later on. */
4975 arm_eliminable_register (rtx x)
4977 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4978 || REGNO (x) == ARG_POINTER_REGNUM
4979 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4980 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4983 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4984 VFP registers. Otherwise return NO_REGS. */
4986 enum reg_class
4987 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4989 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4990 return NO_REGS;
4992 return GENERAL_REGS;
4995 /* Values which must be returned in the most-significant end of the return
4996 register. */
4998 static bool
4999 arm_return_in_msb (tree valtype)
5001 return (TARGET_AAPCS_BASED
5002 && BYTES_BIG_ENDIAN
5003 && (AGGREGATE_TYPE_P (valtype)
5004 || TREE_CODE (valtype) == COMPLEX_TYPE));
5007 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5008 Used by the Cirrus Maverick code which has to work around
5009 a hardware bug triggered by such instructions. */
5010 static bool
5011 arm_memory_load_p (rtx insn)
5013 rtx body, lhs, rhs;
5015 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5016 return false;
5018 body = PATTERN (insn);
5020 if (GET_CODE (body) != SET)
5021 return false;
5023 lhs = XEXP (body, 0);
5024 rhs = XEXP (body, 1);
5026 lhs = REG_OR_SUBREG_RTX (lhs);
5028 /* If the destination is not a general purpose
5029 register we do not have to worry. */
5030 if (GET_CODE (lhs) != REG
5031 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5032 return false;
5034 /* As well as loads from memory we also have to react
5035 to loads of invalid constants which will be turned
5036 into loads from the minipool. */
5037 return (GET_CODE (rhs) == MEM
5038 || GET_CODE (rhs) == SYMBOL_REF
5039 || note_invalid_constants (insn, -1, false));
5042 /* Return TRUE if INSN is a Cirrus instruction. */
5043 static bool
5044 arm_cirrus_insn_p (rtx insn)
5046 enum attr_cirrus attr;
5048 /* get_attr cannot accept USE or CLOBBER. */
5049 if (!insn
5050 || GET_CODE (insn) != INSN
5051 || GET_CODE (PATTERN (insn)) == USE
5052 || GET_CODE (PATTERN (insn)) == CLOBBER)
5053 return 0;
5055 attr = get_attr_cirrus (insn);
5057 return attr != CIRRUS_NOT;
5060 /* Cirrus reorg for invalid instruction combinations. */
5061 static void
5062 cirrus_reorg (rtx first)
5064 enum attr_cirrus attr;
5065 rtx body = PATTERN (first);
5066 rtx t;
5067 int nops;
5069 /* Any branch must be followed by 2 non Cirrus instructions. */
5070 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5072 nops = 0;
5073 t = next_nonnote_insn (first);
5075 if (arm_cirrus_insn_p (t))
5076 ++ nops;
5078 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5079 ++ nops;
5081 while (nops --)
5082 emit_insn_after (gen_nop (), first);
5084 return;
5087 /* (float (blah)) is in parallel with a clobber. */
5088 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5089 body = XVECEXP (body, 0, 0);
5091 if (GET_CODE (body) == SET)
5093 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5095 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5096 be followed by a non Cirrus insn. */
5097 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5099 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5100 emit_insn_after (gen_nop (), first);
5102 return;
5104 else if (arm_memory_load_p (first))
5106 unsigned int arm_regno;
5108 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5109 ldr/cfmv64hr combination where the Rd field is the same
5110 in both instructions must be split with a non Cirrus
5111 insn. Example:
5113 ldr r0, blah
5115 cfmvsr mvf0, r0. */
5117 /* Get Arm register number for ldr insn. */
5118 if (GET_CODE (lhs) == REG)
5119 arm_regno = REGNO (lhs);
5120 else
5122 gcc_assert (GET_CODE (rhs) == REG);
5123 arm_regno = REGNO (rhs);
5126 /* Next insn. */
5127 first = next_nonnote_insn (first);
5129 if (! arm_cirrus_insn_p (first))
5130 return;
5132 body = PATTERN (first);
5134 /* (float (blah)) is in parallel with a clobber. */
5135 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5136 body = XVECEXP (body, 0, 0);
5138 if (GET_CODE (body) == FLOAT)
5139 body = XEXP (body, 0);
5141 if (get_attr_cirrus (first) == CIRRUS_MOVE
5142 && GET_CODE (XEXP (body, 1)) == REG
5143 && arm_regno == REGNO (XEXP (body, 1)))
5144 emit_insn_after (gen_nop (), first);
5146 return;
5150 /* get_attr cannot accept USE or CLOBBER. */
5151 if (!first
5152 || GET_CODE (first) != INSN
5153 || GET_CODE (PATTERN (first)) == USE
5154 || GET_CODE (PATTERN (first)) == CLOBBER)
5155 return;
5157 attr = get_attr_cirrus (first);
5159 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5160 must be followed by a non-coprocessor instruction. */
5161 if (attr == CIRRUS_COMPARE)
5163 nops = 0;
5165 t = next_nonnote_insn (first);
5167 if (arm_cirrus_insn_p (t))
5168 ++ nops;
5170 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5171 ++ nops;
5173 while (nops --)
5174 emit_insn_after (gen_nop (), first);
5176 return;
5180 /* Return TRUE if X references a SYMBOL_REF. */
5182 symbol_mentioned_p (rtx x)
5184 const char * fmt;
5185 int i;
5187 if (GET_CODE (x) == SYMBOL_REF)
5188 return 1;
5190 fmt = GET_RTX_FORMAT (GET_CODE (x));
5192 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5194 if (fmt[i] == 'E')
5196 int j;
5198 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5199 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5200 return 1;
5202 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5203 return 1;
5206 return 0;
5209 /* Return TRUE if X references a LABEL_REF. */
5211 label_mentioned_p (rtx x)
5213 const char * fmt;
5214 int i;
5216 if (GET_CODE (x) == LABEL_REF)
5217 return 1;
5219 fmt = GET_RTX_FORMAT (GET_CODE (x));
5220 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5222 if (fmt[i] == 'E')
5224 int j;
5226 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5227 if (label_mentioned_p (XVECEXP (x, i, j)))
5228 return 1;
5230 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5231 return 1;
5234 return 0;
5237 enum rtx_code
5238 minmax_code (rtx x)
5240 enum rtx_code code = GET_CODE (x);
5242 switch (code)
5244 case SMAX:
5245 return GE;
5246 case SMIN:
5247 return LE;
5248 case UMIN:
5249 return LEU;
5250 case UMAX:
5251 return GEU;
5252 default:
5253 gcc_unreachable ();
5257 /* Return 1 if memory locations are adjacent. */
5259 adjacent_mem_locations (rtx a, rtx b)
5261 /* We don't guarantee to preserve the order of these memory refs. */
5262 if (volatile_refs_p (a) || volatile_refs_p (b))
5263 return 0;
5265 if ((GET_CODE (XEXP (a, 0)) == REG
5266 || (GET_CODE (XEXP (a, 0)) == PLUS
5267 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5268 && (GET_CODE (XEXP (b, 0)) == REG
5269 || (GET_CODE (XEXP (b, 0)) == PLUS
5270 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5272 HOST_WIDE_INT val0 = 0, val1 = 0;
5273 rtx reg0, reg1;
5274 int val_diff;
5276 if (GET_CODE (XEXP (a, 0)) == PLUS)
5278 reg0 = XEXP (XEXP (a, 0), 0);
5279 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5281 else
5282 reg0 = XEXP (a, 0);
5284 if (GET_CODE (XEXP (b, 0)) == PLUS)
5286 reg1 = XEXP (XEXP (b, 0), 0);
5287 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5289 else
5290 reg1 = XEXP (b, 0);
5292 /* Don't accept any offset that will require multiple
5293 instructions to handle, since this would cause the
5294 arith_adjacentmem pattern to output an overlong sequence. */
5295 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5296 return 0;
5298 /* Don't allow an eliminable register: register elimination can make
5299 the offset too large. */
5300 if (arm_eliminable_register (reg0))
5301 return 0;
5303 val_diff = val1 - val0;
5305 if (arm_ld_sched)
5307 /* If the target has load delay slots, then there's no benefit
5308 to using an ldm instruction unless the offset is zero and
5309 we are optimizing for size. */
5310 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5311 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5312 && (val_diff == 4 || val_diff == -4));
5315 return ((REGNO (reg0) == REGNO (reg1))
5316 && (val_diff == 4 || val_diff == -4));
5319 return 0;
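/* Example (illustrative): (mem (reg r4)) and (mem (plus (reg r4)
   (const_int 4))) are adjacent -- same base, val_diff == 4 -- and so
   may be combined into one ldm/stm, except on arm_ld_sched cores
   where that is only worthwhile when optimizing for size.  */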
5323 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5324 HOST_WIDE_INT *load_offset)
5326 int unsorted_regs[4];
5327 HOST_WIDE_INT unsorted_offsets[4];
5328 int order[4];
5329 int base_reg = -1;
5330 int i;
5332 /* Can only handle 2, 3, or 4 insns at present,
5333 though could be easily extended if required. */
5334 gcc_assert (nops >= 2 && nops <= 4);
5336 /* Loop over the operands and check that the memory references are
5337 suitable (i.e. immediate offsets from the same base register). At
5338 the same time, extract the target register, and the memory
5339 offsets. */
5340 for (i = 0; i < nops; i++)
5342 rtx reg;
5343 rtx offset;
5345 /* Convert a subreg of a mem into the mem itself. */
5346 if (GET_CODE (operands[nops + i]) == SUBREG)
5347 operands[nops + i] = alter_subreg (operands + (nops + i));
5349 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5351 /* Don't reorder volatile memory references; it doesn't seem worth
5352 looking for the case where the order is ok anyway. */
5353 if (MEM_VOLATILE_P (operands[nops + i]))
5354 return 0;
5356 offset = const0_rtx;
5358 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5359 || (GET_CODE (reg) == SUBREG
5360 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5361 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5362 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5363 == REG)
5364 || (GET_CODE (reg) == SUBREG
5365 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5366 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5367 == CONST_INT)))
5369 if (i == 0)
5371 base_reg = REGNO (reg);
5372 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5373 ? REGNO (operands[i])
5374 : REGNO (SUBREG_REG (operands[i])));
5375 order[0] = 0;
5377 else
5379 if (base_reg != (int) REGNO (reg))
5380 /* Not addressed from the same base register. */
5381 return 0;
5383 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5384 ? REGNO (operands[i])
5385 : REGNO (SUBREG_REG (operands[i])));
5386 if (unsorted_regs[i] < unsorted_regs[order[0]])
5387 order[0] = i;
5390 /* If it isn't an integer register, or if it overwrites the
5391 base register but isn't the last insn in the list, then
5392 we can't do this. */
5393 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5394 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5395 return 0;
5397 unsorted_offsets[i] = INTVAL (offset);
5399 else
5400 /* Not a suitable memory address. */
5401 return 0;
5404 /* All the useful information has now been extracted from the
5405 operands into unsorted_regs and unsorted_offsets; additionally,
5406 order[0] has been set to the lowest numbered register in the
5407 list. Sort the registers into order, and check that the memory
5408 offsets are ascending and adjacent. */
5410 for (i = 1; i < nops; i++)
5412 int j;
5414 order[i] = order[i - 1];
5415 for (j = 0; j < nops; j++)
5416 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5417 && (order[i] == order[i - 1]
5418 || unsorted_regs[j] < unsorted_regs[order[i]]))
5419 order[i] = j;
5421 /* Have we found a suitable register? If not, one must be used more
5422 than once. */
5423 if (order[i] == order[i - 1])
5424 return 0;
5426 /* Is the memory address adjacent and ascending? */
5427 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5428 return 0;
5431 if (base)
5433 *base = base_reg;
5435 for (i = 0; i < nops; i++)
5436 regs[i] = unsorted_regs[order[i]];
5438 *load_offset = unsorted_offsets[order[0]];
5441 if (unsorted_offsets[order[0]] == 0)
5442 return 1; /* ldmia */
5444 if (unsorted_offsets[order[0]] == 4)
5445 return 2; /* ldmib */
5447 if (unsorted_offsets[order[nops - 1]] == 0)
5448 return 3; /* ldmda */
5450 if (unsorted_offsets[order[nops - 1]] == -4)
5451 return 4; /* ldmdb */
5453 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5454 if the offset isn't small enough. The reason 2 ldrs are faster
5455 is because these ARMs are able to do more than one cache access
5456 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5457 whilst the ARM8 has a double bandwidth cache. This means that
5458 these cores can do both an instruction fetch and a data fetch in
5459 a single cycle, so the trick of calculating the address into a
5460 scratch register (one of the result regs) and then doing a load
5461 multiple actually becomes slower (and no smaller in code size).
5462 That is the transformation
5464 ldr rd1, [rbase + offset]
5465 ldr rd2, [rbase + offset + 4]
5467 to
5469 add rd1, rbase, offset
5470 ldmia rd1, {rd1, rd2}
5472 produces worse code -- '3 cycles + any stalls on rd2' instead of
5473 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5474 access per cycle, the first sequence could never complete in less
5475 than 6 cycles, whereas the ldm sequence would only take 5 and
5476 would make better use of sequential accesses if not hitting the
5477 cache.
5479 We cheat here and test 'arm_ld_sched' which we currently know to
5480 only be true for the ARM8, ARM9 and StrongARM. If this ever
5481 changes, then the test below needs to be reworked. */
5482 if (nops == 2 && arm_ld_sched)
5483 return 0;
5485 /* Can't do it without setting up the offset; only do this if it takes
5486 no more than one insn. */
5487 return (const_ok_for_arm (unsorted_offsets[order[0]])
5488 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
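/* Sketch of the return-code convention above: loads of r0..r3 from
   [rbase], [rbase, #4], [rbase, #8] and [rbase, #12] sort to a first
   offset of 0 and return 1 (ldmia); offsets starting at 4 return 2
   (ldmib), and runs ending at 0 or -4 return 3 (ldmda) or 4 (ldmdb).
   5 means a base adjustment is needed first.  */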
5491 const char *
5492 emit_ldm_seq (rtx *operands, int nops)
5494 int regs[4];
5495 int base_reg;
5496 HOST_WIDE_INT offset;
5497 char buf[100];
5498 int i;
5500 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5502 case 1:
5503 strcpy (buf, "ldm%?ia\t");
5504 break;
5506 case 2:
5507 strcpy (buf, "ldm%?ib\t");
5508 break;
5510 case 3:
5511 strcpy (buf, "ldm%?da\t");
5512 break;
5514 case 4:
5515 strcpy (buf, "ldm%?db\t");
5516 break;
5518 case 5:
5519 if (offset >= 0)
5520 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5521 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5522 (long) offset);
5523 else
5524 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5525 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5526 (long) -offset);
5527 output_asm_insn (buf, operands);
5528 base_reg = regs[0];
5529 strcpy (buf, "ldm%?ia\t");
5530 break;
5532 default:
5533 gcc_unreachable ();
5536 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5537 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5539 for (i = 1; i < nops; i++)
5540 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5541 reg_names[regs[i]]);
5543 strcat (buf, "}\t%@ phole ldm");
5545 output_asm_insn (buf, operands);
5546 return "";
5550 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5551 HOST_WIDE_INT * load_offset)
5553 int unsorted_regs[4];
5554 HOST_WIDE_INT unsorted_offsets[4];
5555 int order[4];
5556 int base_reg = -1;
5557 int i;
5559 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5560 extended if required. */
5561 gcc_assert (nops >= 2 && nops <= 4);
5563 /* Loop over the operands and check that the memory references are
5564 suitable (i.e. immediate offsets from the same base register). At
5565 the same time, extract the target register, and the memory
5566 offsets. */
5567 for (i = 0; i < nops; i++)
5569 rtx reg;
5570 rtx offset;
5572 /* Convert a subreg of a mem into the mem itself. */
5573 if (GET_CODE (operands[nops + i]) == SUBREG)
5574 operands[nops + i] = alter_subreg (operands + (nops + i));
5576 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5578 /* Don't reorder volatile memory references; it doesn't seem worth
5579 looking for the case where the order is ok anyway. */
5580 if (MEM_VOLATILE_P (operands[nops + i]))
5581 return 0;
5583 offset = const0_rtx;
5585 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5586 || (GET_CODE (reg) == SUBREG
5587 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5588 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5589 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5590 == REG)
5591 || (GET_CODE (reg) == SUBREG
5592 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5593 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5594 == CONST_INT)))
5596 if (i == 0)
5598 base_reg = REGNO (reg);
5599 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5600 ? REGNO (operands[i])
5601 : REGNO (SUBREG_REG (operands[i])));
5602 order[0] = 0;
5604 else
5606 if (base_reg != (int) REGNO (reg))
5607 /* Not addressed from the same base register. */
5608 return 0;
5610 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5611 ? REGNO (operands[i])
5612 : REGNO (SUBREG_REG (operands[i])));
5613 if (unsorted_regs[i] < unsorted_regs[order[0]])
5614 order[0] = i;
5617 /* If it isn't an integer register, then we can't do this. */
5618 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5619 return 0;
5621 unsorted_offsets[i] = INTVAL (offset);
5623 else
5624 /* Not a suitable memory address. */
5625 return 0;
5628 /* All the useful information has now been extracted from the
5629 operands into unsorted_regs and unsorted_offsets; additionally,
5630 order[0] has been set to the lowest numbered register in the
5631 list. Sort the registers into order, and check that the memory
5632 offsets are ascending and adjacent. */
5634 for (i = 1; i < nops; i++)
5636 int j;
5638 order[i] = order[i - 1];
5639 for (j = 0; j < nops; j++)
5640 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5641 && (order[i] == order[i - 1]
5642 || unsorted_regs[j] < unsorted_regs[order[i]]))
5643 order[i] = j;
5645 /* Have we found a suitable register? If not, one must be used more
5646 than once. */
5647 if (order[i] == order[i - 1])
5648 return 0;
5650 /* Is the memory address adjacent and ascending? */
5651 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5652 return 0;
5655 if (base)
5657 *base = base_reg;
5659 for (i = 0; i < nops; i++)
5660 regs[i] = unsorted_regs[order[i]];
5662 *load_offset = unsorted_offsets[order[0]];
5665 if (unsorted_offsets[order[0]] == 0)
5666 return 1; /* stmia */
5668 if (unsorted_offsets[order[0]] == 4)
5669 return 2; /* stmib */
5671 if (unsorted_offsets[order[nops - 1]] == 0)
5672 return 3; /* stmda */
5674 if (unsorted_offsets[order[nops - 1]] == -4)
5675 return 4; /* stmdb */
5677 return 0;
5680 const char *
5681 emit_stm_seq (rtx *operands, int nops)
5683 int regs[4];
5684 int base_reg;
5685 HOST_WIDE_INT offset;
5686 char buf[100];
5687 int i;
5689 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5691 case 1:
5692 strcpy (buf, "stm%?ia\t");
5693 break;
5695 case 2:
5696 strcpy (buf, "stm%?ib\t");
5697 break;
5699 case 3:
5700 strcpy (buf, "stm%?da\t");
5701 break;
5703 case 4:
5704 strcpy (buf, "stm%?db\t");
5705 break;
5707 default:
5708 gcc_unreachable ();
5711 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5712 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5714 for (i = 1; i < nops; i++)
5715 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5716 reg_names[regs[i]]);
5718 strcat (buf, "}\t%@ phole stm");
5720 output_asm_insn (buf, operands);
5721 return "";
5725 /* Routines for use in generating RTL. */
5728 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5729 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5731 HOST_WIDE_INT offset = *offsetp;
5732 int i = 0, j;
5733 rtx result;
5734 int sign = up ? 1 : -1;
5735 rtx mem, addr;
5737 /* XScale has load-store double instructions, but they have stricter
5738 alignment requirements than load-store multiple, so we cannot
5739 use them.
5741 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5742 the pipeline until completion.
5744 NREGS CYCLES
5745 1 3
5746 2 4
5747 3 5
5748 4 6
5750 An ldr instruction takes 1-3 cycles, but does not block the
5751 pipeline.
5753 NREGS CYCLES
5754 1 1-3
5755 2 2-6
5756 3 3-9
5757 4 4-12
5759 Best case ldr will always win. However, the more ldr instructions
5760 we issue, the less likely we are to be able to schedule them well.
5761 Using ldr instructions also increases code size.
5763 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5764 for counts of 3 or 4 regs. */
5765 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5767 rtx seq;
5769 start_sequence ();
5771 for (i = 0; i < count; i++)
5773 addr = plus_constant (from, i * 4 * sign);
5774 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5775 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5776 offset += 4 * sign;
5779 if (write_back)
5781 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5782 *offsetp = offset;
5785 seq = get_insns ();
5786 end_sequence ();
5788 return seq;
5791 result = gen_rtx_PARALLEL (VOIDmode,
5792 rtvec_alloc (count + (write_back ? 1 : 0)));
5793 if (write_back)
5795 XVECEXP (result, 0, 0)
5796 = gen_rtx_SET (GET_MODE (from), from,
5797 plus_constant (from, count * 4 * sign));
5798 i = 1;
5799 count++;
5802 for (j = 0; i < count; i++, j++)
5804 addr = plus_constant (from, j * 4 * sign);
5805 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5806 XVECEXP (result, 0, i)
5807 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5808 offset += 4 * sign;
5811 if (write_back)
5812 *offsetp = offset;
5814 return result;
5818 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5819 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5821 HOST_WIDE_INT offset = *offsetp;
5822 int i = 0, j;
5823 rtx result;
5824 int sign = up ? 1 : -1;
5825 rtx mem, addr;
5827 /* See arm_gen_load_multiple for discussion of
5828 the pros/cons of ldm/stm usage for XScale. */
5829 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5831 rtx seq;
5833 start_sequence ();
5835 for (i = 0; i < count; i++)
5837 addr = plus_constant (to, i * 4 * sign);
5838 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5839 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5840 offset += 4 * sign;
5843 if (write_back)
5845 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5846 *offsetp = offset;
5849 seq = get_insns ();
5850 end_sequence ();
5852 return seq;
5855 result = gen_rtx_PARALLEL (VOIDmode,
5856 rtvec_alloc (count + (write_back ? 1 : 0)));
5857 if (write_back)
5859 XVECEXP (result, 0, 0)
5860 = gen_rtx_SET (GET_MODE (to), to,
5861 plus_constant (to, count * 4 * sign));
5862 i = 1;
5863 count++;
5866 for (j = 0; i < count; i++, j++)
5868 addr = plus_constant (to, j * 4 * sign);
5869 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5870 XVECEXP (result, 0, i)
5871 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5872 offset += 4 * sign;
5875 if (write_back)
5876 *offsetp = offset;
5878 return result;
5882 arm_gen_movmemqi (rtx *operands)
5884 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5885 HOST_WIDE_INT srcoffset, dstoffset;
5886 int i;
5887 rtx src, dst, srcbase, dstbase;
5888 rtx part_bytes_reg = NULL;
5889 rtx mem;
5891 if (GET_CODE (operands[2]) != CONST_INT
5892 || GET_CODE (operands[3]) != CONST_INT
5893 || INTVAL (operands[2]) > 64
5894 || INTVAL (operands[3]) & 3)
5895 return 0;
5897 dstbase = operands[0];
5898 srcbase = operands[1];
5900 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5901 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5903 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5904 out_words_to_go = INTVAL (operands[2]) / 4;
5905 last_bytes = INTVAL (operands[2]) & 3;
5906 dstoffset = srcoffset = 0;
5908 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5909 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5911 for (i = 0; in_words_to_go >= 2; i+=4)
5913 if (in_words_to_go > 4)
5914 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5915 srcbase, &srcoffset));
5916 else
5917 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5918 FALSE, srcbase, &srcoffset));
5920 if (out_words_to_go)
5922 if (out_words_to_go > 4)
5923 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5924 dstbase, &dstoffset));
5925 else if (out_words_to_go != 1)
5926 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5927 dst, TRUE,
5928 (last_bytes == 0
5929 ? FALSE : TRUE),
5930 dstbase, &dstoffset));
5931 else
5933 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5934 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5935 if (last_bytes != 0)
5937 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5938 dstoffset += 4;
5943 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5944 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5947 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5948 if (out_words_to_go)
5950 rtx sreg;
5952 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5953 sreg = copy_to_reg (mem);
5955 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5956 emit_move_insn (mem, sreg);
5957 in_words_to_go--;
5959 gcc_assert (!in_words_to_go); /* Sanity check */
5962 if (in_words_to_go)
5964 gcc_assert (in_words_to_go > 0);
5966 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5967 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5970 gcc_assert (!last_bytes || part_bytes_reg);
5972 if (BYTES_BIG_ENDIAN && last_bytes)
5974 rtx tmp = gen_reg_rtx (SImode);
5976 /* The bytes we want are in the top end of the word. */
5977 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5978 GEN_INT (8 * (4 - last_bytes))));
5979 part_bytes_reg = tmp;
5981 while (last_bytes)
5983 mem = adjust_automodify_address (dstbase, QImode,
5984 plus_constant (dst, last_bytes - 1),
5985 dstoffset + last_bytes - 1);
5986 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5988 if (--last_bytes)
5990 tmp = gen_reg_rtx (SImode);
5991 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5992 part_bytes_reg = tmp;
5997 else
5999 if (last_bytes > 1)
6001 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6002 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6003 last_bytes -= 2;
6004 if (last_bytes)
6006 rtx tmp = gen_reg_rtx (SImode);
6007 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6008 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6009 part_bytes_reg = tmp;
6010 dstoffset += 2;
6014 if (last_bytes)
6016 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6017 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6021 return 1;
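/* Worked example (added note): for a 10-byte copy, INTVAL (operands[2])
   is 10, so in_words_to_go == 3, out_words_to_go == 2 and
   last_bytes == 2.  A single pass of the loop emits a three-word load
   multiple and a two-word store multiple; the third word is already in
   part_bytes_reg (r2), and its remaining two bytes are flushed by the
   halfword store at the end. */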
6024 /* Generate a memory reference for a half word, such that it will be loaded
6025 into the top 16 bits of the word. We can assume that the address is
6026 known to be alignable and of the form reg, or plus (reg, const). */
6028 rtx
6029 arm_gen_rotated_half_load (rtx memref)
6031 HOST_WIDE_INT offset = 0;
6032 rtx base = XEXP (memref, 0);
6034 if (GET_CODE (base) == PLUS)
6036 offset = INTVAL (XEXP (base, 1));
6037 base = XEXP (base, 0);
6040 /* If we aren't allowed to generate unaligned addresses, then fail. */
6041 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
6042 return NULL;
6044 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
6046 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
6047 return base;
6049 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
6052 /* Select a dominance comparison mode if possible for a test of the general
6053 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6054 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6055 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6056 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6057 In all cases OP will be either EQ or NE, but we don't need to know which
6058 here. If we are unable to support a dominance comparison we return
6059 CC mode. This will then fail to match for the RTL expressions that
6060 generate this call. */
6061 enum machine_mode
6062 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6064 enum rtx_code cond1, cond2;
6065 int swapped = 0;
6067 /* Currently we will probably get the wrong result if the individual
6068 comparisons are not simple. This also ensures that it is safe to
6069 reverse a comparison if necessary. */
6070 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6071 != CCmode)
6072 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6073 != CCmode))
6074 return CCmode;
6076 /* The if_then_else variant of this tests the second condition if the
6077 first passes, but is true if the first fails. Reverse the first
6078 condition to get a true "inclusive-or" expression. */
6079 if (cond_or == DOM_CC_NX_OR_Y)
6080 cond1 = reverse_condition (cond1);
6082 /* If the comparisons are not equal, and one doesn't dominate the other,
6083 then we can't do this. */
6084 if (cond1 != cond2
6085 && !comparison_dominates_p (cond1, cond2)
6086 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6087 return CCmode;
6089 if (swapped)
6091 enum rtx_code temp = cond1;
6092 cond1 = cond2;
6093 cond2 = temp;
6096 switch (cond1)
6098 case EQ:
6099 if (cond_or == DOM_CC_X_AND_Y)
6100 return CC_DEQmode;
6102 switch (cond2)
6104 case EQ: return CC_DEQmode;
6105 case LE: return CC_DLEmode;
6106 case LEU: return CC_DLEUmode;
6107 case GE: return CC_DGEmode;
6108 case GEU: return CC_DGEUmode;
6109 default: gcc_unreachable ();
6112 case LT:
6113 if (cond_or == DOM_CC_X_AND_Y)
6114 return CC_DLTmode;
6116 switch (cond2)
6118 case LT:
6119 return CC_DLTmode;
6120 case LE:
6121 return CC_DLEmode;
6122 case NE:
6123 return CC_DNEmode;
6124 default:
6125 gcc_unreachable ();
6128 case GT:
6129 if (cond_or == DOM_CC_X_AND_Y)
6130 return CC_DGTmode;
6132 switch (cond2)
6134 case GT:
6135 return CC_DGTmode;
6136 case GE:
6137 return CC_DGEmode;
6138 case NE:
6139 return CC_DNEmode;
6140 default:
6141 gcc_unreachable ();
6144 case LTU:
6145 if (cond_or == DOM_CC_X_AND_Y)
6146 return CC_DLTUmode;
6148 switch (cond2)
6150 case LTU:
6151 return CC_DLTUmode;
6152 case LEU:
6153 return CC_DLEUmode;
6154 case NE:
6155 return CC_DNEmode;
6156 default:
6157 gcc_unreachable ();
6160 case GTU:
6161 if (cond_or == DOM_CC_X_AND_Y)
6162 return CC_DGTUmode;
6164 switch (cond2)
6166 case GTU:
6167 return CC_DGTUmode;
6168 case GEU:
6169 return CC_DGEUmode;
6170 case NE:
6171 return CC_DNEmode;
6172 default:
6173 gcc_unreachable ();
6176 /* The remaining cases only occur when both comparisons are the
6177 same. */
6178 case NE:
6179 gcc_assert (cond1 == cond2);
6180 return CC_DNEmode;
6182 case LE:
6183 gcc_assert (cond1 == cond2);
6184 return CC_DLEmode;
6186 case GE:
6187 gcc_assert (cond1 == cond2);
6188 return CC_DGEmode;
6190 case LEU:
6191 gcc_assert (cond1 == cond2);
6192 return CC_DLEUmode;
6194 case GEU:
6195 gcc_assert (cond1 == cond2);
6196 return CC_DGEUmode;
6198 default:
6199 gcc_unreachable ();
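/* Example (added note): for a combined test (a == b) || (a <= b),
   COND1 is EQ and COND2 is LE.  EQ dominates LE (equality implies
   less-or-equal), so the EQ case above returns CC_DLEmode and the
   pair collapses into a single LE-style conditional compare.  EQ
   paired with LT dominates in neither direction, so that combination
   returns CCmode and the calling pattern fails to match. */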
6203 enum machine_mode
6204 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6206 /* All floating point compares return CCFP if it is an equality
6207 comparison, and CCFPE otherwise. */
6208 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6210 switch (op)
6212 case EQ:
6213 case NE:
6214 case UNORDERED:
6215 case ORDERED:
6216 case UNLT:
6217 case UNLE:
6218 case UNGT:
6219 case UNGE:
6220 case UNEQ:
6221 case LTGT:
6222 return CCFPmode;
6224 case LT:
6225 case LE:
6226 case GT:
6227 case GE:
6228 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6229 return CCFPmode;
6230 return CCFPEmode;
6232 default:
6233 gcc_unreachable ();
6237 /* A compare with a shifted operand. Because of canonicalization, the
6238 comparison will have to be swapped when we emit the assembler. */
6239 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6240 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6241 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6242 || GET_CODE (x) == ROTATERT))
6243 return CC_SWPmode;
6245 /* This operation is performed swapped, but since we only rely on the Z
6246 flag we don't need an additional mode. */
6247 if (GET_MODE (y) == SImode && REG_P (y)
6248 && GET_CODE (x) == NEG
6249 && (op == EQ || op == NE))
6250 return CC_Zmode;
6252 /* This is a special case that is used by combine to allow a
6253 comparison of a shifted byte load to be split into a zero-extend
6254 followed by a comparison of the shifted integer (only valid for
6255 equalities and unsigned inequalities). */
6256 if (GET_MODE (x) == SImode
6257 && GET_CODE (x) == ASHIFT
6258 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6259 && GET_CODE (XEXP (x, 0)) == SUBREG
6260 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6261 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6262 && (op == EQ || op == NE
6263 || op == GEU || op == GTU || op == LTU || op == LEU)
6264 && GET_CODE (y) == CONST_INT)
6265 return CC_Zmode;
6267 /* A construct for a conditional compare: if the false arm contains
6268 0, then both conditions must be true; otherwise either condition
6269 must be true. Not all conditions are possible, so CCmode is
6270 returned if it can't be done. */
6271 if (GET_CODE (x) == IF_THEN_ELSE
6272 && (XEXP (x, 2) == const0_rtx
6273 || XEXP (x, 2) == const1_rtx)
6274 && COMPARISON_P (XEXP (x, 0))
6275 && COMPARISON_P (XEXP (x, 1)))
6276 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6277 INTVAL (XEXP (x, 2)));
6279 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6280 if (GET_CODE (x) == AND
6281 && COMPARISON_P (XEXP (x, 0))
6282 && COMPARISON_P (XEXP (x, 1)))
6283 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6284 DOM_CC_X_AND_Y);
6286 if (GET_CODE (x) == IOR
6287 && COMPARISON_P (XEXP (x, 0))
6288 && COMPARISON_P (XEXP (x, 1)))
6289 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6290 DOM_CC_X_OR_Y);
6292 /* An operation (on Thumb) where we want to test for a single bit.
6293 This is done by shifting that bit up into the top bit of a
6294 scratch register; we can then branch on the sign bit. */
6295 if (TARGET_THUMB
6296 && GET_MODE (x) == SImode
6297 && (op == EQ || op == NE)
6298 && (GET_CODE (x) == ZERO_EXTRACT))
6299 return CC_Nmode;
6301 /* For an operation that sets the condition codes as a side-effect,
6302 the V flag is not set correctly, so we can only use comparisons where
6303 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6304 instead.) */
6305 if (GET_MODE (x) == SImode
6306 && y == const0_rtx
6307 && (op == EQ || op == NE || op == LT || op == GE)
6308 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6309 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6310 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6311 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6312 || GET_CODE (x) == LSHIFTRT
6313 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6314 || GET_CODE (x) == ROTATERT
6315 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6316 return CC_NOOVmode;
6318 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6319 return CC_Zmode;
6321 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6322 && GET_CODE (x) == PLUS
6323 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6324 return CC_Cmode;
6326 return CCmode;
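/* Example (added note): the CC_Cmode test above matches the common
   unsigned-overflow idiom `if (a + b < a)': X is (plus:SI a b), Y is
   a, and the code is LTU, so only the carry flag of the addition
   needs to be inspected. */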
6329 /* X and Y are two things to compare using CODE. Emit the compare insn and
6330 return the rtx for the cc register in the proper mode. */
6332 rtx
6333 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6335 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6336 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6338 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6339 gen_rtx_COMPARE (mode, x, y)));
6341 return cc_reg;
6344 /* Generate a sequence of insns that will generate the correct return
6345 address mask depending on the physical architecture that the program
6346 is running on. */
6347 rtx
6348 arm_gen_return_addr_mask (void)
6350 rtx reg = gen_reg_rtx (Pmode);
6352 emit_insn (gen_return_addr_mask (reg));
6353 return reg;
6356 void
6357 arm_reload_in_hi (rtx *operands)
6359 rtx ref = operands[1];
6360 rtx base, scratch;
6361 HOST_WIDE_INT offset = 0;
6363 if (GET_CODE (ref) == SUBREG)
6365 offset = SUBREG_BYTE (ref);
6366 ref = SUBREG_REG (ref);
6369 if (GET_CODE (ref) == REG)
6371 /* We have a pseudo which has been spilt onto the stack; there
6372 are two cases here: the first where there is a simple
6373 stack-slot replacement and a second where the stack-slot is
6374 out of range, or is used as a subreg. */
6375 if (reg_equiv_mem[REGNO (ref)])
6377 ref = reg_equiv_mem[REGNO (ref)];
6378 base = find_replacement (&XEXP (ref, 0));
6380 else
6381 /* The slot is out of range, or was dressed up in a SUBREG. */
6382 base = reg_equiv_address[REGNO (ref)];
6384 else
6385 base = find_replacement (&XEXP (ref, 0));
6387 /* Handle the case where the address is too complex to be offset by 1. */
6388 if (GET_CODE (base) == MINUS
6389 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6391 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6393 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6394 base = base_plus;
6396 else if (GET_CODE (base) == PLUS)
6398 /* The addend must be CONST_INT, or we would have dealt with it above. */
6399 HOST_WIDE_INT hi, lo;
6401 offset += INTVAL (XEXP (base, 1));
6402 base = XEXP (base, 0);
6404 /* Rework the address into a legal sequence of insns. */
6405 /* Valid range for lo is -4095 -> 4095 */
6406 lo = (offset >= 0
6407 ? (offset & 0xfff)
6408 : -((-offset) & 0xfff));
6410 /* Corner case: if lo is the max offset, then we would be out of range
6411 once we have added the additional 1 below, so bump the msb into the
6412 pre-loading insn(s). */
6413 if (lo == 4095)
6414 lo &= 0x7ff;
6416 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6417 ^ (HOST_WIDE_INT) 0x80000000)
6418 - (HOST_WIDE_INT) 0x80000000);
6420 gcc_assert (hi + lo == offset);
6422 if (hi != 0)
6424 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6426 /* Get the base address; addsi3 knows how to handle constants
6427 that require more than one insn. */
6428 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6429 base = base_plus;
6430 offset = lo;
6434 /* Operands[2] may overlap operands[0] (though it won't overlap
6435 operands[1]), that's why we asked for a DImode reg -- so we can
6436 use the bit that does not overlap. */
6437 if (REGNO (operands[2]) == REGNO (operands[0]))
6438 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6439 else
6440 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6442 emit_insn (gen_zero_extendqisi2 (scratch,
6443 gen_rtx_MEM (QImode,
6444 plus_constant (base,
6445 offset))));
6446 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6447 gen_rtx_MEM (QImode,
6448 plus_constant (base,
6449 offset + 1))));
6450 if (!BYTES_BIG_ENDIAN)
6451 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6452 gen_rtx_IOR (SImode,
6453 gen_rtx_ASHIFT
6454 (SImode,
6455 gen_rtx_SUBREG (SImode, operands[0], 0),
6456 GEN_INT (8)),
6457 scratch)));
6458 else
6459 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6460 gen_rtx_IOR (SImode,
6461 gen_rtx_ASHIFT (SImode, scratch,
6462 GEN_INT (8)),
6463 gen_rtx_SUBREG (SImode, operands[0],
6464 0))));
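/* Added stand-alone sketch (hypothetical helper, not part of the
   original source) of the hi/lo address split performed above: LO
   keeps the part reachable by a +/-4095 load offset, HI is the
   remainder sign-extended from 32 bits, and HI + LO == OFFSET by
   construction. */
#if 0
#include <assert.h>
static void
split_reload_offset (long long offset, long long *hi, long long *lo)
{
  *lo = offset >= 0 ? (offset & 0xfff) : -((-offset) & 0xfff);
  if (*lo == 4095)      /* Keep room for the extra +1 byte access. */
    *lo &= 0x7ff;
  *hi = (((offset - *lo) & 0xffffffffLL) ^ 0x80000000LL) - 0x80000000LL;
  assert (*hi + *lo == offset); /* E.g. 0x1234 splits as 0x1000 + 0x234. */
}
#endif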
6467 /* Handle storing a half-word to memory during reload by synthesizing as two
6468 byte stores. Take care not to clobber the input values until after we
6469 have moved them somewhere safe. This code assumes that if the DImode
6470 scratch in operands[2] overlaps either the input value or output address
6471 in some way, then that value must die in this insn (we absolutely need
6472 two scratch registers for some corner cases). */
6473 void
6474 arm_reload_out_hi (rtx *operands)
6476 rtx ref = operands[0];
6477 rtx outval = operands[1];
6478 rtx base, scratch;
6479 HOST_WIDE_INT offset = 0;
6481 if (GET_CODE (ref) == SUBREG)
6483 offset = SUBREG_BYTE (ref);
6484 ref = SUBREG_REG (ref);
6487 if (GET_CODE (ref) == REG)
6489 /* We have a pseudo which has been spilt onto the stack; there
6490 are two cases here: the first where there is a simple
6491 stack-slot replacement and a second where the stack-slot is
6492 out of range, or is used as a subreg. */
6493 if (reg_equiv_mem[REGNO (ref)])
6495 ref = reg_equiv_mem[REGNO (ref)];
6496 base = find_replacement (&XEXP (ref, 0));
6498 else
6499 /* The slot is out of range, or was dressed up in a SUBREG. */
6500 base = reg_equiv_address[REGNO (ref)];
6502 else
6503 base = find_replacement (&XEXP (ref, 0));
6505 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6507 /* Handle the case where the address is too complex to be offset by 1. */
6508 if (GET_CODE (base) == MINUS
6509 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6511 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6513 /* Be careful not to destroy OUTVAL. */
6514 if (reg_overlap_mentioned_p (base_plus, outval))
6516 /* Updating base_plus might destroy outval, see if we can
6517 swap the scratch and base_plus. */
6518 if (!reg_overlap_mentioned_p (scratch, outval))
6520 rtx tmp = scratch;
6521 scratch = base_plus;
6522 base_plus = tmp;
6524 else
6526 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6528 /* Be conservative and copy OUTVAL into the scratch now,
6529 this should only be necessary if outval is a subreg
6530 of something larger than a word. */
6531 /* XXX Might this clobber base? I can't see how it can,
6532 since scratch is known to overlap with OUTVAL, and
6533 must be wider than a word. */
6534 emit_insn (gen_movhi (scratch_hi, outval));
6535 outval = scratch_hi;
6539 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6540 base = base_plus;
6542 else if (GET_CODE (base) == PLUS)
6544 /* The addend must be CONST_INT, or we would have dealt with it above. */
6545 HOST_WIDE_INT hi, lo;
6547 offset += INTVAL (XEXP (base, 1));
6548 base = XEXP (base, 0);
6550 /* Rework the address into a legal sequence of insns. */
6551 /* Valid range for lo is -4095 -> 4095 */
6552 lo = (offset >= 0
6553 ? (offset & 0xfff)
6554 : -((-offset) & 0xfff));
6556 /* Corner case: if lo is the max offset, then we would be out of range
6557 once we have added the additional 1 below, so bump the msb into the
6558 pre-loading insn(s). */
6559 if (lo == 4095)
6560 lo &= 0x7ff;
6562 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6563 ^ (HOST_WIDE_INT) 0x80000000)
6564 - (HOST_WIDE_INT) 0x80000000);
6566 gcc_assert (hi + lo == offset);
6568 if (hi != 0)
6570 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6572 /* Be careful not to destroy OUTVAL. */
6573 if (reg_overlap_mentioned_p (base_plus, outval))
6575 /* Updating base_plus might destroy outval, see if we
6576 can swap the scratch and base_plus. */
6577 if (!reg_overlap_mentioned_p (scratch, outval))
6579 rtx tmp = scratch;
6580 scratch = base_plus;
6581 base_plus = tmp;
6583 else
6585 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6587 /* Be conservative and copy outval into scratch now,
6588 this should only be necessary if outval is a
6589 subreg of something larger than a word. */
6590 /* XXX Might this clobber base? I can't see how it
6591 can, since scratch is known to overlap with
6592 outval. */
6593 emit_insn (gen_movhi (scratch_hi, outval));
6594 outval = scratch_hi;
6598 /* Get the base address; addsi3 knows how to handle constants
6599 that require more than one insn. */
6600 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6601 base = base_plus;
6602 offset = lo;
6606 if (BYTES_BIG_ENDIAN)
6608 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6609 plus_constant (base, offset + 1)),
6610 gen_lowpart (QImode, outval)));
6611 emit_insn (gen_lshrsi3 (scratch,
6612 gen_rtx_SUBREG (SImode, outval, 0),
6613 GEN_INT (8)));
6614 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6615 gen_lowpart (QImode, scratch)));
6617 else
6619 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6620 gen_lowpart (QImode, outval)));
6621 emit_insn (gen_lshrsi3 (scratch,
6622 gen_rtx_SUBREG (SImode, outval, 0),
6623 GEN_INT (8)));
6624 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6625 plus_constant (base, offset + 1)),
6626 gen_lowpart (QImode, scratch)));
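/* Example (added note, schematic only): a little-endian store of r1
   to [r2, #10] follows the else branch above and becomes roughly
       strb r1, [r2, #10]      @ low byte
       mov  r3, r1, lsr #8     @ r3 standing in for the scratch reg
       strb r3, [r2, #11]      @ high byte
   with the shifted copy landing in the caller-supplied scratch. */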
6630 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
6631 (padded to the size of a word) should be passed in a register. */
6633 static bool
6634 arm_must_pass_in_stack (enum machine_mode mode, tree type)
6636 if (TARGET_AAPCS_BASED)
6637 return must_pass_in_stack_var_size (mode, type);
6638 else
6639 return must_pass_in_stack_var_size_or_pad (mode, type);
6643 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
6644 Return true if an argument passed on the stack should be padded upwards,
6645 i.e. if the least-significant byte has useful data. */
6647 bool
6648 arm_pad_arg_upward (enum machine_mode mode, tree type)
6650 if (!TARGET_AAPCS_BASED)
6651 return DEFAULT_FUNCTION_ARG_PADDING(mode, type);
6653 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
6654 return false;
6656 return true;
6660 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
6661 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
6662 byte of the register has useful data, and return the opposite if the
6663 most significant byte does.
6664 For AAPCS, small aggregates and small complex types are always padded
6665 upwards. */
6667 bool
6668 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
6669 tree type, int first ATTRIBUTE_UNUSED)
6671 if (TARGET_AAPCS_BASED
6672 && BYTES_BIG_ENDIAN
6673 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
6674 && int_size_in_bytes (type) <= 4)
6675 return true;
6677 /* Otherwise, use default padding. */
6678 return !BYTES_BIG_ENDIAN;
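/* Example (added note): with AAPCS on a big-endian target, a
   struct { char c[3]; } argument (size 3 <= 4) is padded upward in
   its register by the test above, whereas an int argument on the
   stack is padded downward, because arm_pad_arg_upward returns false
   for integral types when BYTES_BIG_ENDIAN is set. */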
6683 /* Print a symbolic form of X to the debug file, F. */
6684 static void
6685 arm_print_value (FILE *f, rtx x)
6687 switch (GET_CODE (x))
6689 case CONST_INT:
6690 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6691 return;
6693 case CONST_DOUBLE:
6694 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6695 return;
6697 case CONST_VECTOR:
6699 int i;
6701 fprintf (f, "<");
6702 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6704 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6705 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6706 fputc (',', f);
6708 fprintf (f, ">");
6710 return;
6712 case CONST_STRING:
6713 fprintf (f, "\"%s\"", XSTR (x, 0));
6714 return;
6716 case SYMBOL_REF:
6717 fprintf (f, "`%s'", XSTR (x, 0));
6718 return;
6720 case LABEL_REF:
6721 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6722 return;
6724 case CONST:
6725 arm_print_value (f, XEXP (x, 0));
6726 return;
6728 case PLUS:
6729 arm_print_value (f, XEXP (x, 0));
6730 fprintf (f, "+");
6731 arm_print_value (f, XEXP (x, 1));
6732 return;
6734 case PC:
6735 fprintf (f, "pc");
6736 return;
6738 default:
6739 fprintf (f, "????");
6740 return;
6744 /* Routines for manipulation of the constant pool. */
6746 /* ARM instructions cannot load a large constant directly into a
6747 register; such a constant has to come from a pc-relative load. The
6748 constant must therefore be placed in the addressable range of the
6749 pc-relative load. Depending on the precise pc-relative load
6750 instruction, the range is somewhere between 256 bytes and 4k. This
6751 means that we often have to dump a constant inside a function, and
6752 generate code to branch around it.
6754 It is important to minimize this, since the branches will slow
6755 things down and make the code larger.
6757 Normally we can hide the table after an existing unconditional
6758 branch so that there is no interruption of the flow, but in the
6759 worst case the code looks like this:
6761 ldr rn, L1
6763 b L2
6764 align
6765 L1: .long value
6769 ldr rn, L3
6771 b L4
6772 align
6773 L3: .long value
6777 We fix this by performing a scan after scheduling, which notices
6778 which instructions need to have their operands fetched from the
6779 constant table and builds the table.
6781 The algorithm starts by building a table of all the constants that
6782 need fixing up and all the natural barriers in the function (places
6783 where a constant table can be dropped without breaking the flow).
6784 For each fixup we note how far the pc-relative replacement will be
6785 able to reach and the offset of the instruction into the function.
6787 Having built the table we then group the fixes together to form
6788 tables that are as large as possible (subject to addressing
6789 constraints) and emit each table of constants after the last
6790 barrier that is within range of all the instructions in the group.
6791 If a group does not contain a barrier, then we forcibly create one
6792 by inserting a jump instruction into the flow. Once the table has
6793 been inserted, the insns are then modified to reference the
6794 relevant entry in the pool.
6796 Possible enhancements to the algorithm (not implemented) are:
6798 1) For some processors and object formats, there may be benefit in
6799 aligning the pools to the start of cache lines; this alignment
6800 would need to be taken into account when calculating addressability
6801 of a pool. */
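/* Added sketch (not compiled, simplified): the shape of the scan
   described above.  The real implementation is arm_reorg (), later
   in this file. */
#if 0
HOST_WIDE_INT address = 0;
rtx insn;
for (insn = get_insns (); insn; insn = next_nonnote_insn (insn))
  {
    if (GET_CODE (insn) == BARRIER)
      push_minipool_barrier (insn, address);  /* A natural break. */
    else if (INSN_P (insn))
      {
        /* Record any operands that must come from the pool, together
           with this insn's address and its reachable range. */
        note_invalid_constants (insn, address, true);
        address += get_attr_length (insn);
      }
  }
/* The recorded fixes are then grouped into pools, each emitted after
   the last barrier that every insn in the group can reach. */
#endif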
6803 /* These typedefs are located at the start of this file, so that
6804 they can be used in the prototypes there. This comment is to
6805 remind readers of that fact so that the following structures
6806 can be understood more easily.
6808 typedef struct minipool_node Mnode;
6809 typedef struct minipool_fixup Mfix; */
6811 struct minipool_node
6813 /* Doubly linked chain of entries. */
6814 Mnode * next;
6815 Mnode * prev;
6816 /* The maximum offset into the code at which this entry can be placed. While
6817 pushing fixes for forward references, all entries are sorted in order
6818 of increasing max_address. */
6819 HOST_WIDE_INT max_address;
6820 /* Similarly for an entry inserted for a backwards ref. */
6821 HOST_WIDE_INT min_address;
6822 /* The number of fixes referencing this entry. This can become zero
6823 if we "unpush" an entry. In this case we ignore the entry when we
6824 come to emit the code. */
6825 int refcount;
6826 /* The offset from the start of the minipool. */
6827 HOST_WIDE_INT offset;
6828 /* The value in the table. */
6829 rtx value;
6830 /* The mode of value. */
6831 enum machine_mode mode;
6832 /* The size of the value. With iWMMXt enabled
6833 sizes > 4 also imply an alignment of 8 bytes. */
6834 int fix_size;
6837 struct minipool_fixup
6839 Mfix * next;                  /* Next fix in the chain. */
6840 rtx insn;                     /* The insn requiring the fix. */
6841 HOST_WIDE_INT address;        /* Offset of the insn from the function start. */
6842 rtx * loc;                    /* Part of the insn that must be patched. */
6843 enum machine_mode mode;       /* Mode of the value to be loaded. */
6844 int fix_size;                 /* Size of the pool entry, in bytes. */
6845 rtx value;                    /* The constant that must be loaded. */
6846 Mnode * minipool;             /* The pool entry serving this fix. */
6847 HOST_WIDE_INT forwards;       /* Reachable range forwards ... */
6848 HOST_WIDE_INT backwards;      /* ... and backwards from the insn. */
6851 /* Fixes less than a word need padding out to a word boundary. */
6852 #define MINIPOOL_FIX_SIZE(mode) \
6853 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
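/* Example (added note): MINIPOOL_FIX_SIZE (QImode) and
   MINIPOOL_FIX_SIZE (SImode) both evaluate to 4, since sub-word
   entries are padded out to a full word, while
   MINIPOOL_FIX_SIZE (DImode) evaluates to 8. */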
6855 static Mnode * minipool_vector_head;
6856 static Mnode * minipool_vector_tail;
6857 static rtx minipool_vector_label;
6859 /* The linked list of all minipool fixes required for this function. */
6860 Mfix * minipool_fix_head;
6861 Mfix * minipool_fix_tail;
6862 /* The fix entry for the current minipool, once it has been placed. */
6863 Mfix * minipool_barrier;
6865 /* Determines if INSN is the start of a jump table. Returns the end
6866 of the TABLE or NULL_RTX. */
6867 static rtx
6868 is_jump_table (rtx insn)
6870 rtx table;
6872 if (GET_CODE (insn) == JUMP_INSN
6873 && JUMP_LABEL (insn) != NULL
6874 && ((table = next_real_insn (JUMP_LABEL (insn)))
6875 == next_real_insn (insn))
6876 && table != NULL
6877 && GET_CODE (table) == JUMP_INSN
6878 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6879 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6880 return table;
6882 return NULL_RTX;
6885 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6886 #define JUMP_TABLES_IN_TEXT_SECTION 0
6887 #endif
6889 static HOST_WIDE_INT
6890 get_jump_table_size (rtx insn)
6892 /* ADDR_VECs only take room if read-only data goes into the text
6893 section. */
6894 if (JUMP_TABLES_IN_TEXT_SECTION
6895 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6896 || 1
6897 #endif
6900 rtx body = PATTERN (insn);
6901 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6903 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6906 return 0;
6909 /* Move a minipool fix MP from its current location to before MAX_MP.
6910 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6911 constraints may need updating. */
6912 static Mnode *
6913 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6914 HOST_WIDE_INT max_address)
6916 /* The code below assumes these are different. */
6917 gcc_assert (mp != max_mp);
6919 if (max_mp == NULL)
6921 if (max_address < mp->max_address)
6922 mp->max_address = max_address;
6924 else
6926 if (max_address > max_mp->max_address - mp->fix_size)
6927 mp->max_address = max_mp->max_address - mp->fix_size;
6928 else
6929 mp->max_address = max_address;
6931 /* Unlink MP from its current position. Since max_mp is non-null,
6932 mp->prev must be non-null. */
6933 mp->prev->next = mp->next;
6934 if (mp->next != NULL)
6935 mp->next->prev = mp->prev;
6936 else
6937 minipool_vector_tail = mp->prev;
6939 /* Re-insert it before MAX_MP. */
6940 mp->next = max_mp;
6941 mp->prev = max_mp->prev;
6942 max_mp->prev = mp;
6944 if (mp->prev != NULL)
6945 mp->prev->next = mp;
6946 else
6947 minipool_vector_head = mp;
6950 /* Save the new entry. */
6951 max_mp = mp;
6953 /* Scan over the preceding entries and adjust their addresses as
6954 required. */
6955 while (mp->prev != NULL
6956 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6958 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6959 mp = mp->prev;
6962 return max_mp;
6965 /* Add a constant to the minipool for a forward reference. Returns the
6966 node added or NULL if the constant will not fit in this pool. */
6967 static Mnode *
6968 add_minipool_forward_ref (Mfix *fix)
6970 /* If set, max_mp is the first pool_entry that has a lower
6971 constraint than the one we are trying to add. */
6972 Mnode * max_mp = NULL;
6973 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6974 Mnode * mp;
6976 /* If this fix's address is greater than the address of the first
6977 entry, then we can't put the fix in this pool. We subtract the
6978 size of the current fix to ensure that if the table is fully
6979 packed we still have enough room to insert this value by shuffling
6980 the other fixes forwards. */
6981 if (minipool_vector_head &&
6982 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6983 return NULL;
6985 /* Scan the pool to see if a constant with the same value has
6986 already been added. While we are doing this, also note the
6987 location where we must insert the constant if it doesn't already
6988 exist. */
6989 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6991 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6992 && fix->mode == mp->mode
6993 && (GET_CODE (fix->value) != CODE_LABEL
6994 || (CODE_LABEL_NUMBER (fix->value)
6995 == CODE_LABEL_NUMBER (mp->value)))
6996 && rtx_equal_p (fix->value, mp->value))
6998 /* More than one fix references this entry. */
6999 mp->refcount++;
7000 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7003 /* Note the insertion point if necessary. */
7004 if (max_mp == NULL
7005 && mp->max_address > max_address)
7006 max_mp = mp;
7008 /* If we are inserting an 8-byte-aligned quantity and
7009 we have not already found an insertion point, then
7010 make sure that all such 8-byte aligned quantities are
7011 placed at the start of the pool. */
7012 if (ARM_DOUBLEWORD_ALIGN
7013 && max_mp == NULL
7014 && fix->fix_size == 8
7015 && mp->fix_size != 8)
7017 max_mp = mp;
7018 max_address = mp->max_address;
7022 /* The value is not currently in the minipool, so we need to create
7023 a new entry for it. If MAX_MP is NULL, the entry will be put on
7024 the end of the list since the placement is less constrained than
7025 any existing entry. Otherwise, we insert the new fix before
7026 MAX_MP and, if necessary, adjust the constraints on the other
7027 entries. */
7028 mp = xmalloc (sizeof (* mp));
7029 mp->fix_size = fix->fix_size;
7030 mp->mode = fix->mode;
7031 mp->value = fix->value;
7032 mp->refcount = 1;
7033 /* Not yet required for a backwards ref. */
7034 mp->min_address = -65536;
7036 if (max_mp == NULL)
7038 mp->max_address = max_address;
7039 mp->next = NULL;
7040 mp->prev = minipool_vector_tail;
7042 if (mp->prev == NULL)
7044 minipool_vector_head = mp;
7045 minipool_vector_label = gen_label_rtx ();
7047 else
7048 mp->prev->next = mp;
7050 minipool_vector_tail = mp;
7052 else
7054 if (max_address > max_mp->max_address - mp->fix_size)
7055 mp->max_address = max_mp->max_address - mp->fix_size;
7056 else
7057 mp->max_address = max_address;
7059 mp->next = max_mp;
7060 mp->prev = max_mp->prev;
7061 max_mp->prev = mp;
7062 if (mp->prev != NULL)
7063 mp->prev->next = mp;
7064 else
7065 minipool_vector_head = mp;
7068 /* Save the new entry. */
7069 max_mp = mp;
7071 /* Scan over the preceding entries and adjust their addresses as
7072 required. */
7073 while (mp->prev != NULL
7074 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7076 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7077 mp = mp->prev;
7080 return max_mp;
7083 static Mnode *
7084 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7085 HOST_WIDE_INT min_address)
7087 HOST_WIDE_INT offset;
7089 /* The code below assumes these are different. */
7090 gcc_assert (mp != min_mp);
7092 if (min_mp == NULL)
7094 if (min_address > mp->min_address)
7095 mp->min_address = min_address;
7097 else
7099 /* We will adjust this below if it is too loose. */
7100 mp->min_address = min_address;
7102 /* Unlink MP from its current position. Since min_mp is non-null,
7103 mp->next must be non-null. */
7104 mp->next->prev = mp->prev;
7105 if (mp->prev != NULL)
7106 mp->prev->next = mp->next;
7107 else
7108 minipool_vector_head = mp->next;
7110 /* Reinsert it after MIN_MP. */
7111 mp->prev = min_mp;
7112 mp->next = min_mp->next;
7113 min_mp->next = mp;
7114 if (mp->next != NULL)
7115 mp->next->prev = mp;
7116 else
7117 minipool_vector_tail = mp;
7120 min_mp = mp;
7122 offset = 0;
7123 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7125 mp->offset = offset;
7126 if (mp->refcount > 0)
7127 offset += mp->fix_size;
7129 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7130 mp->next->min_address = mp->min_address + mp->fix_size;
7133 return min_mp;
7136 /* Add a constant to the minipool for a backward reference. Returns the
7137 node added or NULL if the constant will not fit in this pool.
7139 Note that the code for insertion for a backwards reference can be
7140 somewhat confusing because the calculated offsets for each fix do
7141 not take into account the size of the pool (which is still under
7142 construction). */
7143 static Mnode *
7144 add_minipool_backward_ref (Mfix *fix)
7146 /* If set, min_mp is the last pool_entry that has a lower constraint
7147 than the one we are trying to add. */
7148 Mnode *min_mp = NULL;
7149 /* This can be negative, since it is only a constraint. */
7150 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7151 Mnode *mp;
7153 /* If we can't reach the current pool from this insn, or if we can't
7154 insert this entry at the end of the pool without pushing other
7155 fixes out of range, then we don't try. This ensures that we
7156 can't fail later on. */
7157 if (min_address >= minipool_barrier->address
7158 || (minipool_vector_tail->min_address + fix->fix_size
7159 >= minipool_barrier->address))
7160 return NULL;
7162 /* Scan the pool to see if a constant with the same value has
7163 already been added. While we are doing this, also note the
7164 location where we must insert the constant if it doesn't already
7165 exist. */
7166 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7168 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7169 && fix->mode == mp->mode
7170 && (GET_CODE (fix->value) != CODE_LABEL
7171 || (CODE_LABEL_NUMBER (fix->value)
7172 == CODE_LABEL_NUMBER (mp->value)))
7173 && rtx_equal_p (fix->value, mp->value)
7174 /* Check that there is enough slack to move this entry to the
7175 end of the table (this is conservative). */
7176 && (mp->max_address
7177 > (minipool_barrier->address
7178 + minipool_vector_tail->offset
7179 + minipool_vector_tail->fix_size)))
7181 mp->refcount++;
7182 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7185 if (min_mp != NULL)
7186 mp->min_address += fix->fix_size;
7187 else
7189 /* Note the insertion point if necessary. */
7190 if (mp->min_address < min_address)
7192 /* For now, we do not allow the insertion of nodes that require
7193 8-byte alignment anywhere but at the start of the pool. */
7194 if (ARM_DOUBLEWORD_ALIGN
7195 && fix->fix_size == 8 && mp->fix_size != 8)
7196 return NULL;
7197 else
7198 min_mp = mp;
7200 else if (mp->max_address
7201 < minipool_barrier->address + mp->offset + fix->fix_size)
7203 /* Inserting before this entry would push the fix beyond
7204 its maximum address (which can happen if we have
7205 re-located a forwards fix); force the new fix to come
7206 after it. */
7207 min_mp = mp;
7208 min_address = mp->min_address + fix->fix_size;
7210 /* If we are inserting an 8-byte-aligned quantity and
7211 we have not already found an insertion point, then
7212 make sure that all such 8-byte aligned quantities are
7213 placed at the start of the pool. */
7214 else if (ARM_DOUBLEWORD_ALIGN
7215 && min_mp == NULL
7216 && fix->fix_size == 8
7217 && mp->fix_size < 8)
7219 min_mp = mp;
7220 min_address = mp->min_address + fix->fix_size;
7225 /* We need to create a new entry. */
7226 mp = xmalloc (sizeof (* mp));
7227 mp->fix_size = fix->fix_size;
7228 mp->mode = fix->mode;
7229 mp->value = fix->value;
7230 mp->refcount = 1;
7231 mp->max_address = minipool_barrier->address + 65536;
7233 mp->min_address = min_address;
7235 if (min_mp == NULL)
7237 mp->prev = NULL;
7238 mp->next = minipool_vector_head;
7240 if (mp->next == NULL)
7242 minipool_vector_tail = mp;
7243 minipool_vector_label = gen_label_rtx ();
7245 else
7246 mp->next->prev = mp;
7248 minipool_vector_head = mp;
7250 else
7252 mp->next = min_mp->next;
7253 mp->prev = min_mp;
7254 min_mp->next = mp;
7256 if (mp->next != NULL)
7257 mp->next->prev = mp;
7258 else
7259 minipool_vector_tail = mp;
7262 /* Save the new entry. */
7263 min_mp = mp;
7265 if (mp->prev)
7266 mp = mp->prev;
7267 else
7268 mp->offset = 0;
7270 /* Scan over the following entries and adjust their offsets. */
7271 while (mp->next != NULL)
7273 if (mp->next->min_address < mp->min_address + mp->fix_size)
7274 mp->next->min_address = mp->min_address + mp->fix_size;
7276 if (mp->refcount)
7277 mp->next->offset = mp->offset + mp->fix_size;
7278 else
7279 mp->next->offset = mp->offset;
7281 mp = mp->next;
7284 return min_mp;
7287 static void
7288 assign_minipool_offsets (Mfix *barrier)
7290 HOST_WIDE_INT offset = 0;
7291 Mnode *mp;
7293 minipool_barrier = barrier;
7295 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7297 mp->offset = offset;
7299 if (mp->refcount > 0)
7300 offset += mp->fix_size;
7304 /* Output the literal table. */
7305 static void
7306 dump_minipool (rtx scan)
7308 Mnode * mp;
7309 Mnode * nmp;
7310 int align64 = 0;
7312 if (ARM_DOUBLEWORD_ALIGN)
7313 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7314 if (mp->refcount > 0 && mp->fix_size == 8)
7316 align64 = 1;
7317 break;
7320 if (dump_file)
7321 fprintf (dump_file,
7322 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7323 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7325 scan = emit_label_after (gen_label_rtx (), scan);
7326 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7327 scan = emit_label_after (minipool_vector_label, scan);
7329 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7331 if (mp->refcount > 0)
7333 if (dump_file)
7335 fprintf (dump_file,
7336 ";; Offset %u, min %ld, max %ld ",
7337 (unsigned) mp->offset, (unsigned long) mp->min_address,
7338 (unsigned long) mp->max_address);
7339 arm_print_value (dump_file, mp->value);
7340 fputc ('\n', dump_file);
7343 switch (mp->fix_size)
7345 #ifdef HAVE_consttable_1
7346 case 1:
7347 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7348 break;
7350 #endif
7351 #ifdef HAVE_consttable_2
7352 case 2:
7353 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7354 break;
7356 #endif
7357 #ifdef HAVE_consttable_4
7358 case 4:
7359 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7360 break;
7362 #endif
7363 #ifdef HAVE_consttable_8
7364 case 8:
7365 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7366 break;
7368 #endif
7369 default:
7370 gcc_unreachable ();
7374 nmp = mp->next;
7375 free (mp);
7378 minipool_vector_head = minipool_vector_tail = NULL;
7379 scan = emit_insn_after (gen_consttable_end (), scan);
7380 scan = emit_barrier_after (scan);
7383 /* Return the cost of forcibly inserting a barrier after INSN. */
7384 static int
7385 arm_barrier_cost (rtx insn)
7387 /* Basing the location of the pool on the loop depth is preferable,
7388 but at the moment, the basic block information seems to be
7389 corrupted by this stage of the compilation. */
7390 int base_cost = 50;
7391 rtx next = next_nonnote_insn (insn);
7393 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7394 base_cost -= 20;
7396 switch (GET_CODE (insn))
7398 case CODE_LABEL:
7399 /* It will always be better to place the table before the label, rather
7400 than after it. */
7401 return 50;
7403 case INSN:
7404 case CALL_INSN:
7405 return base_cost;
7407 case JUMP_INSN:
7408 return base_cost - 10;
7410 default:
7411 return base_cost + 10;
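/* Example (added note): an ordinary INSN costs 50 and a JUMP_INSN 40,
   and each is 20 cheaper when the next non-note insn is a CODE_LABEL,
   since the flow already breaks there; create_fix_barrier, below,
   scans the candidate range for the cheapest position. */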
7415 /* Find the best place in the insn stream in the range
7416 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7417 Create the barrier by inserting a jump and add a new fix entry for
7418 it. */
7419 static Mfix *
7420 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7422 HOST_WIDE_INT count = 0;
7423 rtx barrier;
7424 rtx from = fix->insn;
7425 rtx selected = from;
7426 int selected_cost;
7427 HOST_WIDE_INT selected_address;
7428 Mfix * new_fix;
7429 HOST_WIDE_INT max_count = max_address - fix->address;
7430 rtx label = gen_label_rtx ();
7432 selected_cost = arm_barrier_cost (from);
7433 selected_address = fix->address;
7435 while (from && count < max_count)
7437 rtx tmp;
7438 int new_cost;
7440 /* This code shouldn't have been called if there was a natural barrier
7441 within range. */
7442 gcc_assert (GET_CODE (from) != BARRIER);
7444 /* Count the length of this insn. */
7445 count += get_attr_length (from);
7447 /* If there is a jump table, add its length. */
7448 tmp = is_jump_table (from);
7449 if (tmp != NULL)
7451 count += get_jump_table_size (tmp);
7453 /* Jump tables aren't in a basic block, so base the cost on
7454 the dispatch insn. If we select this location, we will
7455 still put the pool after the table. */
7456 new_cost = arm_barrier_cost (from);
7458 if (count < max_count && new_cost <= selected_cost)
7460 selected = tmp;
7461 selected_cost = new_cost;
7462 selected_address = fix->address + count;
7465 /* Continue after the dispatch table. */
7466 from = NEXT_INSN (tmp);
7467 continue;
7470 new_cost = arm_barrier_cost (from);
7472 if (count < max_count && new_cost <= selected_cost)
7474 selected = from;
7475 selected_cost = new_cost;
7476 selected_address = fix->address + count;
7479 from = NEXT_INSN (from);
7482 /* Create a new JUMP_INSN that branches around a barrier. */
7483 from = emit_jump_insn_after (gen_jump (label), selected);
7484 JUMP_LABEL (from) = label;
7485 barrier = emit_barrier_after (from);
7486 emit_label_after (label, barrier);
7488 /* Create a minipool barrier entry for the new barrier. */
7489 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7490 new_fix->insn = barrier;
7491 new_fix->address = selected_address;
7492 new_fix->next = fix->next;
7493 fix->next = new_fix;
7495 return new_fix;
7498 /* Record that there is a natural barrier in the insn stream at
7499 ADDRESS. */
7500 static void
7501 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7503 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7505 fix->insn = insn;
7506 fix->address = address;
7508 fix->next = NULL;
7509 if (minipool_fix_head != NULL)
7510 minipool_fix_tail->next = fix;
7511 else
7512 minipool_fix_head = fix;
7514 minipool_fix_tail = fix;
7517 /* Record INSN, which will need fixing up to load a value from the
7518 minipool. ADDRESS is the offset of the insn from the start of the
7519 function; LOC is a pointer to the part of the insn which requires
7520 fixing; VALUE is the constant that must be loaded, which is of type
7521 MODE. */
7522 static void
7523 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7524 enum machine_mode mode, rtx value)
7526 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7528 #ifdef AOF_ASSEMBLER
7529 /* PIC symbol references need to be converted into offsets into the
7530 based area. */
7531 /* XXX This shouldn't be done here. */
7532 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7533 value = aof_pic_entry (value);
7534 #endif /* AOF_ASSEMBLER */
7536 fix->insn = insn;
7537 fix->address = address;
7538 fix->loc = loc;
7539 fix->mode = mode;
7540 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7541 fix->value = value;
7542 fix->forwards = get_attr_pool_range (insn);
7543 fix->backwards = get_attr_neg_pool_range (insn);
7544 fix->minipool = NULL;
7546 /* If an insn doesn't have a range defined for it, then it isn't
7547 expecting to be reworked by this code. Better to stop now than
7548 to generate duff assembly code. */
7549 gcc_assert (fix->forwards || fix->backwards);
7551 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7552 So there might be an empty word before the start of the pool.
7553 Hence we reduce the forward range by 4 to allow for this
7554 possibility. */
7555 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7556 fix->forwards -= 4;
7558 if (dump_file)
7560 fprintf (dump_file,
7561 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7562 GET_MODE_NAME (mode),
7563 INSN_UID (insn), (unsigned long) address,
7564 -1 * (long)fix->backwards, (long)fix->forwards);
7565 arm_print_value (dump_file, fix->value);
7566 fprintf (dump_file, "\n");
7569 /* Add it to the chain of fixes. */
7570 fix->next = NULL;
7572 if (minipool_fix_head != NULL)
7573 minipool_fix_tail->next = fix;
7574 else
7575 minipool_fix_head = fix;
7577 minipool_fix_tail = fix;
7580 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7581 Returns the number of insns needed, or 99 if we don't know how to
7582 do it. */
7583 int
7584 arm_const_double_inline_cost (rtx val)
7586 rtx lowpart, highpart;
7587 enum machine_mode mode;
7589 mode = GET_MODE (val);
7591 if (mode == VOIDmode)
7592 mode = DImode;
7594 gcc_assert (GET_MODE_SIZE (mode) == 8);
7596 lowpart = gen_lowpart (SImode, val);
7597 highpart = gen_highpart_mode (SImode, mode, val);
7599 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7600 gcc_assert (GET_CODE (highpart) == CONST_INT);
7602 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7603 NULL_RTX, NULL_RTX, 0, 0)
7604 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7605 NULL_RTX, NULL_RTX, 0, 0));
7608 /* Return true if it is worthwhile to split a 64-bit constant into two
7609 32-bit operations. This is the case if optimizing for size, or
7610 if we have load delay slots, or if one 32-bit part can be done with
7611 a single data operation. */
7612 bool
7613 arm_const_double_by_parts (rtx val)
7615 enum machine_mode mode = GET_MODE (val);
7616 rtx part;
7618 if (optimize_size || arm_ld_sched)
7619 return true;
7621 if (mode == VOIDmode)
7622 mode = DImode;
7624 part = gen_highpart_mode (SImode, mode, val);
7626 gcc_assert (GET_CODE (part) == CONST_INT);
7628 if (const_ok_for_arm (INTVAL (part))
7629 || const_ok_for_arm (~INTVAL (part)))
7630 return true;
7632 part = gen_lowpart (SImode, val);
7634 gcc_assert (GET_CODE (part) == CONST_INT);
7636 if (const_ok_for_arm (INTVAL (part))
7637 || const_ok_for_arm (~INTVAL (part)))
7638 return true;
7640 return false;
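/* Added stand-alone sketch of the const_ok_for_arm () test used above
   (the real routine appears earlier in this file): a 32-bit value is
   an encodable ARM immediate when it is an 8-bit constant rotated
   right by an even amount. */
#if 0
static int
arm_immediate_ok_sketch (unsigned int v)
{
  int rot;
  for (rot = 0; rot < 32; rot += 2)
    {
      /* Rotating V left by ROT undoes a right-rotation of the
         candidate 8-bit immediate; guard the ROT == 0 shift. */
      unsigned int r = rot ? ((v << rot) | (v >> (32 - rot))) : v;
      if (r <= 0xff)
        return 1;
    }
  return 0;
}
#endif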
7643 /* Scan INSN and note any of its operands that need fixing.
7644 If DO_PUSHES is false we do not actually push any of the fixups
7645 needed. The function returns TRUE if any fixups were needed/pushed.
7646 This is used by arm_memory_load_p() which needs to know about loads
7647 of constants that will be converted into minipool loads. */
7648 static bool
7649 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7651 bool result = false;
7652 int opno;
7654 extract_insn (insn);
7656 if (!constrain_operands (1))
7657 fatal_insn_not_found (insn);
7659 if (recog_data.n_alternatives == 0)
7660 return false;
7662 /* Fill in recog_op_alt with information about the constraints of
7663 this insn. */
7664 preprocess_constraints ();
7666 for (opno = 0; opno < recog_data.n_operands; opno++)
7668 /* Things we need to fix can only occur in inputs. */
7669 if (recog_data.operand_type[opno] != OP_IN)
7670 continue;
7672 /* If this alternative is a memory reference, then any mention
7673 of constants in this alternative is really to fool reload
7674 into allowing us to accept one there. We need to fix them up
7675 now so that we output the right code. */
7676 if (recog_op_alt[opno][which_alternative].memory_ok)
7678 rtx op = recog_data.operand[opno];
7680 if (CONSTANT_P (op))
7682 if (do_pushes)
7683 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7684 recog_data.operand_mode[opno], op);
7685 result = true;
7687 else if (GET_CODE (op) == MEM
7688 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7689 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7691 if (do_pushes)
7693 rtx cop = avoid_constant_pool_reference (op);
7695 /* Casting the address of something to a mode narrower
7696 than a word can cause avoid_constant_pool_reference()
7697 to return the pool reference itself. That's no good to
7698 us here. Let's just hope that we can use the
7699 constant pool value directly. */
7700 if (op == cop)
7701 cop = get_pool_constant (XEXP (op, 0));
7703 push_minipool_fix (insn, address,
7704 recog_data.operand_loc[opno],
7705 recog_data.operand_mode[opno], cop);
7708 result = true;
7713 return result;
7716 /* GCC puts the pool in the wrong place for ARM, since we can only
7717 load addresses a limited distance around the pc. We do some
7718 special munging to move the constant pool values to the correct
7719 point in the code. */
7720 static void
7721 arm_reorg (void)
7723 rtx insn;
7724 HOST_WIDE_INT address = 0;
7725 Mfix * fix;
7727 minipool_fix_head = minipool_fix_tail = NULL;
7729 /* The first insn must always be a note, or the code below won't
7730 scan it properly. */
7731 insn = get_insns ();
7732 gcc_assert (GET_CODE (insn) == NOTE);
7734 /* Scan all the insns and record the operands that will need fixing. */
7735 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7737 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7738 && (arm_cirrus_insn_p (insn)
7739 || GET_CODE (insn) == JUMP_INSN
7740 || arm_memory_load_p (insn)))
7741 cirrus_reorg (insn);
7743 if (GET_CODE (insn) == BARRIER)
7744 push_minipool_barrier (insn, address);
7745 else if (INSN_P (insn))
7747 rtx table;
7749 note_invalid_constants (insn, address, true);
7750 address += get_attr_length (insn);
7752 /* If the insn is a vector jump, add the size of the table
7753 and skip the table. */
7754 if ((table = is_jump_table (insn)) != NULL)
7756 address += get_jump_table_size (table);
7757 insn = table;
7762 fix = minipool_fix_head;
7764 /* Now scan the fixups and perform the required changes. */
7765 while (fix)
7767 Mfix * ftmp;
7768 Mfix * fdel;
7769 Mfix * last_added_fix;
7770 Mfix * last_barrier = NULL;
7771 Mfix * this_fix;
7773 /* Skip any further barriers before the next fix. */
7774 while (fix && GET_CODE (fix->insn) == BARRIER)
7775 fix = fix->next;
7777 /* No more fixes. */
7778 if (fix == NULL)
7779 break;
7781 last_added_fix = NULL;
7783 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7785 if (GET_CODE (ftmp->insn) == BARRIER)
7787 if (ftmp->address >= minipool_vector_head->max_address)
7788 break;
7790 last_barrier = ftmp;
7792 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7793 break;
7795 last_added_fix = ftmp; /* Keep track of the last fix added. */
7798 /* If we found a barrier, drop back to that; any fixes that we
7799 could have reached but come after the barrier will now go in
7800 the next mini-pool. */
7801 if (last_barrier != NULL)
7803 /* Reduce the refcount for those fixes that won't go into this
7804 pool after all. */
7805 for (fdel = last_barrier->next;
7806 fdel && fdel != ftmp;
7807 fdel = fdel->next)
7809 fdel->minipool->refcount--;
7810 fdel->minipool = NULL;
7813 ftmp = last_barrier;
7815 else
7817 /* ftmp is the first fix that we can't fit into this pool and
7818 there are no natural barriers that we could use. Insert a
7819 new barrier in the code somewhere between the previous
7820 fix and this one, and arrange to jump around it. */
7821 HOST_WIDE_INT max_address;
7823 /* The last item on the list of fixes must be a barrier, so
7824 we can never run off the end of the list of fixes without
7825 last_barrier being set. */
7826 gcc_assert (ftmp);
7828 max_address = minipool_vector_head->max_address;
7829 /* Check that there isn't another fix that is in range that
7830 we couldn't fit into this pool because the pool was
7831 already too large: we need to put the pool before such an
7832 instruction. */
7833 if (ftmp->address < max_address)
7834 max_address = ftmp->address;
7836 last_barrier = create_fix_barrier (last_added_fix, max_address);
7839 assign_minipool_offsets (last_barrier);
7841 while (ftmp)
7843 if (GET_CODE (ftmp->insn) != BARRIER
7844 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7845 == NULL))
7846 break;
7848 ftmp = ftmp->next;
7851 /* Scan over the fixes we have identified for this pool, fixing them
7852 up and adding the constants to the pool itself. */
7853 for (this_fix = fix; this_fix && ftmp != this_fix;
7854 this_fix = this_fix->next)
7855 if (GET_CODE (this_fix->insn) != BARRIER)
7857 rtx addr
7858 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7859 minipool_vector_label),
7860 this_fix->minipool->offset);
7861 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7864 dump_minipool (last_barrier->insn);
7865 fix = ftmp;
7868 /* From now on we must synthesize any constants that we can't handle
7869 directly. This can happen if the RTL gets split during final
7870 instruction generation. */
7871 after_arm_reorg = 1;
7873 /* Free the minipool memory. */
7874 obstack_free (&minipool_obstack, minipool_startobj);
7877 /* Routines to output assembly language. */
7879 /* If the rtx is one of the eight valid FP immediate constants, return
7880 the string representation of the number. In this way we can ensure that
7881 valid double constants are generated even when cross compiling. */
7882 const char *
7883 fp_immediate_constant (rtx x)
7885 REAL_VALUE_TYPE r;
7886 int i;
7888 if (!fp_consts_inited)
7889 init_fp_table ();
7891 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7892 for (i = 0; i < 8; i++)
7893 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7894 return strings_fp[i];
7896 gcc_unreachable ();
7899 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7900 static const char *
7901 fp_const_from_val (REAL_VALUE_TYPE *r)
7903 int i;
7905 if (!fp_consts_inited)
7906 init_fp_table ();
7908 for (i = 0; i < 8; i++)
7909 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7910 return strings_fp[i];
7912 gcc_unreachable ();
7915 /* Output the operands of an LDM/STM instruction to STREAM.
7916 MASK is the ARM register set mask of which only bits 0-15 are important.
7917 REG is the base register, either the frame pointer or the stack pointer;
7918 INSTR is the possibly suffixed load or store instruction. */
7920 static void
7921 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7922 unsigned long mask)
7924 unsigned i;
7925 bool not_first = FALSE;
7927 fputc ('\t', stream);
7928 asm_fprintf (stream, instr, reg);
7929 fputs (", {", stream);
7931 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7932 if (mask & (1 << i))
7934 if (not_first)
7935 fprintf (stream, ", ");
7937 asm_fprintf (stream, "%r", i);
7938 not_first = TRUE;
7941 fprintf (stream, "}\n");
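/* Example (added note, hypothetical invocation): a call such as
   print_multi_reg (stream, "stmfd\t%r!", SP_REGNUM, 0x4030) writes
   "stmfd sp!, {r4, r5, lr}", since bits 4, 5 and 14 are set in the
   mask. */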
7945 /* Output a FLDMX instruction to STREAM.
7946 BASE is the register containing the address.
7947 REG and COUNT specify the register range.
7948 Extra registers may be added to avoid hardware bugs. */
7950 static void
7951 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7953 int i;
7955 /* Work around the ARM10 VFPr1 bug. */
7956 if (count == 2 && !arm_arch6)
7958 if (reg == 15)
7959 reg--;
7960 count++;
7963 fputc ('\t', stream);
7964 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7966 for (i = reg; i < reg + count; i++)
7968 if (i > reg)
7969 fputs (", ", stream);
7970 asm_fprintf (stream, "d%d", i);
7972 fputs ("}\n", stream);
7977 /* Output the assembly for a store multiple. */
7979 const char *
7980 vfp_output_fstmx (rtx * operands)
7982 char pattern[100];
7983 int p;
7984 int base;
7985 int i;
7987 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7988 p = strlen (pattern);
7990 gcc_assert (GET_CODE (operands[1]) == REG);
7992 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7993 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7995 p += sprintf (&pattern[p], ", d%d", base + i);
7997 strcpy (&pattern[p], "}");
7999 output_asm_insn (pattern, operands);
8000 return "";
8004 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
8005 number of bytes pushed. */
8007 static int
8008 vfp_emit_fstmx (int base_reg, int count)
8010 rtx par;
8011 rtx dwarf;
8012 rtx tmp, reg;
8013 int i;
8015 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8016 register pairs are stored by a store multiple insn. We avoid this
8017 by pushing an extra pair. */
8018 if (count == 2 && !arm_arch6)
8020 if (base_reg == LAST_VFP_REGNUM - 3)
8021 base_reg -= 2;
8022 count++;
8025 /* ??? The frame layout is implementation defined. We describe
8026 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8027 We really need some way of representing the whole block so that the
8028 unwinder can figure it out at runtime. */
8029 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8030 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8032 reg = gen_rtx_REG (DFmode, base_reg);
8033 base_reg += 2;
8035 XVECEXP (par, 0, 0)
8036 = gen_rtx_SET (VOIDmode,
8037 gen_rtx_MEM (BLKmode,
8038 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8039 gen_rtx_UNSPEC (BLKmode,
8040 gen_rtvec (1, reg),
8041 UNSPEC_PUSH_MULT));
8043 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8044 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8045 GEN_INT (-(count * 8 + 4))));
8046 RTX_FRAME_RELATED_P (tmp) = 1;
8047 XVECEXP (dwarf, 0, 0) = tmp;
8049 tmp = gen_rtx_SET (VOIDmode,
8050 gen_rtx_MEM (DFmode, stack_pointer_rtx),
8051 reg);
8052 RTX_FRAME_RELATED_P (tmp) = 1;
8053 XVECEXP (dwarf, 0, 1) = tmp;
8055 for (i = 1; i < count; i++)
8057 reg = gen_rtx_REG (DFmode, base_reg);
8058 base_reg += 2;
8059 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8061 tmp = gen_rtx_SET (VOIDmode,
8062 gen_rtx_MEM (DFmode,
8063 gen_rtx_PLUS (SImode,
8064 stack_pointer_rtx,
8065 GEN_INT (i * 8))),
8066 reg);
8067 RTX_FRAME_RELATED_P (tmp) = 1;
8068 XVECEXP (dwarf, 0, i + 1) = tmp;
8071 par = emit_insn (par);
8072 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8073 REG_NOTES (par));
8074 RTX_FRAME_RELATED_P (par) = 1;
8076 return count * 8 + 4;
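/* A minimal standalone model (not part of the original file, helper name
   made up, kept under #if 0 so it stays out of the build) of the size
   bookkeeping above: each saved pair takes one 8-byte FSTMX slot, plus a
   single pad word for the "standard format 1" layout, and the ARM10
   VFPr1 erratum widens a two-pair block to three.  */
#if 0
static int
fstmx_bytes_pushed (int count, int is_arm_arch6)
{
  if (count == 2 && !is_arm_arch6)
    count++;			/* Erratum workaround: push an extra pair.  */
  return count * 8 + 4;		/* Register pairs plus the pad word.  */
}
#endif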
8080 /* Output a 'call' insn. */
8081 const char *
8082 output_call (rtx *operands)
8084 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8086 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8087 if (REGNO (operands[0]) == LR_REGNUM)
8089 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8090 output_asm_insn ("mov%?\t%0, %|lr", operands);
8093 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8095 if (TARGET_INTERWORK || arm_arch4t)
8096 output_asm_insn ("bx%?\t%0", operands);
8097 else
8098 output_asm_insn ("mov%?\t%|pc, %0", operands);
8100 return "";
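/* Illustrative example (not part of the original file): for a call
   through r2 on an interworking-capable pre-v5 target, the two
   instructions emitted above form the classic manual link sequence

	mov	lr, pc
	bx	r2

   whereas a pre-v4t target falls back to "mov pc, r2".  */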
8103 /* Output a 'call' insn that is a reference in memory. */
8104 const char *
8105 output_call_mem (rtx *operands)
8107 if (TARGET_INTERWORK && !arm_arch5)
8109 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8110 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8111 output_asm_insn ("bx%?\t%|ip", operands);
8113 else if (regno_use_in (LR_REGNUM, operands[0]))
8115 /* LR is used in the memory address. We load the address in the
8116 first instruction. It's safe to use IP as the target of the
8117 load since the call will kill it anyway. */
8118 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8119 if (arm_arch5)
8120 output_asm_insn ("blx%?\t%|ip", operands);
8121 else
8123 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8124 if (arm_arch4t)
8125 output_asm_insn ("bx%?\t%|ip", operands);
8126 else
8127 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8130 else
8132 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8133 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8136 return "";
8140 /* Output a move from arm registers to an fpa register.
8141 OPERANDS[0] is an fpa register.
8142 OPERANDS[1] is the first register of an arm register pair. */
8143 const char *
8144 output_mov_long_double_fpa_from_arm (rtx *operands)
8146 int arm_reg0 = REGNO (operands[1]);
8147 rtx ops[3];
8149 gcc_assert (arm_reg0 != IP_REGNUM);
8151 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8152 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8153 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8155 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8156 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8158 return "";
8161 /* Output a move from an fpa register to arm registers.
8162 OPERANDS[0] is the first register of an arm register pair.
8163 OPERANDS[1] is an fpa register. */
8164 const char *
8165 output_mov_long_double_arm_from_fpa (rtx *operands)
8167 int arm_reg0 = REGNO (operands[0]);
8168 rtx ops[3];
8170 gcc_assert (arm_reg0 != IP_REGNUM);
8172 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8173 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8174 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8176 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8177 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8178 return "";
8181 /* Output a long double move from arm registers to arm registers.
8182 OPERANDS[0] is the destination.
8183 OPERANDS[1] is the source. */
8184 const char *
8185 output_mov_long_double_arm_from_arm (rtx *operands)
8187 /* We have to be careful here because the two might overlap. */
8188 int dest_start = REGNO (operands[0]);
8189 int src_start = REGNO (operands[1]);
8190 rtx ops[2];
8191 int i;
8193 if (dest_start < src_start)
8195 for (i = 0; i < 3; i++)
8197 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8198 ops[1] = gen_rtx_REG (SImode, src_start + i);
8199 output_asm_insn ("mov%?\t%0, %1", ops);
8202 else
8204 for (i = 2; i >= 0; i--)
8206 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8207 ops[1] = gen_rtx_REG (SImode, src_start + i);
8208 output_asm_insn ("mov%?\t%0, %1", ops);
8212 return "";
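/* Illustrative example (not part of the original file): copying
   {r1, r2, r3} into {r2, r3, r4} must use the descending loop above
   (mov r4, r3 first), since an ascending copy would clobber r2 and r3
   before they are read; copying {r2, r3, r4} into {r1, r2, r3} is the
   dest_start < src_start case and safely copies upwards.  */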
8216 /* Output a move from arm registers to an fpa register.
8217 OPERANDS[0] is an fpa register.
8218 OPERANDS[1] is the first register of an arm register pair. */
8219 const char *
8220 output_mov_double_fpa_from_arm (rtx *operands)
8222 int arm_reg0 = REGNO (operands[1]);
8223 rtx ops[2];
8225 gcc_assert (arm_reg0 != IP_REGNUM);
8227 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8228 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8229 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8230 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8231 return "";
8234 /* Output a move from an fpa register to arm registers.
8235 OPERANDS[0] is the first register of an arm register pair.
8236 OPERANDS[1] is an fpa register. */
8237 const char *
8238 output_mov_double_arm_from_fpa (rtx *operands)
8240 int arm_reg0 = REGNO (operands[0]);
8241 rtx ops[2];
8243 gcc_assert (arm_reg0 != IP_REGNUM);
8245 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8246 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8247 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8248 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8249 return "";
8252 /* Output a move between double words.
8253 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8254 or MEM<-REG and all MEMs must be offsettable addresses. */
8255 const char *
8256 output_move_double (rtx *operands)
8258 enum rtx_code code0 = GET_CODE (operands[0]);
8259 enum rtx_code code1 = GET_CODE (operands[1]);
8260 rtx otherops[3];
8262 if (code0 == REG)
8264 int reg0 = REGNO (operands[0]);
8266 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8268 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8270 switch (GET_CODE (XEXP (operands[1], 0)))
8272 case REG:
8273 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8274 break;
8276 case PRE_INC:
8277 gcc_assert (TARGET_LDRD);
8278 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8279 break;
8281 case PRE_DEC:
8282 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8283 break;
8285 case POST_INC:
8286 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8287 break;
8289 case POST_DEC:
8290 gcc_assert (TARGET_LDRD);
8291 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8292 break;
8294 case PRE_MODIFY:
8295 case POST_MODIFY:
8296 otherops[0] = operands[0];
8297 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8298 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8300 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8302 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8304 /* Registers overlap so split out the increment. */
8305 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8306 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8308 else
8309 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8311 else
8313 /* We only allow constant increments, so this is safe. */
8314 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8316 break;
8318 case LABEL_REF:
8319 case CONST:
8320 output_asm_insn ("adr%?\t%0, %1", operands);
8321 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8322 break;
8324 default:
8325 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8326 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8328 otherops[0] = operands[0];
8329 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8330 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8332 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8334 if (GET_CODE (otherops[2]) == CONST_INT)
8336 switch ((int) INTVAL (otherops[2]))
8338 case -8:
8339 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8340 return "";
8341 case -4:
8342 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8343 return "";
8344 case 4:
8345 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8346 return "";
8349 if (TARGET_LDRD
8350 && (GET_CODE (otherops[2]) == REG
8351 || (GET_CODE (otherops[2]) == CONST_INT
8352 && INTVAL (otherops[2]) > -256
8353 && INTVAL (otherops[2]) < 256)))
8355 if (reg_overlap_mentioned_p (otherops[0],
8356 otherops[2]))
8358 /* Swap base and index registers over to
8359 avoid a conflict. */
8360 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8361 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8364 /* If both registers conflict, it will usually
8365 have been fixed by a splitter. */
8366 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8368 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8369 output_asm_insn ("ldr%?d\t%0, [%1]",
8370 otherops);
8372 else
8373 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8374 return "";
8377 if (GET_CODE (otherops[2]) == CONST_INT)
8379 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8380 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8381 else
8382 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8384 else
8385 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8387 else
8388 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8390 return "ldm%?ia\t%0, %M0";
8392 else
8394 otherops[1] = adjust_address (operands[1], SImode, 4);
8395 /* Take care of overlapping base/data reg. */
8396 if (reg_mentioned_p (operands[0], operands[1]))
8398 output_asm_insn ("ldr%?\t%0, %1", otherops);
8399 output_asm_insn ("ldr%?\t%0, %1", operands);
8401 else
8403 output_asm_insn ("ldr%?\t%0, %1", operands);
8404 output_asm_insn ("ldr%?\t%0, %1", otherops);
8409 else
8411 /* Constraints should ensure this. */
8412 gcc_assert (code0 == MEM && code1 == REG);
8413 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8415 switch (GET_CODE (XEXP (operands[0], 0)))
8417 case REG:
8418 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8419 break;
8421 case PRE_INC:
8422 gcc_assert (TARGET_LDRD);
8423 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8424 break;
8426 case PRE_DEC:
8427 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8428 break;
8430 case POST_INC:
8431 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8432 break;
8434 case POST_DEC:
8435 gcc_assert (TARGET_LDRD);
8436 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8437 break;
8439 case PRE_MODIFY:
8440 case POST_MODIFY:
8441 otherops[0] = operands[1];
8442 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8443 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8445 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8446 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8447 else
8448 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8449 break;
8451 case PLUS:
8452 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8453 if (GET_CODE (otherops[2]) == CONST_INT)
8455 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8457 case -8:
8458 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8459 return "";
8461 case -4:
8462 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8463 return "";
8465 case 4:
8466 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8467 return "";
8470 if (TARGET_LDRD
8471 && (GET_CODE (otherops[2]) == REG
8472 || (GET_CODE (otherops[2]) == CONST_INT
8473 && INTVAL (otherops[2]) > -256
8474 && INTVAL (otherops[2]) < 256)))
8476 otherops[0] = operands[1];
8477 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8478 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8479 return "";
8481 /* Fall through */
8483 default:
8484 otherops[0] = adjust_address (operands[0], SImode, 4);
8485 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8486 output_asm_insn ("str%?\t%1, %0", operands);
8487 output_asm_insn ("str%?\t%1, %0", otherops);
8491 return "";
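/* Illustrative example (not part of the original file): a DImode load
   into r0/r1 from a plain register address [r2] takes the REG case
   above and emits "ldmia r2, {r0, r1}"; with the address [r2, #4] the
   constant-offset special case instead emits the single instruction
   "ldmib r2, {r0, r1}".  */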
8494 /* Output an ADD r, s, #n where n may be too big for one instruction.
8495 If adding zero to one register, output nothing. */
8496 const char *
8497 output_add_immediate (rtx *operands)
8499 HOST_WIDE_INT n = INTVAL (operands[2]);
8501 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8503 if (n < 0)
8504 output_multi_immediate (operands,
8505 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8506 -n);
8507 else
8508 output_multi_immediate (operands,
8509 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8513 return "";
8516 /* Output a multiple immediate operation.
8517 OPERANDS is the vector of operands referred to in the output patterns.
8518 INSTR1 is the output pattern to use for the first constant.
8519 INSTR2 is the output pattern to use for subsequent constants.
8520 IMMED_OP is the index of the constant slot in OPERANDS.
8521 N is the constant value. */
8522 static const char *
8523 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8524 int immed_op, HOST_WIDE_INT n)
8526 #if HOST_BITS_PER_WIDE_INT > 32
8527 n &= 0xffffffff;
8528 #endif
8530 if (n == 0)
8532 /* Quick and easy output. */
8533 operands[immed_op] = const0_rtx;
8534 output_asm_insn (instr1, operands);
8536 else
8538 int i;
8539 const char * instr = instr1;
8541 /* Note that n is never zero here (which would give no output). */
8542 for (i = 0; i < 32; i += 2)
8544 if (n & (3 << i))
8546 operands[immed_op] = GEN_INT (n & (255 << i));
8547 output_asm_insn (instr, operands);
8548 instr = instr2;
8549 i += 6;
8554 return "";
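/* A minimal standalone model (not part of the original file, helper name
   made up, kept under #if 0 so it stays out of the build) of the
   splitting loop above: N is carved into byte-sized chunks starting at
   even bit positions, each of which fits an ARM data-processing
   immediate and hence costs one instruction.  For example, 0x12340
   splits into 0x2340 and 0x10000.  */
#if 0
static int
split_soft_immediate (unsigned long n, unsigned long chunks[4])
{
  int i, num = 0;

  for (i = 0; i < 32; i += 2)
    if (n & (3UL << i))
      {
	chunks[num++] = n & (255UL << i);  /* One instruction's worth.  */
	i += 6;				   /* Step past this byte.  */
      }
  return num;
}
#endif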
8557 /* Return the appropriate ARM instruction for the operation code.
8558 The returned result should not be overwritten. OP is the rtx of the
8559 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8560 was shifted. */
8561 const char *
8562 arithmetic_instr (rtx op, int shift_first_arg)
8564 switch (GET_CODE (op))
8566 case PLUS:
8567 return "add";
8569 case MINUS:
8570 return shift_first_arg ? "rsb" : "sub";
8572 case IOR:
8573 return "orr";
8575 case XOR:
8576 return "eor";
8578 case AND:
8579 return "and";
8581 default:
8582 gcc_unreachable ();
8586 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8587 for the operation code. The returned result should not be overwritten.
8588 OP is the rtx code of the shift.
8589 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8590 constant amount of the shift otherwise. */
8591 static const char *
8592 shift_op (rtx op, HOST_WIDE_INT *amountp)
8594 const char * mnem;
8595 enum rtx_code code = GET_CODE (op);
8597 switch (GET_CODE (XEXP (op, 1)))
8599 case REG:
8600 case SUBREG:
8601 *amountp = -1;
8602 break;
8604 case CONST_INT:
8605 *amountp = INTVAL (XEXP (op, 1));
8606 break;
8608 default:
8609 gcc_unreachable ();
8612 switch (code)
8614 case ASHIFT:
8615 mnem = "asl";
8616 break;
8618 case ASHIFTRT:
8619 mnem = "asr";
8620 break;
8622 case LSHIFTRT:
8623 mnem = "lsr";
8624 break;
8626 case ROTATE:
8627 gcc_assert (*amountp != -1);
8628 *amountp = 32 - *amountp;
8630 /* Fall through. */
8632 case ROTATERT:
8633 mnem = "ror";
8634 break;
8636 case MULT:
8637 /* We never have to worry about the amount being other than a
8638 power of 2, since this case can never be reloaded from a reg. */
8639 gcc_assert (*amountp != -1);
8640 *amountp = int_log2 (*amountp);
8641 return "asl";
8643 default:
8644 gcc_unreachable ();
8647 if (*amountp != -1)
8649 /* This is not 100% correct, but follows from the desire to merge
8650 multiplication by a power of 2 with the recognizer for a
8651 shift. >=32 is not a valid shift for "asl", so we must try and
8652 output a shift that produces the correct arithmetical result.
8653 Using lsr #32 is identical except for the fact that the carry bit
8654 is not set correctly if we set the flags; but we never use the
8655 carry bit from such an operation, so we can ignore that. */
8656 if (code == ROTATERT)
8657 /* Rotate is just modulo 32. */
8658 *amountp &= 31;
8659 else if (*amountp != (*amountp & 31))
8661 if (code == ASHIFT)
8662 mnem = "lsr";
8663 *amountp = 32;
8666 /* Shifts of 0 are no-ops. */
8667 if (*amountp == 0)
8668 return NULL;
8671 return mnem;
8674 /* Obtain the shift count from POWER, which must be a power of two. */
8676 static HOST_WIDE_INT
8677 int_log2 (HOST_WIDE_INT power)
8679 HOST_WIDE_INT shift = 0;
8681 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8683 gcc_assert (shift <= 31);
8684 shift++;
8687 return shift;
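/* For example (illustrative): int_log2 (8) returns 3, which is how the
   MULT case in shift_op above rewrites a multiplication by 8 as
   "asl #3".  */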
8690 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8691 because /bin/as is horribly restrictive. The judgement about
8692 whether or not each character is 'printable' (and can be output as
8693 is) or not (and must be printed with an octal escape) must be made
8694 with reference to the *host* character set -- the situation is
8695 similar to that discussed in the comments above pp_c_char in
8696 c-pretty-print.c. */
8698 #define MAX_ASCII_LEN 51
8700 void
8701 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8703 int i;
8704 int len_so_far = 0;
8706 fputs ("\t.ascii\t\"", stream);
8708 for (i = 0; i < len; i++)
8710 int c = p[i];
8712 if (len_so_far >= MAX_ASCII_LEN)
8714 fputs ("\"\n\t.ascii\t\"", stream);
8715 len_so_far = 0;
8718 if (ISPRINT (c))
8720 if (c == '\\' || c == '\"')
8722 putc ('\\', stream);
8723 len_so_far++;
8725 putc (c, stream);
8726 len_so_far++;
8728 else
8730 fprintf (stream, "\\%03o", c);
8731 len_so_far += 4;
8735 fputs ("\"\n", stream);
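/* Illustrative example (not part of the original file): the three input
   bytes 'h', 'i', '\n' come out as

	.ascii	"hi\012"

   since the newline is not printable and gets an octal escape, while a
   literal '"' or '\' would be emitted with a backslash prefix.  */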
8738 /* Compute the register save mask for registers 0 through 12
8739 inclusive. This code is used by arm_compute_save_reg_mask. */
8741 static unsigned long
8742 arm_compute_save_reg0_reg12_mask (void)
8744 unsigned long func_type = arm_current_func_type ();
8745 unsigned long save_reg_mask = 0;
8746 unsigned int reg;
8748 if (IS_INTERRUPT (func_type))
8750 unsigned int max_reg;
8751 /* Interrupt functions must not corrupt any registers,
8752 even call clobbered ones. If this is a leaf function
8753 we can just examine the registers used by the RTL, but
8754 otherwise we have to assume that whatever function is
8755 called might clobber anything, and so we have to save
8756 all the call-clobbered registers as well. */
8757 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8758 /* FIQ handlers have registers r8 - r12 banked, so
8759 we only need to check r0 - r7. Normal ISRs only
8760 bank r14 and r15, so we must check up to r12.
8761 r13 is the stack pointer which is always preserved,
8762 so we do not need to consider it here. */
8763 max_reg = 7;
8764 else
8765 max_reg = 12;
8767 for (reg = 0; reg <= max_reg; reg++)
8768 if (regs_ever_live[reg]
8769 || (! current_function_is_leaf && call_used_regs [reg]))
8770 save_reg_mask |= (1 << reg);
8772 /* Also save the pic base register if necessary. */
8773 if (flag_pic
8774 && !TARGET_SINGLE_PIC_BASE
8775 && current_function_uses_pic_offset_table)
8776 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8778 else
8780 /* In the normal case we only need to save those registers
8781 which are call saved and which are used by this function. */
8782 for (reg = 0; reg <= 10; reg++)
8783 if (regs_ever_live[reg] && ! call_used_regs [reg])
8784 save_reg_mask |= (1 << reg);
8786 /* Handle the frame pointer as a special case. */
8787 if (! TARGET_APCS_FRAME
8788 && ! frame_pointer_needed
8789 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8790 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8791 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8793 /* If we aren't loading the PIC register,
8794 don't stack it even though it may be live. */
8795 if (flag_pic
8796 && !TARGET_SINGLE_PIC_BASE
8797 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8798 || current_function_uses_pic_offset_table))
8799 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8802 /* Save registers so the exception handler can modify them. */
8803 if (current_function_calls_eh_return)
8805 unsigned int i;
8807 for (i = 0; ; i++)
8809 reg = EH_RETURN_DATA_REGNO (i);
8810 if (reg == INVALID_REGNUM)
8811 break;
8812 save_reg_mask |= 1 << reg;
8816 return save_reg_mask;
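/* Illustrative example (not part of the original file, PIC left aside):
   a leaf FIQ handler that uses only r0 and r3 takes the max_reg = 7
   path above and gets the mask (1 << 0) | (1 << 3) = 0x9; a non-leaf
   IRQ handler would additionally collect every call-clobbered register
   up to r12.  */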
8819 /* Compute a bit mask of which registers need to be
8820 saved on the stack for the current function. */
8822 static unsigned long
8823 arm_compute_save_reg_mask (void)
8825 unsigned int save_reg_mask = 0;
8826 unsigned long func_type = arm_current_func_type ();
8828 if (IS_NAKED (func_type))
8829 /* This should never really happen. */
8830 return 0;
8832 /* If we are creating a stack frame, then we must save the frame pointer,
8833 IP (which will hold the old stack pointer), LR and the PC. */
8834 if (frame_pointer_needed)
8835 save_reg_mask |=
8836 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8837 | (1 << IP_REGNUM)
8838 | (1 << LR_REGNUM)
8839 | (1 << PC_REGNUM);
8841 /* Volatile functions do not return, so there
8842 is no need to save any other registers. */
8843 if (IS_VOLATILE (func_type))
8844 return save_reg_mask;
8846 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8848 /* Decide if we need to save the link register.
8849 Interrupt routines have their own banked link register,
8850 so they never need to save it.
8851 Otherwise if we do not use the link register we do not need to save
8852 it. If we are pushing other registers onto the stack however, we
8853 can save an instruction in the epilogue by pushing the link register
8854 now and then popping it back into the PC. This incurs extra memory
8855 accesses though, so we only do it when optimizing for size, and only
8856 if we know that we will not need a fancy return sequence. */
8857 if (regs_ever_live [LR_REGNUM]
8858 || (save_reg_mask
8859 && optimize_size
8860 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8861 && !current_function_calls_eh_return))
8862 save_reg_mask |= 1 << LR_REGNUM;
8864 if (cfun->machine->lr_save_eliminated)
8865 save_reg_mask &= ~ (1 << LR_REGNUM);
8867 if (TARGET_REALLY_IWMMXT
8868 && ((bit_count (save_reg_mask)
8869 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8871 unsigned int reg;
8873 /* The total number of registers that are going to be pushed
8874 onto the stack is odd. We need to ensure that the stack
8875 is 64-bit aligned before we start to save iWMMXt registers,
8876 and also before we start to create locals. (A local variable
8877 might be a double or long long which we will load/store using
8878 an iWMMXt instruction). Therefore we need to push another
8879 ARM register, so that the stack will be 64-bit aligned. We
8880 try to avoid using the arg registers (r0 - r3) as they might be
8881 used to pass values in a tail call. */
8882 for (reg = 4; reg <= 12; reg++)
8883 if ((save_reg_mask & (1 << reg)) == 0)
8884 break;
8886 if (reg <= 12)
8887 save_reg_mask |= (1 << reg);
8888 else
8890 cfun->machine->sibcall_blocked = 1;
8891 save_reg_mask |= (1 << 3);
8895 return save_reg_mask;
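/* A minimal standalone model (not part of the original file, helper name
   made up, kept under #if 0 so it stays out of the build) of the
   alignment padding above: when the number of pushed words is odd, one
   more core register is saved so the stack stays 64-bit aligned for the
   iWMMXt saves.  r0 - r3 are avoided because a tail call may still need
   them; only when r4 - r12 are all taken is r3 reused, which is why
   sibcalls are blocked in that case.  */
#if 0
static int
pick_alignment_pad_reg (unsigned long save_reg_mask)
{
  int reg;

  for (reg = 4; reg <= 12; reg++)
    if ((save_reg_mask & (1 << reg)) == 0)
      return reg;		/* A free non-argument register.  */
  return 3;			/* Last resort; blocks sibcalls.  */
}
#endif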
8899 /* Compute a bit mask of which registers need to be
8900 saved on the stack for the current function. */
8901 static unsigned long
8902 thumb_compute_save_reg_mask (void)
8904 unsigned long mask;
8905 unsigned reg;
8907 mask = 0;
8908 for (reg = 0; reg < 12; reg ++)
8909 if (regs_ever_live[reg] && !call_used_regs[reg])
8910 mask |= 1 << reg;
8912 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8913 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8915 if (TARGET_SINGLE_PIC_BASE)
8916 mask &= ~(1 << arm_pic_register);
8918 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8919 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8920 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8922 /* LR will also be pushed if any lo regs are pushed. */
8923 if (mask & 0xff || thumb_force_lr_save ())
8924 mask |= (1 << LR_REGNUM);
8926 /* Make sure we have a low work register if we need one.
8927 We will need one if we are going to push a high register,
8928 but we are not currently intending to push a low register. */
8929 if ((mask & 0xff) == 0
8930 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8932 /* Use thumb_find_work_register to choose which register
8933 we will use. If the register is live then we will
8934 have to push it. Use LAST_LO_REGNUM as our fallback
8935 choice for the register to select. */
8936 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8938 if (! call_used_regs[reg])
8939 mask |= 1 << reg;
8942 return mask;
8946 /* Return the number of bytes required to save VFP registers. */
8947 static int
8948 arm_get_vfp_saved_size (void)
8950 unsigned int regno;
8951 int count;
8952 int saved;
8954 saved = 0;
8955 /* Space for saved VFP registers. */
8956 if (TARGET_HARD_FLOAT && TARGET_VFP)
8958 count = 0;
8959 for (regno = FIRST_VFP_REGNUM;
8960 regno < LAST_VFP_REGNUM;
8961 regno += 2)
8963 if ((!regs_ever_live[regno] || call_used_regs[regno])
8964 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8966 if (count > 0)
8968 /* Workaround ARM10 VFPr1 bug. */
8969 if (count == 2 && !arm_arch6)
8970 count++;
8971 saved += count * 8 + 4;
8973 count = 0;
8975 else
8976 count++;
8978 if (count > 0)
8980 if (count == 2 && !arm_arch6)
8981 count++;
8982 saved += count * 8 + 4;
8985 return saved;
8989 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8990 everything bar the final return instruction. */
8991 const char *
8992 output_return_instruction (rtx operand, int really_return, int reverse)
8994 char conditional[10];
8995 char instr[100];
8996 unsigned reg;
8997 unsigned long live_regs_mask;
8998 unsigned long func_type;
8999 arm_stack_offsets *offsets;
9001 func_type = arm_current_func_type ();
9003 if (IS_NAKED (func_type))
9004 return "";
9006 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9008 /* If this function was declared non-returning, and we have
9009 found a tail call, then we have to trust that the called
9010 function won't return. */
9011 if (really_return)
9013 rtx ops[2];
9015 /* Otherwise, trap an attempted return by aborting. */
9016 ops[0] = operand;
9017 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9018 : "abort");
9019 assemble_external_libcall (ops[1]);
9020 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9023 return "";
9026 gcc_assert (!current_function_calls_alloca || really_return);
9028 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9030 return_used_this_function = 1;
9032 live_regs_mask = arm_compute_save_reg_mask ();
9034 if (live_regs_mask)
9036 const char * return_reg;
9038 /* If we do not have any special requirements for function exit
9039 (e.g. interworking, or ISR) then we can load the return address
9040 directly into the PC. Otherwise we must load it into LR. */
9041 if (really_return
9042 && ! TARGET_INTERWORK)
9043 return_reg = reg_names[PC_REGNUM];
9044 else
9045 return_reg = reg_names[LR_REGNUM];
9047 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9049 /* There are three possible reasons for the IP register
9050 being saved. 1) a stack frame was created, in which case
9051 IP contains the old stack pointer, or 2) an ISR routine
9052 corrupted it, or 3) it was saved to align the stack on
9053 iWMMXt. In case 1, restore IP into SP, otherwise just
9054 restore IP. */
9055 if (frame_pointer_needed)
9057 live_regs_mask &= ~ (1 << IP_REGNUM);
9058 live_regs_mask |= (1 << SP_REGNUM);
9060 else
9061 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9064 /* On some ARM architectures it is faster to use LDR rather than
9065 LDM to load a single register. On other architectures, the
9066 cost is the same. In 26 bit mode, or for exception handlers,
9067 we have to use LDM to load the PC so that the CPSR is also
9068 restored. */
9069 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9070 if (live_regs_mask == (1U << reg))
9071 break;
9073 if (reg <= LAST_ARM_REGNUM
9074 && (reg != LR_REGNUM
9075 || ! really_return
9076 || ! IS_INTERRUPT (func_type)))
9078 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9079 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9081 else
9083 char *p;
9084 int first = 1;
9086 /* Generate the load multiple instruction to restore the
9087 registers. Note we can get here, even if
9088 frame_pointer_needed is true, but only if sp already
9089 points to the base of the saved core registers. */
9090 if (live_regs_mask & (1 << SP_REGNUM))
9092 unsigned HOST_WIDE_INT stack_adjust;
9094 offsets = arm_get_frame_offsets ();
9095 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9096 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9098 if (stack_adjust && arm_arch5)
9099 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9100 else
9102 /* If we can't use ldmib (SA110 bug),
9103 then try to pop r3 instead. */
9104 if (stack_adjust)
9105 live_regs_mask |= 1 << 3;
9106 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9109 else
9110 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9112 p = instr + strlen (instr);
9114 for (reg = 0; reg <= SP_REGNUM; reg++)
9115 if (live_regs_mask & (1 << reg))
9117 int l = strlen (reg_names[reg]);
9119 if (first)
9120 first = 0;
9121 else
9123 memcpy (p, ", ", 2);
9124 p += 2;
9127 memcpy (p, "%|", 2);
9128 memcpy (p + 2, reg_names[reg], l);
9129 p += l + 2;
9132 if (live_regs_mask & (1 << LR_REGNUM))
9134 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9135 /* If returning from an interrupt, restore the CPSR. */
9136 if (IS_INTERRUPT (func_type))
9137 strcat (p, "^");
9139 else
9140 strcpy (p, "}");
9143 output_asm_insn (instr, & operand);
9145 /* See if we need to generate an extra instruction to
9146 perform the actual function return. */
9147 if (really_return
9148 && func_type != ARM_FT_INTERWORKED
9149 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9151 /* The return has already been handled
9152 by loading the LR into the PC. */
9153 really_return = 0;
9157 if (really_return)
9159 switch ((int) ARM_FUNC_TYPE (func_type))
9161 case ARM_FT_ISR:
9162 case ARM_FT_FIQ:
9163 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9164 break;
9166 case ARM_FT_INTERWORKED:
9167 sprintf (instr, "bx%s\t%%|lr", conditional);
9168 break;
9170 case ARM_FT_EXCEPTION:
9171 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9172 break;
9174 default:
9175 /* Use bx if it's available. */
9176 if (arm_arch5 || arm_arch4t)
9177 sprintf (instr, "bx%s\t%%|lr", conditional);
9178 else
9179 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9180 break;
9183 output_asm_insn (instr, & operand);
9186 return "";
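/* Illustrative example (not part of the original file): a plain ARM
   function that saved r4, r5 and lr, with no interworking or other
   special exit needs, takes the live_regs_mask path above with
   return_reg = pc and emits the single instruction

	ldmfd	sp!, {r4, r5, pc}

   restoring the saved registers and returning in one go.  */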
9189 /* Write the function name into the code section, directly preceding
9190 the function prologue.
9192 Code will be output similar to this:
9194 .ascii "arm_poke_function_name", 0
9195 .align
9197 .word 0xff000000 + (t1 - t0)
9198 arm_poke_function_name
9199 mov ip, sp
9200 stmfd sp!, {fp, ip, lr, pc}
9201 sub fp, ip, #4
9203 When performing a stack backtrace, code can inspect the value
9204 of 'pc' stored at 'fp' + 0. If the trace function then looks
9205 at location pc - 12 and the top 8 bits are set, then we know
9206 that there is a function name embedded immediately preceding this
9207 location, and that its length is (pc[-3] & 0x00ffffff).
9209 We assume that pc is declared as a pointer to an unsigned long.
9211 It is of no benefit to output the function name if we are assembling
9212 a leaf function. These function types will not contain a stack
9213 backtrace structure, therefore it is not possible to determine the
9214 function name. */
9215 void
9216 arm_poke_function_name (FILE *stream, const char *name)
9218 unsigned long alignlength;
9219 unsigned long length;
9220 rtx x;
9222 length = strlen (name) + 1;
9223 alignlength = ROUND_UP_WORD (length);
9225 ASM_OUTPUT_ASCII (stream, name, length);
9226 ASM_OUTPUT_ALIGN (stream, 2);
9227 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9228 assemble_aligned_integer (UNITS_PER_WORD, x);
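/* A minimal standalone sketch (not part of the original file, helper
   name made up, kept under #if 0 so it stays out of the build) of the
   reader side described in the comment above, assuming PC holds the
   value saved at fp + 0 and that the padded name length fits in 24
   bits, as the 0xff000000 + alignlength encoding implies.  */
#if 0
static const char *
recover_poked_name (const unsigned long *pc)
{
  unsigned long marker = pc[-3];	/* The word at pc - 12.  */

  if ((marker & 0xff000000) != 0xff000000)
    return 0;				/* No embedded name here.  */
  /* The padded name sits immediately before the marker word.  */
  return (const char *) (pc - 3) - (marker & 0x00ffffff);
}
#endif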
9231 /* Place some comments into the assembler stream
9232 describing the current function. */
9233 static void
9234 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9236 unsigned long func_type;
9238 if (!TARGET_ARM)
9240 thumb_output_function_prologue (f, frame_size);
9241 return;
9244 /* Sanity check. */
9245 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9247 func_type = arm_current_func_type ();
9249 switch ((int) ARM_FUNC_TYPE (func_type))
9251 default:
9252 case ARM_FT_NORMAL:
9253 break;
9254 case ARM_FT_INTERWORKED:
9255 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9256 break;
9257 case ARM_FT_ISR:
9258 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9259 break;
9260 case ARM_FT_FIQ:
9261 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9262 break;
9263 case ARM_FT_EXCEPTION:
9264 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9265 break;
9268 if (IS_NAKED (func_type))
9269 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9271 if (IS_VOLATILE (func_type))
9272 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9274 if (IS_NESTED (func_type))
9275 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9277 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9278 current_function_args_size,
9279 current_function_pretend_args_size, frame_size);
9281 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9282 frame_pointer_needed,
9283 cfun->machine->uses_anonymous_args);
9285 if (cfun->machine->lr_save_eliminated)
9286 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9288 if (current_function_calls_eh_return)
9289 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9291 #ifdef AOF_ASSEMBLER
9292 if (flag_pic)
9293 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9294 #endif
9296 return_used_this_function = 0;
9299 const char *
9300 arm_output_epilogue (rtx sibling)
9302 int reg;
9303 unsigned long saved_regs_mask;
9304 unsigned long func_type;
9305 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9306 frame that is $fp + 4 for a non-variadic function. */
9307 int floats_offset = 0;
9308 rtx operands[3];
9309 FILE * f = asm_out_file;
9310 unsigned int lrm_count = 0;
9311 int really_return = (sibling == NULL);
9312 int start_reg;
9313 arm_stack_offsets *offsets;
9315 /* If we have already generated the return instruction
9316 then it is futile to generate anything else. */
9317 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9318 return "";
9320 func_type = arm_current_func_type ();
9322 if (IS_NAKED (func_type))
9323 /* Naked functions don't have epilogues. */
9324 return "";
9326 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9328 rtx op;
9330 /* A volatile function should never return. Call abort. */
9331 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9332 assemble_external_libcall (op);
9333 output_asm_insn ("bl\t%a0", &op);
9335 return "";
9338 /* If we are throwing an exception, then we really must be doing a
9339 return, so we can't tail-call. */
9340 gcc_assert (!current_function_calls_eh_return || really_return);
9342 offsets = arm_get_frame_offsets ();
9343 saved_regs_mask = arm_compute_save_reg_mask ();
9345 if (TARGET_IWMMXT)
9346 lrm_count = bit_count (saved_regs_mask);
9348 floats_offset = offsets->saved_args;
9349 /* Compute how far away the floats will be. */
9350 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9351 if (saved_regs_mask & (1 << reg))
9352 floats_offset += 4;
9354 if (frame_pointer_needed)
9356 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9357 int vfp_offset = offsets->frame;
9359 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9361 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9362 if (regs_ever_live[reg] && !call_used_regs[reg])
9364 floats_offset += 12;
9365 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9366 reg, FP_REGNUM, floats_offset - vfp_offset);
9369 else
9371 start_reg = LAST_FPA_REGNUM;
9373 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9375 if (regs_ever_live[reg] && !call_used_regs[reg])
9377 floats_offset += 12;
9379 /* We can't unstack more than four registers at once. */
9380 if (start_reg - reg == 3)
9382 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9383 reg, FP_REGNUM, floats_offset - vfp_offset);
9384 start_reg = reg - 1;
9387 else
9389 if (reg != start_reg)
9390 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9391 reg + 1, start_reg - reg,
9392 FP_REGNUM, floats_offset - vfp_offset);
9393 start_reg = reg - 1;
9397 /* Just in case the last register checked also needs unstacking. */
9398 if (reg != start_reg)
9399 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9400 reg + 1, start_reg - reg,
9401 FP_REGNUM, floats_offset - vfp_offset);
9404 if (TARGET_HARD_FLOAT && TARGET_VFP)
9406 int saved_size;
9408 /* The fldmx insn does not have base+offset addressing modes,
9409 so we use IP to hold the address. */
9410 saved_size = arm_get_vfp_saved_size ();
9412 if (saved_size > 0)
9414 floats_offset += saved_size;
9415 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9416 FP_REGNUM, floats_offset - vfp_offset);
9418 start_reg = FIRST_VFP_REGNUM;
9419 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9421 if ((!regs_ever_live[reg] || call_used_regs[reg])
9422 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9424 if (start_reg != reg)
9425 arm_output_fldmx (f, IP_REGNUM,
9426 (start_reg - FIRST_VFP_REGNUM) / 2,
9427 (reg - start_reg) / 2);
9428 start_reg = reg + 2;
9431 if (start_reg != reg)
9432 arm_output_fldmx (f, IP_REGNUM,
9433 (start_reg - FIRST_VFP_REGNUM) / 2,
9434 (reg - start_reg) / 2);
9437 if (TARGET_IWMMXT)
9439 /* The frame pointer is guaranteed to be non-double-word aligned.
9440 This is because it is set to (old_stack_pointer - 4) and the
9441 old_stack_pointer was double word aligned. Thus the offset to
9442 the iWMMXt registers to be loaded must also be non-double-word
9443 sized, so that the resultant address *is* double-word aligned.
9444 We can ignore floats_offset since that was already included in
9445 the live_regs_mask. */
9446 lrm_count += (lrm_count % 2 ? 2 : 1);
9448 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9449 if (regs_ever_live[reg] && !call_used_regs[reg])
9451 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9452 reg, FP_REGNUM, lrm_count * 4);
9453 lrm_count += 2;
9457 /* saved_regs_mask should contain the IP, which at the time of stack
9458 frame generation actually contains the old stack pointer. So a
9459 quick way to unwind the stack is just pop the IP register directly
9460 into the stack pointer. */
9461 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9462 saved_regs_mask &= ~ (1 << IP_REGNUM);
9463 saved_regs_mask |= (1 << SP_REGNUM);
9465 /* There are two registers left in saved_regs_mask - LR and PC. We
9466 only need to restore the LR register (the return address), but to
9467 save time we can load it directly into the PC, unless we need a
9468 special function exit sequence, or we are not really returning. */
9469 if (really_return
9470 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9471 && !current_function_calls_eh_return)
9472 /* Delete the LR from the register mask, so that the LR on
9473 the stack is loaded into the PC in the register mask. */
9474 saved_regs_mask &= ~ (1 << LR_REGNUM);
9475 else
9476 saved_regs_mask &= ~ (1 << PC_REGNUM);
9478 /* We must use SP as the base register, because SP is one of the
9479 registers being restored. If an interrupt or page fault
9480 happens in the ldm instruction, the SP might or might not
9481 have been restored. That would be bad, as then SP will no
9482 longer indicate the safe area of stack, and we can get stack
9483 corruption. Using SP as the base register means that it will
9484 be reset correctly to the original value, should an interrupt
9485 occur. If the stack pointer already points at the right
9486 place, then omit the subtraction. */
9487 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9488 || current_function_calls_alloca)
9489 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9490 4 * bit_count (saved_regs_mask));
9491 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9493 if (IS_INTERRUPT (func_type))
9494 /* Interrupt handlers will have pushed the
9495 IP onto the stack, so restore it now. */
9496 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9498 else
9500 /* Restore stack pointer if necessary. */
9501 if (offsets->outgoing_args != offsets->saved_regs)
9503 operands[0] = operands[1] = stack_pointer_rtx;
9504 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9505 output_add_immediate (operands);
9508 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9510 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9511 if (regs_ever_live[reg] && !call_used_regs[reg])
9512 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9513 reg, SP_REGNUM);
9515 else
9517 start_reg = FIRST_FPA_REGNUM;
9519 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9521 if (regs_ever_live[reg] && !call_used_regs[reg])
9523 if (reg - start_reg == 3)
9525 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9526 start_reg, SP_REGNUM);
9527 start_reg = reg + 1;
9530 else
9532 if (reg != start_reg)
9533 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9534 start_reg, reg - start_reg,
9535 SP_REGNUM);
9537 start_reg = reg + 1;
9541 /* Just in case the last register checked also needs unstacking. */
9542 if (reg != start_reg)
9543 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9544 start_reg, reg - start_reg, SP_REGNUM);
9547 if (TARGET_HARD_FLOAT && TARGET_VFP)
9549 start_reg = FIRST_VFP_REGNUM;
9550 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9552 if ((!regs_ever_live[reg] || call_used_regs[reg])
9553 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9555 if (start_reg != reg)
9556 arm_output_fldmx (f, SP_REGNUM,
9557 (start_reg - FIRST_VFP_REGNUM) / 2,
9558 (reg - start_reg) / 2);
9559 start_reg = reg + 2;
9562 if (start_reg != reg)
9563 arm_output_fldmx (f, SP_REGNUM,
9564 (start_reg - FIRST_VFP_REGNUM) / 2,
9565 (reg - start_reg) / 2);
9567 if (TARGET_IWMMXT)
9568 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9569 if (regs_ever_live[reg] && !call_used_regs[reg])
9570 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9572 /* If we can, restore the LR into the PC. */
9573 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9574 && really_return
9575 && current_function_pretend_args_size == 0
9576 && saved_regs_mask & (1 << LR_REGNUM)
9577 && !current_function_calls_eh_return)
9579 saved_regs_mask &= ~ (1 << LR_REGNUM);
9580 saved_regs_mask |= (1 << PC_REGNUM);
9583 /* Load the registers off the stack. If we only have one register
9584 to load use the LDR instruction - it is faster. */
9585 if (saved_regs_mask == (1 << LR_REGNUM))
9587 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9589 else if (saved_regs_mask)
9591 if (saved_regs_mask & (1 << SP_REGNUM))
9592 /* Note - write back to the stack register is not enabled
9593 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9594 in the list of registers and if we add writeback the
9595 instruction becomes UNPREDICTABLE. */
9596 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9597 else
9598 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9601 if (current_function_pretend_args_size)
9603 /* Unwind the pre-pushed regs. */
9604 operands[0] = operands[1] = stack_pointer_rtx;
9605 operands[2] = GEN_INT (current_function_pretend_args_size);
9606 output_add_immediate (operands);
9610 /* We may have already restored PC directly from the stack. */
9611 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9612 return "";
9614 /* Stack adjustment for exception handler. */
9615 if (current_function_calls_eh_return)
9616 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9617 ARM_EH_STACKADJ_REGNUM);
9619 /* Generate the return instruction. */
9620 switch ((int) ARM_FUNC_TYPE (func_type))
9622 case ARM_FT_ISR:
9623 case ARM_FT_FIQ:
9624 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9625 break;
9627 case ARM_FT_EXCEPTION:
9628 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9629 break;
9631 case ARM_FT_INTERWORKED:
9632 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9633 break;
9635 default:
9636 if (arm_arch5 || arm_arch4t)
9637 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9638 else
9639 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9640 break;
9643 return "";
9646 static void
9647 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9648 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9650 arm_stack_offsets *offsets;
9652 if (TARGET_THUMB)
9654 int regno;
9656 /* Emit any call-via-reg trampolines that are needed for v4t support
9657 of call_reg and call_value_reg type insns. */
9658 for (regno = 0; regno < LR_REGNUM; regno++)
9660 rtx label = cfun->machine->call_via[regno];
9662 if (label != NULL)
9664 function_section (current_function_decl);
9665 targetm.asm_out.internal_label (asm_out_file, "L",
9666 CODE_LABEL_NUMBER (label));
9667 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9671 /* ??? Probably not safe to set this here, since it assumes that a
9672 function will be emitted as assembly immediately after we generate
9673 RTL for it. This does not happen for inline functions. */
9674 return_used_this_function = 0;
9676 else
9678 /* We need to take into account any stack-frame rounding. */
9679 offsets = arm_get_frame_offsets ();
9681 gcc_assert (!use_return_insn (FALSE, NULL)
9682 || !return_used_this_function
9683 || offsets->saved_regs == offsets->outgoing_args
9684 || frame_pointer_needed);
9686 /* Reset the ARM-specific per-function variables. */
9687 after_arm_reorg = 0;
9691 /* Generate and emit an insn that we will recognize as a push_multi.
9692 Unfortunately, since this insn does not reflect very well the actual
9693 semantics of the operation, we need to annotate the insn for the benefit
9694 of DWARF2 frame unwind information. */
9695 static rtx
9696 emit_multi_reg_push (unsigned long mask)
9698 int num_regs = 0;
9699 int num_dwarf_regs;
9700 int i, j;
9701 rtx par;
9702 rtx dwarf;
9703 int dwarf_par_index;
9704 rtx tmp, reg;
9706 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9707 if (mask & (1 << i))
9708 num_regs++;
9710 gcc_assert (num_regs && num_regs <= 16);
9712 /* We don't record the PC in the dwarf frame information. */
9713 num_dwarf_regs = num_regs;
9714 if (mask & (1 << PC_REGNUM))
9715 num_dwarf_regs--;
9717 /* For the body of the insn we are going to generate an UNSPEC in
9718 parallel with several USEs. This allows the insn to be recognized
9719 by the push_multi pattern in the arm.md file. The insn looks
9720 something like this:
9722 (parallel [
9723 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9724 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9725 (use (reg:SI 11 fp))
9726 (use (reg:SI 12 ip))
9727 (use (reg:SI 14 lr))
9728 (use (reg:SI 15 pc))
9731 For the frame note however, we try to be more explicit and actually
9732 show each register being stored into the stack frame, plus a (single)
9733 decrement of the stack pointer. We do it this way in order to be
9734 friendly to the stack unwinding code, which only wants to see a single
9735 stack decrement per instruction. The RTL we generate for the note looks
9736 something like this:
9738 (sequence [
9739 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9740 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9741 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9742 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9743 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9746 This sequence is used both by the code to support stack unwinding for
9747 exceptions handlers and the code to generate dwarf2 frame debugging. */
9749 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9750 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9751 dwarf_par_index = 1;
9753 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9755 if (mask & (1 << i))
9757 reg = gen_rtx_REG (SImode, i);
9759 XVECEXP (par, 0, 0)
9760 = gen_rtx_SET (VOIDmode,
9761 gen_rtx_MEM (BLKmode,
9762 gen_rtx_PRE_DEC (BLKmode,
9763 stack_pointer_rtx)),
9764 gen_rtx_UNSPEC (BLKmode,
9765 gen_rtvec (1, reg),
9766 UNSPEC_PUSH_MULT));
9768 if (i != PC_REGNUM)
9770 tmp = gen_rtx_SET (VOIDmode,
9771 gen_rtx_MEM (SImode, stack_pointer_rtx),
9772 reg);
9773 RTX_FRAME_RELATED_P (tmp) = 1;
9774 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9775 dwarf_par_index++;
9778 break;
9782 for (j = 1, i++; j < num_regs; i++)
9784 if (mask & (1 << i))
9786 reg = gen_rtx_REG (SImode, i);
9788 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9790 if (i != PC_REGNUM)
9792 tmp = gen_rtx_SET (VOIDmode,
9793 gen_rtx_MEM (SImode,
9794 plus_constant (stack_pointer_rtx,
9795 4 * j)),
9796 reg);
9797 RTX_FRAME_RELATED_P (tmp) = 1;
9798 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9801 j++;
9805 par = emit_insn (par);
9807 tmp = gen_rtx_SET (SImode,
9808 stack_pointer_rtx,
9809 gen_rtx_PLUS (SImode,
9810 stack_pointer_rtx,
9811 GEN_INT (-4 * num_regs)));
9812 RTX_FRAME_RELATED_P (tmp) = 1;
9813 XVECEXP (dwarf, 0, 0) = tmp;
9815 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9816 REG_NOTES (par));
9817 return par;
9820 /* Calculate the size of the return value that is passed in registers. */
9821 static int
9822 arm_size_return_regs (void)
9824 enum machine_mode mode;
9826 if (current_function_return_rtx != 0)
9827 mode = GET_MODE (current_function_return_rtx);
9828 else
9829 mode = DECL_MODE (DECL_RESULT (current_function_decl));
9831 return GET_MODE_SIZE (mode);
9834 static rtx
9835 emit_sfm (int base_reg, int count)
9837 rtx par;
9838 rtx dwarf;
9839 rtx tmp, reg;
9840 int i;
9842 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9843 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9845 reg = gen_rtx_REG (XFmode, base_reg++);
9847 XVECEXP (par, 0, 0)
9848 = gen_rtx_SET (VOIDmode,
9849 gen_rtx_MEM (BLKmode,
9850 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9851 gen_rtx_UNSPEC (BLKmode,
9852 gen_rtvec (1, reg),
9853 UNSPEC_PUSH_MULT));
9854 tmp = gen_rtx_SET (VOIDmode,
9855 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9856 RTX_FRAME_RELATED_P (tmp) = 1;
9857 XVECEXP (dwarf, 0, 1) = tmp;
9859 for (i = 1; i < count; i++)
9861 reg = gen_rtx_REG (XFmode, base_reg++);
9862 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9864 tmp = gen_rtx_SET (VOIDmode,
9865 gen_rtx_MEM (XFmode,
9866 plus_constant (stack_pointer_rtx,
9867 i * 12)),
9868 reg);
9869 RTX_FRAME_RELATED_P (tmp) = 1;
9870 XVECEXP (dwarf, 0, i + 1) = tmp;
9873 tmp = gen_rtx_SET (VOIDmode,
9874 stack_pointer_rtx,
9875 gen_rtx_PLUS (SImode,
9876 stack_pointer_rtx,
9877 GEN_INT (-12 * count)));
9878 RTX_FRAME_RELATED_P (tmp) = 1;
9879 XVECEXP (dwarf, 0, 0) = tmp;
9881 par = emit_insn (par);
9882 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9883 REG_NOTES (par));
9884 return par;
9888 /* Return true if the current function needs to save/restore LR. */
9890 static bool
9891 thumb_force_lr_save (void)
9893 return !cfun->machine->lr_save_eliminated
9894 && (!leaf_function_p ()
9895 || thumb_far_jump_used_p ()
9896 || regs_ever_live [LR_REGNUM]);
9900 /* Compute the distance from register FROM to register TO.
9901 These can be the arg pointer (26), the soft frame pointer (25),
9902 the stack pointer (13) or the hard frame pointer (11).
9903 In thumb mode r7 is used as the soft frame pointer, if needed.
9904 Typical stack layout looks like this:
9906        old stack pointer -> |    |
9907                              ----
9908                             |    | \
9909                             |    |   saved arguments for
9910                             |    |   vararg functions
9911                             |    | /
9912                              --
9913    hard FP & arg pointer -> |    | \
9914                             |    |   stack
9915                             |    |   frame
9916                             |    | /
9917                              --
9918                             |    | \
9919                             |    |   call saved
9920                             |    |   registers
9921       soft frame pointer -> |    | /
9922                              --
9923                             |    | \
9924                             |    |   local
9925                             |    |   variables
9926                             |    | /
9927                              --
9928                             |    | \
9929                             |    |   outgoing
9930                             |    |   arguments
9931    current stack pointer -> |    | /
9932                              --
9934 For a given function some or all of these stack components
9935 may not be needed, giving rise to the possibility of
9936 eliminating some of the registers.
9938 The values returned by this function must reflect the behavior
9939 of arm_expand_prologue() and arm_compute_save_reg_mask().
9941 The sign of the number returned reflects the direction of stack
9942 growth, so the values are positive for all eliminations except
9943 from the soft frame pointer to the hard frame pointer.
9945 SFP may point just inside the local variables block to ensure correct
9946 alignment. */
9949 /* Calculate stack offsets. These are used to calculate register elimination
9950 offsets and in prologue/epilogue code. */
9952 static arm_stack_offsets *
9953 arm_get_frame_offsets (void)
9955 struct arm_stack_offsets *offsets;
9956 unsigned long func_type;
9957 int leaf;
9958 int saved;
9959 HOST_WIDE_INT frame_size;
9961 offsets = &cfun->machine->stack_offsets;
9963 /* We need to know if we are a leaf function. Unfortunately, it
9964 is possible to be called after start_sequence has been called,
9965 which causes get_insns to return the insns for the sequence,
9966 not the function, which will cause leaf_function_p to return
9967 the incorrect result.
9968 To work around this, we cache the computed frame offsets. We only need
9969 to know about leaf functions once reload has completed, and the
9970 frame size cannot be changed after that time, so we can safely
9971 use the cached value. */
9973 if (reload_completed)
9974 return offsets;
9976 /* Initially this is the size of the local variables. It will be translated
9977 into an offset once we have determined the size of preceding data. */
9978 frame_size = ROUND_UP_WORD (get_frame_size ());
9980 leaf = leaf_function_p ();
9982 /* Space for variadic functions. */
9983 offsets->saved_args = current_function_pretend_args_size;
9985 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9987 if (TARGET_ARM)
9989 unsigned int regno;
9991 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9993 /* We know that SP will be doubleword aligned on entry, and we must
9994 preserve that condition at any subroutine call. We also require the
9995 soft frame pointer to be doubleword aligned. */
9997 if (TARGET_REALLY_IWMMXT)
9999 /* Check for the call-saved iWMMXt registers. */
10000 for (regno = FIRST_IWMMXT_REGNUM;
10001 regno <= LAST_IWMMXT_REGNUM;
10002 regno++)
10003 if (regs_ever_live [regno] && ! call_used_regs [regno])
10004 saved += 8;
10007 func_type = arm_current_func_type ();
10008 if (! IS_VOLATILE (func_type))
10010 /* Space for saved FPA registers. */
10011 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10012 if (regs_ever_live[regno] && ! call_used_regs[regno])
10013 saved += 12;
10015 /* Space for saved VFP registers. */
10016 if (TARGET_HARD_FLOAT && TARGET_VFP)
10017 saved += arm_get_vfp_saved_size ();
10020 else /* TARGET_THUMB */
10022 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10023 if (TARGET_BACKTRACE)
10024 saved += 16;
10027 /* Saved registers include the stack frame. */
10028 offsets->saved_regs = offsets->saved_args + saved;
10029 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10030 /* A leaf function does not need any stack alignment if it has nothing
10031 on the stack. */
10032 if (leaf && frame_size == 0)
10034 offsets->outgoing_args = offsets->soft_frame;
10035 return offsets;
10038 /* Ensure SFP has the correct alignment. */
10039 if (ARM_DOUBLEWORD_ALIGN
10040 && (offsets->soft_frame & 7))
10041 offsets->soft_frame += 4;
10043 offsets->outgoing_args = offsets->soft_frame + frame_size
10044 + current_function_outgoing_args_size;
10046 if (ARM_DOUBLEWORD_ALIGN)
10048 /* Ensure SP remains doubleword aligned. */
10049 if (offsets->outgoing_args & 7)
10050 offsets->outgoing_args += 4;
10051 gcc_assert (!(offsets->outgoing_args & 7));
10054 return offsets;
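/* A minimal standalone model (not part of the original file, helper name
   made up, kept under #if 0 so it stays out of the build) of the
   doubleword rounding used above: every stack component is already word
   aligned, so a single conditional +4 restores 8-byte alignment.  */
#if 0
static HOST_WIDE_INT
round_up_to_doubleword (HOST_WIDE_INT bytes)
{
  if (bytes & 7)	/* Word-aligned input leaves a remainder of 4.  */
    bytes += 4;
  return bytes;
}
#endif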
10058 /* Calculate the relative offsets for the different stack pointers. Positive
10059 offsets are in the direction of stack growth. */
10061 HOST_WIDE_INT
10062 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10064 arm_stack_offsets *offsets;
10066 offsets = arm_get_frame_offsets ();
10068 /* OK, now we have enough information to compute the distances.
10069 There must be an entry in these switch tables for each pair
10070 of registers in ELIMINABLE_REGS, even if some of the entries
10071 seem to be redundant or useless. */
10072 switch (from)
10074 case ARG_POINTER_REGNUM:
10075 switch (to)
10077 case THUMB_HARD_FRAME_POINTER_REGNUM:
10078 return 0;
10080 case FRAME_POINTER_REGNUM:
10081 /* This is the reverse of the soft frame pointer
10082 to hard frame pointer elimination below. */
10083 return offsets->soft_frame - offsets->saved_args;
10085 case ARM_HARD_FRAME_POINTER_REGNUM:
10086 /* If there is no stack frame then the hard
10087 frame pointer and the arg pointer coincide. */
10088 if (offsets->frame == offsets->saved_regs)
10089 return 0;
10090 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10091 return (frame_pointer_needed
10092 && cfun->static_chain_decl != NULL
10093 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10095 case STACK_POINTER_REGNUM:
10096 /* If nothing has been pushed on the stack at all
10097 then this will return -4. This *is* correct! */
10098 return offsets->outgoing_args - (offsets->saved_args + 4);
10100 default:
10101 gcc_unreachable ();
10103 gcc_unreachable ();
10105 case FRAME_POINTER_REGNUM:
10106 switch (to)
10108 case THUMB_HARD_FRAME_POINTER_REGNUM:
10109 return 0;
10111 case ARM_HARD_FRAME_POINTER_REGNUM:
10112 /* The hard frame pointer points to the top entry in the
10113 stack frame.  The soft frame pointer points to the bottom entry
10114 in the stack frame. If there is no stack frame at all,
10115 then they are identical. */
10117 return offsets->frame - offsets->soft_frame;
10119 case STACK_POINTER_REGNUM:
10120 return offsets->outgoing_args - offsets->soft_frame;
10122 default:
10123 gcc_unreachable ();
10125 gcc_unreachable ();
10127 default:
10128 /* You cannot eliminate from the stack pointer.
10129 In theory you could eliminate from the hard frame
10130 pointer to the stack pointer, but this will never
10131 happen, since if a stack frame is not needed the
10132 hard frame pointer will never be used. */
10133 gcc_unreachable ();
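/* Continuing the hypothetical layout sketched above (saved_args = 0,
   soft_frame = 16, outgoing_args = 40): eliminating ARG_POINTER to
   STACK_POINTER would yield 40 - (0 + 4) = 36, and FRAME_POINTER to
   STACK_POINTER would yield 40 - 16 = 24.  */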
10138 /* Generate the prologue instructions for entry into an ARM function. */
10139 void
10140 arm_expand_prologue (void)
10142 int reg;
10143 rtx amount;
10144 rtx insn;
10145 rtx ip_rtx;
10146 unsigned long live_regs_mask;
10147 unsigned long func_type;
10148 int fp_offset = 0;
10149 int saved_pretend_args = 0;
10150 int saved_regs = 0;
10151 unsigned HOST_WIDE_INT args_to_push;
10152 arm_stack_offsets *offsets;
10154 func_type = arm_current_func_type ();
10156 /* Naked functions don't have prologues. */
10157 if (IS_NAKED (func_type))
10158 return;
10160 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10161 args_to_push = current_function_pretend_args_size;
10163 /* Compute which registers we will have to save onto the stack.  */
10164 live_regs_mask = arm_compute_save_reg_mask ();
10166 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10168 if (frame_pointer_needed)
10170 if (IS_INTERRUPT (func_type))
10172 /* Interrupt functions must not corrupt any registers.
10173 Creating a frame pointer however, corrupts the IP
10174 register, so we must push it first. */
10175 insn = emit_multi_reg_push (1 << IP_REGNUM);
10177 /* Do not set RTX_FRAME_RELATED_P on this insn.
10178 The dwarf stack unwinding code only wants to see one
10179 stack decrement per function, and this is not it. If
10180 this instruction is labeled as being part of the frame
10181 creation sequence then dwarf2out_frame_debug_expr will
10182 die when it encounters the assignment of IP to FP
10183 later on, since the use of SP here establishes SP as
10184 the CFA register and not IP.
10186 Anyway this instruction is not really part of the stack
10187 frame creation although it is part of the prologue. */
10189 else if (IS_NESTED (func_type))
10191 /* The static chain register is the same as the IP register, which is
10192 used as a scratch register during stack frame creation.
10193 To get around this we need to find somewhere to store IP
10194 whilst the frame is being created. We try the following
10195 places in order:
10197 1. The last argument register.
10198 2. A slot on the stack above the frame. (This only
10199 works if the function is not a varargs function).
10200 3. Register r3, after pushing the argument registers
10201 onto the stack.
10203 Note - we only need to tell the dwarf2 backend about the SP
10204 adjustment in the second variant; the static chain register
10205 doesn't need to be unwound, as it doesn't contain a value
10206 inherited from the caller. */
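/* Purely as an illustration, the three variants above would emit
   code along these lines (exact forms depend on the target and on
   how many argument registers need pushing):

       1:  mov   r3, ip
       2:  str   ip, [sp, #-4]!
       3:  stmfd sp!, {r0-r3}    @ push the argument registers
           mov   r3, ip  */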
10208 if (regs_ever_live[3] == 0)
10210 insn = gen_rtx_REG (SImode, 3);
10211 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10212 insn = emit_insn (insn);
10214 else if (args_to_push == 0)
10216 rtx dwarf;
10217 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10218 insn = gen_rtx_MEM (SImode, insn);
10219 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10220 insn = emit_insn (insn);
10222 fp_offset = 4;
10224 /* Just tell the dwarf backend that we adjusted SP. */
10225 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10226 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10227 GEN_INT (-fp_offset)));
10228 RTX_FRAME_RELATED_P (insn) = 1;
10229 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10230 dwarf, REG_NOTES (insn));
10232 else
10234 /* Store the args on the stack. */
10235 if (cfun->machine->uses_anonymous_args)
10236 insn = emit_multi_reg_push
10237 ((0xf0 >> (args_to_push / 4)) & 0xf);
10238 else
10239 insn = emit_insn
10240 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10241 GEN_INT (- args_to_push)));
10243 RTX_FRAME_RELATED_P (insn) = 1;
10245 saved_pretend_args = 1;
10246 fp_offset = args_to_push;
10247 args_to_push = 0;
10249 /* Now reuse r3 to preserve IP. */
10250 insn = gen_rtx_REG (SImode, 3);
10251 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10252 (void) emit_insn (insn);
10256 if (fp_offset)
10258 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10259 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10261 else
10262 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10264 insn = emit_insn (insn);
10265 RTX_FRAME_RELATED_P (insn) = 1;
10268 if (args_to_push)
10270 /* Push the argument registers, or reserve space for them. */
10271 if (cfun->machine->uses_anonymous_args)
10272 insn = emit_multi_reg_push
10273 ((0xf0 >> (args_to_push / 4)) & 0xf);
10274 else
10275 insn = emit_insn
10276 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10277 GEN_INT (- args_to_push)));
10278 RTX_FRAME_RELATED_P (insn) = 1;
10281 /* If this is an interrupt service routine, and the link register
10282 is going to be pushed, and we are not creating a stack frame
10283 (which would involve an extra push of IP and a pop in the epilogue),
10284 subtracting four from LR now will mean that the function return
10285 can be done with a single instruction. */
10286 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10287 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10288 && ! frame_pointer_needed)
10289 emit_insn (gen_rtx_SET (SImode,
10290 gen_rtx_REG (SImode, LR_REGNUM),
10291 gen_rtx_PLUS (SImode,
10292 gen_rtx_REG (SImode, LR_REGNUM),
10293 GEN_INT (-4))));
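/* In other words (illustrative): with LR already adjusted, the ISR
   epilogue can pop the return address straight into PC, e.g. with
   something like "ldmfd sp!, {..., pc}^", instead of popping LR and
   then needing a separate "subs pc, lr, #4".  */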
10295 if (live_regs_mask)
10297 insn = emit_multi_reg_push (live_regs_mask);
10298 saved_regs += bit_count (live_regs_mask) * 4;
10299 RTX_FRAME_RELATED_P (insn) = 1;
10302 if (TARGET_IWMMXT)
10303 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10304 if (regs_ever_live[reg] && ! call_used_regs [reg])
10306 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10307 insn = gen_rtx_MEM (V2SImode, insn);
10308 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10309 gen_rtx_REG (V2SImode, reg)));
10310 RTX_FRAME_RELATED_P (insn) = 1;
10311 saved_regs += 8;
10314 if (! IS_VOLATILE (func_type))
10316 int start_reg;
10318 /* Save any floating point call-saved registers used by this
10319 function. */
10320 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10322 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10323 if (regs_ever_live[reg] && !call_used_regs[reg])
10325 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10326 insn = gen_rtx_MEM (XFmode, insn);
10327 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10328 gen_rtx_REG (XFmode, reg)));
10329 RTX_FRAME_RELATED_P (insn) = 1;
10330 saved_regs += 12;
10333 else
10335 start_reg = LAST_FPA_REGNUM;
10337 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10339 if (regs_ever_live[reg] && !call_used_regs[reg])
10341 if (start_reg - reg == 3)
10343 insn = emit_sfm (reg, 4);
10344 RTX_FRAME_RELATED_P (insn) = 1;
10345 saved_regs += 48;
10346 start_reg = reg - 1;
10349 else
10351 if (start_reg != reg)
10353 insn = emit_sfm (reg + 1, start_reg - reg);
10354 RTX_FRAME_RELATED_P (insn) = 1;
10355 saved_regs += (start_reg - reg) * 12;
10357 start_reg = reg - 1;
10361 if (start_reg != reg)
10363 insn = emit_sfm (reg + 1, start_reg - reg);
10364 saved_regs += (start_reg - reg) * 12;
10365 RTX_FRAME_RELATED_P (insn) = 1;
10368 if (TARGET_HARD_FLOAT && TARGET_VFP)
10370 start_reg = FIRST_VFP_REGNUM;
10372 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10374 if ((!regs_ever_live[reg] || call_used_regs[reg])
10375 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10377 if (start_reg != reg)
10378 saved_regs += vfp_emit_fstmx (start_reg,
10379 (reg - start_reg) / 2);
10380 start_reg = reg + 2;
10383 if (start_reg != reg)
10384 saved_regs += vfp_emit_fstmx (start_reg,
10385 (reg - start_reg) / 2);
10389 if (frame_pointer_needed)
10391 /* Create the new frame pointer. */
10392 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10393 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10394 RTX_FRAME_RELATED_P (insn) = 1;
10396 if (IS_NESTED (func_type))
10398 /* Recover the static chain register. */
10399 if (regs_ever_live [3] == 0
10400 || saved_pretend_args)
10401 insn = gen_rtx_REG (SImode, 3);
10402 else /* if (current_function_pretend_args_size == 0) */
10404 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10405 GEN_INT (4));
10406 insn = gen_rtx_MEM (SImode, insn);
10409 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10410 /* Add a USE to stop propagate_one_insn() from barfing. */
10411 emit_insn (gen_prologue_use (ip_rtx));
10415 offsets = arm_get_frame_offsets ();
10416 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10418 /* This add can produce multiple insns for a large constant, so we
10419 need to get tricky. */
10420 rtx last = get_last_insn ();
10422 amount = GEN_INT (offsets->saved_args + saved_regs
10423 - offsets->outgoing_args);
10425 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10426 amount));
10429 last = last ? NEXT_INSN (last) : get_insns ();
10430 RTX_FRAME_RELATED_P (last) = 1;
10432 while (last != insn);
10434 /* If the frame pointer is needed, emit a special barrier that
10435 will prevent the scheduler from moving stores to the frame
10436 before the stack adjustment. */
10437 if (frame_pointer_needed)
10438 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10439 hard_frame_pointer_rtx));
10443 if (flag_pic)
10444 arm_load_pic_register (INVALID_REGNUM);
10446 /* If we are profiling, make sure no instructions are scheduled before
10447 the call to mcount. Similarly if the user has requested no
10448 scheduling in the prolog. */
10449 if (current_function_profile || !TARGET_SCHED_PROLOG)
10450 emit_insn (gen_blockage ());
10452 /* If the link register is being kept alive, with the return address in it,
10453 then make sure that it does not get reused by the ce2 pass. */
10454 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10456 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10457 cfun->machine->lr_save_eliminated = 1;
10461 /* If CODE is 'd', then X is a condition operand and the instruction
10462 should only be executed if the condition is true.
10463 If CODE is 'D', then X is a condition operand and the instruction
10464 should only be executed if the condition is false: however, if the mode
10465 of the comparison is CCFPEmode, then always execute the instruction -- we
10466 do this because in these circumstances !GE does not necessarily imply LT;
10467 in these cases the instruction pattern will take care to make sure that
10468 an instruction containing %d will follow, thereby undoing the effects of
10469 doing this instruction unconditionally.
10470 If CODE is 'N' then X is a floating point operand that must be negated
10471 before output.
10472 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10473 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
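/* A few illustrative examples of the codes above: for the comparison
   (ge (reg) (const_int 0)), '%d' would print "ge" and '%D' would
   print "lt"; for (const_int 5), '%B' prints -6 (the bitwise
   inverse); for a DImode REG, '%M' prints an ldm/stm-style range
   such as {r0-r1}.  */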
10474 void
10475 arm_print_operand (FILE *stream, rtx x, int code)
10477 switch (code)
10479 case '@':
10480 fputs (ASM_COMMENT_START, stream);
10481 return;
10483 case '_':
10484 fputs (user_label_prefix, stream);
10485 return;
10487 case '|':
10488 fputs (REGISTER_PREFIX, stream);
10489 return;
10491 case '?':
10492 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10494 if (TARGET_THUMB)
10496 output_operand_lossage ("predicated Thumb instruction");
10497 break;
10499 if (current_insn_predicate != NULL)
10501 output_operand_lossage
10502 ("predicated instruction in conditional sequence");
10503 break;
10506 fputs (arm_condition_codes[arm_current_cc], stream);
10508 else if (current_insn_predicate)
10510 enum arm_cond_code code;
10512 if (TARGET_THUMB)
10514 output_operand_lossage ("predicated Thumb instruction");
10515 break;
10518 code = get_arm_condition_code (current_insn_predicate);
10519 fputs (arm_condition_codes[code], stream);
10521 return;
10523 case 'N':
10525 REAL_VALUE_TYPE r;
10526 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10527 r = REAL_VALUE_NEGATE (r);
10528 fprintf (stream, "%s", fp_const_from_val (&r));
10530 return;
10532 case 'B':
10533 if (GET_CODE (x) == CONST_INT)
10535 HOST_WIDE_INT val;
10536 val = ARM_SIGN_EXTEND (~INTVAL (x));
10537 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10539 else
10541 putc ('~', stream);
10542 output_addr_const (stream, x);
10544 return;
10546 case 'i':
10547 fprintf (stream, "%s", arithmetic_instr (x, 1));
10548 return;
10550 /* Truncate Cirrus shift counts. */
10551 case 's':
10552 if (GET_CODE (x) == CONST_INT)
10554 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10555 return;
10557 arm_print_operand (stream, x, 0);
10558 return;
10560 case 'I':
10561 fprintf (stream, "%s", arithmetic_instr (x, 0));
10562 return;
10564 case 'S':
10566 HOST_WIDE_INT val;
10567 const char * shift = shift_op (x, &val);
10569 if (shift)
10571 fprintf (stream, ", %s ", shift);
10572 if (val == -1)
10573 arm_print_operand (stream, XEXP (x, 1), 0);
10574 else
10575 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10578 return;
10580 /* An explanation of the 'Q', 'R' and 'H' register operands:
10582 In a pair of registers containing a DI or DF value the 'Q'
10583 operand returns the register number of the register containing
10584 the least significant part of the value. The 'R' operand returns
10585 the register number of the register containing the most
10586 significant part of the value.
10588 The 'H' operand returns the higher of the two register numbers.
10589 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10590 same as the 'Q' operand, since the most significant part of the
10591 value is held in the lower number register. The reverse is true
10592 on systems where WORDS_BIG_ENDIAN is false.
10594 The purpose of these operands is to distinguish between cases
10595 where the endian-ness of the values is important (for example
10596 when they are added together), and cases where the endian-ness
10597 is irrelevant, but the order of register operations is important.
10598 For example when loading a value from memory into a register
10599 pair, the endian-ness does not matter. Provided that the value
10600 from the lower memory address is put into the lower numbered
10601 register, and the value from the higher address is put into the
10602 higher numbered register, the load will work regardless of whether
10603 the value being loaded is big-wordian or little-wordian. The
10604 order of the two register loads can matter however, if the address
10605 of the memory location is actually held in one of the registers
10606 being overwritten by the load. */
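/* Concretely (hypothetical case): for a DImode value held in the
   pair r0/r1 on a little-endian target, '%Q' prints r0 (least
   significant word), '%R' prints r1 (most significant word), and
   '%H' prints r1, the higher-numbered register, regardless of
   endianness.  */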
10607 case 'Q':
10608 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10610 output_operand_lossage ("invalid operand for code '%c'", code);
10611 return;
10614 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10615 return;
10617 case 'R':
10618 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10620 output_operand_lossage ("invalid operand for code '%c'", code);
10621 return;
10624 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10625 return;
10627 case 'H':
10628 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10630 output_operand_lossage ("invalid operand for code '%c'", code);
10631 return;
10634 asm_fprintf (stream, "%r", REGNO (x) + 1);
10635 return;
10637 case 'm':
10638 asm_fprintf (stream, "%r",
10639 GET_CODE (XEXP (x, 0)) == REG
10640 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10641 return;
10643 case 'M':
10644 asm_fprintf (stream, "{%r-%r}",
10645 REGNO (x),
10646 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10647 return;
10649 case 'd':
10650 /* CONST_TRUE_RTX means always -- that's the default. */
10651 if (x == const_true_rtx)
10652 return;
10654 if (!COMPARISON_P (x))
10656 output_operand_lossage ("invalid operand for code '%c'", code);
10657 return;
10660 fputs (arm_condition_codes[get_arm_condition_code (x)],
10661 stream);
10662 return;
10664 case 'D':
10665 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10666 want to do that. */
10667 if (x == const_true_rtx)
10669 output_operand_lossage ("instruction never executed");
10670 return;
10672 if (!COMPARISON_P (x))
10674 output_operand_lossage ("invalid operand for code '%c'", code);
10675 return;
10678 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10679 (get_arm_condition_code (x))],
10680 stream);
10681 return;
10683 /* Cirrus registers can be accessed in a variety of ways:
10684 single floating point (f)
10685 double floating point (d)
10686 32bit integer (fx)
10687 64bit integer (dx). */
10688 case 'W': /* Cirrus register in F mode. */
10689 case 'X': /* Cirrus register in D mode. */
10690 case 'Y': /* Cirrus register in FX mode. */
10691 case 'Z': /* Cirrus register in DX mode. */
10692 gcc_assert (GET_CODE (x) == REG
10693 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
10695 fprintf (stream, "mv%s%s",
10696 code == 'W' ? "f"
10697 : code == 'X' ? "d"
10698 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10700 return;
10702 /* Print a Cirrus register in the mode specified by the register's mode.  */
10703 case 'V':
10705 int mode = GET_MODE (x);
10707 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10709 output_operand_lossage ("invalid operand for code '%c'", code);
10710 return;
10713 fprintf (stream, "mv%s%s",
10714 mode == DFmode ? "d"
10715 : mode == SImode ? "fx"
10716 : mode == DImode ? "dx"
10717 : "f", reg_names[REGNO (x)] + 2);
10719 return;
10722 case 'U':
10723 if (GET_CODE (x) != REG
10724 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10725 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10726 /* Bad value for wCG register number. */
10728 output_operand_lossage ("invalid operand for code '%c'", code);
10729 return;
10732 else
10733 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10734 return;
10736 /* Print an iWMMXt control register name. */
10737 case 'w':
10738 if (GET_CODE (x) != CONST_INT
10739 || INTVAL (x) < 0
10740 || INTVAL (x) >= 16)
10741 /* Bad value for wC register number. */
10743 output_operand_lossage ("invalid operand for code '%c'", code);
10744 return;
10747 else
10749 static const char * wc_reg_names [16] =
10751 "wCID", "wCon", "wCSSF", "wCASF",
10752 "wC4", "wC5", "wC6", "wC7",
10753 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10754 "wC12", "wC13", "wC14", "wC15"
10757 fprintf (stream, "%s", wc_reg_names [INTVAL (x)]);
10759 return;
10761 /* Print a VFP double precision register name. */
10762 case 'P':
10764 int mode = GET_MODE (x);
10765 int num;
10767 if (mode != DImode && mode != DFmode)
10769 output_operand_lossage ("invalid operand for code '%c'", code);
10770 return;
10773 if (GET_CODE (x) != REG
10774 || !IS_VFP_REGNUM (REGNO (x)))
10776 output_operand_lossage ("invalid operand for code '%c'", code);
10777 return;
10780 num = REGNO (x) - FIRST_VFP_REGNUM;
10781 if (num & 1)
10783 output_operand_lossage ("invalid operand for code '%c'", code);
10784 return;
10787 fprintf (stream, "d%d", num >> 1);
10789 return;
10791 default:
10792 if (x == 0)
10794 output_operand_lossage ("missing operand");
10795 return;
10798 switch (GET_CODE (x))
10800 case REG:
10801 asm_fprintf (stream, "%r", REGNO (x));
10802 break;
10804 case MEM:
10805 output_memory_reference_mode = GET_MODE (x);
10806 output_address (XEXP (x, 0));
10807 break;
10809 case CONST_DOUBLE:
10810 fprintf (stream, "#%s", fp_immediate_constant (x));
10811 break;
10813 default:
10814 gcc_assert (GET_CODE (x) != NEG);
10815 fputc ('#', stream);
10816 output_addr_const (stream, x);
10817 break;
10822 #ifndef AOF_ASSEMBLER
10823 /* Target hook for assembling integer objects. The ARM version needs to
10824 handle word-sized values specially. */
10825 static bool
10826 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10828 if (size == UNITS_PER_WORD && aligned_p)
10830 fputs ("\t.word\t", asm_out_file);
10831 output_addr_const (asm_out_file, x);
10833 /* Mark symbols as position independent. We only do this in the
10834 .text segment, not in the .data segment. */
10835 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10836 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10838 if (GET_CODE (x) == SYMBOL_REF
10839 && (CONSTANT_POOL_ADDRESS_P (x)
10840 || SYMBOL_REF_LOCAL_P (x)))
10841 fputs ("(GOTOFF)", asm_out_file);
10842 else if (GET_CODE (x) == LABEL_REF)
10843 fputs ("(GOTOFF)", asm_out_file);
10844 else
10845 fputs ("(GOT)", asm_out_file);
10847 fputc ('\n', asm_out_file);
10848 return true;
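/* For example (the names here are made up): when compiling with
   -fPIC, a constant-table reference to a local symbol or label
   might be emitted as

       .word   .LC3(GOTOFF)

   while a reference to a global symbol would come out as

       .word   foo(GOT)  */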
10851 if (arm_vector_mode_supported_p (GET_MODE (x)))
10853 int i, units;
10855 gcc_assert (GET_CODE (x) == CONST_VECTOR);
10857 units = CONST_VECTOR_NUNITS (x);
10859 switch (GET_MODE (x))
10861 case V2SImode: size = 4; break;
10862 case V4HImode: size = 2; break;
10863 case V8QImode: size = 1; break;
10864 default:
10865 gcc_unreachable ();
10868 for (i = 0; i < units; i++)
10870 rtx elt;
10872 elt = CONST_VECTOR_ELT (x, i);
10873 assemble_integer
10874 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10877 return true;
10880 return default_assemble_integer (x, size, aligned_p);
10884 /* Add a function to the list of static constructors. */
10886 static void
10887 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10889 if (!TARGET_AAPCS_BASED)
10891 default_named_section_asm_out_constructor (symbol, priority);
10892 return;
10895 /* Put these in the .init_array section, using a special relocation. */
10896 ctors_section ();
10897 assemble_align (POINTER_SIZE);
10898 fputs ("\t.word\t", asm_out_file);
10899 output_addr_const (asm_out_file, symbol);
10900 fputs ("(target1)\n", asm_out_file);
10902 #endif
10904 /* A finite state machine takes care of noticing whether or not instructions
10905 can be conditionally executed, and thus of decreasing execution time and code
10906 size by deleting branch instructions.  The fsm is controlled by
10907 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10909 /* The states of the fsm controlling condition codes are:
10910 0: normal, do nothing special
10911 1: make ASM_OUTPUT_OPCODE not output this instruction
10912 2: make ASM_OUTPUT_OPCODE not output this instruction
10913 3: make instructions conditional
10914 4: make instructions conditional
10916 State transitions (state->state by whom under condition):
10917 0 -> 1 final_prescan_insn if the `target' is a label
10918 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10919 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10920 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10921 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10922 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10923 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10924 (the target insn is arm_target_insn).
10926 If the jump clobbers the conditions then we use states 2 and 4.
10928 A similar thing can be done with conditional return insns.
10930 XXX In case the `target' is an unconditional branch, this conditionalising
10931 of the instructions always reduces code size, but not always execution
10932 time. But then, I want to reduce the code size to somewhere near what
10933 /bin/cc produces. */
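/* An illustrative example of what the fsm achieves: the sequence

         cmp     r0, #0
         beq     .L1
         add     r1, r1, #1
     .L1:

   can be rewritten, by suppressing the branch and conditionalizing
   the skipped instruction, as

         cmp     r0, #0
         addne   r1, r1, #1  */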
10935 /* Returns the index of the ARM condition code string in
10936 `arm_condition_codes'. COMPARISON should be an rtx like
10937 `(eq (...) (...))'. */
10938 static enum arm_cond_code
10939 get_arm_condition_code (rtx comparison)
10941 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10942 int code;
10943 enum rtx_code comp_code = GET_CODE (comparison);
10945 if (GET_MODE_CLASS (mode) != MODE_CC)
10946 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10947 XEXP (comparison, 1));
10949 switch (mode)
10951 case CC_DNEmode: code = ARM_NE; goto dominance;
10952 case CC_DEQmode: code = ARM_EQ; goto dominance;
10953 case CC_DGEmode: code = ARM_GE; goto dominance;
10954 case CC_DGTmode: code = ARM_GT; goto dominance;
10955 case CC_DLEmode: code = ARM_LE; goto dominance;
10956 case CC_DLTmode: code = ARM_LT; goto dominance;
10957 case CC_DGEUmode: code = ARM_CS; goto dominance;
10958 case CC_DGTUmode: code = ARM_HI; goto dominance;
10959 case CC_DLEUmode: code = ARM_LS; goto dominance;
10960 case CC_DLTUmode: code = ARM_CC;
10962 dominance:
10963 gcc_assert (comp_code == EQ || comp_code == NE);
10965 if (comp_code == EQ)
10966 return ARM_INVERSE_CONDITION_CODE (code);
10967 return code;
10969 case CC_NOOVmode:
10970 switch (comp_code)
10972 case NE: return ARM_NE;
10973 case EQ: return ARM_EQ;
10974 case GE: return ARM_PL;
10975 case LT: return ARM_MI;
10976 default: gcc_unreachable ();
10979 case CC_Zmode:
10980 switch (comp_code)
10982 case NE: return ARM_NE;
10983 case EQ: return ARM_EQ;
10984 default: gcc_unreachable ();
10987 case CC_Nmode:
10988 switch (comp_code)
10990 case NE: return ARM_MI;
10991 case EQ: return ARM_PL;
10992 default: gcc_unreachable ();
10995 case CCFPEmode:
10996 case CCFPmode:
10997 /* These encodings assume that AC=1 in the FPA system control
10998 byte. This allows us to handle all cases except UNEQ and
10999 LTGT. */
11000 switch (comp_code)
11002 case GE: return ARM_GE;
11003 case GT: return ARM_GT;
11004 case LE: return ARM_LS;
11005 case LT: return ARM_MI;
11006 case NE: return ARM_NE;
11007 case EQ: return ARM_EQ;
11008 case ORDERED: return ARM_VC;
11009 case UNORDERED: return ARM_VS;
11010 case UNLT: return ARM_LT;
11011 case UNLE: return ARM_LE;
11012 case UNGT: return ARM_HI;
11013 case UNGE: return ARM_PL;
11014 /* UNEQ and LTGT do not have a representation. */
11015 case UNEQ: /* Fall through. */
11016 case LTGT: /* Fall through. */
11017 default: gcc_unreachable ();
11020 case CC_SWPmode:
11021 switch (comp_code)
11023 case NE: return ARM_NE;
11024 case EQ: return ARM_EQ;
11025 case GE: return ARM_LE;
11026 case GT: return ARM_LT;
11027 case LE: return ARM_GE;
11028 case LT: return ARM_GT;
11029 case GEU: return ARM_LS;
11030 case GTU: return ARM_CC;
11031 case LEU: return ARM_CS;
11032 case LTU: return ARM_HI;
11033 default: gcc_unreachable ();
11036 case CC_Cmode:
11037 switch (comp_code)
11039 case LTU: return ARM_CS;
11040 case GEU: return ARM_CC;
11041 default: gcc_unreachable ();
11044 case CCmode:
11045 switch (comp_code)
11047 case NE: return ARM_NE;
11048 case EQ: return ARM_EQ;
11049 case GE: return ARM_GE;
11050 case GT: return ARM_GT;
11051 case LE: return ARM_LE;
11052 case LT: return ARM_LT;
11053 case GEU: return ARM_CS;
11054 case GTU: return ARM_HI;
11055 case LEU: return ARM_LS;
11056 case LTU: return ARM_CC;
11057 default: gcc_unreachable ();
11060 default: gcc_unreachable ();
11064 void
11065 arm_final_prescan_insn (rtx insn)
11067 /* BODY will hold the body of INSN. */
11068 rtx body = PATTERN (insn);
11070 /* This will be 1 if trying to repeat the trick, and things need to be
11071 reversed if it appears to fail. */
11072 int reverse = 0;
11074 /* JUMP_CLOBBERS being one implies that the condition codes are clobbered
11075 if the branch is taken, even if the rtl suggests otherwise.  It also
11076 means that we have to grub around within the jump expression to find
11077 out what the conditions are when the jump isn't taken. */
11078 int jump_clobbers = 0;
11080 /* If we start with a return insn, we only succeed if we find another one. */
11081 int seeking_return = 0;
11083 /* START_INSN will hold the insn from where we start looking. This is the
11084 first insn after the following code_label if REVERSE is true. */
11085 rtx start_insn = insn;
11087 /* If in state 4, check if the target branch is reached, in order to
11088 change back to state 0. */
11089 if (arm_ccfsm_state == 4)
11091 if (insn == arm_target_insn)
11093 arm_target_insn = NULL;
11094 arm_ccfsm_state = 0;
11096 return;
11099 /* If in state 3, it is possible to repeat the trick, if this insn is an
11100 unconditional branch to a label, and immediately following this branch
11101 is the previous target label which is only used once, and the label this
11102 branch jumps to is not too far off. */
11103 if (arm_ccfsm_state == 3)
11105 if (simplejump_p (insn))
11107 start_insn = next_nonnote_insn (start_insn);
11108 if (GET_CODE (start_insn) == BARRIER)
11110 /* XXX Isn't this always a barrier? */
11111 start_insn = next_nonnote_insn (start_insn);
11113 if (GET_CODE (start_insn) == CODE_LABEL
11114 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11115 && LABEL_NUSES (start_insn) == 1)
11116 reverse = TRUE;
11117 else
11118 return;
11120 else if (GET_CODE (body) == RETURN)
11122 start_insn = next_nonnote_insn (start_insn);
11123 if (GET_CODE (start_insn) == BARRIER)
11124 start_insn = next_nonnote_insn (start_insn);
11125 if (GET_CODE (start_insn) == CODE_LABEL
11126 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11127 && LABEL_NUSES (start_insn) == 1)
11129 reverse = TRUE;
11130 seeking_return = 1;
11132 else
11133 return;
11135 else
11136 return;
11139 gcc_assert (!arm_ccfsm_state || reverse);
11140 if (GET_CODE (insn) != JUMP_INSN)
11141 return;
11143 /* This jump might be paralleled with a clobber of the condition codes;
11144 the jump should always come first.  */
11145 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11146 body = XVECEXP (body, 0, 0);
11148 if (reverse
11149 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11150 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11152 int insns_skipped;
11153 int fail = FALSE, succeed = FALSE;
11154 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11155 int then_not_else = TRUE;
11156 rtx this_insn = start_insn, label = 0;
11158 /* If the jump cannot be done with one instruction, we cannot
11159 conditionally execute the instruction in the inverse case. */
11160 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11162 jump_clobbers = 1;
11163 return;
11166 /* Register the insn jumped to. */
11167 if (reverse)
11169 if (!seeking_return)
11170 label = XEXP (SET_SRC (body), 0);
11172 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11173 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11174 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11176 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11177 then_not_else = FALSE;
11179 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11180 seeking_return = 1;
11181 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11183 seeking_return = 1;
11184 then_not_else = FALSE;
11186 else
11187 gcc_unreachable ();
11189 /* See how many insns this branch skips, and what kind of insns. If all
11190 insns are okay, and the label or unconditional branch to the same
11191 label is not too far away, succeed. */
11192 for (insns_skipped = 0;
11193 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11195 rtx scanbody;
11197 this_insn = next_nonnote_insn (this_insn);
11198 if (!this_insn)
11199 break;
11201 switch (GET_CODE (this_insn))
11203 case CODE_LABEL:
11204 /* Succeed if it is the target label, otherwise fail since
11205 control falls in from somewhere else. */
11206 if (this_insn == label)
11208 if (jump_clobbers)
11210 arm_ccfsm_state = 2;
11211 this_insn = next_nonnote_insn (this_insn);
11213 else
11214 arm_ccfsm_state = 1;
11215 succeed = TRUE;
11217 else
11218 fail = TRUE;
11219 break;
11221 case BARRIER:
11222 /* Succeed if the following insn is the target label.
11223 Otherwise fail.
11224 If return insns are used then the last insn in a function
11225 will be a barrier. */
11226 this_insn = next_nonnote_insn (this_insn);
11227 if (this_insn && this_insn == label)
11229 if (jump_clobbers)
11231 arm_ccfsm_state = 2;
11232 this_insn = next_nonnote_insn (this_insn);
11234 else
11235 arm_ccfsm_state = 1;
11236 succeed = TRUE;
11238 else
11239 fail = TRUE;
11240 break;
11242 case CALL_INSN:
11243 /* The AAPCS says that conditional calls should not be
11244 used since they make interworking inefficient (the
11245 linker can't transform BL<cond> into BLX). That's
11246 only a problem if the machine has BLX. */
11247 if (arm_arch5)
11249 fail = TRUE;
11250 break;
11253 /* Succeed if the following insn is the target label, or
11254 if the following two insns are a barrier and the
11255 target label. */
11256 this_insn = next_nonnote_insn (this_insn);
11257 if (this_insn && GET_CODE (this_insn) == BARRIER)
11258 this_insn = next_nonnote_insn (this_insn);
11260 if (this_insn && this_insn == label
11261 && insns_skipped < max_insns_skipped)
11263 if (jump_clobbers)
11265 arm_ccfsm_state = 2;
11266 this_insn = next_nonnote_insn (this_insn);
11268 else
11269 arm_ccfsm_state = 1;
11270 succeed = TRUE;
11272 else
11273 fail = TRUE;
11274 break;
11276 case JUMP_INSN:
11277 /* If this is an unconditional branch to the same label, succeed.
11278 If it is to another label, do nothing. If it is conditional,
11279 fail. */
11280 /* XXX Probably, the tests for SET and the PC are
11281 unnecessary. */
11283 scanbody = PATTERN (this_insn);
11284 if (GET_CODE (scanbody) == SET
11285 && GET_CODE (SET_DEST (scanbody)) == PC)
11287 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11288 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11290 arm_ccfsm_state = 2;
11291 succeed = TRUE;
11293 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11294 fail = TRUE;
11296 /* Fail if a conditional return is undesirable (e.g. on a
11297 StrongARM), but still allow this if optimizing for size. */
11298 else if (GET_CODE (scanbody) == RETURN
11299 && !use_return_insn (TRUE, NULL)
11300 && !optimize_size)
11301 fail = TRUE;
11302 else if (GET_CODE (scanbody) == RETURN
11303 && seeking_return)
11305 arm_ccfsm_state = 2;
11306 succeed = TRUE;
11308 else if (GET_CODE (scanbody) == PARALLEL)
11310 switch (get_attr_conds (this_insn))
11312 case CONDS_NOCOND:
11313 break;
11314 default:
11315 fail = TRUE;
11316 break;
11319 else
11320 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11322 break;
11324 case INSN:
11325 /* Instructions using or affecting the condition codes make it
11326 fail. */
11327 scanbody = PATTERN (this_insn);
11328 if (!(GET_CODE (scanbody) == SET
11329 || GET_CODE (scanbody) == PARALLEL)
11330 || get_attr_conds (this_insn) != CONDS_NOCOND)
11331 fail = TRUE;
11333 /* A conditional Cirrus instruction must be followed by
11334 a non-Cirrus instruction.  However, since we
11335 conditionalize instructions in this function, and since
11336 by the time we get here we can no longer add instructions
11337 (nops), because shorten_branches () has already been
11338 called, we simply disable the conditionalizing of Cirrus
11339 instructions, to be safe.
11340 if (GET_CODE (scanbody) != USE
11341 && GET_CODE (scanbody) != CLOBBER
11342 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11343 fail = TRUE;
11344 break;
11346 default:
11347 break;
11350 if (succeed)
11352 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11353 arm_target_label = CODE_LABEL_NUMBER (label);
11354 else
11356 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11358 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11360 this_insn = next_nonnote_insn (this_insn);
11361 gcc_assert (!this_insn
11362 || (GET_CODE (this_insn) != BARRIER
11363 && GET_CODE (this_insn) != CODE_LABEL));
11365 if (!this_insn)
11367 /* Oh, dear!  We ran off the end... give up.  */
11368 recog (PATTERN (insn), insn, NULL);
11369 arm_ccfsm_state = 0;
11370 arm_target_insn = NULL;
11371 return;
11373 arm_target_insn = this_insn;
11375 if (jump_clobbers)
11377 gcc_assert (!reverse);
11378 arm_current_cc =
11379 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11380 0), 0), 1));
11381 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11382 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11383 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11384 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11386 else
11388 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11389 what it was. */
11390 if (!reverse)
11391 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11392 0));
11395 if (reverse || then_not_else)
11396 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11399 /* Restore recog_data (getting the attributes of other insns can
11400 destroy this array, but final.c assumes that it remains intact
11401 across this call; since the insn has been recognized already we
11402 call recog direct). */
11403 recog (PATTERN (insn), insn, NULL);
11407 /* Returns true if REGNO is a valid register
11408 for holding a quantity of type MODE. */
11409 int
11410 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11412 if (GET_MODE_CLASS (mode) == MODE_CC)
11413 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11415 if (TARGET_THUMB)
11416 /* For the Thumb we only allow values bigger than SImode in
11417 registers 0 - 6, so that there is always a second low
11418 register available to hold the upper part of the value.
11419 We probably ought to ensure that the register is the
11420 start of an even numbered register pair. */
11421 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11423 if (IS_CIRRUS_REGNUM (regno))
11424 /* We have outlawed SI values in Cirrus registers because they
11425 reside in the lower 32 bits, but SF values reside in the
11426 upper 32 bits. This causes gcc all sorts of grief. We can't
11427 even split the registers into pairs because Cirrus SI values
11428 get sign extended to 64 bits.  -- aldyh.
11429 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11431 if (IS_VFP_REGNUM (regno))
11433 if (mode == SFmode || mode == SImode)
11434 return TRUE;
11436 /* DFmode values are only valid in even register pairs. */
11437 if (mode == DFmode)
11438 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11439 return FALSE;
11442 if (IS_IWMMXT_GR_REGNUM (regno))
11443 return mode == SImode;
11445 if (IS_IWMMXT_REGNUM (regno))
11446 return VALID_IWMMXT_REG_MODE (mode);
11448 /* We allow any value to be stored in the general registers.
11449 Restrict doubleword quantities to even register pairs so that we can
11450 use ldrd. */
11451 if (regno <= LAST_ARM_REGNUM)
11452 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11454 if ( regno == FRAME_POINTER_REGNUM
11455 || regno == ARG_POINTER_REGNUM)
11456 /* We only allow integers in the fake hard registers. */
11457 return GET_MODE_CLASS (mode) == MODE_INT;
11459 /* The only registers left are the FPA registers
11460 which we only allow to hold FP values. */
11461 return GET_MODE_CLASS (mode) == MODE_FLOAT
11462 && regno >= FIRST_FPA_REGNUM
11463 && regno <= LAST_FPA_REGNUM;
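/* Usage sketch (hypothetical): on a TARGET_LDRD core this accepts
   DImode in r0 (the even base of the r0/r1 pair) but rejects it in
   r1, while SImode is accepted in any core register.  */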
11466 enum reg_class
11467 arm_regno_class (int regno)
11469 if (TARGET_THUMB)
11471 if (regno == STACK_POINTER_REGNUM)
11472 return STACK_REG;
11473 if (regno == CC_REGNUM)
11474 return CC_REG;
11475 if (regno < 8)
11476 return LO_REGS;
11477 return HI_REGS;
11480 if ( regno <= LAST_ARM_REGNUM
11481 || regno == FRAME_POINTER_REGNUM
11482 || regno == ARG_POINTER_REGNUM)
11483 return GENERAL_REGS;
11485 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11486 return NO_REGS;
11488 if (IS_CIRRUS_REGNUM (regno))
11489 return CIRRUS_REGS;
11491 if (IS_VFP_REGNUM (regno))
11492 return VFP_REGS;
11494 if (IS_IWMMXT_REGNUM (regno))
11495 return IWMMXT_REGS;
11497 if (IS_IWMMXT_GR_REGNUM (regno))
11498 return IWMMXT_GR_REGS;
11500 return FPA_REGS;
11503 /* Handle a special case when computing the offset
11504 of an argument from the frame pointer. */
11505 int
11506 arm_debugger_arg_offset (int value, rtx addr)
11508 rtx insn;
11510 /* We are only interested if dbxout_parms() failed to compute the offset. */
11511 if (value != 0)
11512 return 0;
11514 /* We can only cope with the case where the address is held in a register. */
11515 if (GET_CODE (addr) != REG)
11516 return 0;
11518 /* If we are using the frame pointer to point at the argument, then
11519 an offset of 0 is correct. */
11520 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11521 return 0;
11523 /* If we are using the stack pointer to point at the
11524 argument, then an offset of 0 is correct. */
11525 if ((TARGET_THUMB || !frame_pointer_needed)
11526 && REGNO (addr) == SP_REGNUM)
11527 return 0;
11529 /* Oh dear. The argument is pointed to by a register rather
11530 than being held in a register, or being stored at a known
11531 offset from the frame pointer. Since GDB only understands
11532 those two kinds of argument we must translate the address
11533 held in the register into an offset from the frame pointer.
11534 We do this by searching through the insns for the function
11535 looking to see where this register gets its value. If the
11536 register is initialized from the frame pointer plus an offset
11537 then we are in luck and we can continue, otherwise we give up.
11539 This code is exercised by producing debugging information
11540 for a function with arguments like this:
11542 double func (double a, double b, int c, double d) {return d;}
11544 Without this code the stab for parameter 'd' will be set to
11545 an offset of 0 from the frame pointer, rather than 8. */
11547 /* The if() statement says:
11549 If the insn is a normal instruction
11550 and if the insn is setting the value in a register
11551 and if the register being set is the register holding the address of the argument
11552 and if the address is computed by an addition
11553 that involves adding to a register
11554 which is the frame pointer
11555 a constant integer
11557 then... */
11559 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11561 if ( GET_CODE (insn) == INSN
11562 && GET_CODE (PATTERN (insn)) == SET
11563 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11564 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11565 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11566 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11567 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11570 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11572 break;
11576 if (value == 0)
11578 debug_rtx (addr);
11579 warning (0, "unable to compute real location of stacked parameter");
11580 value = 8; /* XXX magic hack */
11583 return value;
11586 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11587 do \
11589 if ((MASK) & insn_flags) \
11590 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11591 BUILT_IN_MD, NULL, NULL_TREE); \
11593 while (0)
11595 struct builtin_description
11597 const unsigned int mask;
11598 const enum insn_code icode;
11599 const char * const name;
11600 const enum arm_builtins code;
11601 const enum rtx_code comparison;
11602 const unsigned int flag;
11605 static const struct builtin_description bdesc_2arg[] =
11607 #define IWMMXT_BUILTIN(code, string, builtin) \
11608 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11609 ARM_BUILTIN_##builtin, 0, 0 },
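/* So, for example, the first entry below,
   IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB), expands to
   { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
   ARM_BUILTIN_WADDB, 0, 0 },  */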
11611 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11612 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11613 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11614 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11615 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11616 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11617 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11618 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11619 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11620 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11621 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11622 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11623 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11624 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11625 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11626 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11627 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11628 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11629 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11630 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11631 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11632 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11633 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11634 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11635 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11636 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11637 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11638 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11639 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11640 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11641 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11642 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11643 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11644 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11645 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11646 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11647 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11648 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11649 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11650 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11651 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11652 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11653 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11654 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11655 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11656 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11657 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11658 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11659 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11660 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11661 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11662 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11663 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11664 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11665 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11666 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11667 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11668 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11670 #define IWMMXT_BUILTIN2(code, builtin) \
11671 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11673 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11674 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11675 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11676 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11677 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11678 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11679 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11680 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11681 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11682 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11683 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11684 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11685 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11686 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11687 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11688 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11689 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11690 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11691 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11692 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11693 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11694 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11695 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11696 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11697 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11698 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11699 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11700 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11701 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11702 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11703 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11704 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11707 static const struct builtin_description bdesc_1arg[] =
11709 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11710 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11711 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11712 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11713 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11714 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11715 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11716 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11717 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11718 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11719 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11720 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11721 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11722 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11723 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11724 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11725 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11726 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11729 /* Set up all the iWMMXt builtins. This is
11730 not called if TARGET_IWMMXT is zero. */
11732 static void
11733 arm_init_iwmmxt_builtins (void)
11735 const struct builtin_description * d;
11736 size_t i;
11737 tree endlink = void_list_node;
11739 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11740 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11741 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11743 tree int_ftype_int
11744 = build_function_type (integer_type_node,
11745 tree_cons (NULL_TREE, integer_type_node, endlink));
11746 tree v8qi_ftype_v8qi_v8qi_int
11747 = build_function_type (V8QI_type_node,
11748 tree_cons (NULL_TREE, V8QI_type_node,
11749 tree_cons (NULL_TREE, V8QI_type_node,
11750 tree_cons (NULL_TREE,
11751 integer_type_node,
11752 endlink))));
11753 tree v4hi_ftype_v4hi_int
11754 = build_function_type (V4HI_type_node,
11755 tree_cons (NULL_TREE, V4HI_type_node,
11756 tree_cons (NULL_TREE, integer_type_node,
11757 endlink)));
11758 tree v2si_ftype_v2si_int
11759 = build_function_type (V2SI_type_node,
11760 tree_cons (NULL_TREE, V2SI_type_node,
11761 tree_cons (NULL_TREE, integer_type_node,
11762 endlink)));
11763 tree v2si_ftype_di_di
11764 = build_function_type (V2SI_type_node,
11765 tree_cons (NULL_TREE, long_long_integer_type_node,
11766 tree_cons (NULL_TREE, long_long_integer_type_node,
11767 endlink)));
11768 tree di_ftype_di_int
11769 = build_function_type (long_long_integer_type_node,
11770 tree_cons (NULL_TREE, long_long_integer_type_node,
11771 tree_cons (NULL_TREE, integer_type_node,
11772 endlink)));
11773 tree di_ftype_di_int_int
11774 = build_function_type (long_long_integer_type_node,
11775 tree_cons (NULL_TREE, long_long_integer_type_node,
11776 tree_cons (NULL_TREE, integer_type_node,
11777 tree_cons (NULL_TREE,
11778 integer_type_node,
11779 endlink))));
11780 tree int_ftype_v8qi
11781 = build_function_type (integer_type_node,
11782 tree_cons (NULL_TREE, V8QI_type_node,
11783 endlink));
11784 tree int_ftype_v4hi
11785 = build_function_type (integer_type_node,
11786 tree_cons (NULL_TREE, V4HI_type_node,
11787 endlink));
11788 tree int_ftype_v2si
11789 = build_function_type (integer_type_node,
11790 tree_cons (NULL_TREE, V2SI_type_node,
11791 endlink));
11792 tree int_ftype_v8qi_int
11793 = build_function_type (integer_type_node,
11794 tree_cons (NULL_TREE, V8QI_type_node,
11795 tree_cons (NULL_TREE, integer_type_node,
11796 endlink)));
11797 tree int_ftype_v4hi_int
11798 = build_function_type (integer_type_node,
11799 tree_cons (NULL_TREE, V4HI_type_node,
11800 tree_cons (NULL_TREE, integer_type_node,
11801 endlink)));
11802 tree int_ftype_v2si_int
11803 = build_function_type (integer_type_node,
11804 tree_cons (NULL_TREE, V2SI_type_node,
11805 tree_cons (NULL_TREE, integer_type_node,
11806 endlink)));
11807 tree v8qi_ftype_v8qi_int_int
11808 = build_function_type (V8QI_type_node,
11809 tree_cons (NULL_TREE, V8QI_type_node,
11810 tree_cons (NULL_TREE, integer_type_node,
11811 tree_cons (NULL_TREE,
11812 integer_type_node,
11813 endlink))));
11814 tree v4hi_ftype_v4hi_int_int
11815 = build_function_type (V4HI_type_node,
11816 tree_cons (NULL_TREE, V4HI_type_node,
11817 tree_cons (NULL_TREE, integer_type_node,
11818 tree_cons (NULL_TREE,
11819 integer_type_node,
11820 endlink))));
11821 tree v2si_ftype_v2si_int_int
11822 = build_function_type (V2SI_type_node,
11823 tree_cons (NULL_TREE, V2SI_type_node,
11824 tree_cons (NULL_TREE, integer_type_node,
11825 tree_cons (NULL_TREE,
11826 integer_type_node,
11827 endlink))));
11828 /* Miscellaneous. */
11829 tree v8qi_ftype_v4hi_v4hi
11830 = build_function_type (V8QI_type_node,
11831 tree_cons (NULL_TREE, V4HI_type_node,
11832 tree_cons (NULL_TREE, V4HI_type_node,
11833 endlink)));
11834 tree v4hi_ftype_v2si_v2si
11835 = build_function_type (V4HI_type_node,
11836 tree_cons (NULL_TREE, V2SI_type_node,
11837 tree_cons (NULL_TREE, V2SI_type_node,
11838 endlink)));
11839 tree v2si_ftype_v4hi_v4hi
11840 = build_function_type (V2SI_type_node,
11841 tree_cons (NULL_TREE, V4HI_type_node,
11842 tree_cons (NULL_TREE, V4HI_type_node,
11843 endlink)));
11844 tree v2si_ftype_v8qi_v8qi
11845 = build_function_type (V2SI_type_node,
11846 tree_cons (NULL_TREE, V8QI_type_node,
11847 tree_cons (NULL_TREE, V8QI_type_node,
11848 endlink)));
11849 tree v4hi_ftype_v4hi_di
11850 = build_function_type (V4HI_type_node,
11851 tree_cons (NULL_TREE, V4HI_type_node,
11852 tree_cons (NULL_TREE,
11853 long_long_integer_type_node,
11854 endlink)));
11855 tree v2si_ftype_v2si_di
11856 = build_function_type (V2SI_type_node,
11857 tree_cons (NULL_TREE, V2SI_type_node,
11858 tree_cons (NULL_TREE,
11859 long_long_integer_type_node,
11860 endlink)));
11861 tree void_ftype_int_int
11862 = build_function_type (void_type_node,
11863 tree_cons (NULL_TREE, integer_type_node,
11864 tree_cons (NULL_TREE, integer_type_node,
11865 endlink)));
11866 tree di_ftype_void
11867 = build_function_type (long_long_unsigned_type_node, endlink);
11868 tree di_ftype_v8qi
11869 = build_function_type (long_long_integer_type_node,
11870 tree_cons (NULL_TREE, V8QI_type_node,
11871 endlink));
11872 tree di_ftype_v4hi
11873 = build_function_type (long_long_integer_type_node,
11874 tree_cons (NULL_TREE, V4HI_type_node,
11875 endlink));
11876 tree di_ftype_v2si
11877 = build_function_type (long_long_integer_type_node,
11878 tree_cons (NULL_TREE, V2SI_type_node,
11879 endlink));
11880 tree v2si_ftype_v4hi
11881 = build_function_type (V2SI_type_node,
11882 tree_cons (NULL_TREE, V4HI_type_node,
11883 endlink));
11884 tree v4hi_ftype_v8qi
11885 = build_function_type (V4HI_type_node,
11886 tree_cons (NULL_TREE, V8QI_type_node,
11887 endlink));
11889 tree di_ftype_di_v4hi_v4hi
11890 = build_function_type (long_long_unsigned_type_node,
11891 tree_cons (NULL_TREE,
11892 long_long_unsigned_type_node,
11893 tree_cons (NULL_TREE, V4HI_type_node,
11894 tree_cons (NULL_TREE,
11895 V4HI_type_node,
11896 endlink))));
11898 tree di_ftype_v4hi_v4hi
11899 = build_function_type (long_long_unsigned_type_node,
11900 tree_cons (NULL_TREE, V4HI_type_node,
11901 tree_cons (NULL_TREE, V4HI_type_node,
11902 endlink)));
11904 /* Normal vector binops. */
11905 tree v8qi_ftype_v8qi_v8qi
11906 = build_function_type (V8QI_type_node,
11907 tree_cons (NULL_TREE, V8QI_type_node,
11908 tree_cons (NULL_TREE, V8QI_type_node,
11909 endlink)));
11910 tree v4hi_ftype_v4hi_v4hi
11911 = build_function_type (V4HI_type_node,
11912 tree_cons (NULL_TREE, V4HI_type_node,
11913 tree_cons (NULL_TREE, V4HI_type_node,
11914 endlink)));
11915 tree v2si_ftype_v2si_v2si
11916 = build_function_type (V2SI_type_node,
11917 tree_cons (NULL_TREE, V2SI_type_node,
11918 tree_cons (NULL_TREE, V2SI_type_node,
11919 endlink)));
11920 tree di_ftype_di_di
11921 = build_function_type (long_long_unsigned_type_node,
11922 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11923 tree_cons (NULL_TREE,
11924 long_long_unsigned_type_node,
11925 endlink)));
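  /* Each tree_cons chain above is the older spelling of a prototype
     (equivalent to what build_function_type_list would build).  For
     instance, di_ftype_di_di corresponds to the C signature

       unsigned long long fn (unsigned long long, unsigned long long);

     and v4hi_ftype_v4hi_v4hi to a function taking and returning a
     vector of four HImode lanes.  */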
11927 /* Add all builtins that are more or less simple operations on two
11928 operands. */
11929 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11931 /* Use one of the operands; the target can have a different mode for
11932 mask-generating compares. */
11933 enum machine_mode mode;
11934 tree type;
11936 if (d->name == 0)
11937 continue;
11939 mode = insn_data[d->icode].operand[1].mode;
11941 switch (mode)
11943 case V8QImode:
11944 type = v8qi_ftype_v8qi_v8qi;
11945 break;
11946 case V4HImode:
11947 type = v4hi_ftype_v4hi_v4hi;
11948 break;
11949 case V2SImode:
11950 type = v2si_ftype_v2si_v2si;
11951 break;
11952 case DImode:
11953 type = di_ftype_di_di;
11954 break;
11956 default:
11957 gcc_unreachable ();
11960 def_mbuiltin (d->mask, d->name, type, d->code);
11963 /* Add the remaining MMX insns with somewhat more complicated types. */
11964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11965 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11966 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11976 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11990 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11991 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11999 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12005 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12006 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12009 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12010 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12011 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12013 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12015 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12019 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12022 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12023 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12024 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12025 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12026 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12028 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12029 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12030 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12031 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12032 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12033 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12034 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12035 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12036 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12037 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12038 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12039 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12041 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12042 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12043 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12044 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12046 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12047 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12048 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12049 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12050 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12051 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12052 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
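  /* A user-level sketch of what the definitions above enable, assuming
     an iWMMXt target; the typedef and function are illustrative only.
     wsllwi shifts each SImode lane left by the immediate:

       typedef int v2si __attribute__ ((vector_size (8)));

       v2si
       double_lanes (v2si x)
       {
         return __builtin_arm_wsllwi (x, 1);
       }
  */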
12055 static void
12056 arm_init_builtins (void)
12058 if (TARGET_REALLY_IWMMXT)
12059 arm_init_iwmmxt_builtins ();
12062 /* Errors in the source file can cause expand_expr to return const0_rtx
12063 where we expect a vector. To avoid crashing, use one of the vector
12064 clear instructions. */
12066 static rtx
12067 safe_vector_operand (rtx x, enum machine_mode mode)
12069 if (x != const0_rtx)
12070 return x;
12071 x = gen_reg_rtx (mode);
12073 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12074 : gen_rtx_SUBREG (DImode, x, 0)));
12075 return x;
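/* For example, if an earlier parse error made expand_expr return
   const0_rtx where a V4HImode operand was expected, the code above
   hands back a freshly cleared register (cleared through its DImode
   subreg) so that builtin expansion can proceed instead of crashing.  */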
12078 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12080 static rtx
12081 arm_expand_binop_builtin (enum insn_code icode,
12082 tree arglist, rtx target)
12084 rtx pat;
12085 tree arg0 = TREE_VALUE (arglist);
12086 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12087 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12088 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12089 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12090 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12091 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12093 if (VECTOR_MODE_P (mode0))
12094 op0 = safe_vector_operand (op0, mode0);
12095 if (VECTOR_MODE_P (mode1))
12096 op1 = safe_vector_operand (op1, mode1);
12098 if (! target
12099 || GET_MODE (target) != tmode
12100 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12101 target = gen_reg_rtx (tmode);
12103 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12105 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12106 op0 = copy_to_mode_reg (mode0, op0);
12107 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12108 op1 = copy_to_mode_reg (mode1, op1);
12110 pat = GEN_FCN (icode) (target, op0, op1);
12111 if (! pat)
12112 return 0;
12113 emit_insn (pat);
12114 return target;
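/* A sketch of the flow: a two-operand call such as

     __builtin_arm_wsadb (a, b)

   arrives here with ICODE naming the matching iWMMXt pattern; both
   arguments are expanded, coerced into registers that satisfy the
   insn's operand predicates, and the generated insn is emitted with
   its result in TARGET.  */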
12117 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12119 static rtx
12120 arm_expand_unop_builtin (enum insn_code icode,
12121 tree arglist, rtx target, int do_load)
12123 rtx pat;
12124 tree arg0 = TREE_VALUE (arglist);
12125 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12126 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12127 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12129 if (! target
12130 || GET_MODE (target) != tmode
12131 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12132 target = gen_reg_rtx (tmode);
12133 if (do_load)
12134 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12135 else
12137 if (VECTOR_MODE_P (mode0))
12138 op0 = safe_vector_operand (op0, mode0);
12140 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12141 op0 = copy_to_mode_reg (mode0, op0);
12144 pat = GEN_FCN (icode) (target, op0);
12145 if (! pat)
12146 return 0;
12147 emit_insn (pat);
12148 return target;
12151 /* Expand an expression EXP that calls a built-in function,
12152 with result going to TARGET if that's convenient
12153 (and in mode MODE if that's convenient).
12154 SUBTARGET may be used as the target for computing one of EXP's operands.
12155 IGNORE is nonzero if the value is to be ignored. */
12157 static rtx
12158 arm_expand_builtin (tree exp,
12159 rtx target,
12160 rtx subtarget ATTRIBUTE_UNUSED,
12161 enum machine_mode mode ATTRIBUTE_UNUSED,
12162 int ignore ATTRIBUTE_UNUSED)
12164 const struct builtin_description * d;
12165 enum insn_code icode;
12166 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12167 tree arglist = TREE_OPERAND (exp, 1);
12168 tree arg0;
12169 tree arg1;
12170 tree arg2;
12171 rtx op0;
12172 rtx op1;
12173 rtx op2;
12174 rtx pat;
12175 int fcode = DECL_FUNCTION_CODE (fndecl);
12176 size_t i;
12177 enum machine_mode tmode;
12178 enum machine_mode mode0;
12179 enum machine_mode mode1;
12180 enum machine_mode mode2;
12182 switch (fcode)
12184 case ARM_BUILTIN_TEXTRMSB:
12185 case ARM_BUILTIN_TEXTRMUB:
12186 case ARM_BUILTIN_TEXTRMSH:
12187 case ARM_BUILTIN_TEXTRMUH:
12188 case ARM_BUILTIN_TEXTRMSW:
12189 case ARM_BUILTIN_TEXTRMUW:
12190 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12191 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12192 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12193 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12194 : CODE_FOR_iwmmxt_textrmw);
12196 arg0 = TREE_VALUE (arglist);
12197 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12198 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12199 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12200 tmode = insn_data[icode].operand[0].mode;
12201 mode0 = insn_data[icode].operand[1].mode;
12202 mode1 = insn_data[icode].operand[2].mode;
12204 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12205 op0 = copy_to_mode_reg (mode0, op0);
12206 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12208 /* @@@ better error message */
12209 error ("selector must be an immediate");
12210 return gen_reg_rtx (tmode);
12212 if (target == 0
12213 || GET_MODE (target) != tmode
12214 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12215 target = gen_reg_rtx (tmode);
12216 pat = GEN_FCN (icode) (target, op0, op1);
12217 if (! pat)
12218 return 0;
12219 emit_insn (pat);
12220 return target;
12222 case ARM_BUILTIN_TINSRB:
12223 case ARM_BUILTIN_TINSRH:
12224 case ARM_BUILTIN_TINSRW:
12225 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12226 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12227 : CODE_FOR_iwmmxt_tinsrw);
12228 arg0 = TREE_VALUE (arglist);
12229 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12230 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12231 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12232 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12233 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12234 tmode = insn_data[icode].operand[0].mode;
12235 mode0 = insn_data[icode].operand[1].mode;
12236 mode1 = insn_data[icode].operand[2].mode;
12237 mode2 = insn_data[icode].operand[3].mode;
12239 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12240 op0 = copy_to_mode_reg (mode0, op0);
12241 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12242 op1 = copy_to_mode_reg (mode1, op1);
12243 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12245 /* @@@ better error message */
12246 error ("selector must be an immediate");
12247 return const0_rtx;
12249 if (target == 0
12250 || GET_MODE (target) != tmode
12251 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12252 target = gen_reg_rtx (tmode);
12253 pat = GEN_FCN (icode) (target, op0, op1, op2);
12254 if (! pat)
12255 return 0;
12256 emit_insn (pat);
12257 return target;
12259 case ARM_BUILTIN_SETWCX:
12260 arg0 = TREE_VALUE (arglist);
12261 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12262 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12263 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12264 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12265 return 0;
12267 case ARM_BUILTIN_GETWCX:
12268 arg0 = TREE_VALUE (arglist);
12269 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12270 target = gen_reg_rtx (SImode);
12271 emit_insn (gen_iwmmxt_tmrc (target, op0));
12272 return target;
12274 case ARM_BUILTIN_WSHUFH:
12275 icode = CODE_FOR_iwmmxt_wshufh;
12276 arg0 = TREE_VALUE (arglist);
12277 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12278 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12279 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12280 tmode = insn_data[icode].operand[0].mode;
12281 mode1 = insn_data[icode].operand[1].mode;
12282 mode2 = insn_data[icode].operand[2].mode;
12284 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12285 op0 = copy_to_mode_reg (mode1, op0);
12286 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12288 /* @@@ better error message */
12289 error ("mask must be an immediate");
12290 return const0_rtx;
12292 if (target == 0
12293 || GET_MODE (target) != tmode
12294 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12295 target = gen_reg_rtx (tmode);
12296 pat = GEN_FCN (icode) (target, op0, op1);
12297 if (! pat)
12298 return 0;
12299 emit_insn (pat);
12300 return target;
12302 case ARM_BUILTIN_WSADB:
12303 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12304 case ARM_BUILTIN_WSADH:
12305 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12306 case ARM_BUILTIN_WSADBZ:
12307 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12308 case ARM_BUILTIN_WSADHZ:
12309 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12311 /* Several three-argument builtins. */
12312 case ARM_BUILTIN_WMACS:
12313 case ARM_BUILTIN_WMACU:
12314 case ARM_BUILTIN_WALIGN:
12315 case ARM_BUILTIN_TMIA:
12316 case ARM_BUILTIN_TMIAPH:
12317 case ARM_BUILTIN_TMIATT:
12318 case ARM_BUILTIN_TMIATB:
12319 case ARM_BUILTIN_TMIABT:
12320 case ARM_BUILTIN_TMIABB:
12321 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12322 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12323 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12324 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12325 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12326 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12327 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12328 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12329 : CODE_FOR_iwmmxt_walign);
12330 arg0 = TREE_VALUE (arglist);
12331 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12332 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12333 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12334 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12335 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12336 tmode = insn_data[icode].operand[0].mode;
12337 mode0 = insn_data[icode].operand[1].mode;
12338 mode1 = insn_data[icode].operand[2].mode;
12339 mode2 = insn_data[icode].operand[3].mode;
12341 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12342 op0 = copy_to_mode_reg (mode0, op0);
12343 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12344 op1 = copy_to_mode_reg (mode1, op1);
12345 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12346 op2 = copy_to_mode_reg (mode2, op2);
12347 if (target == 0
12348 || GET_MODE (target) != tmode
12349 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12350 target = gen_reg_rtx (tmode);
12351 pat = GEN_FCN (icode) (target, op0, op1, op2);
12352 if (! pat)
12353 return 0;
12354 emit_insn (pat);
12355 return target;
12357 case ARM_BUILTIN_WZERO:
12358 target = gen_reg_rtx (DImode);
12359 emit_insn (gen_iwmmxt_clrdi (target));
12360 return target;
12362 default:
12363 break;
12366 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12367 if (d->code == (const enum arm_builtins) fcode)
12368 return arm_expand_binop_builtin (d->icode, arglist, target);
12370 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12371 if (d->code == (const enum arm_builtins) fcode)
12372 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12374 /* @@@ Should really do something sensible here. */
12375 return NULL_RTX;
12378 /* Return the number (counting from 0) of
12379 the least significant set bit in MASK. */
12381 inline static int
12382 number_of_first_bit_set (unsigned mask)
12384 int bit;
12386 for (bit = 0;
12387 (mask & (1 << bit)) == 0;
12388 ++bit)
12389 continue;
12391 return bit;
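/* Examples: number_of_first_bit_set (0x1) == 0 and
   number_of_first_bit_set (0x28) == 3 (0x28 is binary 101000).  MASK
   must be nonzero, as the callers below guarantee; a zero mask would
   never terminate the loop.  */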
12394 /* Emit code to push or pop registers to or from the stack. F is the
12395 assembly file. MASK is the registers to push or pop. PUSH is
12396 nonzero if we should push, and zero if we should pop. For debugging
12397 output, if pushing, adjust CFA_OFFSET by the amount of space added
12398 to the stack. REAL_REGS should have the same number of bits set as
12399 MASK, and will be used instead (in the same order) to describe which
12400 registers were saved - this is used to mark the save slots when we
12401 push high registers after moving them to low registers. */
12402 static void
12403 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12404 unsigned long real_regs)
12406 int regno;
12407 int lo_mask = mask & 0xFF;
12408 int pushed_words = 0;
12410 gcc_assert (mask);
12412 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12414 /* Special case.  Do not generate a POP PC statement here; do it in
12415 thumb_exit ().  */
12416 thumb_exit (f, -1);
12417 return;
12420 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12422 /* Look at the low registers first. */
12423 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12425 if (lo_mask & 1)
12427 asm_fprintf (f, "%r", regno);
12429 if ((lo_mask & ~1) != 0)
12430 fprintf (f, ", ");
12432 pushed_words++;
12436 if (push && (mask & (1 << LR_REGNUM)))
12438 /* Catch pushing the LR. */
12439 if (mask & 0xFF)
12440 fprintf (f, ", ");
12442 asm_fprintf (f, "%r", LR_REGNUM);
12444 pushed_words++;
12446 else if (!push && (mask & (1 << PC_REGNUM)))
12448 /* Catch popping the PC. */
12449 if (TARGET_INTERWORK || TARGET_BACKTRACE
12450 || current_function_calls_eh_return)
12452 /* The PC is never popped directly; instead
12453 it is popped into r3 and then BX is used.  */
12454 fprintf (f, "}\n");
12456 thumb_exit (f, -1);
12458 return;
12460 else
12462 if (mask & 0xFF)
12463 fprintf (f, ", ");
12465 asm_fprintf (f, "%r", PC_REGNUM);
12469 fprintf (f, "}\n");
12471 if (push && pushed_words && dwarf2out_do_frame ())
12473 char *l = dwarf2out_cfi_label ();
12474 int pushed_mask = real_regs;
12476 *cfa_offset += pushed_words * 4;
12477 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12479 pushed_words = 0;
12480 pushed_mask = real_regs;
12481 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12483 if (pushed_mask & 1)
12484 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
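/* For instance, pushing r4-r7 and lr in a prologue emits

     push    {r4, r5, r6, r7, lr}

   advances *CFA_OFFSET by 20, and records one register-save note per
   bit set in REAL_REGS (a sketch of typical output; the exact dwarf
   records depend on the configuration).  */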
12489 /* Generate code to return from a thumb function.
12490 If 'reg_containing_return_addr' is -1, then the return address is
12491 actually on the stack, at the stack pointer. */
12492 static void
12493 thumb_exit (FILE *f, int reg_containing_return_addr)
12495 unsigned regs_available_for_popping;
12496 unsigned regs_to_pop;
12497 int pops_needed;
12498 unsigned available;
12499 unsigned required;
12500 int mode;
12501 int size;
12502 int restore_a4 = FALSE;
12504 /* Compute the registers we need to pop. */
12505 regs_to_pop = 0;
12506 pops_needed = 0;
12508 if (reg_containing_return_addr == -1)
12510 regs_to_pop |= 1 << LR_REGNUM;
12511 ++pops_needed;
12514 if (TARGET_BACKTRACE)
12516 /* Restore the (ARM) frame pointer and stack pointer. */
12517 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12518 pops_needed += 2;
12521 /* If there is nothing to pop then just emit the BX instruction and
12522 return. */
12523 if (pops_needed == 0)
12525 if (current_function_calls_eh_return)
12526 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12528 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12529 return;
12531 /* Otherwise if we are not supporting interworking and we have not created
12532 a backtrace structure and the function was not entered in ARM mode then
12533 just pop the return address straight into the PC. */
12534 else if (!TARGET_INTERWORK
12535 && !TARGET_BACKTRACE
12536 && !is_called_in_ARM_mode (current_function_decl)
12537 && !current_function_calls_eh_return)
12539 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12540 return;
12543 /* Find out how many of the (return) argument registers we can corrupt. */
12544 regs_available_for_popping = 0;
12546 /* If returning via __builtin_eh_return, the bottom three registers
12547 all contain information needed for the return. */
12548 if (current_function_calls_eh_return)
12549 size = 12;
12550 else
12552 /* Deduce the registers used from the function's
12553 return value.  This is more reliable than examining
12554 regs_ever_live[] because that will be set if the register is
12555 ever used in the function, not just if the register is used
12556 to hold a return value.  */
12558 if (current_function_return_rtx != 0)
12559 mode = GET_MODE (current_function_return_rtx);
12560 else
12561 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12563 size = GET_MODE_SIZE (mode);
12565 if (size == 0)
12567 /* In a void function we can use any argument register.
12568 In a function that returns a structure on the stack
12569 we can use the second and third argument registers. */
12570 if (mode == VOIDmode)
12571 regs_available_for_popping =
12572 (1 << ARG_REGISTER (1))
12573 | (1 << ARG_REGISTER (2))
12574 | (1 << ARG_REGISTER (3));
12575 else
12576 regs_available_for_popping =
12577 (1 << ARG_REGISTER (2))
12578 | (1 << ARG_REGISTER (3));
12580 else if (size <= 4)
12581 regs_available_for_popping =
12582 (1 << ARG_REGISTER (2))
12583 | (1 << ARG_REGISTER (3));
12584 else if (size <= 8)
12585 regs_available_for_popping =
12586 (1 << ARG_REGISTER (3));
12589 /* Match registers to be popped with registers into which we pop them. */
12590 for (available = regs_available_for_popping,
12591 required = regs_to_pop;
12592 required != 0 && available != 0;
12593 available &= ~(available & - available),
12594 required &= ~(required & - required))
12595 -- pops_needed;
12597 /* If we have any popping registers left over, remove them. */
12598 if (available > 0)
12599 regs_available_for_popping &= ~available;
12601 /* Otherwise if we need another popping register we can use
12602 the fourth argument register. */
12603 else if (pops_needed)
12605 /* If we have not found any free argument registers and
12606 reg a4 contains the return address, we must move it. */
12607 if (regs_available_for_popping == 0
12608 && reg_containing_return_addr == LAST_ARG_REGNUM)
12610 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12611 reg_containing_return_addr = LR_REGNUM;
12613 else if (size > 12)
12615 /* Register a4 is being used to hold part of the return value,
12616 but we have dire need of a free, low register. */
12617 restore_a4 = TRUE;
12619 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12622 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12624 /* The fourth argument register is available. */
12625 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12627 --pops_needed;
12631 /* Pop as many registers as we can. */
12632 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12633 regs_available_for_popping);
12635 /* Process the registers we popped. */
12636 if (reg_containing_return_addr == -1)
12638 /* The return address was popped into the lowest numbered register. */
12639 regs_to_pop &= ~(1 << LR_REGNUM);
12641 reg_containing_return_addr =
12642 number_of_first_bit_set (regs_available_for_popping);
12644 /* Remove this register from the mask of available registers, so that
12645 the return address will not be corrupted by further pops.  */
12646 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12649 /* If we popped other registers then handle them here. */
12650 if (regs_available_for_popping)
12652 int frame_pointer;
12654 /* Work out which register currently contains the frame pointer. */
12655 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12657 /* Move it into the correct place. */
12658 asm_fprintf (f, "\tmov\t%r, %r\n",
12659 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12661 /* (Temporarily) remove it from the mask of popped registers. */
12662 regs_available_for_popping &= ~(1 << frame_pointer);
12663 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12665 if (regs_available_for_popping)
12667 int stack_pointer;
12669 /* We popped the stack pointer as well;
12670 find the register that contains it.  */
12671 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12673 /* Move it into the stack register. */
12674 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12676 /* At this point we have popped all necessary registers, so
12677 do not worry about restoring regs_available_for_popping
12678 to its correct value:
12680 assert (pops_needed == 0)
12681 assert (regs_available_for_popping == (1 << frame_pointer))
12682 assert (regs_to_pop == (1 << STACK_POINTER)) */
12684 else
12686 /* Since we have just moved the popped value into the frame
12687 pointer, the popping register is available for reuse, and
12688 we know that we still have the stack pointer left to pop.  */
12689 regs_available_for_popping |= (1 << frame_pointer);
12693 /* If we still have registers left on the stack, but we no longer have
12694 any registers into which we can pop them, then we must move the return
12695 address into the link register and make available the register that
12696 contained it. */
12697 if (regs_available_for_popping == 0 && pops_needed > 0)
12699 regs_available_for_popping |= 1 << reg_containing_return_addr;
12701 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12702 reg_containing_return_addr);
12704 reg_containing_return_addr = LR_REGNUM;
12707 /* If we have registers left on the stack then pop some more.
12708 We know that at most we will want to pop FP and SP. */
12709 if (pops_needed > 0)
12711 int popped_into;
12712 int move_to;
12714 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12715 regs_available_for_popping);
12717 /* We have popped either FP or SP.
12718 Move whichever one it is into the correct register. */
12719 popped_into = number_of_first_bit_set (regs_available_for_popping);
12720 move_to = number_of_first_bit_set (regs_to_pop);
12722 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12724 regs_to_pop &= ~(1 << move_to);
12726 --pops_needed;
12729 /* If we still have not popped everything then we must have only
12730 had one register available to us and we are now popping the SP. */
12731 if (pops_needed > 0)
12733 int popped_into;
12735 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12736 regs_available_for_popping);
12738 popped_into = number_of_first_bit_set (regs_available_for_popping);
12740 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12741 /*
12742 assert (regs_to_pop == (1 << STACK_POINTER))
12743 assert (pops_needed == 1)
12744 */
12747 /* If necessary restore the a4 register. */
12748 if (restore_a4)
12750 if (reg_containing_return_addr != LR_REGNUM)
12752 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12753 reg_containing_return_addr = LR_REGNUM;
12756 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12759 if (current_function_calls_eh_return)
12760 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12762 /* Return to caller. */
12763 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
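/* A sketch of typical output: for a void function with the return
   address left on the stack, no backtrace structure, and interworking
   enabled, the sequence above pops into the first free argument
   register and branches through it:

     pop     {r0}
     bx      r0
*/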
12767 void
12768 thumb_final_prescan_insn (rtx insn)
12770 if (flag_print_asm_name)
12771 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12772 INSN_ADDRESSES (INSN_UID (insn)));
12775 int
12776 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12778 unsigned HOST_WIDE_INT mask = 0xff;
12779 int i;
12781 if (val == 0) /* XXX */
12782 return 0;
12784 for (i = 0; i < 25; i++)
12785 if ((val & (mask << i)) == val)
12786 return 1;
12788 return 0;
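/* Examples: 0x000ff000 is accepted (it is 0xff << 12), while
   0x00100001 is rejected because its set bits do not fit within any
   single shifted 8-bit window.  */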
12791 /* Returns nonzero if the current function contains,
12792 or might contain, a far jump.  */
12793 static int
12794 thumb_far_jump_used_p (void)
12796 rtx insn;
12798 /* This test is only important for leaf functions. */
12799 /* assert (!leaf_function_p ()); */
12801 /* If we have already decided that far jumps may be used,
12802 do not bother checking again, and always return true even if
12803 it turns out that they are not being used. Once we have made
12804 the decision that far jumps are present (and that hence the link
12805 register will be pushed onto the stack) we cannot go back on it. */
12806 if (cfun->machine->far_jump_used)
12807 return 1;
12809 /* If this function is not being called from the prologue/epilogue
12810 generation code then it must be being called from the
12811 INITIAL_ELIMINATION_OFFSET macro. */
12812 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12814 /* In this case we know that we are being asked about the elimination
12815 of the arg pointer register. If that register is not being used,
12816 then there are no arguments on the stack, and we do not have to
12817 worry that a far jump might force the prologue to push the link
12818 register, changing the stack offsets. In this case we can just
12819 return false, since the presence of far jumps in the function will
12820 not affect stack offsets.
12822 If the arg pointer is live (or if it was live, but has now been
12823 eliminated and so set to dead) then we do have to test to see if
12824 the function might contain a far jump. This test can lead to some
12825 false negatives, since before reload is completed, the length of
12826 branch instructions is not known, so gcc defaults to returning their
12827 longest length, which in turn sets the far jump attribute to true.
12829 A false negative will not result in bad code being generated, but it
12830 will result in a needless push and pop of the link register. We
12831 hope that this does not occur too often.
12833 If we need doubleword stack alignment this could affect the other
12834 elimination offsets so we can't risk getting it wrong. */
12835 if (regs_ever_live [ARG_POINTER_REGNUM])
12836 cfun->machine->arg_pointer_live = 1;
12837 else if (!cfun->machine->arg_pointer_live)
12838 return 0;
12841 /* Check to see if the function contains a branch
12842 insn with the far jump attribute set. */
12843 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12845 if (GET_CODE (insn) == JUMP_INSN
12846 /* Ignore tablejump patterns. */
12847 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12848 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12849 && get_attr_far_jump (insn) == FAR_JUMP_YES
12852 /* Record the fact that we have decided that
12853 the function does use far jumps. */
12854 cfun->machine->far_jump_used = 1;
12855 return 1;
12859 return 0;
12862 /* Return nonzero if FUNC must be entered in ARM mode. */
12863 int
12864 is_called_in_ARM_mode (tree func)
12866 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
12868 /* Ignore the problem about functions whose address is taken. */
12869 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12870 return TRUE;
12872 #ifdef ARM_PE
12873 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12874 #else
12875 return FALSE;
12876 #endif
12879 /* The bits which aren't usefully expanded as rtl. */
12880 const char *
12881 thumb_unexpanded_epilogue (void)
12883 int regno;
12884 unsigned long live_regs_mask = 0;
12885 int high_regs_pushed = 0;
12886 int had_to_push_lr;
12887 int size;
12889 if (return_used_this_function)
12890 return "";
12892 if (IS_NAKED (arm_current_func_type ()))
12893 return "";
12895 live_regs_mask = thumb_compute_save_reg_mask ();
12896 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12898 /* Deduce the registers used from the function's return value.
12899 This is more reliable than examining regs_ever_live[] because that
12900 will be set if the register is ever used in the function, not just if
12901 the register is used to hold a return value.  */
12902 size = arm_size_return_regs ();
12904 /* The prologue may have pushed some high registers to use as
12905 work registers.  E.g. the testsuite file:
12906 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12907 compiles to produce:
12908 push {r4, r5, r6, r7, lr}
12909 mov r7, r9
12910 mov r6, r8
12911 push {r6, r7}
12912 as part of the prologue.  We have to undo that pushing here.  */
12914 if (high_regs_pushed)
12916 unsigned long mask = live_regs_mask & 0xff;
12917 int next_hi_reg;
12919 /* The available low registers depend on the size of the value we are
12920 returning. */
12921 if (size <= 12)
12922 mask |= 1 << 3;
12923 if (size <= 8)
12924 mask |= 1 << 2;
12926 if (mask == 0)
12927 /* Oh dear! We have no low registers into which we can pop
12928 high registers! */
12929 internal_error
12930 ("no low registers available for popping high registers");
12932 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12933 if (live_regs_mask & (1 << next_hi_reg))
12934 break;
12936 while (high_regs_pushed)
12938 /* Find lo register(s) into which the high register(s) can
12939 be popped. */
12940 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12942 if (mask & (1 << regno))
12943 high_regs_pushed--;
12944 if (high_regs_pushed == 0)
12945 break;
12948 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12950 /* Pop the values into the low register(s). */
12951 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12953 /* Move the value(s) into the high registers. */
12954 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12956 if (mask & (1 << regno))
12958 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12959 regno);
12961 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12962 if (live_regs_mask & (1 << next_hi_reg))
12963 break;
12967 live_regs_mask &= ~0x0f00;
12970 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12971 live_regs_mask &= 0xff;
12973 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12975 /* Pop the return address into the PC. */
12976 if (had_to_push_lr)
12977 live_regs_mask |= 1 << PC_REGNUM;
12979 /* Either no argument registers were pushed or a backtrace
12980 structure was created which includes an adjusted stack
12981 pointer, so just pop everything. */
12982 if (live_regs_mask)
12983 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12984 live_regs_mask);
12986 /* We have either just popped the return address into the
12987 PC or it was kept in LR for the entire function.  */
12988 if (!had_to_push_lr)
12989 thumb_exit (asm_out_file, LR_REGNUM);
12991 else
12993 /* Pop everything but the return address. */
12994 if (live_regs_mask)
12995 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12996 live_regs_mask);
12998 if (had_to_push_lr)
13000 if (size > 12)
13002 /* We have no free low regs, so save one. */
13003 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13004 LAST_ARG_REGNUM);
13007 /* Get the return address into a temporary register. */
13008 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13009 1 << LAST_ARG_REGNUM);
13011 if (size > 12)
13013 /* Move the return address to lr. */
13014 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13015 LAST_ARG_REGNUM);
13016 /* Restore the low register. */
13017 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13018 IP_REGNUM);
13019 regno = LR_REGNUM;
13021 else
13022 regno = LAST_ARG_REGNUM;
13024 else
13025 regno = LR_REGNUM;
13027 /* Remove the argument registers that were pushed onto the stack. */
13028 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13029 SP_REGNUM, SP_REGNUM,
13030 current_function_pretend_args_size);
13032 thumb_exit (asm_out_file, regno);
13035 return "";
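/* A sketch of the usual output: for a function that saved {r4-r7, lr},
   has no pretend args, and needs neither interworking nor a backtrace
   structure, the code above emits simply

     pop     {r4, r5, r6, r7, pc}
*/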
13038 /* Functions to save and restore machine-specific function data. */
13039 static struct machine_function *
13040 arm_init_machine_status (void)
13042 struct machine_function *machine;
13043 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13045 #if ARM_FT_UNKNOWN != 0
13046 machine->func_type = ARM_FT_UNKNOWN;
13047 #endif
13048 return machine;
13051 /* Return an RTX indicating where the return address to the
13052 calling function can be found. */
13053 rtx
13054 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13056 if (count != 0)
13057 return NULL_RTX;
13059 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13062 /* Do anything needed before RTL is emitted for each function. */
13063 void
13064 arm_init_expanders (void)
13066 /* Arrange to initialize and mark the machine per-function status. */
13067 init_machine_status = arm_init_machine_status;
13069 /* This is to stop the combine pass optimizing away the alignment
13070 adjustment of va_arg. */
13071 /* ??? It is claimed that this should not be necessary. */
13072 if (cfun)
13073 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13077 /* Like arm_compute_initial_elimination_offset.  Simpler because
13078 THUMB_HARD_FRAME_POINTER isn't actually the ABI-specified frame pointer.  */
13080 HOST_WIDE_INT
13081 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13083 arm_stack_offsets *offsets;
13085 offsets = arm_get_frame_offsets ();
13087 switch (from)
13089 case ARG_POINTER_REGNUM:
13090 switch (to)
13092 case STACK_POINTER_REGNUM:
13093 return offsets->outgoing_args - offsets->saved_args;
13095 case FRAME_POINTER_REGNUM:
13096 return offsets->soft_frame - offsets->saved_args;
13098 case THUMB_HARD_FRAME_POINTER_REGNUM:
13099 case ARM_HARD_FRAME_POINTER_REGNUM:
13100 return offsets->saved_regs - offsets->saved_args;
13102 default:
13103 gcc_unreachable ();
13105 break;
13107 case FRAME_POINTER_REGNUM:
13108 switch (to)
13110 case STACK_POINTER_REGNUM:
13111 return offsets->outgoing_args - offsets->soft_frame;
13113 case THUMB_HARD_FRAME_POINTER_REGNUM:
13114 case ARM_HARD_FRAME_POINTER_REGNUM:
13115 return offsets->saved_regs - offsets->soft_frame;
13117 default:
13118 gcc_unreachable ();
13120 break;
13122 default:
13123 gcc_unreachable ();
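/* Worked example (illustrative numbers): with offsets->saved_args == 0,
   saved_regs == 24, soft_frame == 24, and outgoing_args == 40, the
   elimination ARG_POINTER -> STACK_POINTER yields 40, and
   FRAME_POINTER -> STACK_POINTER yields 16.  */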
13128 /* Generate the rest of a function's prologue. */
13129 void
13130 thumb_expand_prologue (void)
13132 rtx insn, dwarf;
13134 HOST_WIDE_INT amount;
13135 arm_stack_offsets *offsets;
13136 unsigned long func_type;
13137 int regno;
13138 unsigned long live_regs_mask;
13140 func_type = arm_current_func_type ();
13142 /* Naked functions don't have prologues. */
13143 if (IS_NAKED (func_type))
13144 return;
13146 if (IS_INTERRUPT (func_type))
13148 error ("interrupt Service Routines cannot be coded in Thumb mode");
13149 return;
13152 live_regs_mask = thumb_compute_save_reg_mask ();
13153 /* Load the pic register before setting the frame pointer,
13154 so we can use r7 as a temporary work register. */
13155 if (flag_pic)
13156 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13158 offsets = arm_get_frame_offsets ();
13160 if (frame_pointer_needed)
13162 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13163 stack_pointer_rtx));
13164 RTX_FRAME_RELATED_P (insn) = 1;
13166 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13167 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13168 stack_pointer_rtx);
13170 amount = offsets->outgoing_args - offsets->saved_regs;
13171 if (amount)
13173 if (amount < 512)
13175 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13176 GEN_INT (- amount)));
13177 RTX_FRAME_RELATED_P (insn) = 1;
13179 else
13181 rtx reg;
13183 /* The stack decrement is too big for an immediate value in a single
13184 insn. In theory we could issue multiple subtracts, but after
13185 three of them it becomes more space efficient to place the full
13186 value in the constant pool and load into a register. (Also the
13187 ARM debugger really likes to see only one stack decrement per
13188 function). So instead we look for a scratch register into which
13189 we can load the decrement, and then we subtract this from the
13190 stack pointer. Unfortunately on the thumb the only available
13191 scratch registers are the argument registers, and we cannot use
13192 these as they may hold arguments to the function. Instead we
13193 attempt to locate a call preserved register which is used by this
13194 function. If we can find one, then we know that it will have
13195 been pushed at the start of the prologue and so we can corrupt
13196 it now. */
13197 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13198 if (live_regs_mask & (1 << regno)
13199 && !(frame_pointer_needed
13200 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13201 break;
13203 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13205 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13207 /* Choose an arbitrary, non-argument low register. */
13208 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13210 /* Save it by copying it into a high, scratch register. */
13211 emit_insn (gen_movsi (spare, reg));
13212 /* Add a USE to stop propagate_one_insn() from barfing. */
13213 emit_insn (gen_prologue_use (spare));
13215 /* Decrement the stack. */
13216 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13217 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13218 stack_pointer_rtx, reg));
13219 RTX_FRAME_RELATED_P (insn) = 1;
13220 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13221 plus_constant (stack_pointer_rtx,
13222 -amount));
13223 RTX_FRAME_RELATED_P (dwarf) = 1;
13224 REG_NOTES (insn)
13225 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13226 REG_NOTES (insn));
13228 /* Restore the low register's original value. */
13229 emit_insn (gen_movsi (reg, spare));
13231 /* Emit a USE of the restored scratch register, so that flow
13232 analysis will not consider the restore redundant. The
13233 register won't be used again in this function and isn't
13234 restored by the epilogue. */
13235 emit_insn (gen_prologue_use (reg));
13237 else
13239 reg = gen_rtx_REG (SImode, regno);
13241 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13243 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13244 stack_pointer_rtx, reg));
13245 RTX_FRAME_RELATED_P (insn) = 1;
13246 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13247 plus_constant (stack_pointer_rtx,
13248 -amount));
13249 RTX_FRAME_RELATED_P (dwarf) = 1;
13250 REG_NOTES (insn)
13251 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13252 REG_NOTES (insn));
13255 /* If the frame pointer is needed, emit a special barrier that
13256 will prevent the scheduler from moving stores to the frame
13257 before the stack adjustment. */
13258 if (frame_pointer_needed)
13259 emit_insn (gen_stack_tie (stack_pointer_rtx,
13260 hard_frame_pointer_rtx));
13263 if (current_function_profile || !TARGET_SCHED_PROLOG)
13264 emit_insn (gen_blockage ());
13266 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13267 if (live_regs_mask & 0xff)
13268 cfun->machine->lr_save_eliminated = 0;
13270 /* If the link register is being kept alive, with the return address in it,
13271 then make sure that it does not get reused by the ce2 pass. */
13272 if (cfun->machine->lr_save_eliminated)
13273 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13277 void
13278 thumb_expand_epilogue (void)
13280 HOST_WIDE_INT amount;
13281 arm_stack_offsets *offsets;
13282 int regno;
13284 /* Naked functions don't have epilogues.  */
13285 if (IS_NAKED (arm_current_func_type ()))
13286 return;
13288 offsets = arm_get_frame_offsets ();
13289 amount = offsets->outgoing_args - offsets->saved_regs;
13291 if (frame_pointer_needed)
13292 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13293 else if (amount)
13295 if (amount < 512)
13296 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13297 GEN_INT (amount)));
13298 else
13300 /* r3 is always free in the epilogue. */
13301 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13303 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13304 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13308 /* Emit a USE (stack_pointer_rtx), so that
13309 the stack adjustment will not be deleted. */
13310 emit_insn (gen_prologue_use (stack_pointer_rtx));
13312 if (current_function_profile || !TARGET_SCHED_PROLOG)
13313 emit_insn (gen_blockage ());
13315 /* Emit a clobber for each register that will be restored in the epilogue,
13316 so that flow2 will get register lifetimes correct.  */
13317 for (regno = 0; regno < 13; regno++)
13318 if (regs_ever_live[regno] && !call_used_regs[regno])
13319 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13321 if (! regs_ever_live[LR_REGNUM])
13322 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13325 static void
13326 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13328 unsigned long live_regs_mask = 0;
13329 unsigned long l_mask;
13330 unsigned high_regs_pushed = 0;
13331 int cfa_offset = 0;
13332 int regno;
13334 if (IS_NAKED (arm_current_func_type ()))
13335 return;
13337 if (is_called_in_ARM_mode (current_function_decl))
13339 const char * name;
13341 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13342 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13343 == SYMBOL_REF);
13344 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13346 /* Generate code sequence to switch us into Thumb mode. */
13347 /* The .code 32 directive has already been emitted by
13348 ASM_DECLARE_FUNCTION_NAME. */
13349 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13350 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13352 /* Generate a label, so that the debugger will notice the
13353 change in instruction sets. This label is also used by
13354 the assembler to bypass the ARM code when this function
13355 is called from a Thumb encoded function elsewhere in the
13356 same file. Hence the definition of STUB_NAME here must
13357 agree with the definition in gas/config/tc-arm.c. */
13359 #define STUB_NAME ".real_start_of"
13361 fprintf (f, "\t.code\t16\n");
13362 #ifdef ARM_PE
13363 if (arm_dllexport_name_p (name))
13364 name = arm_strip_name_encoding (name);
13365 #endif
13366 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13367 fprintf (f, "\t.thumb_func\n");
13368 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13371 if (current_function_pretend_args_size)
13373 if (cfun->machine->uses_anonymous_args)
13375 int num_pushes;
13377 fprintf (f, "\tpush\t{");
13379 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13381 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13382 regno <= LAST_ARG_REGNUM;
13383 regno++)
13384 asm_fprintf (f, "%r%s", regno,
13385 regno == LAST_ARG_REGNUM ? "" : ", ");
13387 fprintf (f, "}\n");
13389 else
13390 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13391 SP_REGNUM, SP_REGNUM,
13392 current_function_pretend_args_size);
13394 /* We don't need to record the stores for unwinding (would it
13395 help the debugger any if we did?), but record the change in
13396 the stack pointer. */
13397 if (dwarf2out_do_frame ())
13399 char *l = dwarf2out_cfi_label ();
13401 cfa_offset = cfa_offset + current_function_pretend_args_size;
13402 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13406 /* Get the registers we are going to push. */
13407 live_regs_mask = thumb_compute_save_reg_mask ();
13408 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13409 l_mask = live_regs_mask & 0x40ff;
13410 /* Then count how many other high registers will need to be pushed. */
13411 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13413 if (TARGET_BACKTRACE)
13415 unsigned offset;
13416 unsigned work_register;
13418 /* We have been asked to create a stack backtrace structure.
13419 The code looks like this:
13421 0 .align 2
13422 0 func:
13423 0 sub SP, #16 Reserve space for 4 registers.
13424 2 push {R7} Push low registers.
13425 4 add R7, SP, #20 Get the stack pointer before the push.
13426 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13427 8 mov R7, PC Get hold of the start of this code plus 12.
13428 10 str R7, [SP, #16] Store it.
13429 12 mov R7, FP Get hold of the current frame pointer.
13430 14 str R7, [SP, #4] Store it.
13431 16 mov R7, LR Get hold of the current return address.
13432 18 str R7, [SP, #12] Store it.
13433 20 add R7, SP, #16 Point at the start of the backtrace structure.
13434 22 mov FP, R7 Put this value into the frame pointer. */
13436 work_register = thumb_find_work_register (live_regs_mask);
13438 asm_fprintf
13439 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13440 SP_REGNUM, SP_REGNUM);
13442 if (dwarf2out_do_frame ())
13444 char *l = dwarf2out_cfi_label ();
13446 cfa_offset = cfa_offset + 16;
13447 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13450 if (l_mask)
13452 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13453 offset = bit_count (l_mask);
13455 else
13456 offset = 0;
13458 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13459 offset + 16 + current_function_pretend_args_size);
13461 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13462 offset + 4);
13464 /* Make sure that the instruction fetching the PC is in the right place
13465 to calculate "start of backtrace creation code + 12". */
13466 if (l_mask)
13468 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13469 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13470 offset + 12);
13471 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13472 ARM_HARD_FRAME_POINTER_REGNUM);
13473 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13474 offset);
13476 else
13478 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13479 ARM_HARD_FRAME_POINTER_REGNUM);
13480 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13481 offset);
13482 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13483 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13484 offset + 12);
13487 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13488 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13489 offset + 8);
13490 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13491 offset + 12);
13492 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13493 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13495 /* Optimization: If we are not pushing any low registers but we are going
13496 to push some high registers then delay our first push. This will just
13497 be a push of LR and we can combine it with the push of the first high
13498 register. */
13499 else if ((l_mask & 0xff) != 0
13500 || (high_regs_pushed == 0 && l_mask))
13501 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13503 if (high_regs_pushed)
13505 unsigned pushable_regs;
13506 unsigned next_hi_reg;
13508 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13509 if (live_regs_mask & (1 << next_hi_reg))
13510 break;
13512 pushable_regs = l_mask & 0xff;
13514 if (pushable_regs == 0)
13515 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13517 while (high_regs_pushed > 0)
13519 unsigned long real_regs_mask = 0;
13521 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13523 if (pushable_regs & (1 << regno))
13525 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13527 high_regs_pushed --;
13528 real_regs_mask |= (1 << next_hi_reg);
13530 if (high_regs_pushed)
13532 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13533 next_hi_reg --)
13534 if (live_regs_mask & (1 << next_hi_reg))
13535 break;
13537 else
13539 pushable_regs &= ~((1 << regno) - 1);
13540 break;
13545 /* If we had to find a work register and we have not yet
13546 saved the LR then add it to the list of regs to push. */
13547 if (l_mask == (1 << LR_REGNUM))
13549 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13550 1, &cfa_offset,
13551 real_regs_mask | (1 << LR_REGNUM));
13552 l_mask = 0;
13554 else
13555 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13560 /* Handle the case of a double word load into a low register from
13561 a computed memory address. The computed address may involve a
13562 register which is overwritten by the load. */
13563 const char *
13564 thumb_load_double_from_address (rtx *operands)
13566 rtx addr;
13567 rtx base;
13568 rtx offset;
13569 rtx arg1;
13570 rtx arg2;
13572 gcc_assert (GET_CODE (operands[0]) == REG);
13573 gcc_assert (GET_CODE (operands[1]) == MEM);
13575 /* Get the memory address. */
13576 addr = XEXP (operands[1], 0);
13578 /* Work out how the memory address is computed. */
13579 switch (GET_CODE (addr))
13581 case REG:
13582 operands[2] = gen_rtx_MEM (SImode,
13583 plus_constant (XEXP (operands[1], 0), 4));
13585 if (REGNO (operands[0]) == REGNO (addr))
13587 output_asm_insn ("ldr\t%H0, %2", operands);
13588 output_asm_insn ("ldr\t%0, %1", operands);
13590 else
13592 output_asm_insn ("ldr\t%0, %1", operands);
13593 output_asm_insn ("ldr\t%H0, %2", operands);
13595 break;
13597 case CONST:
13598 /* Compute <address> + 4 for the high order load. */
13599 operands[2] = gen_rtx_MEM (SImode,
13600 plus_constant (XEXP (operands[1], 0), 4));
13602 output_asm_insn ("ldr\t%0, %1", operands);
13603 output_asm_insn ("ldr\t%H0, %2", operands);
13604 break;
13606 case PLUS:
13607 arg1 = XEXP (addr, 0);
13608 arg2 = XEXP (addr, 1);
13610 if (CONSTANT_P (arg1))
13611 base = arg2, offset = arg1;
13612 else
13613 base = arg1, offset = arg2;
13615 gcc_assert (GET_CODE (base) == REG);
13617 /* Catch the case of <address> = <reg> + <reg>. */
13618 if (GET_CODE (offset) == REG)
13620 int reg_offset = REGNO (offset);
13621 int reg_base = REGNO (base);
13622 int reg_dest = REGNO (operands[0]);
13624 /* Add the base and offset registers together into the
13625 higher destination register. */
13626 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r\n",
13627 reg_dest + 1, reg_base, reg_offset);
13629 /* Load the lower destination register from the address in
13630 the higher destination register. */
13631 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]\n",
13632 reg_dest, reg_dest + 1);
13634 /* Load the higher destination register from its own address
13635 plus 4. */
13636 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]\n",
13637 reg_dest + 1, reg_dest + 1);
13639 else
13641 /* Compute <address> + 4 for the high order load. */
13642 operands[2] = gen_rtx_MEM (SImode,
13643 plus_constant (XEXP (operands[1], 0), 4));
13645 /* If the computed address is held in the low order register
13646 then load the high order register first, otherwise always
13647 load the low order register first. */
13648 if (REGNO (operands[0]) == REGNO (base))
13650 output_asm_insn ("ldr\t%H0, %2", operands);
13651 output_asm_insn ("ldr\t%0, %1", operands);
13653 else
13655 output_asm_insn ("ldr\t%0, %1", operands);
13656 output_asm_insn ("ldr\t%H0, %2", operands);
13659 break;
13661 case LABEL_REF:
13662 /* With no registers to worry about we can just load the value
13663 directly. */
13664 operands[2] = gen_rtx_MEM (SImode,
13665 plus_constant (XEXP (operands[1], 0), 4));
13667 output_asm_insn ("ldr\t%H0, %2", operands);
13668 output_asm_insn ("ldr\t%0, %1", operands);
13669 break;
13671 default:
13672 gcc_unreachable ();
13675 return "";
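/* For illustration: with operands[0] = r0 and an address of the form
   r1 + r2 (registers hypothetical), the REG + REG case above emits

	add	r1, r1, r2
	ldr	r0, [r1, #0]
	ldr	r1, [r1, #4]

   so the high half of the destination doubles as the address register.  */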
13678 const char *
13679 thumb_output_move_mem_multiple (int n, rtx *operands)
13681 rtx tmp;
13683 switch (n)
13685 case 2:
13686 if (REGNO (operands[4]) > REGNO (operands[5]))
13688 tmp = operands[4];
13689 operands[4] = operands[5];
13690 operands[5] = tmp;
13692 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13693 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13694 break;
13696 case 3:
13697 if (REGNO (operands[4]) > REGNO (operands[5]))
13699 tmp = operands[4];
13700 operands[4] = operands[5];
13701 operands[5] = tmp;
13703 if (REGNO (operands[5]) > REGNO (operands[6]))
13705 tmp = operands[5];
13706 operands[5] = operands[6];
13707 operands[6] = tmp;
13709 if (REGNO (operands[4]) > REGNO (operands[5]))
13711 tmp = operands[4];
13712 operands[4] = operands[5];
13713 operands[5] = tmp;
13716 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13717 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13718 break;
13720 default:
13721 gcc_unreachable ();
13724 return "";
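/* For illustration: the swaps above form a small sorting network that
   forces the scratch registers into ascending order, as required by the
   Thumb ldmia/stmia register-list encoding.  If the three scratch
   registers arrived as r5, r3, r4 (hypothetically), the result would be

	ldmia	r1!, {r3, r4, r5}
	stmia	r0!, {r3, r4, r5}  */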
13727 /* Output a call-via instruction for thumb state. */
13728 const char *
13729 thumb_call_via_reg (rtx reg)
13731 int regno = REGNO (reg);
13732 rtx *labelp;
13734 gcc_assert (regno < LR_REGNUM);
13736 /* If we are in the normal text section we can use a single instance
13737 per compilation unit. If we are doing function sections, then we need
13738 an entry per section, since we can't rely on reachability. */
13739 if (in_text_section ())
13741 thumb_call_reg_needed = 1;
13743 if (thumb_call_via_label[regno] == NULL)
13744 thumb_call_via_label[regno] = gen_label_rtx ();
13745 labelp = thumb_call_via_label + regno;
13747 else
13749 if (cfun->machine->call_via[regno] == NULL)
13750 cfun->machine->call_via[regno] = gen_label_rtx ();
13751 labelp = cfun->machine->call_via + regno;
13754 output_asm_insn ("bl\t%a0", labelp);
13755 return "";
13758 /* Routines for generating rtl. */
13759 void
13760 thumb_expand_movmemqi (rtx *operands)
13762 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13763 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13764 HOST_WIDE_INT len = INTVAL (operands[2]);
13765 HOST_WIDE_INT offset = 0;
13767 while (len >= 12)
13769 emit_insn (gen_movmem12b (out, in, out, in));
13770 len -= 12;
13773 if (len >= 8)
13775 emit_insn (gen_movmem8b (out, in, out, in));
13776 len -= 8;
13779 if (len >= 4)
13781 rtx reg = gen_reg_rtx (SImode);
13782 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13783 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13784 len -= 4;
13785 offset += 4;
13788 if (len >= 2)
13790 rtx reg = gen_reg_rtx (HImode);
13791 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13792 plus_constant (in, offset))));
13793 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13794 reg));
13795 len -= 2;
13796 offset += 2;
13799 if (len)
13801 rtx reg = gen_reg_rtx (QImode);
13802 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13803 plus_constant (in, offset))));
13804 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13805 reg));
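/* For illustration: a 23-byte copy expands above to one 12-byte and one
   8-byte block move (each advancing IN and OUT), then a halfword copy at
   offset 0 and a final byte copy at offset 2 of the remaining tail.  */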
13809 void
13810 thumb_reload_out_hi (rtx *operands)
13812 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13815 /* Handle reading a half-word from memory during reload. */
13816 void
13817 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13819 gcc_unreachable ();
13822 /* Return the length of a function name prefix
13823 that starts with the character 'c'. */
13824 static int
13825 arm_get_strip_length (int c)
13827 switch (c)
13829 ARM_NAME_ENCODING_LENGTHS
13830 default: return 0;
13834 /* Return a pointer to a function's name with any
13835 and all prefix encodings stripped from it. */
13836 const char *
13837 arm_strip_name_encoding (const char *name)
13839 int skip;
13841 while ((skip = arm_get_strip_length (* name)))
13842 name += skip;
13844 return name;
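/* For illustration: assuming '*' is among the prefix characters listed
   in ARM_NAME_ENCODING_LENGTHS, a name such as "*foo" is returned as
   "foo"; unencoded names are returned unchanged.  */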
13847 /* If there is a '*' anywhere in the name's prefix, then
13848 emit the stripped name verbatim, otherwise prepend an
13849 underscore if leading underscores are being used. */
13850 void
13851 arm_asm_output_labelref (FILE *stream, const char *name)
13853 int skip;
13854 int verbatim = 0;
13856 while ((skip = arm_get_strip_length (* name)))
13858 verbatim |= (*name == '*');
13859 name += skip;
13862 if (verbatim)
13863 fputs (name, stream);
13864 else
13865 asm_fprintf (stream, "%U%s", name);
13868 static void
13869 arm_file_end (void)
13871 int regno;
13873 if (! thumb_call_reg_needed)
13874 return;
13876 text_section ();
13877 asm_fprintf (asm_out_file, "\t.code 16\n");
13878 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13880 for (regno = 0; regno < LR_REGNUM; regno++)
13882 rtx label = thumb_call_via_label[regno];
13884 if (label != 0)
13886 targetm.asm_out.internal_label (asm_out_file, "L",
13887 CODE_LABEL_NUMBER (label));
13888 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
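/* For illustration: together with thumb_call_via_reg, the loop above
   emits one "bx" veneer per register used for an indirect call, of the
   form (label name hypothetical)

	.code 16
   L5:	bx	r5

   so that a call through r5 becomes "bl L5".  */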
13893 rtx aof_pic_label;
13895 #ifdef AOF_ASSEMBLER
13896 /* Special functions only needed when producing AOF syntax assembler. */
13898 struct pic_chain
13900 struct pic_chain * next;
13901 const char * symname;
13904 static struct pic_chain * aof_pic_chain = NULL;
13906 rtx
13907 aof_pic_entry (rtx x)
13909 struct pic_chain ** chainp;
13910 int offset;
13912 if (aof_pic_label == NULL_RTX)
13914 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13917 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13918 offset += 4, chainp = &(*chainp)->next)
13919 if ((*chainp)->symname == XSTR (x, 0))
13920 return plus_constant (aof_pic_label, offset);
13922 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13923 (*chainp)->next = NULL;
13924 (*chainp)->symname = XSTR (x, 0);
13925 return plus_constant (aof_pic_label, offset);
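/* For illustration: successive distinct symbols are appended to the
   chain, so the Nth one is assigned byte offset 4 * N from x$adcons,
   and looking up an already-entered SYMBOL_REF yields its old offset.  */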
13928 void
13929 aof_dump_pic_table (FILE *f)
13931 struct pic_chain * chain;
13933 if (aof_pic_chain == NULL)
13934 return;
13936 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13937 PIC_OFFSET_TABLE_REGNUM,
13938 PIC_OFFSET_TABLE_REGNUM);
13939 fputs ("|x$adcons|\n", f);
13941 for (chain = aof_pic_chain; chain; chain = chain->next)
13943 fputs ("\tDCD\t", f);
13944 assemble_name (f, chain->symname);
13945 fputs ("\n", f);
13949 int arm_text_section_count = 1;
13951 char *
13952 aof_text_section (void)
13954 static char buf[100];
13955 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13956 arm_text_section_count++);
13957 if (flag_pic)
13958 strcat (buf, ", PIC, REENTRANT");
13959 return buf;
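/* For illustration: the first code section emitted would be
   "AREA |C$$code1|, CODE, READONLY", gaining ", PIC, REENTRANT" when
   compiling with -fpic.  */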
13962 static int arm_data_section_count = 1;
13964 char *
13965 aof_data_section (void)
13967 static char buf[100];
13968 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13969 return buf;
13972 /* The AOF assembler is religiously strict about declarations of
13973 imported and exported symbols, so that it is impossible to declare
13974 a function as imported near the beginning of the file, and then to
13975 export it later on. It is, however, possible to delay the decision
13976 until all the functions in the file have been compiled. To get
13977 around this, we maintain a list of the imports and exports, and
13978 delete from it any that are subsequently defined. At the end of
13979 compilation we spit the remainder of the list out before the END
13980 directive. */
13982 struct import
13984 struct import * next;
13985 const char * name;
13988 static struct import * imports_list = NULL;
13990 void
13991 aof_add_import (const char *name)
13993 struct import * new;
13995 for (new = imports_list; new; new = new->next)
13996 if (new->name == name)
13997 return;
13999 new = (struct import *) xmalloc (sizeof (struct import));
14000 new->next = imports_list;
14001 imports_list = new;
14002 new->name = name;
14005 void
14006 aof_delete_import (const char *name)
14008 struct import ** old;
14010 for (old = &imports_list; *old; old = & (*old)->next)
14012 if ((*old)->name == name)
14014 *old = (*old)->next;
14015 return;
14020 int arm_main_function = 0;
14022 static void
14023 aof_dump_imports (FILE *f)
14025 /* The AOF assembler needs this to cause the startup code to be extracted
14026 from the library. Bringing in __main causes the whole thing to work
14027 automagically. */
14028 if (arm_main_function)
14030 text_section ();
14031 fputs ("\tIMPORT __main\n", f);
14032 fputs ("\tDCD __main\n", f);
14035 /* Now dump the remaining imports. */
14036 while (imports_list)
14038 fprintf (f, "\tIMPORT\t");
14039 assemble_name (f, imports_list->name);
14040 fputc ('\n', f);
14041 imports_list = imports_list->next;
14045 static void
14046 aof_globalize_label (FILE *stream, const char *name)
14048 default_globalize_label (stream, name);
14049 if (! strcmp (name, "main"))
14050 arm_main_function = 1;
14053 static void
14054 aof_file_start (void)
14056 fputs ("__r0\tRN\t0\n", asm_out_file);
14057 fputs ("__a1\tRN\t0\n", asm_out_file);
14058 fputs ("__a2\tRN\t1\n", asm_out_file);
14059 fputs ("__a3\tRN\t2\n", asm_out_file);
14060 fputs ("__a4\tRN\t3\n", asm_out_file);
14061 fputs ("__v1\tRN\t4\n", asm_out_file);
14062 fputs ("__v2\tRN\t5\n", asm_out_file);
14063 fputs ("__v3\tRN\t6\n", asm_out_file);
14064 fputs ("__v4\tRN\t7\n", asm_out_file);
14065 fputs ("__v5\tRN\t8\n", asm_out_file);
14066 fputs ("__v6\tRN\t9\n", asm_out_file);
14067 fputs ("__sl\tRN\t10\n", asm_out_file);
14068 fputs ("__fp\tRN\t11\n", asm_out_file);
14069 fputs ("__ip\tRN\t12\n", asm_out_file);
14070 fputs ("__sp\tRN\t13\n", asm_out_file);
14071 fputs ("__lr\tRN\t14\n", asm_out_file);
14072 fputs ("__pc\tRN\t15\n", asm_out_file);
14073 fputs ("__f0\tFN\t0\n", asm_out_file);
14074 fputs ("__f1\tFN\t1\n", asm_out_file);
14075 fputs ("__f2\tFN\t2\n", asm_out_file);
14076 fputs ("__f3\tFN\t3\n", asm_out_file);
14077 fputs ("__f4\tFN\t4\n", asm_out_file);
14078 fputs ("__f5\tFN\t5\n", asm_out_file);
14079 fputs ("__f6\tFN\t6\n", asm_out_file);
14080 fputs ("__f7\tFN\t7\n", asm_out_file);
14081 text_section ();
14084 static void
14085 aof_file_end (void)
14087 if (flag_pic)
14088 aof_dump_pic_table (asm_out_file);
14089 arm_file_end ();
14090 aof_dump_imports (asm_out_file);
14091 fputs ("\tEND\n", asm_out_file);
14093 #endif /* AOF_ASSEMBLER */
14095 #ifndef ARM_PE
14096 /* Symbols in the text segment can be accessed without indirecting via the
14097 constant pool; it may take an extra binary operation, but this is still
14098 faster than indirecting via memory. Don't do this when not optimizing,
14099 since we won't be calculating all of the offsets necessary to do this
14100 simplification. */
14102 static void
14103 arm_encode_section_info (tree decl, rtx rtl, int first)
14105 /* This doesn't work with AOF syntax, since the string table may be in
14106 a different AREA. */
14107 #ifndef AOF_ASSEMBLER
14108 if (optimize > 0 && TREE_CONSTANT (decl))
14109 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14110 #endif
14112 /* If we are referencing a function that is weak then encode a long call
14113 flag in the function name, otherwise if the function is static or
14114 known to be defined in this file then encode a short call flag. */
14115 if (first && DECL_P (decl))
14117 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14118 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14119 else if (! TREE_PUBLIC (decl))
14120 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14123 #endif /* !ARM_PE */
14125 static void
14126 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14128 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14129 && !strcmp (prefix, "L"))
14131 arm_ccfsm_state = 0;
14132 arm_target_insn = NULL;
14134 default_internal_label (stream, prefix, labelno);
14137 /* Output code to add DELTA to the first argument, and then jump
14138 to FUNCTION. Used for C++ multiple inheritance. */
14139 static void
14140 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14141 HOST_WIDE_INT delta,
14142 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14143 tree function)
14145 static int thunk_label = 0;
14146 char label[256];
14147 int mi_delta = delta;
14148 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14149 int shift = 0;
14150 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14151 ? 1 : 0);
14152 if (mi_delta < 0)
14153 mi_delta = - mi_delta;
14154 if (TARGET_THUMB)
14156 int labelno = thunk_label++;
14157 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14158 fputs ("\tldr\tr12, ", file);
14159 assemble_name (file, label);
14160 fputc ('\n', file);
14162 while (mi_delta != 0)
14164 if ((mi_delta & (3 << shift)) == 0)
14165 shift += 2;
14166 else
14168 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14169 mi_op, this_regno, this_regno,
14170 mi_delta & (0xff << shift));
14171 mi_delta &= ~(0xff << shift);
14172 shift += 8;
14175 if (TARGET_THUMB)
14177 fprintf (file, "\tbx\tr12\n");
14178 ASM_OUTPUT_ALIGN (file, 2);
14179 assemble_name (file, label);
14180 fputs (":\n", file);
14181 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14183 else
14185 fputs ("\tb\t", file);
14186 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14187 if (NEED_PLT_RELOC)
14188 fputs ("(PLT)", file);
14189 fputc ('\n', file);
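/* For illustration: the loop above splits DELTA into 8-bit chunks at
   even bit positions, each of which is a valid ARM immediate.  Assuming
   THIS is in r0 and DELTA is 0x1234 (hypothetical values), it emits

	add	r0, r0, #564	@ 0x234
	add	r0, r0, #4096	@ 0x1000  */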
14193 int
14194 arm_emit_vector_const (FILE *file, rtx x)
14196 int i;
14197 const char * pattern;
14199 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14201 switch (GET_MODE (x))
14203 case V2SImode: pattern = "%08x"; break;
14204 case V4HImode: pattern = "%04x"; break;
14205 case V8QImode: pattern = "%02x"; break;
14206 default: gcc_unreachable ();
14209 fprintf (file, "0x");
14210 for (i = CONST_VECTOR_NUNITS (x); i--;)
14212 rtx element;
14214 element = CONST_VECTOR_ELT (x, i);
14215 fprintf (file, pattern, INTVAL (element));
14218 return 1;
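/* For illustration: a V4HImode constant with elements {1, 2, 3, 4}
   (element 0 first) is printed highest element first as
   "0x0004000300020001".  */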
14221 const char *
14222 arm_output_load_gr (rtx *operands)
14224 rtx reg;
14225 rtx offset;
14226 rtx wcgr;
14227 rtx sum;
14229 if (GET_CODE (operands [1]) != MEM
14230 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14231 || GET_CODE (reg = XEXP (sum, 0)) != REG
14232 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14233 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14234 return "wldrw%?\t%0, %1";
14236 /* Fix up an out-of-range load of a GR register. */
14237 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14238 wcgr = operands[0];
14239 operands[0] = reg;
14240 output_asm_insn ("ldr%?\t%0, %1", operands);
14242 operands[0] = wcgr;
14243 operands[1] = reg;
14244 output_asm_insn ("tmcr%?\t%0, %1", operands);
14245 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14247 return "";
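/* For illustration (base register and wCGR number hypothetical): the
   out-of-range expansion above produces

	str	r1, [sp, #-4]!	@ Start of GR load expansion
	ldr	r1, [r1, #1024]
	tmcr	wcgr0, r1
	ldr	r1, [sp], #4	@ End of GR load expansion

   spilling the base register so it can hold the loaded value while the
   "tmcr" transfers it into the iWMMXt control register.  */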
14250 static rtx
14251 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14252 int incoming ATTRIBUTE_UNUSED)
14254 #if 0
14255 /* FIXME: The ARM backend has special code to handle structure
14256 returns, and will reserve its own hidden first argument. So
14257 if this macro is enabled a *second* hidden argument will be
14258 reserved, which will break binary compatibility with old
14259 toolchains and also thunk handling. One day this should be
14260 fixed. */
14261 return 0;
14262 #else
14263 /* Register in which address to store a structure value
14264 is passed to a function. */
14265 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14266 #endif
14269 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14271 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14272 named arg and all anonymous args onto the stack.
14273 XXX I know the prologue shouldn't be pushing registers, but it is faster
14274 that way. */
14276 static void
14277 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14278 enum machine_mode mode ATTRIBUTE_UNUSED,
14279 tree type ATTRIBUTE_UNUSED,
14280 int *pretend_size,
14281 int second_time ATTRIBUTE_UNUSED)
14283 cfun->machine->uses_anonymous_args = 1;
14284 if (cum->nregs < NUM_ARG_REGS)
14285 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
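/* For illustration: assuming NUM_ARG_REGS is 4, a varargs function whose
   named arguments consume only r0 (cum->nregs == 1) gets a pretend size
   of 3 * UNITS_PER_WORD == 12, so the prologue pushes r1-r3.  */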
14288 /* Return nonzero if the CONSUMER instruction (a store) does not need
14289 PRODUCER's value to calculate the address. */
14291 int
14292 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14294 rtx value = PATTERN (producer);
14295 rtx addr = PATTERN (consumer);
14297 if (GET_CODE (value) == COND_EXEC)
14298 value = COND_EXEC_CODE (value);
14299 if (GET_CODE (value) == PARALLEL)
14300 value = XVECEXP (value, 0, 0);
14301 value = XEXP (value, 0);
14302 if (GET_CODE (addr) == COND_EXEC)
14303 addr = COND_EXEC_CODE (addr);
14304 if (GET_CODE (addr) == PARALLEL)
14305 addr = XVECEXP (addr, 0, 0);
14306 addr = XEXP (addr, 0);
14308 return !reg_overlap_mentioned_p (value, addr);
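/* For illustration: if PRODUCER sets r0 and CONSUMER is "str r3, [r0]"
   (registers hypothetical), the address uses r0 and zero is returned;
   for "str r0, [r4]" only the stored value depends on r0, so the result
   is nonzero.  */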
14311 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14312 have an early register shift value or amount dependency on the
14313 result of PRODUCER. */
14315 int
14316 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14318 rtx value = PATTERN (producer);
14319 rtx op = PATTERN (consumer);
14320 rtx early_op;
14322 if (GET_CODE (value) == COND_EXEC)
14323 value = COND_EXEC_CODE (value);
14324 if (GET_CODE (value) == PARALLEL)
14325 value = XVECEXP (value, 0, 0);
14326 value = XEXP (value, 0);
14327 if (GET_CODE (op) == COND_EXEC)
14328 op = COND_EXEC_CODE (op);
14329 if (GET_CODE (op) == PARALLEL)
14330 op = XVECEXP (op, 0, 0);
14331 op = XEXP (op, 1);
14333 early_op = XEXP (op, 0);
14334 /* This is either an actual independent shift, or a shift applied to
14335 the first operand of another operation. We want the whole shift
14336 operation. */
14337 if (GET_CODE (early_op) == REG)
14338 early_op = op;
14340 return !reg_overlap_mentioned_p (value, early_op);
14343 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14344 have an early register shift value dependency on the result of
14345 PRODUCER. */
14347 int
14348 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14350 rtx value = PATTERN (producer);
14351 rtx op = PATTERN (consumer);
14352 rtx early_op;
14354 if (GET_CODE (value) == COND_EXEC)
14355 value = COND_EXEC_CODE (value);
14356 if (GET_CODE (value) == PARALLEL)
14357 value = XVECEXP (value, 0, 0);
14358 value = XEXP (value, 0);
14359 if (GET_CODE (op) == COND_EXEC)
14360 op = COND_EXEC_CODE (op);
14361 if (GET_CODE (op) == PARALLEL)
14362 op = XVECEXP (op, 0, 0);
14363 op = XEXP (op, 1);
14365 early_op = XEXP (op, 0);
14367 /* This is either an actual independent shift, or a shift applied to
14368 the first operand of another operation. We want the value being
14369 shifted, in either case. */
14370 if (GET_CODE (early_op) != REG)
14371 early_op = XEXP (early_op, 0);
14373 return !reg_overlap_mentioned_p (value, early_op);
14376 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14377 have an early register mult dependency on the result of
14378 PRODUCER. */
14380 int
14381 arm_no_early_mul_dep (rtx producer, rtx consumer)
14383 rtx value = PATTERN (producer);
14384 rtx op = PATTERN (consumer);
14386 if (GET_CODE (value) == COND_EXEC)
14387 value = COND_EXEC_CODE (value);
14388 if (GET_CODE (value) == PARALLEL)
14389 value = XVECEXP (value, 0, 0);
14390 value = XEXP (value, 0);
14391 if (GET_CODE (op) == COND_EXEC)
14392 op = COND_EXEC_CODE (op);
14393 if (GET_CODE (op) == PARALLEL)
14394 op = XVECEXP (op, 0, 0);
14395 op = XEXP (op, 1);
14397 return (GET_CODE (op) == PLUS
14398 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
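/* For illustration: for a CONSUMER such as "mla r0, r1, r2, r3"
   (registers hypothetical) the accumulator r3 is the second operand of
   the PLUS and may arrive late; only a PRODUCER feeding r1 or r2 makes
   this function return zero.  */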
14402 /* We can't rely on the caller doing the proper promotion when
14403 using APCS or ATPCS. */
14405 static bool
14406 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14408 return !TARGET_AAPCS_BASED;
14412 /* AAPCS based ABIs use short enums by default. */
14414 static bool
14415 arm_default_short_enums (void)
14417 return TARGET_AAPCS_BASED;
14421 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14423 static bool
14424 arm_align_anon_bitfield (void)
14426 return TARGET_AAPCS_BASED;
14430 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14432 static tree
14433 arm_cxx_guard_type (void)
14435 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14439 /* The EABI says test the least significant bit of a guard variable. */
14441 static bool
14442 arm_cxx_guard_mask_bit (void)
14444 return TARGET_AAPCS_BASED;
14448 /* The EABI specifies that all array cookies are 8 bytes long. */
14450 static tree
14451 arm_get_cookie_size (tree type)
14453 tree size;
14455 if (!TARGET_AAPCS_BASED)
14456 return default_cxx_get_cookie_size (type);
14458 size = build_int_cst (sizetype, 8);
14459 return size;
14463 /* The EABI says that array cookies should also contain the element size. */
14465 static bool
14466 arm_cookie_has_size (void)
14468 return TARGET_AAPCS_BASED;
14472 /* The EABI says constructors and destructors should return a pointer to
14473 the object constructed/destroyed. */
14475 static bool
14476 arm_cxx_cdtor_returns_this (void)
14478 return TARGET_AAPCS_BASED;
14481 /* The EABI says that an inline function may never be the key
14482 method. */
14484 static bool
14485 arm_cxx_key_method_may_be_inline (void)
14487 return !TARGET_AAPCS_BASED;
14490 static void
14491 arm_cxx_determine_class_data_visibility (tree decl)
14493 if (!TARGET_AAPCS_BASED)
14494 return;
14496 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
14497 is exported. However, on systems without dynamic vague linkage,
14498 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
14499 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
14500 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
14501 else
14502 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
14503 DECL_VISIBILITY_SPECIFIED (decl) = 1;
14506 static bool
14507 arm_cxx_class_data_always_comdat (void)
14509 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
14510 vague linkage if the class has no key function. */
14511 return !TARGET_AAPCS_BASED;
14515 /* The EABI says __aeabi_atexit should be used to register static
14516 destructors. */
14518 static bool
14519 arm_cxx_use_aeabi_atexit (void)
14521 return TARGET_AAPCS_BASED;
14525 void
14526 arm_set_return_address (rtx source, rtx scratch)
14528 arm_stack_offsets *offsets;
14529 HOST_WIDE_INT delta;
14530 rtx addr;
14531 unsigned long saved_regs;
14533 saved_regs = arm_compute_save_reg_mask ();
14535 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14536 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14537 else
14539 if (frame_pointer_needed)
14540 addr = plus_constant(hard_frame_pointer_rtx, -4);
14541 else
14543 /* LR will be the first saved register. */
14544 offsets = arm_get_frame_offsets ();
14545 delta = offsets->outgoing_args - (offsets->frame + 4);
14548 if (delta >= 4096)
14550 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14551 GEN_INT (delta & ~4095)));
14552 addr = scratch;
14553 delta &= 4095;
14555 else
14556 addr = stack_pointer_rtx;
14558 addr = plus_constant (addr, delta);
14560 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
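/* For illustration: with a hypothetical DELTA of 5000 the code above
   adds #4096 into SCRATCH and stores SOURCE at SCRATCH + 904, keeping
   the remaining displacement within the load/store offset range.  */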
14565 void
14566 thumb_set_return_address (rtx source, rtx scratch)
14568 arm_stack_offsets *offsets;
14569 HOST_WIDE_INT delta;
14570 int reg;
14571 rtx addr;
14572 unsigned long mask;
14574 emit_insn (gen_rtx_USE (VOIDmode, source));
14576 mask = thumb_compute_save_reg_mask ();
14577 if (mask & (1 << LR_REGNUM))
14579 offsets = arm_get_frame_offsets ();
14581 /* Find the saved regs. */
14582 if (frame_pointer_needed)
14584 delta = offsets->soft_frame - offsets->saved_args;
14585 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14587 else
14589 delta = offsets->outgoing_args - offsets->saved_args;
14590 reg = SP_REGNUM;
14592 /* Allow for the stack frame. */
14593 if (TARGET_BACKTRACE)
14594 delta -= 16;
14595 /* The link register is always the first saved register. */
14596 delta -= 4;
14598 /* Construct the address. */
14599 addr = gen_rtx_REG (SImode, reg);
14600 if ((reg != SP_REGNUM && delta >= 128)
14601 || delta >= 1024)
14603 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14604 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14605 addr = scratch;
14607 else
14608 addr = plus_constant (addr, delta);
14610 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14612 else
14613 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14616 /* Implements target hook vector_mode_supported_p. */
14617 bool
14618 arm_vector_mode_supported_p (enum machine_mode mode)
14620 if ((mode == V2SImode)
14621 || (mode == V4HImode)
14622 || (mode == V8QImode))
14623 return true;
14625 return false;
14628 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14629 ARM insns and therefore guarantee that the shift count is modulo 256.
14630 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14631 guarantee no particular behavior for out-of-range counts. */
14633 static unsigned HOST_WIDE_INT
14634 arm_shift_truncation_mask (enum machine_mode mode)
14636 return mode == SImode ? 255 : 0;
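/* For illustration: with this mask an SImode shift by a variable count
   of 257 behaves like a shift by 1 (257 & 255), whereas the 0 returned
   for DImode promises nothing about out-of-range counts.  */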
14640 /* Map internal gcc register numbers to DWARF2 register numbers. */
14642 unsigned int
14643 arm_dbx_register_number (unsigned int regno)
14645 if (regno < 16)
14646 return regno;
14648 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14649 compatibility. The EABI defines them as registers 96-103. */
14650 if (IS_FPA_REGNUM (regno))
14651 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14653 if (IS_VFP_REGNUM (regno))
14654 return 64 + regno - FIRST_VFP_REGNUM;
14656 if (IS_IWMMXT_GR_REGNUM (regno))
14657 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14659 if (IS_IWMMXT_REGNUM (regno))
14660 return 112 + regno - FIRST_IWMMXT_REGNUM;
14662 gcc_unreachable ();
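/* For illustration, the mapping above amounts to (register counts
   assumed from the FIRST_xxx and IS_xxx macros):

	r0-r15			-> 0-15
	FPA f0-f7		-> 96-103 (EABI) or 16-23 (legacy)
	VFP s0-s31		-> 64-95
	iWMMXt wCGR0-wCGR3	-> 104-107
	iWMMXt wR0-wR15		-> 112-127  */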