* arm.c (const_ok_for_arm): Use a faster algorithm.
1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
75 static rtx emit_sfm (int, int);
76 #ifndef AOF_ASSEMBLER
77 static bool arm_assemble_integer (rtx, unsigned int, int);
78 #endif
79 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
80 static arm_cc get_arm_condition_code (rtx);
81 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
82 static rtx is_jump_table (rtx);
83 static const char *output_multi_immediate (rtx *, const char *, const char *,
84 int, HOST_WIDE_INT);
85 static const char *shift_op (rtx, HOST_WIDE_INT *);
86 static struct machine_function *arm_init_machine_status (void);
87 static void thumb_exit (FILE *, int);
88 static rtx is_jump_table (rtx);
89 static HOST_WIDE_INT get_jump_table_size (rtx);
90 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
91 static Mnode *add_minipool_forward_ref (Mfix *);
92 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
93 static Mnode *add_minipool_backward_ref (Mfix *);
94 static void assign_minipool_offsets (Mfix *);
95 static void arm_print_value (FILE *, rtx);
96 static void dump_minipool (rtx);
97 static int arm_barrier_cost (rtx);
98 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
99 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
100 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
101 rtx);
102 static void arm_reorg (void);
103 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
104 static int current_file_function_operand (rtx);
105 static unsigned long arm_compute_save_reg0_reg12_mask (void);
106 static unsigned long arm_compute_save_reg_mask (void);
107 static unsigned long arm_isr_value (tree);
108 static unsigned long arm_compute_func_type (void);
109 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
110 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
111 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
112 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
113 #endif
114 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
115 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
116 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
117 static int arm_comp_type_attributes (tree, tree);
118 static void arm_set_default_type_attributes (tree);
119 static int arm_adjust_cost (rtx, rtx, rtx, int);
120 static int count_insns_for_constant (HOST_WIDE_INT, int);
121 static int arm_get_strip_length (int);
122 static bool arm_function_ok_for_sibcall (tree, tree);
123 static void arm_internal_label (FILE *, const char *, unsigned long);
124 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
125 tree);
126 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
127 static bool arm_size_rtx_costs (rtx, int, int, int *);
128 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
129 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
130 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
131 static bool arm_9e_rtx_costs (rtx, int, int, int *);
132 static int arm_address_cost (rtx);
133 static bool arm_memory_load_p (rtx);
134 static bool arm_cirrus_insn_p (rtx);
135 static void cirrus_reorg (rtx);
136 static void arm_init_builtins (void);
137 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
138 static void arm_init_iwmmxt_builtins (void);
139 static rtx safe_vector_operand (rtx, enum machine_mode);
140 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
141 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
142 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
143 static void emit_constant_insn (rtx cond, rtx pattern);
144 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
145 tree, bool);
147 #ifdef OBJECT_FORMAT_ELF
148 static void arm_elf_asm_constructor (rtx, int);
149 #endif
150 #ifndef ARM_PE
151 static void arm_encode_section_info (tree, rtx, int);
152 #endif
154 static void arm_file_end (void);
156 #ifdef AOF_ASSEMBLER
157 static void aof_globalize_label (FILE *, const char *);
158 static void aof_dump_imports (FILE *);
159 static void aof_dump_pic_table (FILE *);
160 static void aof_file_start (void);
161 static void aof_file_end (void);
162 #endif
163 static rtx arm_struct_value_rtx (tree, int);
164 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
165 tree, int *, int);
166 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
167 enum machine_mode, tree, bool);
168 static bool arm_promote_prototypes (tree);
169 static bool arm_default_short_enums (void);
170 static bool arm_align_anon_bitfield (void);
171 static bool arm_return_in_msb (tree);
172 static bool arm_must_pass_in_stack (enum machine_mode, tree);
174 static tree arm_cxx_guard_type (void);
175 static bool arm_cxx_guard_mask_bit (void);
176 static tree arm_get_cookie_size (tree);
177 static bool arm_cookie_has_size (void);
178 static bool arm_cxx_cdtor_returns_this (void);
179 static bool arm_cxx_key_method_may_be_inline (void);
180 static void arm_cxx_determine_class_data_visibility (tree);
181 static bool arm_cxx_class_data_always_comdat (void);
182 static bool arm_cxx_use_aeabi_atexit (void);
183 static void arm_init_libfuncs (void);
184 static bool arm_handle_option (size_t, const char *, int);
185 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
187 /* Initialize the GCC target structure. */
188 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
189 #undef TARGET_MERGE_DECL_ATTRIBUTES
190 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
191 #endif
193 #undef TARGET_ATTRIBUTE_TABLE
194 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
196 #undef TARGET_ASM_FILE_END
197 #define TARGET_ASM_FILE_END arm_file_end
199 #ifdef AOF_ASSEMBLER
200 #undef TARGET_ASM_BYTE_OP
201 #define TARGET_ASM_BYTE_OP "\tDCB\t"
202 #undef TARGET_ASM_ALIGNED_HI_OP
203 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
204 #undef TARGET_ASM_ALIGNED_SI_OP
205 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
206 #undef TARGET_ASM_GLOBALIZE_LABEL
207 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
208 #undef TARGET_ASM_FILE_START
209 #define TARGET_ASM_FILE_START aof_file_start
210 #undef TARGET_ASM_FILE_END
211 #define TARGET_ASM_FILE_END aof_file_end
212 #else
213 #undef TARGET_ASM_ALIGNED_SI_OP
214 #define TARGET_ASM_ALIGNED_SI_OP NULL
215 #undef TARGET_ASM_INTEGER
216 #define TARGET_ASM_INTEGER arm_assemble_integer
217 #endif
219 #undef TARGET_ASM_FUNCTION_PROLOGUE
220 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
222 #undef TARGET_ASM_FUNCTION_EPILOGUE
223 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
225 #undef TARGET_DEFAULT_TARGET_FLAGS
226 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
227 #undef TARGET_HANDLE_OPTION
228 #define TARGET_HANDLE_OPTION arm_handle_option
230 #undef TARGET_COMP_TYPE_ATTRIBUTES
231 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
233 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
234 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
236 #undef TARGET_SCHED_ADJUST_COST
237 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
239 #undef TARGET_ENCODE_SECTION_INFO
240 #ifdef ARM_PE
241 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
242 #else
243 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
244 #endif
246 #undef TARGET_STRIP_NAME_ENCODING
247 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
249 #undef TARGET_ASM_INTERNAL_LABEL
250 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
252 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
253 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
255 #undef TARGET_ASM_OUTPUT_MI_THUNK
256 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
257 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
258 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
260 /* This will be overridden in arm_override_options. */
261 #undef TARGET_RTX_COSTS
262 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
263 #undef TARGET_ADDRESS_COST
264 #define TARGET_ADDRESS_COST arm_address_cost
266 #undef TARGET_SHIFT_TRUNCATION_MASK
267 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
268 #undef TARGET_VECTOR_MODE_SUPPORTED_P
269 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
271 #undef TARGET_MACHINE_DEPENDENT_REORG
272 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
274 #undef TARGET_INIT_BUILTINS
275 #define TARGET_INIT_BUILTINS arm_init_builtins
276 #undef TARGET_EXPAND_BUILTIN
277 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
279 #undef TARGET_INIT_LIBFUNCS
280 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
282 #undef TARGET_PROMOTE_FUNCTION_ARGS
283 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
284 #undef TARGET_PROMOTE_FUNCTION_RETURN
285 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
286 #undef TARGET_PROMOTE_PROTOTYPES
287 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
288 #undef TARGET_PASS_BY_REFERENCE
289 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
290 #undef TARGET_ARG_PARTIAL_BYTES
291 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
293 #undef TARGET_STRUCT_VALUE_RTX
294 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
296 #undef TARGET_SETUP_INCOMING_VARARGS
297 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
299 #undef TARGET_DEFAULT_SHORT_ENUMS
300 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
302 #undef TARGET_ALIGN_ANON_BITFIELD
303 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
305 #undef TARGET_CXX_GUARD_TYPE
306 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
308 #undef TARGET_CXX_GUARD_MASK_BIT
309 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
311 #undef TARGET_CXX_GET_COOKIE_SIZE
312 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
314 #undef TARGET_CXX_COOKIE_HAS_SIZE
315 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
317 #undef TARGET_CXX_CDTOR_RETURNS_THIS
318 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
320 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
321 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
323 #undef TARGET_CXX_USE_AEABI_ATEXIT
324 #define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit
326 #undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
327 #define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
328 arm_cxx_determine_class_data_visibility
330 #undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
331 #define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat
333 #undef TARGET_RETURN_IN_MSB
334 #define TARGET_RETURN_IN_MSB arm_return_in_msb
336 #undef TARGET_MUST_PASS_IN_STACK
337 #define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
339 struct gcc_target targetm = TARGET_INITIALIZER;
341 /* Obstack for minipool constant handling. */
342 static struct obstack minipool_obstack;
343 static char * minipool_startobj;
345 /* The maximum number of insns skipped which
346 will be conditionalised if possible. */
347 static int max_insns_skipped = 5;
349 extern FILE * asm_out_file;
351 /* True if we are currently building a constant table. */
352 int making_const_table;
354 /* Define the information needed to generate branch insns. This is
355 stored from the compare operation. */
356 rtx arm_compare_op0, arm_compare_op1;
358 /* The processor for which instructions should be scheduled. */
359 enum processor_type arm_tune = arm_none;
361 /* Which floating point model to use. */
362 enum arm_fp_model arm_fp_model;
364 /* Which floating point hardware is available. */
365 enum fputype arm_fpu_arch;
367 /* Which floating point hardware to schedule for. */
368 enum fputype arm_fpu_tune;
370 /* Whether to use floating point hardware. */
371 enum float_abi_type arm_float_abi;
373 /* Which ABI to use. */
374 enum arm_abi_type arm_abi;
376 /* Set by the -mfpu=... option. */
377 static const char * target_fpu_name = NULL;
379 /* Set by the -mfpe=... option. */
380 static const char * target_fpe_name = NULL;
382 /* Set by the -mfloat-abi=... option. */
383 static const char * target_float_abi_name = NULL;
385 /* Set by the -mabi=... option. */
386 static const char * target_abi_name = NULL;
388 /* Used to parse -mstructure_size_boundary command line option. */
389 static const char * structure_size_string = NULL;
390 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
392 /* Used for Thumb call_via trampolines. */
393 rtx thumb_call_via_label[14];
394 static int thumb_call_reg_needed;
396 /* Bit values used to identify processor capabilities. */
397 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
398 #define FL_ARCH3M (1 << 1) /* Extended multiply */
399 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
400 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
401 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
402 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
403 #define FL_THUMB (1 << 6) /* Thumb aware */
404 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
405 #define FL_STRONG (1 << 8) /* StrongARM */
406 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
407 #define FL_XSCALE (1 << 10) /* XScale */
408 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
409 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
410 media instructions. */
411 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
412 #define FL_WBUF (1 << 14) /* Schedule for write buffer ops.
413 Note: ARM6 & 7 derivatives only. */
415 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
417 #define FL_FOR_ARCH2 0
418 #define FL_FOR_ARCH3 FL_MODE32
419 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
420 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
421 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
422 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
423 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
424 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
425 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
426 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
427 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
428 #define FL_FOR_ARCH6J FL_FOR_ARCH6
429 #define FL_FOR_ARCH6K FL_FOR_ARCH6
430 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
431 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6
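/* Illustrative expansion of the cumulative masks above: for example,

     FL_FOR_ARCH5TE = FL_FOR_ARCH5E | FL_THUMB
                    = FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5
                      | FL_ARCH5E | FL_THUMB

   so a core tagged with architecture 5TE is assumed to have 32-bit mode,
   long multiplies, the v4 and v5 instruction sets, the v5E DSP
   extensions and Thumb.  */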
433 /* The bits in this mask specify which
434 instructions we are allowed to generate. */
435 static unsigned long insn_flags = 0;
437 /* The bits in this mask specify which instruction scheduling options should
438 be used. */
439 static unsigned long tune_flags = 0;
441 /* The following are used in the arm.md file as equivalents to bits
442 in the above two flag variables. */
444 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
445 int arm_arch3m = 0;
447 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
448 int arm_arch4 = 0;
450 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
451 int arm_arch4t = 0;
453 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
454 int arm_arch5 = 0;
456 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
457 int arm_arch5e = 0;
459 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
460 int arm_arch6 = 0;
462 /* Nonzero if this chip can benefit from load scheduling. */
463 int arm_ld_sched = 0;
465 /* Nonzero if this chip is a StrongARM. */
466 int arm_tune_strongarm = 0;
468 /* Nonzero if this chip is a Cirrus variant. */
469 int arm_arch_cirrus = 0;
471 /* Nonzero if this chip supports Intel Wireless MMX technology. */
472 int arm_arch_iwmmxt = 0;
474 /* Nonzero if this chip is an XScale. */
475 int arm_arch_xscale = 0;
477 /* Nonzero if tuning for XScale */
478 int arm_tune_xscale = 0;
480 /* Nonzero if we want to tune for stores that access the write-buffer.
481 This typically means an ARM6 or ARM7 with MMU or MPU. */
482 int arm_tune_wbuf = 0;
484 /* Nonzero if generating Thumb instructions. */
485 int thumb_code = 0;
487 /* Nonzero if we should define __THUMB_INTERWORK__ in the
488 preprocessor.
489 XXX This is a bit of a hack, it's intended to help work around
490 problems in GLD which doesn't understand that armv5t code is
491 interworking clean. */
492 int arm_cpp_interwork = 0;
494 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
495 must report the mode of the memory reference from PRINT_OPERAND to
496 PRINT_OPERAND_ADDRESS. */
497 enum machine_mode output_memory_reference_mode;
499 /* The register number to be used for the PIC offset register. */
500 static const char * arm_pic_register_string = NULL;
501 int arm_pic_register = INVALID_REGNUM;
503 /* Set to 1 when a return insn is output, this means that the epilogue
504 is not needed. */
505 int return_used_this_function;
 507 /* Set to 1 after arm_reorg has started. Reset at the start of
508 the next function. */
509 static int after_arm_reorg = 0;
511 /* The maximum number of insns to be used when loading a constant. */
512 static int arm_constant_limit = 3;
514 /* For an explanation of these variables, see final_prescan_insn below. */
515 int arm_ccfsm_state;
516 enum arm_cond_code arm_current_cc;
517 rtx arm_target_insn;
518 int arm_target_label;
520 /* The condition codes of the ARM, and the inverse function. */
521 static const char * const arm_condition_codes[] =
523 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
524 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
527 #define streq(string1, string2) (strcmp (string1, string2) == 0)
529 /* Initialization code. */
531 struct processors
533 const char *const name;
534 enum processor_type core;
535 const char *arch;
536 const unsigned long flags;
537 bool (* rtx_costs) (rtx, int, int, int *);
540 /* Not all of these give usefully different compilation alternatives,
541 but there is no simple way of generalizing them. */
542 static const struct processors all_cores[] =
544 /* ARM Cores */
545 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
546 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
547 #include "arm-cores.def"
548 #undef ARM_CORE
549 {NULL, arm_none, NULL, 0, NULL}
552 static const struct processors all_architectures[] =
554 /* ARM Architectures */
555 /* We don't specify rtx_costs here as it will be figured out
556 from the core. */
558 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
559 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
560 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
561 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
562 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
563 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
564 implementations that support it, so we will leave it out for now. */
565 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
566 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
567 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
568 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
569 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
570 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
571 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
572 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
573 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
574 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
575 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
576 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
577 {NULL, arm_none, NULL, 0 , NULL}
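/* Illustrative reading of the table above: "-march=armv5te" matches the
   arm1026ejs row, so insn_flags becomes FL_CO_PROC | FL_FOR_ARCH5TE and,
   unless -mcpu= or -mtune= says otherwise, that core is also used to
   guess the tuning options.  */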
580 struct arm_cpu_select
582 const char * string;
583 const char * name;
584 const struct processors * processors;
587 /* This is a magic structure. The 'string' field is magically filled in
588 with a pointer to the value specified by the user on the command line
589 assuming that the user has specified such a value. */
591 static struct arm_cpu_select arm_select[] =
593 /* string name processors */
594 { NULL, "-mcpu=", all_cores },
595 { NULL, "-march=", all_architectures },
596 { NULL, "-mtune=", all_cores }
599 /* Defines representing the indexes into the above table. */
600 #define ARM_OPT_SET_CPU 0
601 #define ARM_OPT_SET_ARCH 1
602 #define ARM_OPT_SET_TUNE 2
 604 /* The name of the preprocessor macro to define for this architecture. */
606 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
608 struct fpu_desc
610 const char * name;
611 enum fputype fpu;
 615 /* Available values for -mfpu=. */
617 static const struct fpu_desc all_fpus[] =
619 {"fpa", FPUTYPE_FPA},
620 {"fpe2", FPUTYPE_FPA_EMU2},
621 {"fpe3", FPUTYPE_FPA_EMU2},
622 {"maverick", FPUTYPE_MAVERICK},
623 {"vfp", FPUTYPE_VFP}
627 /* Floating point models used by the different hardware.
628 See fputype in arm.h. */
630 static const enum fputype fp_model_for_fpu[] =
632 /* No FP hardware. */
633 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
634 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
635 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
636 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
637 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
638 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
642 struct float_abi
644 const char * name;
645 enum float_abi_type abi_type;
649 /* Available values for -mfloat-abi=. */
651 static const struct float_abi all_float_abis[] =
653 {"soft", ARM_FLOAT_ABI_SOFT},
654 {"softfp", ARM_FLOAT_ABI_SOFTFP},
655 {"hard", ARM_FLOAT_ABI_HARD}
659 struct abi_name
661 const char *name;
662 enum arm_abi_type abi_type;
666 /* Available values for -mabi=. */
668 static const struct abi_name arm_all_abis[] =
670 {"apcs-gnu", ARM_ABI_APCS},
671 {"atpcs", ARM_ABI_ATPCS},
672 {"aapcs", ARM_ABI_AAPCS},
673 {"iwmmxt", ARM_ABI_IWMMXT}
676 /* Return the number of bits set in VALUE. */
677 static unsigned
678 bit_count (unsigned long value)
680 unsigned long count = 0;
682 while (value)
684 count++;
685 value &= value - 1; /* Clear the least-significant set bit. */
688 return count;
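/* Illustrative trace of the loop above: for value == 0x28 (binary 101000)
   the body runs twice -- 0x28 & 0x27 == 0x20, then 0x20 & 0x1f == 0 -- so
   bit_count returns 2.  Each "value &= value - 1" clears exactly one set
   bit, so the iteration count equals the population count.  */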
691 /* Set up library functions unique to ARM. */
693 static void
694 arm_init_libfuncs (void)
696 /* There are no special library functions unless we are using the
697 ARM BPABI. */
698 if (!TARGET_BPABI)
699 return;
701 /* The functions below are described in Section 4 of the "Run-Time
702 ABI for the ARM architecture", Version 1.0. */
704 /* Double-precision floating-point arithmetic. Table 2. */
705 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
706 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
707 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
708 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
709 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
711 /* Double-precision comparisons. Table 3. */
712 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
713 set_optab_libfunc (ne_optab, DFmode, NULL);
714 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
715 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
716 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
717 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
718 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
720 /* Single-precision floating-point arithmetic. Table 4. */
721 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
722 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
723 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
724 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
725 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
727 /* Single-precision comparisons. Table 5. */
728 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
729 set_optab_libfunc (ne_optab, SFmode, NULL);
730 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
731 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
732 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
733 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
734 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
736 /* Floating-point to integer conversions. Table 6. */
737 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
738 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
739 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
740 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
741 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
742 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
743 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
744 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
746 /* Conversions between floating types. Table 7. */
747 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
748 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
750 /* Integer to floating-point conversions. Table 8. */
751 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
752 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
753 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
754 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
755 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
756 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
757 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
758 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
760 /* Long long. Table 9. */
761 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
762 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
763 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
764 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
765 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
766 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
767 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
768 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
770 /* Integer (32/32->32) division. \S 4.3.1. */
771 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
772 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
774 /* The divmod functions are designed so that they can be used for
775 plain division, even though they return both the quotient and the
776 remainder. The quotient is returned in the usual location (i.e.,
777 r0 for SImode, {r0, r1} for DImode), just as would be expected
778 for an ordinary division routine. Because the AAPCS calling
779 conventions specify that all of { r0, r1, r2, r3 } are
 780 call-clobbered registers, there is no need to tell the compiler
781 explicitly that those registers are clobbered by these
782 routines. */
783 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
784 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
785 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
786 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
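/* Illustrative note on the mappings above: a plain SImode division such as
   "q = a / b" becomes a call to __aeabi_idivmod; the quotient is taken
   from r0 just as for an ordinary division routine, and the remainder
   (which the AEABI routine also returns, in r1) is simply ignored.  */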
788 /* We don't have mod libcalls. Fortunately gcc knows how to use the
789 divmod libcalls instead. */
790 set_optab_libfunc (smod_optab, DImode, NULL);
791 set_optab_libfunc (umod_optab, DImode, NULL);
792 set_optab_libfunc (smod_optab, SImode, NULL);
793 set_optab_libfunc (umod_optab, SImode, NULL);
796 /* Implement TARGET_HANDLE_OPTION. */
798 static bool
799 arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
801 switch (code)
803 case OPT_mabi_:
804 target_abi_name = arg;
805 return true;
807 case OPT_march_:
808 arm_select[1].string = arg;
809 return true;
811 case OPT_mcpu_:
812 arm_select[0].string = arg;
813 return true;
815 case OPT_mfloat_abi_:
816 target_float_abi_name = arg;
817 return true;
819 case OPT_mfp_:
820 case OPT_mfpe_:
821 target_fpe_name = arg;
822 return true;
824 case OPT_mfpu_:
825 target_fpu_name = arg;
826 return true;
828 case OPT_mhard_float:
829 target_float_abi_name = "hard";
830 return true;
832 case OPT_mpic_register_:
833 arm_pic_register_string = arg;
834 return true;
836 case OPT_msoft_float:
837 target_float_abi_name = "soft";
838 return true;
840 case OPT_mstructure_size_boundary_:
841 structure_size_string = arg;
842 return true;
844 case OPT_mtune_:
845 arm_select[2].string = arg;
846 return true;
848 default:
849 return true;
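/* Illustrative flow (hypothetical command line): "-mcpu=xscale" reaches
   this function as code == OPT_mcpu_ with arg == "xscale"; the string is
   stashed in arm_select[0].string, and arm_override_options later looks
   it up in all_cores to set insn_flags and arm_tune.  */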
853 /* Fix up any incompatible options that the user has specified.
854 This has now turned into a maze. */
855 void
856 arm_override_options (void)
858 unsigned i;
859 enum processor_type target_arch_cpu = arm_none;
861 /* Set up the flags based on the cpu/architecture selected by the user. */
862 for (i = ARRAY_SIZE (arm_select); i--;)
864 struct arm_cpu_select * ptr = arm_select + i;
866 if (ptr->string != NULL && ptr->string[0] != '\0')
868 const struct processors * sel;
870 for (sel = ptr->processors; sel->name != NULL; sel++)
871 if (streq (ptr->string, sel->name))
873 /* Set the architecture define. */
874 if (i != ARM_OPT_SET_TUNE)
875 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
877 /* Determine the processor core for which we should
878 tune code-generation. */
879 if (/* -mcpu= is a sensible default. */
880 i == ARM_OPT_SET_CPU
881 /* -mtune= overrides -mcpu= and -march=. */
882 || i == ARM_OPT_SET_TUNE)
883 arm_tune = (enum processor_type) (sel - ptr->processors);
885 /* Remember the CPU associated with this architecture.
886 If no other option is used to set the CPU type,
887 we'll use this to guess the most suitable tuning
888 options. */
889 if (i == ARM_OPT_SET_ARCH)
890 target_arch_cpu = sel->core;
892 if (i != ARM_OPT_SET_TUNE)
894 /* If we have been given an architecture and a processor
895 make sure that they are compatible. We only generate
896 a warning though, and we prefer the CPU over the
897 architecture. */
898 if (insn_flags != 0 && (insn_flags ^ sel->flags))
899 warning (0, "switch -mcpu=%s conflicts with -march= switch",
900 ptr->string);
902 insn_flags = sel->flags;
905 break;
908 if (sel->name == NULL)
909 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
913 /* Guess the tuning options from the architecture if necessary. */
914 if (arm_tune == arm_none)
915 arm_tune = target_arch_cpu;
917 /* If the user did not specify a processor, choose one for them. */
918 if (insn_flags == 0)
920 const struct processors * sel;
921 unsigned int sought;
922 enum processor_type cpu;
924 cpu = TARGET_CPU_DEFAULT;
925 if (cpu == arm_none)
927 #ifdef SUBTARGET_CPU_DEFAULT
928 /* Use the subtarget default CPU if none was specified by
929 configure. */
930 cpu = SUBTARGET_CPU_DEFAULT;
931 #endif
932 /* Default to ARM6. */
933 if (cpu == arm_none)
934 cpu = arm6;
936 sel = &all_cores[cpu];
938 insn_flags = sel->flags;
940 /* Now check to see if the user has specified some command line
 941 switches that require certain abilities from the cpu. */
942 sought = 0;
944 if (TARGET_INTERWORK || TARGET_THUMB)
946 sought |= (FL_THUMB | FL_MODE32);
948 /* There are no ARM processors that support both APCS-26 and
949 interworking. Therefore we force FL_MODE26 to be removed
950 from insn_flags here (if it was set), so that the search
951 below will always be able to find a compatible processor. */
952 insn_flags &= ~FL_MODE26;
955 if (sought != 0 && ((sought & insn_flags) != sought))
957 /* Try to locate a CPU type that supports all of the abilities
958 of the default CPU, plus the extra abilities requested by
959 the user. */
960 for (sel = all_cores; sel->name != NULL; sel++)
961 if ((sel->flags & sought) == (sought | insn_flags))
962 break;
964 if (sel->name == NULL)
966 unsigned current_bit_count = 0;
967 const struct processors * best_fit = NULL;
969 /* Ideally we would like to issue an error message here
970 saying that it was not possible to find a CPU compatible
971 with the default CPU, but which also supports the command
972 line options specified by the programmer, and so they
973 ought to use the -mcpu=<name> command line option to
974 override the default CPU type.
976 If we cannot find a cpu that has both the
977 characteristics of the default cpu and the given
978 command line options we scan the array again looking
979 for a best match. */
980 for (sel = all_cores; sel->name != NULL; sel++)
981 if ((sel->flags & sought) == sought)
983 unsigned count;
985 count = bit_count (sel->flags & insn_flags);
987 if (count >= current_bit_count)
989 best_fit = sel;
990 current_bit_count = count;
994 gcc_assert (best_fit);
995 sel = best_fit;
998 insn_flags = sel->flags;
1000 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
1001 if (arm_tune == arm_none)
1002 arm_tune = (enum processor_type) (sel - all_cores);
1005 /* The processor for which we should tune should now have been
1006 chosen. */
1007 gcc_assert (arm_tune != arm_none);
1009 tune_flags = all_cores[(int)arm_tune].flags;
1010 if (optimize_size)
1011 targetm.rtx_costs = arm_size_rtx_costs;
1012 else
1013 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
1015 /* Make sure that the processor choice does not conflict with any of the
1016 other command line choices. */
1017 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
1019 warning (0, "target CPU does not support interworking" );
1020 target_flags &= ~MASK_INTERWORK;
1023 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
1025 warning (0, "target CPU does not support THUMB instructions");
1026 target_flags &= ~MASK_THUMB;
1029 if (TARGET_APCS_FRAME && TARGET_THUMB)
1031 /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
1032 target_flags &= ~MASK_APCS_FRAME;
1035 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
1036 from here where no function is being compiled currently. */
1037 if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
1038 warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
1040 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
1041 warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
1043 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
1044 warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");
1046 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
1048 warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
1049 target_flags |= MASK_APCS_FRAME;
1052 if (TARGET_POKE_FUNCTION_NAME)
1053 target_flags |= MASK_APCS_FRAME;
1055 if (TARGET_APCS_REENT && flag_pic)
1056 error ("-fpic and -mapcs-reent are incompatible");
1058 if (TARGET_APCS_REENT)
1059 warning (0, "APCS reentrant code not supported. Ignored");
1061 /* If this target is normally configured to use APCS frames, warn if they
1062 are turned off and debugging is turned on. */
1063 if (TARGET_ARM
1064 && write_symbols != NO_DEBUG
1065 && !TARGET_APCS_FRAME
1066 && (TARGET_DEFAULT & MASK_APCS_FRAME))
1067 warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
1069 /* If stack checking is disabled, we can use r10 as the PIC register,
1070 which keeps r9 available. */
1071 if (flag_pic)
1072 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
1074 if (TARGET_APCS_FLOAT)
1075 warning (0, "passing floating point arguments in fp regs not yet supported");
1077 /* Initialize boolean versions of the flags, for use in the arm.md file. */
1078 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
1079 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
1080 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
1081 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
1082 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
1083 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
1084 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
1085 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
1087 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
1088 arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
1089 thumb_code = (TARGET_ARM == 0);
1090 arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
1091 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
1092 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
1094 /* V5 code we generate is completely interworking capable, so we turn off
1095 TARGET_INTERWORK here to avoid many tests later on. */
1097 /* XXX However, we must pass the right pre-processor defines to CPP
1098 or GLD can get confused. This is a hack. */
1099 if (TARGET_INTERWORK)
1100 arm_cpp_interwork = 1;
1102 if (arm_arch5)
1103 target_flags &= ~MASK_INTERWORK;
1105 if (target_abi_name)
1107 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1109 if (streq (arm_all_abis[i].name, target_abi_name))
1111 arm_abi = arm_all_abis[i].abi_type;
1112 break;
1115 if (i == ARRAY_SIZE (arm_all_abis))
1116 error ("invalid ABI option: -mabi=%s", target_abi_name);
1118 else
1119 arm_abi = ARM_DEFAULT_ABI;
1121 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1122 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1124 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1125 error ("iwmmxt abi requires an iwmmxt capable cpu");
1127 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1128 if (target_fpu_name == NULL && target_fpe_name != NULL)
1130 if (streq (target_fpe_name, "2"))
1131 target_fpu_name = "fpe2";
1132 else if (streq (target_fpe_name, "3"))
1133 target_fpu_name = "fpe3";
1134 else
1135 error ("invalid floating point emulation option: -mfpe=%s",
1136 target_fpe_name);
1138 if (target_fpu_name != NULL)
1140 /* The user specified a FPU. */
1141 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1143 if (streq (all_fpus[i].name, target_fpu_name))
1145 arm_fpu_arch = all_fpus[i].fpu;
1146 arm_fpu_tune = arm_fpu_arch;
1147 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1148 break;
1151 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1152 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1154 else
1156 #ifdef FPUTYPE_DEFAULT
1157 /* Use the default if it is specified for this platform. */
1158 arm_fpu_arch = FPUTYPE_DEFAULT;
1159 arm_fpu_tune = FPUTYPE_DEFAULT;
1160 #else
1161 /* Pick one based on CPU type. */
1162 /* ??? Some targets assume FPA is the default.
1163 if ((insn_flags & FL_VFP) != 0)
1164 arm_fpu_arch = FPUTYPE_VFP;
 1165 else
 1166 */
1167 if (arm_arch_cirrus)
1168 arm_fpu_arch = FPUTYPE_MAVERICK;
1169 else
1170 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1171 #endif
1172 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1173 arm_fpu_tune = FPUTYPE_FPA;
1174 else
1175 arm_fpu_tune = arm_fpu_arch;
1176 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1177 gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
1180 if (target_float_abi_name != NULL)
1182 /* The user specified a FP ABI. */
1183 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1185 if (streq (all_float_abis[i].name, target_float_abi_name))
1187 arm_float_abi = all_float_abis[i].abi_type;
1188 break;
1191 if (i == ARRAY_SIZE (all_float_abis))
1192 error ("invalid floating point abi: -mfloat-abi=%s",
1193 target_float_abi_name);
1195 else
1196 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1198 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1199 sorry ("-mfloat-abi=hard and VFP");
1201 /* If soft-float is specified then don't use FPU. */
1202 if (TARGET_SOFT_FLOAT)
1203 arm_fpu_arch = FPUTYPE_NONE;
1205 /* For arm2/3 there is no need to do any scheduling if there is only
1206 a floating point emulator, or we are doing software floating-point. */
1207 if ((TARGET_SOFT_FLOAT
1208 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1209 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1210 && (tune_flags & FL_MODE32) == 0)
1211 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1213 /* Override the default structure alignment for AAPCS ABI. */
1214 if (arm_abi == ARM_ABI_AAPCS)
1215 arm_structure_size_boundary = 8;
1217 if (structure_size_string != NULL)
1219 int size = strtol (structure_size_string, NULL, 0);
1221 if (size == 8 || size == 32
1222 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1223 arm_structure_size_boundary = size;
1224 else
1225 warning (0, "structure size boundary can only be set to %s",
1226 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1229 if (arm_pic_register_string != NULL)
1231 int pic_register = decode_reg_name (arm_pic_register_string);
1233 if (!flag_pic)
1234 warning (0, "-mpic-register= is useless without -fpic");
1236 /* Prevent the user from choosing an obviously stupid PIC register. */
1237 else if (pic_register < 0 || call_used_regs[pic_register]
1238 || pic_register == HARD_FRAME_POINTER_REGNUM
1239 || pic_register == STACK_POINTER_REGNUM
1240 || pic_register >= PC_REGNUM)
1241 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1242 else
1243 arm_pic_register = pic_register;
1246 if (TARGET_THUMB && flag_schedule_insns)
1248 /* Don't warn since it's on by default in -O2. */
1249 flag_schedule_insns = 0;
1252 if (optimize_size)
1254 /* There's some dispute as to whether this should be 1 or 2. However,
1255 experiments seem to show that in pathological cases a setting of
1256 1 degrades less severely than a setting of 2. This could change if
1257 other parts of the compiler change their behavior. */
1258 arm_constant_limit = 1;
1260 /* If optimizing for size, bump the number of instructions that we
1261 are prepared to conditionally execute (even on a StrongARM). */
1262 max_insns_skipped = 6;
1264 else
1266 /* For processors with load scheduling, it never costs more than
1267 2 cycles to load a constant, and the load scheduler may well
1268 reduce that to 1. */
1269 if (arm_ld_sched)
1270 arm_constant_limit = 1;
1272 /* On XScale the longer latency of a load makes it more difficult
1273 to achieve a good schedule, so it's faster to synthesize
1274 constants that can be done in two insns. */
1275 if (arm_tune_xscale)
1276 arm_constant_limit = 2;
1278 /* StrongARM has early execution of branches, so a sequence
1279 that is worth skipping is shorter. */
1280 if (arm_tune_strongarm)
1281 max_insns_skipped = 3;
1284 /* Register global variables with the garbage collector. */
1285 arm_add_gc_roots ();
1288 static void
1289 arm_add_gc_roots (void)
1291 gcc_obstack_init(&minipool_obstack);
1292 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1295 /* A table of known ARM exception types.
1296 For use with the interrupt function attribute. */
1298 typedef struct
1300 const char *const arg;
1301 const unsigned long return_value;
1303 isr_attribute_arg;
1305 static const isr_attribute_arg isr_attribute_args [] =
1307 { "IRQ", ARM_FT_ISR },
1308 { "irq", ARM_FT_ISR },
1309 { "FIQ", ARM_FT_FIQ },
1310 { "fiq", ARM_FT_FIQ },
1311 { "ABORT", ARM_FT_ISR },
1312 { "abort", ARM_FT_ISR },
1313 { "ABORT", ARM_FT_ISR },
1314 { "abort", ARM_FT_ISR },
1315 { "UNDEF", ARM_FT_EXCEPTION },
1316 { "undef", ARM_FT_EXCEPTION },
1317 { "SWI", ARM_FT_EXCEPTION },
1318 { "swi", ARM_FT_EXCEPTION },
1319 { NULL, ARM_FT_NORMAL }
1322 /* Returns the (interrupt) function type of the current
1323 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1325 static unsigned long
1326 arm_isr_value (tree argument)
1328 const isr_attribute_arg * ptr;
1329 const char * arg;
1331 /* No argument - default to IRQ. */
1332 if (argument == NULL_TREE)
1333 return ARM_FT_ISR;
1335 /* Get the value of the argument. */
1336 if (TREE_VALUE (argument) == NULL_TREE
1337 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1338 return ARM_FT_UNKNOWN;
1340 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1342 /* Check it against the list of known arguments. */
1343 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1344 if (streq (arg, ptr->arg))
1345 return ptr->return_value;
1347 /* An unrecognized interrupt type. */
1348 return ARM_FT_UNKNOWN;
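/* Illustrative use from C source (hypothetical handler name):

     void irq_handler (void) __attribute__ ((interrupt ("IRQ")));

   reaches this function with the string "IRQ", which matches the table
   above and yields ARM_FT_ISR; a string not in the table, say "RESET",
   falls through to ARM_FT_UNKNOWN.  */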
1351 /* Computes the type of the current function. */
1353 static unsigned long
1354 arm_compute_func_type (void)
1356 unsigned long type = ARM_FT_UNKNOWN;
1357 tree a;
1358 tree attr;
1360 gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
1362 /* Decide if the current function is volatile. Such functions
1363 never return, and many memory cycles can be saved by not storing
1364 register values that will never be needed again. This optimization
1365 was added to speed up context switching in a kernel application. */
1366 if (optimize > 0
1367 && TREE_NOTHROW (current_function_decl)
1368 && TREE_THIS_VOLATILE (current_function_decl))
1369 type |= ARM_FT_VOLATILE;
1371 if (cfun->static_chain_decl != NULL)
1372 type |= ARM_FT_NESTED;
1374 attr = DECL_ATTRIBUTES (current_function_decl);
1376 a = lookup_attribute ("naked", attr);
1377 if (a != NULL_TREE)
1378 type |= ARM_FT_NAKED;
1380 a = lookup_attribute ("isr", attr);
1381 if (a == NULL_TREE)
1382 a = lookup_attribute ("interrupt", attr);
1384 if (a == NULL_TREE)
1385 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1386 else
1387 type |= arm_isr_value (TREE_VALUE (a));
1389 return type;
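/* Illustrative example (hypothetical declaration): when optimizing, a
   function such as

     void panic (const char *msg) __attribute__ ((noreturn));

   is marked volatile in tree terms, so (provided it is also nothrow) it
   picks up ARM_FT_VOLATILE and the prologue need not save registers whose
   values would only matter on return.  */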
1392 /* Returns the type of the current function. */
1394 unsigned long
1395 arm_current_func_type (void)
1397 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1398 cfun->machine->func_type = arm_compute_func_type ();
1400 return cfun->machine->func_type;
1403 /* Return 1 if it is possible to return using a single instruction.
1404 If SIBLING is non-null, this is a test for a return before a sibling
1405 call. SIBLING is the call insn, so we can examine its register usage. */
1408 use_return_insn (int iscond, rtx sibling)
1410 int regno;
1411 unsigned int func_type;
1412 unsigned long saved_int_regs;
1413 unsigned HOST_WIDE_INT stack_adjust;
1414 arm_stack_offsets *offsets;
1416 /* Never use a return instruction before reload has run. */
1417 if (!reload_completed)
1418 return 0;
1420 func_type = arm_current_func_type ();
1422 /* Naked functions and volatile functions need special
1423 consideration. */
1424 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1425 return 0;
1427 /* So do interrupt functions that use the frame pointer. */
1428 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1429 return 0;
1431 offsets = arm_get_frame_offsets ();
1432 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1434 /* As do variadic functions. */
1435 if (current_function_pretend_args_size
1436 || cfun->machine->uses_anonymous_args
1437 /* Or if the function calls __builtin_eh_return () */
1438 || current_function_calls_eh_return
1439 /* Or if the function calls alloca */
1440 || current_function_calls_alloca
1441 /* Or if there is a stack adjustment. However, if the stack pointer
1442 is saved on the stack, we can use a pre-incrementing stack load. */
1443 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1444 return 0;
1446 saved_int_regs = arm_compute_save_reg_mask ();
1448 /* Unfortunately, the insn
1450 ldmib sp, {..., sp, ...}
1452 triggers a bug on most SA-110 based devices, such that the stack
1453 pointer won't be correctly restored if the instruction takes a
1454 page fault. We work around this problem by popping r3 along with
1455 the other registers, since that is never slower than executing
1456 another instruction.
1458 We test for !arm_arch5 here, because code for any architecture
1459 less than this could potentially be run on one of the buggy
1460 chips. */
1461 if (stack_adjust == 4 && !arm_arch5)
1463 /* Validate that r3 is a call-clobbered register (always true in
1464 the default abi) ... */
1465 if (!call_used_regs[3])
1466 return 0;
1468 /* ... that it isn't being used for a return value (always true
1469 until we implement return-in-regs), or for a tail-call
1470 argument ... */
1471 if (sibling)
1473 gcc_assert (GET_CODE (sibling) == CALL_INSN);
1475 if (find_regno_fusage (sibling, USE, 3))
1476 return 0;
1479 /* ... and that there are no call-saved registers in r0-r2
1480 (always true in the default ABI). */
1481 if (saved_int_regs & 0x7)
1482 return 0;
1485 /* Can't be done if interworking with Thumb, and any registers have been
1486 stacked. */
1487 if (TARGET_INTERWORK && saved_int_regs != 0)
1488 return 0;
1490 /* On StrongARM, conditional returns are expensive if they aren't
1491 taken and multiple registers have been stacked. */
1492 if (iscond && arm_tune_strongarm)
1494 /* Conditional return when just the LR is stored is a simple
1495 conditional-load instruction, that's not expensive. */
1496 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1497 return 0;
1499 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1500 return 0;
1503 /* If there are saved registers but the LR isn't saved, then we need
1504 two instructions for the return. */
1505 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1506 return 0;
1508 /* Can't be done if any of the FPA regs are pushed,
1509 since this also requires an insn. */
1510 if (TARGET_HARD_FLOAT && TARGET_FPA)
1511 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1512 if (regs_ever_live[regno] && !call_used_regs[regno])
1513 return 0;
1515 /* Likewise VFP regs. */
1516 if (TARGET_HARD_FLOAT && TARGET_VFP)
1517 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1518 if (regs_ever_live[regno] && !call_used_regs[regno])
1519 return 0;
1521 if (TARGET_REALLY_IWMMXT)
1522 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1523 if (regs_ever_live[regno] && ! call_used_regs [regno])
1524 return 0;
1526 return 1;
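/* Illustrative consequence of the tests above: a leaf function that saves
   no registers and needs no stack adjustment can return with a single
   instruction (e.g. "bx lr", or "mov pc, lr" on older cores), whereas a
   variadic function, one that calls alloca, or one with a non-trivial
   stack adjustment always takes the full epilogue path.  */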
1529 /* Return TRUE if int I is a valid immediate ARM constant. */
1532 const_ok_for_arm (HOST_WIDE_INT i)
1534 int lowbit;
1536 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1537 be all zero, or all one. */
1538 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1539 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1540 != ((~(unsigned HOST_WIDE_INT) 0)
1541 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1542 return FALSE;
1544 i &= (unsigned HOST_WIDE_INT) 0xffffffff;
1546 /* Fast return for 0 and small values. We must do this for zero, since
1547 the code below can't handle that one case. */
1548 if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
1549 return TRUE;
1551 /* Get the number of trailing zeros, rounded down to the nearest even
1552 number. */
1553 lowbit = (ffs ((int) i) - 1) & ~1;
1555 if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
1556 return TRUE;
1557 else if (lowbit <= 4
1558 && ((i & ~0xc000003f) == 0
1559 || (i & ~0xf000000f) == 0
1560 || (i & ~0xfc000003) == 0))
1561 return TRUE;
1563 return FALSE;
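/* Illustrative examples of the test above (an ARM data-processing
   immediate is an 8-bit value rotated right by an even amount):

     0x000000ff, 0x00ff0000, 0xff000000   accepted via the lowbit test;
     0xf000000f, 0xc000003f, 0xfc000003   accepted by the explicit
                                          wrap-around cases;
     0x00000101, 0x0001fe00               rejected -- the set bits do not
                                          fit any even-rotated 8-bit window.

   A reference implementation of the same predicate by explicit rotation
   (a hypothetical helper, slower but easy to check against):  */

static int
const_ok_for_arm_by_rotation (unsigned HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT window = 0xff;
  int rot;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;
  for (rot = 0; rot < 32; rot += 2)
    {
      if ((i & ~window) == 0)
        return TRUE;
      /* Rotate the 8-bit window left by two within 32 bits.  */
      window = ((window << 2) | (window >> 30))
               & (unsigned HOST_WIDE_INT) 0xffffffff;
    }
  return FALSE;
}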
1566 /* Return true if I is a valid constant for the operation CODE. */
1567 static int
1568 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1570 if (const_ok_for_arm (i))
1571 return 1;
1573 switch (code)
1575 case PLUS:
1576 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1578 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1579 case XOR:
1580 case IOR:
1581 return 0;
1583 case AND:
1584 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1586 default:
1587 gcc_unreachable ();
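/* Illustrative examples: const_ok_for_op (-4096, PLUS) holds even though
   0xfffff000 is not a valid immediate, because the addition can be emitted
   as a SUB of 4096; similarly const_ok_for_op (0xffffff00, AND) holds
   because the AND can be emitted as a BIC of 0xff.  */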
1591 /* Emit a sequence of insns to handle a large constant.
1592 CODE is the code of the operation required, it can be any of SET, PLUS,
1593 IOR, AND, XOR, MINUS;
1594 MODE is the mode in which the operation is being performed;
1595 VAL is the integer to operate on;
1596 SOURCE is the other operand (a register, or a null-pointer for SET);
1597 SUBTARGETS means it is safe to create scratch registers if that will
1598 either produce a simpler sequence, or we will want to cse the values.
1599 Return value is the number of insns emitted. */
1602 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1603 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1605 rtx cond;
1607 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1608 cond = COND_EXEC_TEST (PATTERN (insn));
1609 else
1610 cond = NULL_RTX;
1612 if (subtargets || code == SET
1613 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1614 && REGNO (target) != REGNO (source)))
1616 /* After arm_reorg has been called, we can't fix up expensive
1617 constants by pushing them into memory so we must synthesize
1618 them in-line, regardless of the cost. This is only likely to
1619 be more costly on chips that have load delay slots and we are
1620 compiling without running the scheduler (so no splitting
1621 occurred before the final instruction emission).
1623 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1625 if (!after_arm_reorg
1626 && !cond
1627 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1628 1, 0)
1629 > arm_constant_limit + (code != SET)))
1631 if (code == SET)
1633 /* Currently SET is the only monadic value for CODE, all
 1634 the rest are dyadic. */
1635 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1636 return 1;
1638 else
1640 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1642 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1643 /* For MINUS, the value is subtracted from, since we never
1644 have subtraction of a constant. */
1645 if (code == MINUS)
1646 emit_insn (gen_rtx_SET (VOIDmode, target,
1647 gen_rtx_MINUS (mode, temp, source)));
1648 else
1649 emit_insn (gen_rtx_SET (VOIDmode, target,
1650 gen_rtx_fmt_ee (code, mode, source, temp)));
1651 return 2;
 1656 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
 1657 1);
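/* Illustrative example: (set (reg) (const_int 0xf0000001)) cannot be done
   with one immediate, so in-line synthesis produces a two-instruction
   sequence along the lines of

       mov     rD, #0xf0000000
       orr     rD, rD, #1

   (rD standing for the target register), whereas a constant such as
   0xff000000 is emitted as a single MOV.  */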
1660 static int
1661 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1663 HOST_WIDE_INT temp1;
1664 int num_insns = 0;
1667 int end;
1669 if (i <= 0)
1670 i += 32;
1671 if (remainder & (3 << (i - 2)))
1673 end = i - 8;
1674 if (end < 0)
1675 end += 32;
1676 temp1 = remainder & ((0x0ff << end)
1677 | ((i < end) ? (0xff >> (32 - end)) : 0));
1678 remainder &= ~temp1;
1679 num_insns++;
1680 i -= 6;
1682 i -= 2;
1683 } while (remainder);
1684 return num_insns;
1687 /* Emit an instruction with the indicated PATTERN. If COND is
1688 non-NULL, conditionalize the execution of the instruction on COND
1689 being true. */
1691 static void
1692 emit_constant_insn (rtx cond, rtx pattern)
1694 if (cond)
1695 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1696 emit_insn (pattern);
1699 /* As above, but extra parameter GENERATE which, if clear, suppresses
1700 RTL generation. */
1702 static int
1703 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1704 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1705 int generate)
1707 int can_invert = 0;
1708 int can_negate = 0;
1709 int can_negate_initial = 0;
1710 int can_shift = 0;
1711 int i;
1712 int num_bits_set = 0;
1713 int set_sign_bit_copies = 0;
1714 int clear_sign_bit_copies = 0;
1715 int clear_zero_bit_copies = 0;
1716 int set_zero_bit_copies = 0;
1717 int insns = 0;
1718 unsigned HOST_WIDE_INT temp1, temp2;
1719 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1721 /* Find out which operations are safe for a given CODE. Also do a quick
1722 check for degenerate cases; these can occur when DImode operations
1723 are split. */
1724 switch (code)
1726 case SET:
1727 can_invert = 1;
1728 can_shift = 1;
1729 can_negate = 1;
1730 break;
1732 case PLUS:
1733 can_negate = 1;
1734 can_negate_initial = 1;
1735 break;
1737 case IOR:
1738 if (remainder == 0xffffffff)
1740 if (generate)
1741 emit_constant_insn (cond,
1742 gen_rtx_SET (VOIDmode, target,
1743 GEN_INT (ARM_SIGN_EXTEND (val))));
1744 return 1;
1746 if (remainder == 0)
1748 if (reload_completed && rtx_equal_p (target, source))
1749 return 0;
1750 if (generate)
1751 emit_constant_insn (cond,
1752 gen_rtx_SET (VOIDmode, target, source));
1753 return 1;
1755 break;
1757 case AND:
1758 if (remainder == 0)
1760 if (generate)
1761 emit_constant_insn (cond,
1762 gen_rtx_SET (VOIDmode, target, const0_rtx));
1763 return 1;
1765 if (remainder == 0xffffffff)
1767 if (reload_completed && rtx_equal_p (target, source))
1768 return 0;
1769 if (generate)
1770 emit_constant_insn (cond,
1771 gen_rtx_SET (VOIDmode, target, source));
1772 return 1;
1774 can_invert = 1;
1775 break;
1777 case XOR:
1778 if (remainder == 0)
1780 if (reload_completed && rtx_equal_p (target, source))
1781 return 0;
1782 if (generate)
1783 emit_constant_insn (cond,
1784 gen_rtx_SET (VOIDmode, target, source));
1785 return 1;
1788 /* We don't know how to handle other cases yet. */
1789 gcc_assert (remainder == 0xffffffff);
1791 if (generate)
1792 emit_constant_insn (cond,
1793 gen_rtx_SET (VOIDmode, target,
1794 gen_rtx_NOT (mode, source)));
1795 return 1;
1797 case MINUS:
1798 /* We treat MINUS as (val - source), since (source - val) is always
1799 passed as (source + (-val)). */
1800 if (remainder == 0)
1802 if (generate)
1803 emit_constant_insn (cond,
1804 gen_rtx_SET (VOIDmode, target,
1805 gen_rtx_NEG (mode, source)));
1806 return 1;
1808 if (const_ok_for_arm (val))
1810 if (generate)
1811 emit_constant_insn (cond,
1812 gen_rtx_SET (VOIDmode, target,
1813 gen_rtx_MINUS (mode, GEN_INT (val),
1814 source)));
1815 return 1;
1817 can_negate = 1;
1819 break;
1821 default:
1822 gcc_unreachable ();
1825 /* If we can do it in one insn get out quickly. */
1826 if (const_ok_for_arm (val)
1827 || (can_negate_initial && const_ok_for_arm (-val))
1828 || (can_invert && const_ok_for_arm (~val)))
1830 if (generate)
1831 emit_constant_insn (cond,
1832 gen_rtx_SET (VOIDmode, target,
1833 (source
1834 ? gen_rtx_fmt_ee (code, mode, source,
1835 GEN_INT (val))
1836 : GEN_INT (val))));
1837 return 1;
1840 /* Calculate a few attributes that may be useful for specific
1841 optimizations. */
1842 for (i = 31; i >= 0; i--)
1844 if ((remainder & (1 << i)) == 0)
1845 clear_sign_bit_copies++;
1846 else
1847 break;
1850 for (i = 31; i >= 0; i--)
1852 if ((remainder & (1 << i)) != 0)
1853 set_sign_bit_copies++;
1854 else
1855 break;
1858 for (i = 0; i <= 31; i++)
1860 if ((remainder & (1 << i)) == 0)
1861 clear_zero_bit_copies++;
1862 else
1863 break;
1866 for (i = 0; i <= 31; i++)
1868 if ((remainder & (1 << i)) != 0)
1869 set_zero_bit_copies++;
1870 else
1871 break;
1874 switch (code)
1876 case SET:
1877 /* See if we can do this by sign_extending a constant that is known
1878 to be negative. This is a good way of doing it, since the shift
1879 may well merge into a subsequent insn. */
1880 if (set_sign_bit_copies > 1)
1882 if (const_ok_for_arm
1883 (temp1 = ARM_SIGN_EXTEND (remainder
1884 << (set_sign_bit_copies - 1))))
1886 if (generate)
1888 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1889 emit_constant_insn (cond,
1890 gen_rtx_SET (VOIDmode, new_src,
1891 GEN_INT (temp1)));
1892 emit_constant_insn (cond,
1893 gen_ashrsi3 (target, new_src,
1894 GEN_INT (set_sign_bit_copies - 1)));
1896 return 2;
1898 /* For an inverted constant, we will need to set the low bits;
1899 these will be shifted out of harm's way. */
1900 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1901 if (const_ok_for_arm (~temp1))
1903 if (generate)
1905 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1906 emit_constant_insn (cond,
1907 gen_rtx_SET (VOIDmode, new_src,
1908 GEN_INT (temp1)));
1909 emit_constant_insn (cond,
1910 gen_ashrsi3 (target, new_src,
1911 GEN_INT (set_sign_bit_copies - 1)));
1913 return 2;
1917 /* See if we can calculate the value as the difference between two
1918 valid immediates. */
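	  /* A worked example, for illustration: remainder == 0x00fffff0
	     gives topshift == 8, temp1 == 0x01000000 and temp2 == 0x10,
	     both valid immediates, so (with rT a placeholder scratch) we emit

		mov	rT, #0x01000000
		add	rD, rT, #-16	@ i.e. sub rD, rT, #16

	     instead of a three-instruction synthesis.  */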
1919 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1921 int topshift = clear_sign_bit_copies & ~1;
1923 temp1 = ((remainder + (0x00800000 >> topshift))
1924 & (0xff000000 >> topshift));
1926 /* If temp1 is zero, then that means the 9 most significant
1927 bits of remainder were 1 and we've caused it to overflow.
1928 When topshift is 0 we don't need to do anything since we
1929 can borrow from 'bit 32'. */
1930 if (temp1 == 0 && topshift != 0)
1931 temp1 = 0x80000000 >> (topshift - 1);
1933 temp2 = temp1 - remainder;
1935 if (const_ok_for_arm (temp2))
1937 if (generate)
1939 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1940 emit_constant_insn (cond,
1941 gen_rtx_SET (VOIDmode, new_src,
1942 GEN_INT (temp1)));
1943 emit_constant_insn (cond,
1944 gen_addsi3 (target, new_src,
1945 GEN_INT (-temp2)));
1948 return 2;
1952 /* See if we can generate this by setting the bottom (or the top)
1953 16 bits, and then shifting these into the other half of the
1954 word. We only look for the simplest cases; to do more would cost
1955 too much. Be careful, however, not to generate this when the
1956 alternative would take fewer insns. */
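	  /* For illustration: remainder == 0x01010101 is matched here with
	     temp2 == 0x0101 and i == 16; the low half is built in two insns
	     and then duplicated with

		orr	rD, rT, rT, lsl #16

	     (rT, rD being placeholder registers), giving three insns where
	     the generic chunk-at-a-time fallback needs four.  */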
1957 if (val & 0xffff0000)
1959 temp1 = remainder & 0xffff0000;
1960 temp2 = remainder & 0x0000ffff;
1962 /* Overlaps outside this range are best done using other methods. */
1963 for (i = 9; i < 24; i++)
1965 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1966 && !const_ok_for_arm (temp2))
1968 rtx new_src = (subtargets
1969 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1970 : target);
1971 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1972 source, subtargets, generate);
1973 source = new_src;
1974 if (generate)
1975 emit_constant_insn
1976 (cond,
1977 gen_rtx_SET
1978 (VOIDmode, target,
1979 gen_rtx_IOR (mode,
1980 gen_rtx_ASHIFT (mode, source,
1981 GEN_INT (i)),
1982 source)));
1983 return insns + 1;
1987 /* Don't duplicate cases already considered. */
1988 for (i = 17; i < 24; i++)
1990 if (((temp1 | (temp1 >> i)) == remainder)
1991 && !const_ok_for_arm (temp1))
1993 rtx new_src = (subtargets
1994 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1995 : target);
1996 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1997 source, subtargets, generate);
1998 source = new_src;
1999 if (generate)
2000 emit_constant_insn
2001 (cond,
2002 gen_rtx_SET (VOIDmode, target,
2003 gen_rtx_IOR
2004 (mode,
2005 gen_rtx_LSHIFTRT (mode, source,
2006 GEN_INT (i)),
2007 source)));
2008 return insns + 1;
2012 break;
2014 case IOR:
2015 case XOR:
2016 /* If we have IOR or XOR, and the constant can be loaded in a
2017 single instruction, and we can find a temporary to put it in,
2018 then this can be done in two instructions instead of 3-4. */
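      /* For example (IOR): val == 0xffffff00 is not a valid immediate,
	 but ~val == 0xff is, so with a scratch register available this
	 typically assembles as

	    mvn	rT, #0xff
	    orr	rD, rS, rT

	 i.e. two insns (rT, rD, rS are placeholders).  */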
2019 if (subtargets
2020 /* TARGET can't be NULL if SUBTARGETS is 0 */
2021 || (reload_completed && !reg_mentioned_p (target, source)))
2023 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2025 if (generate)
2027 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2029 emit_constant_insn (cond,
2030 gen_rtx_SET (VOIDmode, sub,
2031 GEN_INT (val)));
2032 emit_constant_insn (cond,
2033 gen_rtx_SET (VOIDmode, target,
2034 gen_rtx_fmt_ee (code, mode,
2035 source, sub)));
2037 return 2;
2041 if (code == XOR)
2042 break;
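      /* The next two blocks handle constants that are a solid run of set
	 bits at one end of the word, e.g. (for IOR) val == 0xfffffe00,
	 which can be generated with two shifted MVNs:

	    mvn	rT, rS, lsl #23
	    mvn	rD, rT, lsr #23		@ rD = rS | 0xfffffe00

	 rather than loading the constant separately (register names are
	 placeholders).  */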
2044 if (set_sign_bit_copies > 8
2045 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2047 if (generate)
2049 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2050 rtx shift = GEN_INT (set_sign_bit_copies);
2052 emit_constant_insn
2053 (cond,
2054 gen_rtx_SET (VOIDmode, sub,
2055 gen_rtx_NOT (mode,
2056 gen_rtx_ASHIFT (mode,
2057 source,
2058 shift))));
2059 emit_constant_insn
2060 (cond,
2061 gen_rtx_SET (VOIDmode, target,
2062 gen_rtx_NOT (mode,
2063 gen_rtx_LSHIFTRT (mode, sub,
2064 shift))));
2066 return 2;
2069 if (set_zero_bit_copies > 8
2070 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2072 if (generate)
2074 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2075 rtx shift = GEN_INT (set_zero_bit_copies);
2077 emit_constant_insn
2078 (cond,
2079 gen_rtx_SET (VOIDmode, sub,
2080 gen_rtx_NOT (mode,
2081 gen_rtx_LSHIFTRT (mode,
2082 source,
2083 shift))));
2084 emit_constant_insn
2085 (cond,
2086 gen_rtx_SET (VOIDmode, target,
2087 gen_rtx_NOT (mode,
2088 gen_rtx_ASHIFT (mode, sub,
2089 shift))));
2091 return 2;
2094 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2096 if (generate)
2098 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2099 emit_constant_insn (cond,
2100 gen_rtx_SET (VOIDmode, sub,
2101 gen_rtx_NOT (mode, source)));
2102 source = sub;
2103 if (subtargets)
2104 sub = gen_reg_rtx (mode);
2105 emit_constant_insn (cond,
2106 gen_rtx_SET (VOIDmode, sub,
2107 gen_rtx_AND (mode, source,
2108 GEN_INT (temp1))));
2109 emit_constant_insn (cond,
2110 gen_rtx_SET (VOIDmode, target,
2111 gen_rtx_NOT (mode, sub)));
2113 return 3;
2115 break;
2117 case AND:
2118 /* See if two shifts will do 2 or more insns' worth of work. */
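      /* For example: remainder == 0x0000ffff (16 leading zero bits) is
	 handled without loading any constant at all:

	    mov	rT, rS, lsl #16
	    mov	rD, rT, lsr #16		@ rD = rS & 0x0000ffff

	 The recursive call below handles masks that additionally clear
	 bits in the remaining part of the word.  */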
2119 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2121 HOST_WIDE_INT shift_mask = ((0xffffffff
2122 << (32 - clear_sign_bit_copies))
2123 & 0xffffffff);
2125 if ((remainder | shift_mask) != 0xffffffff)
2127 if (generate)
2129 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2130 insns = arm_gen_constant (AND, mode, cond,
2131 remainder | shift_mask,
2132 new_src, source, subtargets, 1);
2133 source = new_src;
2135 else
2137 rtx targ = subtargets ? NULL_RTX : target;
2138 insns = arm_gen_constant (AND, mode, cond,
2139 remainder | shift_mask,
2140 targ, source, subtargets, 0);
2144 if (generate)
2146 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2147 rtx shift = GEN_INT (clear_sign_bit_copies);
2149 emit_insn (gen_ashlsi3 (new_src, source, shift));
2150 emit_insn (gen_lshrsi3 (target, new_src, shift));
2153 return insns + 2;
2156 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2158 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2160 if ((remainder | shift_mask) != 0xffffffff)
2162 if (generate)
2164 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2166 insns = arm_gen_constant (AND, mode, cond,
2167 remainder | shift_mask,
2168 new_src, source, subtargets, 1);
2169 source = new_src;
2171 else
2173 rtx targ = subtargets ? NULL_RTX : target;
2175 insns = arm_gen_constant (AND, mode, cond,
2176 remainder | shift_mask,
2177 targ, source, subtargets, 0);
2181 if (generate)
2183 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2184 rtx shift = GEN_INT (clear_zero_bit_copies);
2186 emit_insn (gen_lshrsi3 (new_src, source, shift));
2187 emit_insn (gen_ashlsi3 (target, new_src, shift));
2190 return insns + 2;
2193 break;
2195 default:
2196 break;
2199 for (i = 0; i < 32; i++)
2200 if (remainder & (1 << i))
2201 num_bits_set++;
2203 if (code == AND || (can_invert && num_bits_set > 16))
2204 remainder = (~remainder) & 0xffffffff;
2205 else if (code == PLUS && num_bits_set > 16)
2206 remainder = (-remainder) & 0xffffffff;
2207 else
2209 can_invert = 0;
2210 can_negate = 0;
2213 /* Now try and find a way of doing the job in either two or three
2214 instructions.
2215 We start by looking for the largest block of zeros that is aligned on
2216 a 2-bit boundary; we then fill up the temps, wrapping around to the
2217 top of the word when we drop off the bottom.
2218 In the worst case this code should produce no more than four insns. */
2220 int best_start = 0;
2221 int best_consecutive_zeros = 0;
2223 for (i = 0; i < 32; i += 2)
2225 int consecutive_zeros = 0;
2227 if (!(remainder & (3 << i)))
2229 while ((i < 32) && !(remainder & (3 << i)))
2231 consecutive_zeros += 2;
2232 i += 2;
2234 if (consecutive_zeros > best_consecutive_zeros)
2236 best_consecutive_zeros = consecutive_zeros;
2237 best_start = i - consecutive_zeros;
2239 i -= 2;
2243 /* So long as it won't require any more insns to do so, it's
2244 desirable to emit a small constant (in bits 0...9) in the last
2245 insn. This way there is more chance that it can be combined with
2246 a later addressing insn to form a pre-indexed load or store
2247 operation. Consider:
2249 *((volatile int *)0xe0000100) = 1;
2250 *((volatile int *)0xe0000110) = 2;
2252 We want this to wind up as:
2254 mov rA, #0xe0000000
2255 mov rB, #1
2256 str rB, [rA, #0x100]
2257 mov rB, #2
2258 str rB, [rA, #0x110]
2260 rather than having to synthesize both large constants from scratch.
2262 Therefore, we calculate how many insns would be required to emit
2263 the constant starting from `best_start', and also starting from
2264 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2265 yield a shorter sequence, we may as well use zero. */
2266 if (best_start != 0
2267 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2268 && (count_insns_for_constant (remainder, 0) <=
2269 count_insns_for_constant (remainder, best_start)))
2270 best_start = 0;
2272 /* Now start emitting the insns. */
2273 i = best_start;
2276 int end;
2278 if (i <= 0)
2279 i += 32;
2280 if (remainder & (3 << (i - 2)))
2282 end = i - 8;
2283 if (end < 0)
2284 end += 32;
2285 temp1 = remainder & ((0x0ff << end)
2286 | ((i < end) ? (0xff >> (32 - end)) : 0));
2287 remainder &= ~temp1;
2289 if (generate)
2291 rtx new_src, temp1_rtx;
2293 if (code == SET || code == MINUS)
2295 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2296 if (can_invert && code != MINUS)
2297 temp1 = ~temp1;
2299 else
2301 if (remainder && subtargets)
2302 new_src = gen_reg_rtx (mode);
2303 else
2304 new_src = target;
2305 if (can_invert)
2306 temp1 = ~temp1;
2307 else if (can_negate)
2308 temp1 = -temp1;
2311 temp1 = trunc_int_for_mode (temp1, mode);
2312 temp1_rtx = GEN_INT (temp1);
2314 if (code == SET)
2316 else if (code == MINUS)
2317 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2318 else
2319 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2321 emit_constant_insn (cond,
2322 gen_rtx_SET (VOIDmode, new_src,
2323 temp1_rtx));
2324 source = new_src;
2327 if (code == SET)
2329 can_invert = 0;
2330 code = PLUS;
2332 else if (code == MINUS)
2333 code = PLUS;
2335 insns++;
2336 i -= 6;
2338 i -= 2;
2340 while (remainder);
2343 return insns;
2346 /* Canonicalize a comparison so that we are more likely to recognize it.
2347 This can be done for a few constant compares, where we can make the
2348 immediate value easier to load. */
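/* For example, a comparison such as (x > 0x3ff) cannot use 0x3ff as a
   cmp immediate, but 0x400 is valid, so it is rewritten here as
   (x >= 0x400); similar adjustments by one are made for the other
   inequality codes.  */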
2350 enum rtx_code
2351 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2353 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2355 switch (code)
2357 case EQ:
2358 case NE:
2359 return code;
2361 case GT:
2362 case LE:
2363 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2364 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2366 *op1 = GEN_INT (i + 1);
2367 return code == GT ? GE : LT;
2369 break;
2371 case GE:
2372 case LT:
2373 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2374 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2376 *op1 = GEN_INT (i - 1);
2377 return code == GE ? GT : LE;
2379 break;
2381 case GTU:
2382 case LEU:
2383 if (i != ~((unsigned HOST_WIDE_INT) 0)
2384 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2386 *op1 = GEN_INT (i + 1);
2387 return code == GTU ? GEU : LTU;
2389 break;
2391 case GEU:
2392 case LTU:
2393 if (i != 0
2394 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2396 *op1 = GEN_INT (i - 1);
2397 return code == GEU ? GTU : LEU;
2399 break;
2401 default:
2402 gcc_unreachable ();
2405 return code;
2409 /* Define how to find the value returned by a function. */
2412 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2414 enum machine_mode mode;
2415 int unsignedp ATTRIBUTE_UNUSED;
2416 rtx r ATTRIBUTE_UNUSED;
2418 mode = TYPE_MODE (type);
2419 /* Promote integer types. */
2420 if (INTEGRAL_TYPE_P (type))
2421 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2423 /* Promotes small structs returned in a register to full-word size
2424 for big-endian AAPCS. */
2425 if (arm_return_in_msb (type))
2427 HOST_WIDE_INT size = int_size_in_bytes (type);
2428 if (size % UNITS_PER_WORD != 0)
2430 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2431 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2435 return LIBCALL_VALUE(mode);
2438 /* Determine the amount of memory needed to store the possible return
2439 registers of an untyped call. */
2441 arm_apply_result_size (void)
2443 int size = 16;
2445 if (TARGET_ARM)
2447 if (TARGET_HARD_FLOAT_ABI)
2449 if (TARGET_FPA)
2450 size += 12;
2451 if (TARGET_MAVERICK)
2452 size += 8;
2454 if (TARGET_IWMMXT_ABI)
2455 size += 8;
2458 return size;
2461 /* Decide whether a type should be returned in memory (true)
2462 or in a register (false). This is called by the macro
2463 RETURN_IN_MEMORY. */
2465 arm_return_in_memory (tree type)
2467 HOST_WIDE_INT size;
2469 if (!AGGREGATE_TYPE_P (type) &&
2470 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2471 /* All simple types are returned in registers.
2472 For AAPCS, complex types are treated the same as aggregates. */
2473 return 0;
2475 size = int_size_in_bytes (type);
2477 if (arm_abi != ARM_ABI_APCS)
2479 /* ATPCS and later return aggregate types in memory only if they are
2480 larger than a word (or are variable size). */
2481 return (size < 0 || size > UNITS_PER_WORD);
2484 /* For the arm-wince targets we choose to be compatible with Microsoft's
2485 ARM and Thumb compilers, which always return aggregates in memory. */
2486 #ifndef ARM_WINCE
2487 /* All structures/unions bigger than one word are returned in memory.
2488 Also catch the case where int_size_in_bytes returns -1. In this case
2489 the aggregate is either huge or of variable size, and in either case
2490 we will want to return it via memory and not in a register. */
2491 if (size < 0 || size > UNITS_PER_WORD)
2492 return 1;
2494 if (TREE_CODE (type) == RECORD_TYPE)
2496 tree field;
2498 /* For a struct the APCS says that we only return in a register
2499 if the type is 'integer like' and every addressable element
2500 has an offset of zero. For practical purposes this means
2501 that the structure can have at most one non bit-field element
2502 and that this element must be the first one in the structure. */
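      /* Two hypothetical examples under the APCS rules:

	   struct a { int x; };			-- returned in r0
	   struct b { short lo; short hi; };	-- returned in memory

	 both fit in a word, but `b' has a second addressable
	 (non-bit-field) member and so fails the `integer like' test
	 applied below.  */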
2504 /* Find the first field, ignoring non FIELD_DECL things which will
2505 have been created by C++. */
2506 for (field = TYPE_FIELDS (type);
2507 field && TREE_CODE (field) != FIELD_DECL;
2508 field = TREE_CHAIN (field))
2509 continue;
2511 if (field == NULL)
2512 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2514 /* Check that the first field is valid for returning in a register. */
2516 /* ... Floats are not allowed */
2517 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2518 return 1;
2520 /* ... Aggregates that are not themselves valid for returning in
2521 a register are not allowed. */
2522 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2523 return 1;
2525 /* Now check the remaining fields, if any. Only bitfields are allowed,
2526 since they are not addressable. */
2527 for (field = TREE_CHAIN (field);
2528 field;
2529 field = TREE_CHAIN (field))
2531 if (TREE_CODE (field) != FIELD_DECL)
2532 continue;
2534 if (!DECL_BIT_FIELD_TYPE (field))
2535 return 1;
2538 return 0;
2541 if (TREE_CODE (type) == UNION_TYPE)
2543 tree field;
2545 /* Unions can be returned in registers if every element is
2546 integral, or can be returned in an integer register. */
2547 for (field = TYPE_FIELDS (type);
2548 field;
2549 field = TREE_CHAIN (field))
2551 if (TREE_CODE (field) != FIELD_DECL)
2552 continue;
2554 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2555 return 1;
2557 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2558 return 1;
2561 return 0;
2563 #endif /* not ARM_WINCE */
2565 /* Return all other types in memory. */
2566 return 1;
2569 /* Indicate whether or not words of a double are in big-endian order. */
2572 arm_float_words_big_endian (void)
2574 if (TARGET_MAVERICK)
2575 return 0;
2577 /* For FPA, float words are always big-endian. For VFP, float words
2578 follow the memory system mode. */
2580 if (TARGET_FPA)
2582 return 1;
2585 if (TARGET_VFP)
2586 return (TARGET_BIG_END ? 1 : 0);
2588 return 1;
2591 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2592 for a call to a function whose data type is FNTYPE.
2593 For a library call, FNTYPE is NULL. */
2594 void
2595 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2596 rtx libname ATTRIBUTE_UNUSED,
2597 tree fndecl ATTRIBUTE_UNUSED)
2599 /* On the ARM, the offset starts at 0. */
2600 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2601 pcum->iwmmxt_nregs = 0;
2602 pcum->can_split = true;
2604 pcum->call_cookie = CALL_NORMAL;
2606 if (TARGET_LONG_CALLS)
2607 pcum->call_cookie = CALL_LONG;
2609 /* Check for long call/short call attributes. The attributes
2610 override any command line option. */
2611 if (fntype)
2613 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2614 pcum->call_cookie = CALL_SHORT;
2615 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2616 pcum->call_cookie = CALL_LONG;
2619 /* Varargs vectors are treated the same as long long.
2620 named_count avoids having to change the way arm handles 'named' */
2621 pcum->named_count = 0;
2622 pcum->nargs = 0;
2624 if (TARGET_REALLY_IWMMXT && fntype)
2626 tree fn_arg;
2628 for (fn_arg = TYPE_ARG_TYPES (fntype);
2629 fn_arg;
2630 fn_arg = TREE_CHAIN (fn_arg))
2631 pcum->named_count += 1;
2633 if (! pcum->named_count)
2634 pcum->named_count = INT_MAX;
2639 /* Return true if mode/type need doubleword alignment. */
2640 bool
2641 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2643 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2644 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
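/* For example, under an AAPCS-based (ARM_DOUBLEWORD_ALIGN) configuration a
   DImode argument such as `long long' reports doubleword alignment here,
   so arm_function_arg below will skip an odd-numbered register and pass it
   in an even/odd pair such as r2/r3.  */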
2648 /* Determine where to put an argument to a function.
2649 Value is zero to push the argument on the stack,
2650 or a hard register in which to store the argument.
2652 MODE is the argument's machine mode.
2653 TYPE is the data type of the argument (as a tree).
2654 This is null for libcalls where that information may
2655 not be available.
2656 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2657 the preceding args and about the function being called.
2658 NAMED is nonzero if this argument is a named parameter
2659 (otherwise it is an extra parameter matching an ellipsis). */
2662 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2663 tree type, int named)
2665 int nregs;
2667 /* Varargs vectors are treated the same as long long.
2668 named_count avoids having to change the way arm handles 'named' */
2669 if (TARGET_IWMMXT_ABI
2670 && arm_vector_mode_supported_p (mode)
2671 && pcum->named_count > pcum->nargs + 1)
2673 if (pcum->iwmmxt_nregs <= 9)
2674 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2675 else
2677 pcum->can_split = false;
2678 return NULL_RTX;
2682 /* Put doubleword aligned quantities in even register pairs. */
2683 if (pcum->nregs & 1
2684 && ARM_DOUBLEWORD_ALIGN
2685 && arm_needs_doubleword_align (mode, type))
2686 pcum->nregs++;
2688 if (mode == VOIDmode)
2689 /* Compute operand 2 of the call insn. */
2690 return GEN_INT (pcum->call_cookie);
2692 /* Only allow splitting an arg between regs and memory if all preceding
2693 args were allocated to regs. For args passed by reference we only count
2694 the reference pointer. */
2695 if (pcum->can_split)
2696 nregs = 1;
2697 else
2698 nregs = ARM_NUM_REGS2 (mode, type);
2700 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2701 return NULL_RTX;
2703 return gen_rtx_REG (mode, pcum->nregs);
2706 static int
2707 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2708 tree type, bool named ATTRIBUTE_UNUSED)
2710 int nregs = pcum->nregs;
2712 if (arm_vector_mode_supported_p (mode))
2713 return 0;
2715 if (NUM_ARG_REGS > nregs
2716 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2717 && pcum->can_split)
2718 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2720 return 0;
2723 /* Variable sized types are passed by reference. This is a GCC
2724 extension to the ARM ABI. */
2726 static bool
2727 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2728 enum machine_mode mode ATTRIBUTE_UNUSED,
2729 tree type, bool named ATTRIBUTE_UNUSED)
2731 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2734 /* Encode the current state of the #pragma [no_]long_calls. */
2735 typedef enum
2737 OFF, /* No #pragma [no_]long_calls is in effect. */
2738 LONG, /* #pragma long_calls is in effect. */
2739 SHORT /* #pragma no_long_calls is in effect. */
2740 } arm_pragma_enum;
2742 static arm_pragma_enum arm_pragma_long_calls = OFF;
2744 void
2745 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2747 arm_pragma_long_calls = LONG;
2750 void
2751 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2753 arm_pragma_long_calls = SHORT;
2756 void
2757 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2759 arm_pragma_long_calls = OFF;
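/* A typical use of these handlers, for illustration (the pragmas
   themselves are registered in the target headers):

     #pragma long_calls
     extern void far_away (void);	-- gets the long_call attribute
     #pragma long_calls_off

   The attribute is attached in arm_set_default_type_attributes below.  */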
2762 /* Table of machine attributes. */
2763 const struct attribute_spec arm_attribute_table[] =
2765 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2766 /* Function calls made to this symbol must be done indirectly, because
2767 it may lie outside of the 26 bit addressing range of a normal function
2768 call. */
2769 { "long_call", 0, 0, false, true, true, NULL },
2770 /* Whereas these functions are always known to reside within the 26 bit
2771 addressing range. */
2772 { "short_call", 0, 0, false, true, true, NULL },
2773 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2774 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2775 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2776 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2777 #ifdef ARM_PE
2778 /* ARM/PE has three new attributes:
2779 interfacearm - ?
2780 dllexport - for exporting a function/variable that will live in a dll
2781 dllimport - for importing a function/variable from a dll
2783 Microsoft allows multiple declspecs in one __declspec, separating
2784 them with spaces. We do NOT support this. Instead, use __declspec
2785 multiple times.
2787 { "dllimport", 0, 0, true, false, false, NULL },
2788 { "dllexport", 0, 0, true, false, false, NULL },
2789 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2790 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2791 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2792 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2793 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2794 #endif
2795 { NULL, 0, 0, false, false, false, NULL }
2798 /* Handle an attribute requiring a FUNCTION_DECL;
2799 arguments as in struct attribute_spec.handler. */
2800 static tree
2801 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2802 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2804 if (TREE_CODE (*node) != FUNCTION_DECL)
2806 warning (0, "%qs attribute only applies to functions",
2807 IDENTIFIER_POINTER (name));
2808 *no_add_attrs = true;
2811 return NULL_TREE;
2814 /* Handle an "interrupt" or "isr" attribute;
2815 arguments as in struct attribute_spec.handler. */
2816 static tree
2817 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2818 bool *no_add_attrs)
2820 if (DECL_P (*node))
2822 if (TREE_CODE (*node) != FUNCTION_DECL)
2824 warning (0, "%qs attribute only applies to functions",
2825 IDENTIFIER_POINTER (name));
2826 *no_add_attrs = true;
2828 /* FIXME: the argument if any is checked for type attributes;
2829 should it be checked for decl ones? */
2831 else
2833 if (TREE_CODE (*node) == FUNCTION_TYPE
2834 || TREE_CODE (*node) == METHOD_TYPE)
2836 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2838 warning (0, "%qs attribute ignored", IDENTIFIER_POINTER (name));
2839 *no_add_attrs = true;
2842 else if (TREE_CODE (*node) == POINTER_TYPE
2843 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2844 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2845 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2847 *node = build_variant_type_copy (*node);
2848 TREE_TYPE (*node) = build_type_attribute_variant
2849 (TREE_TYPE (*node),
2850 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2851 *no_add_attrs = true;
2853 else
2855 /* Possibly pass this attribute on from the type to a decl. */
2856 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2857 | (int) ATTR_FLAG_FUNCTION_NEXT
2858 | (int) ATTR_FLAG_ARRAY_NEXT))
2860 *no_add_attrs = true;
2861 return tree_cons (name, args, NULL_TREE);
2863 else
2865 warning (0, "%qs attribute ignored", IDENTIFIER_POINTER (name));
2870 return NULL_TREE;
2873 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2874 /* Handle the "notshared" attribute. This attribute is another way of
2875 requesting hidden visibility. ARM's compiler supports
2876 "__declspec(notshared)"; we support the same thing via an
2877 attribute. */
2879 static tree
2880 arm_handle_notshared_attribute (tree *node,
2881 tree name ATTRIBUTE_UNUSED,
2882 tree args ATTRIBUTE_UNUSED,
2883 int flags ATTRIBUTE_UNUSED,
2884 bool *no_add_attrs)
2886 tree decl = TYPE_NAME (*node);
2888 if (decl)
2890 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2891 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2892 *no_add_attrs = false;
2894 return NULL_TREE;
2896 #endif
2898 /* Return 0 if the attributes for two types are incompatible, 1 if they
2899 are compatible, and 2 if they are nearly compatible (which causes a
2900 warning to be generated). */
2901 static int
2902 arm_comp_type_attributes (tree type1, tree type2)
2904 int l1, l2, s1, s2;
2906 /* Check for mismatch of non-default calling convention. */
2907 if (TREE_CODE (type1) != FUNCTION_TYPE)
2908 return 1;
2910 /* Check for mismatched call attributes. */
2911 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2912 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2913 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2914 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2916 /* Only bother to check if an attribute is defined. */
2917 if (l1 | l2 | s1 | s2)
2919 /* If one type has an attribute, the other must have the same attribute. */
2920 if ((l1 != l2) || (s1 != s2))
2921 return 0;
2923 /* Disallow mixed attributes. */
2924 if ((l1 & s2) || (l2 & s1))
2925 return 0;
2928 /* Check for mismatched ISR attribute. */
2929 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2930 if (! l1)
2931 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2932 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2933 if (! l2)
2934 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2935 if (l1 != l2)
2936 return 0;
2938 return 1;
2941 /* Encode long_call or short_call attribute by prefixing
2942 symbol name in DECL with a special character FLAG. */
2943 void
2944 arm_encode_call_attribute (tree decl, int flag)
2946 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2947 int len = strlen (str);
2948 char * newstr;
2950 /* Do not allow weak functions to be treated as short call. */
2951 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2952 return;
2954 newstr = alloca (len + 2);
2955 newstr[0] = flag;
2956 strcpy (newstr + 1, str);
2958 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2959 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2962 /* Assigns default attributes to newly defined type. This is used to
2963 set short_call/long_call attributes for function types of
2964 functions defined inside corresponding #pragma scopes. */
2965 static void
2966 arm_set_default_type_attributes (tree type)
2968 /* Add __attribute__ ((long_call)) to all functions, when
2969 inside #pragma long_calls or __attribute__ ((short_call)),
2970 when inside #pragma no_long_calls. */
2971 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2973 tree type_attr_list, attr_name;
2974 type_attr_list = TYPE_ATTRIBUTES (type);
2976 if (arm_pragma_long_calls == LONG)
2977 attr_name = get_identifier ("long_call");
2978 else if (arm_pragma_long_calls == SHORT)
2979 attr_name = get_identifier ("short_call");
2980 else
2981 return;
2983 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2984 TYPE_ATTRIBUTES (type) = type_attr_list;
2988 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2989 defined within the current compilation unit. If this cannot be
2990 determined, then 0 is returned. */
2991 static int
2992 current_file_function_operand (rtx sym_ref)
2994 /* This is a bit of a fib. A function will have a short call flag
2995 applied to its name if it has the short call attribute, or it has
2996 already been defined within the current compilation unit. */
2997 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2998 return 1;
3000 /* The current function is always defined within the current compilation
3001 unit. If it is a weak definition, however, then this may not be the real
3002 definition of the function, and so we have to say no. */
3003 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
3004 && !DECL_WEAK (current_function_decl))
3005 return 1;
3007 /* We cannot make the determination - default to returning 0. */
3008 return 0;
3011 /* Return nonzero if a 32 bit "long_call" should be generated for
3012 this call. We generate a long_call if the function:
3014 a. has an __attribute__ ((long_call))
3015 or b. is within the scope of a #pragma long_calls
3016 or c. the -mlong-calls command line switch has been specified
3017 and either:
3018 1. -ffunction-sections is in effect
3019 or 2. the current function has __attribute__ ((section))
3020 or 3. the target function has __attribute__ ((section))
3022 However we do not generate a long call if the function:
3024 d. has an __attribute__ ((short_call))
3025 or e. is inside the scope of a #pragma no_long_calls
3026 or f. is defined within the current compilation unit.
3028 This function will be called by C fragments contained in the machine
3029 description file. SYM_REF and CALL_COOKIE correspond to the matched
3030 rtl operands. CALL_SYMBOL is used to distinguish between
3031 two different callers of the function. It is set to 1 in the
3032 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3033 and "call_value" patterns. This is because of the difference in the
3034 SYM_REFs passed by these patterns. */
3036 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3038 if (!call_symbol)
3040 if (GET_CODE (sym_ref) != MEM)
3041 return 0;
3043 sym_ref = XEXP (sym_ref, 0);
3046 if (GET_CODE (sym_ref) != SYMBOL_REF)
3047 return 0;
3049 if (call_cookie & CALL_SHORT)
3050 return 0;
3052 if (TARGET_LONG_CALLS)
3054 if (flag_function_sections
3055 || DECL_SECTION_NAME (current_function_decl))
3056 /* c.3 is handled by the definition of the
3057 ARM_DECLARE_FUNCTION_SIZE macro. */
3058 return 1;
3061 if (current_file_function_operand (sym_ref))
3062 return 0;
3064 return (call_cookie & CALL_LONG)
3065 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3066 || TARGET_LONG_CALLS;
3069 /* Return nonzero if it is ok to make a tail-call to DECL. */
3070 static bool
3071 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3073 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3075 if (cfun->machine->sibcall_blocked)
3076 return false;
3078 /* Never tailcall something for which we have no decl, or if we
3079 are in Thumb mode. */
3080 if (decl == NULL || TARGET_THUMB)
3081 return false;
3083 /* Get the calling method. */
3084 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3085 call_type = CALL_SHORT;
3086 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3087 call_type = CALL_LONG;
3089 /* Cannot tail-call to long calls, since these are out of range of
3090 a branch instruction. However, if not compiling PIC, we know
3091 we can reach the symbol if it is in this compilation unit. */
3092 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3093 return false;
3095 /* If we are interworking and the function is not declared static
3096 then we can't tail-call it unless we know that it exists in this
3097 compilation unit (since it might be a Thumb routine). */
3098 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3099 return false;
3101 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3102 if (IS_INTERRUPT (arm_current_func_type ()))
3103 return false;
3105 /* Everything else is ok. */
3106 return true;
3110 /* Addressing mode support functions. */
3112 /* Return nonzero if X is a legitimate immediate operand when compiling
3113 for PIC. */
3115 legitimate_pic_operand_p (rtx x)
3117 if (CONSTANT_P (x)
3118 && flag_pic
3119 && (GET_CODE (x) == SYMBOL_REF
3120 || (GET_CODE (x) == CONST
3121 && GET_CODE (XEXP (x, 0)) == PLUS
3122 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
3123 return 0;
3125 return 1;
3129 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3131 if (GET_CODE (orig) == SYMBOL_REF
3132 || GET_CODE (orig) == LABEL_REF)
3134 #ifndef AOF_ASSEMBLER
3135 rtx pic_ref, address;
3136 #endif
3137 rtx insn;
3138 int subregs = 0;
3140 if (reg == 0)
3142 gcc_assert (!no_new_pseudos);
3143 reg = gen_reg_rtx (Pmode);
3145 subregs = 1;
3148 #ifdef AOF_ASSEMBLER
3149 /* The AOF assembler can generate relocations for these directly, and
3150 understands that the PIC register has to be added into the offset. */
3151 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3152 #else
3153 if (subregs)
3154 address = gen_reg_rtx (Pmode);
3155 else
3156 address = reg;
3158 if (TARGET_ARM)
3159 emit_insn (gen_pic_load_addr_arm (address, orig));
3160 else
3161 emit_insn (gen_pic_load_addr_thumb (address, orig));
3163 if ((GET_CODE (orig) == LABEL_REF
3164 || (GET_CODE (orig) == SYMBOL_REF &&
3165 SYMBOL_REF_LOCAL_P (orig)))
3166 && NEED_GOT_RELOC)
3167 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3168 else
3170 pic_ref = gen_const_mem (Pmode,
3171 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3172 address));
3175 insn = emit_move_insn (reg, pic_ref);
3176 #endif
3177 current_function_uses_pic_offset_table = 1;
3178 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3179 by loop. */
3180 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3181 REG_NOTES (insn));
3182 return reg;
3184 else if (GET_CODE (orig) == CONST)
3186 rtx base, offset;
3188 if (GET_CODE (XEXP (orig, 0)) == PLUS
3189 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3190 return orig;
3192 if (reg == 0)
3194 gcc_assert (!no_new_pseudos);
3195 reg = gen_reg_rtx (Pmode);
3198 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3200 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3201 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3202 base == reg ? 0 : reg);
3204 if (GET_CODE (offset) == CONST_INT)
3206 /* The base register doesn't really matter; we only want to
3207 test the index for the appropriate mode. */
3208 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3210 gcc_assert (!no_new_pseudos);
3211 offset = force_reg (Pmode, offset);
3214 if (GET_CODE (offset) == CONST_INT)
3215 return plus_constant (base, INTVAL (offset));
3218 if (GET_MODE_SIZE (mode) > 4
3219 && (GET_MODE_CLASS (mode) == MODE_INT
3220 || TARGET_SOFT_FLOAT))
3222 emit_insn (gen_addsi3 (reg, base, offset));
3223 return reg;
3226 return gen_rtx_PLUS (Pmode, base, offset);
3229 return orig;
3233 /* Find a spare low register to use during the prolog of a function. */
3235 static int
3236 thumb_find_work_register (unsigned long pushed_regs_mask)
3238 int reg;
3240 /* Check the argument registers first as these are call-used. The
3241 register allocation order means that sometimes r3 might be used
3242 but earlier argument registers might not, so check them all. */
3243 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3244 if (!regs_ever_live[reg])
3245 return reg;
3247 /* Before going on to check the call-saved registers we can try a couple
3248 more ways of deducing that r3 is available. The first is when we are
3249 pushing anonymous arguments onto the stack and we have fewer than 4
3250 registers' worth of fixed arguments(*). In this case r3 will be part of
3251 the variable argument list and so we can be sure that it will be
3252 pushed right at the start of the function. Hence it will be available
3253 for the rest of the prologue.
3254 (*): i.e. current_function_pretend_args_size is greater than 0. */
3255 if (cfun->machine->uses_anonymous_args
3256 && current_function_pretend_args_size > 0)
3257 return LAST_ARG_REGNUM;
3259 /* The other case is when we have fixed arguments but fewer than 4 registers'
3260 worth. In this case r3 might be used in the body of the function, but
3261 it is not being used to convey an argument into the function. In theory
3262 we could just check current_function_args_size to see how many bytes are
3263 being passed in argument registers, but it seems that it is unreliable.
3264 Sometimes it will have the value 0 when in fact arguments are being
3265 passed. (See testcase execute/20021111-1.c for an example). So we also
3266 check the args_info.nregs field as well. The problem with this field is
3267 that it makes no allowances for arguments that are passed to the
3268 function but which are not used. Hence we could miss an opportunity
3269 when a function has an unused argument in r3. But it is better to be
3270 safe than to be sorry. */
3271 if (! cfun->machine->uses_anonymous_args
3272 && current_function_args_size >= 0
3273 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3274 && cfun->args_info.nregs < 4)
3275 return LAST_ARG_REGNUM;
3277 /* Otherwise look for a call-saved register that is going to be pushed. */
3278 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3279 if (pushed_regs_mask & (1 << reg))
3280 return reg;
3282 /* Something went wrong - thumb_compute_save_reg_mask()
3283 should have arranged for a suitable register to be pushed. */
3284 gcc_unreachable ();
3288 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3289 low register. */
3291 void
3292 arm_load_pic_register (unsigned int scratch)
3294 #ifndef AOF_ASSEMBLER
3295 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3296 rtx global_offset_table;
3298 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3299 return;
3301 gcc_assert (flag_pic);
3303 l1 = gen_label_rtx ();
3305 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3306 /* On the ARM the PC register contains 'dot + 8' at the time of the
3307 addition; on the Thumb it is 'dot + 4'. */
3308 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3309 if (GOT_PCREL)
3310 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3311 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3312 else
3313 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3315 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3317 if (TARGET_ARM)
3319 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3320 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3322 else
3324 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3326 /* We will have pushed the pic register, so should always be
3327 able to find a work register. */
3328 pic_tmp = gen_rtx_REG (SImode, scratch);
3329 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3330 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3332 else
3333 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3334 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3337 /* Need to emit this whether or not we obey regdecls,
3338 since setjmp/longjmp can cause life info to screw up. */
3339 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3340 #endif /* AOF_ASSEMBLER */
3344 /* Return nonzero if X is valid as an ARM state addressing register. */
3345 static int
3346 arm_address_register_rtx_p (rtx x, int strict_p)
3348 int regno;
3350 if (GET_CODE (x) != REG)
3351 return 0;
3353 regno = REGNO (x);
3355 if (strict_p)
3356 return ARM_REGNO_OK_FOR_BASE_P (regno);
3358 return (regno <= LAST_ARM_REGNUM
3359 || regno >= FIRST_PSEUDO_REGISTER
3360 || regno == FRAME_POINTER_REGNUM
3361 || regno == ARG_POINTER_REGNUM);
3364 /* Return nonzero if X is a valid ARM state address operand. */
3366 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3367 int strict_p)
3369 bool use_ldrd;
3370 enum rtx_code code = GET_CODE (x);
3372 if (arm_address_register_rtx_p (x, strict_p))
3373 return 1;
3375 use_ldrd = (TARGET_LDRD
3376 && (mode == DImode
3377 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3379 if (code == POST_INC || code == PRE_DEC
3380 || ((code == PRE_INC || code == POST_DEC)
3381 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3382 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3384 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3385 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3386 && GET_CODE (XEXP (x, 1)) == PLUS
3387 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3389 rtx addend = XEXP (XEXP (x, 1), 1);
3391 /* Don't allow ldrd post increment by register because it's hard
3392 to fixup invalid register choices. */
3393 if (use_ldrd
3394 && GET_CODE (x) == POST_MODIFY
3395 && GET_CODE (addend) == REG)
3396 return 0;
3398 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3399 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3402 /* After reload constants split into minipools will have addresses
3403 from a LABEL_REF. */
3404 else if (reload_completed
3405 && (code == LABEL_REF
3406 || (code == CONST
3407 && GET_CODE (XEXP (x, 0)) == PLUS
3408 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3409 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3410 return 1;
3412 else if (mode == TImode)
3413 return 0;
3415 else if (code == PLUS)
3417 rtx xop0 = XEXP (x, 0);
3418 rtx xop1 = XEXP (x, 1);
3420 return ((arm_address_register_rtx_p (xop0, strict_p)
3421 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3422 || (arm_address_register_rtx_p (xop1, strict_p)
3423 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3426 #if 0
3427 /* Reload currently can't handle MINUS, so disable this for now */
3428 else if (GET_CODE (x) == MINUS)
3430 rtx xop0 = XEXP (x, 0);
3431 rtx xop1 = XEXP (x, 1);
3433 return (arm_address_register_rtx_p (xop0, strict_p)
3434 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3436 #endif
3438 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3439 && code == SYMBOL_REF
3440 && CONSTANT_POOL_ADDRESS_P (x)
3441 && ! (flag_pic
3442 && symbol_mentioned_p (get_pool_constant (x))))
3443 return 1;
3445 return 0;
3448 /* Return nonzero if INDEX is valid for an address index operand in
3449 ARM state. */
3450 static int
3451 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3452 int strict_p)
3454 HOST_WIDE_INT range;
3455 enum rtx_code code = GET_CODE (index);
3457 /* Standard coprocessor addressing modes. */
3458 if (TARGET_HARD_FLOAT
3459 && (TARGET_FPA || TARGET_MAVERICK)
3460 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3461 || (TARGET_MAVERICK && mode == DImode)))
3462 return (code == CONST_INT && INTVAL (index) < 1024
3463 && INTVAL (index) > -1024
3464 && (INTVAL (index) & 3) == 0);
3466 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3467 return (code == CONST_INT
3468 && INTVAL (index) < 1024
3469 && INTVAL (index) > -1024
3470 && (INTVAL (index) & 3) == 0);
3472 if (arm_address_register_rtx_p (index, strict_p)
3473 && (GET_MODE_SIZE (mode) <= 4))
3474 return 1;
3476 if (mode == DImode || mode == DFmode)
3478 if (code == CONST_INT)
3480 HOST_WIDE_INT val = INTVAL (index);
3482 if (TARGET_LDRD)
3483 return val > -256 && val < 256;
3484 else
3485 return val > -4096 && val < 4092;
3488 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3491 if (GET_MODE_SIZE (mode) <= 4
3492 && ! (arm_arch4
3493 && (mode == HImode
3494 || (mode == QImode && outer == SIGN_EXTEND))))
3496 if (code == MULT)
3498 rtx xiop0 = XEXP (index, 0);
3499 rtx xiop1 = XEXP (index, 1);
3501 return ((arm_address_register_rtx_p (xiop0, strict_p)
3502 && power_of_two_operand (xiop1, SImode))
3503 || (arm_address_register_rtx_p (xiop1, strict_p)
3504 && power_of_two_operand (xiop0, SImode)));
3506 else if (code == LSHIFTRT || code == ASHIFTRT
3507 || code == ASHIFT || code == ROTATERT)
3509 rtx op = XEXP (index, 1);
3511 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3512 && GET_CODE (op) == CONST_INT
3513 && INTVAL (op) > 0
3514 && INTVAL (op) <= 31);
3518 /* For ARM v4 we may be doing a sign-extend operation during the
3519 load. */
3520 if (arm_arch4)
3522 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3523 range = 256;
3524 else
3525 range = 4096;
3527 else
3528 range = (mode == HImode) ? 4095 : 4096;
3530 return (code == CONST_INT
3531 && INTVAL (index) < range
3532 && INTVAL (index) > -range);
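/* For illustration, typical index forms accepted above for a word load
   (assuming an ARMv4 or later core):

     ldr  r0, [r1, r2]
     ldr  r0, [r1, r2, lsl #2]
     ldr  r0, [r1, #4095]
     ldrh r0, [r1, #255]	@ halfword loads use the smaller 8-bit range

   FPA and Maverick coprocessor accesses are restricted to word-aligned
   offsets with magnitude below 1024.  */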
3535 /* Return nonzero if X is valid as a Thumb state base register. */
3536 static int
3537 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3539 int regno;
3541 if (GET_CODE (x) != REG)
3542 return 0;
3544 regno = REGNO (x);
3546 if (strict_p)
3547 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3549 return (regno <= LAST_LO_REGNUM
3550 || regno > LAST_VIRTUAL_REGISTER
3551 || regno == FRAME_POINTER_REGNUM
3552 || (GET_MODE_SIZE (mode) >= 4
3553 && (regno == STACK_POINTER_REGNUM
3554 || regno >= FIRST_PSEUDO_REGISTER
3555 || x == hard_frame_pointer_rtx
3556 || x == arg_pointer_rtx)));
3559 /* Return nonzero if x is a legitimate index register. This is the case
3560 for any base register that can access a QImode object. */
3561 inline static int
3562 thumb_index_register_rtx_p (rtx x, int strict_p)
3564 return thumb_base_register_rtx_p (x, QImode, strict_p);
3567 /* Return nonzero if x is a legitimate Thumb-state address.
3569 The AP may be eliminated to either the SP or the FP, so we use the
3570 least common denominator, e.g. SImode, and offsets from 0 to 64.
3572 ??? Verify whether the above is the right approach.
3574 ??? Also, the FP may be eliminated to the SP, so perhaps that
3575 needs special handling also.
3577 ??? Look at how the mips16 port solves this problem. It probably uses
3578 better ways to solve some of these problems.
3580 Although it is not incorrect, we don't accept QImode and HImode
3581 addresses based on the frame pointer or arg pointer until the
3582 reload pass starts. This is so that eliminating such addresses
3583 into stack based ones won't produce impossible code. */
3585 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3587 /* ??? Not clear if this is right. Experiment. */
3588 if (GET_MODE_SIZE (mode) < 4
3589 && !(reload_in_progress || reload_completed)
3590 && (reg_mentioned_p (frame_pointer_rtx, x)
3591 || reg_mentioned_p (arg_pointer_rtx, x)
3592 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3593 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3594 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3595 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3596 return 0;
3598 /* Accept any base register. SP only in SImode or larger. */
3599 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3600 return 1;
3602 /* This is PC relative data before arm_reorg runs. */
3603 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3604 && GET_CODE (x) == SYMBOL_REF
3605 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3606 return 1;
3608 /* This is PC relative data after arm_reorg runs. */
3609 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3610 && (GET_CODE (x) == LABEL_REF
3611 || (GET_CODE (x) == CONST
3612 && GET_CODE (XEXP (x, 0)) == PLUS
3613 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3614 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3615 return 1;
3617 /* Post-inc indexing only supported for SImode and larger. */
3618 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3619 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3620 return 1;
3622 else if (GET_CODE (x) == PLUS)
3624 /* REG+REG address can be any two index registers. */
3625 /* We disallow FRAME+REG addressing since we know that FRAME
3626 will be replaced with STACK, and SP relative addressing only
3627 permits SP+OFFSET. */
3628 if (GET_MODE_SIZE (mode) <= 4
3629 && XEXP (x, 0) != frame_pointer_rtx
3630 && XEXP (x, 1) != frame_pointer_rtx
3631 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3632 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3633 return 1;
3635 /* REG+const has 5-7 bit offset for non-SP registers. */
3636 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3637 || XEXP (x, 0) == arg_pointer_rtx)
3638 && GET_CODE (XEXP (x, 1)) == CONST_INT
3639 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3640 return 1;
3642 /* REG+const has 10 bit offset for SP, but only SImode and
3643 larger is supported. */
3644 /* ??? Should probably check for DI/DFmode overflow here
3645 just like GO_IF_LEGITIMATE_OFFSET does. */
3646 else if (GET_CODE (XEXP (x, 0)) == REG
3647 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3648 && GET_MODE_SIZE (mode) >= 4
3649 && GET_CODE (XEXP (x, 1)) == CONST_INT
3650 && INTVAL (XEXP (x, 1)) >= 0
3651 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3652 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3653 return 1;
3655 else if (GET_CODE (XEXP (x, 0)) == REG
3656 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3657 && GET_MODE_SIZE (mode) >= 4
3658 && GET_CODE (XEXP (x, 1)) == CONST_INT
3659 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3660 return 1;
3663 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3664 && GET_MODE_SIZE (mode) == 4
3665 && GET_CODE (x) == SYMBOL_REF
3666 && CONSTANT_POOL_ADDRESS_P (x)
3667 && !(flag_pic
3668 && symbol_mentioned_p (get_pool_constant (x))))
3669 return 1;
3671 return 0;
3674 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3675 instruction of mode MODE. */
3677 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3679 switch (GET_MODE_SIZE (mode))
3681 case 1:
3682 return val >= 0 && val < 32;
3684 case 2:
3685 return val >= 0 && val < 64 && (val & 1) == 0;
3687 default:
3688 return (val >= 0
3689 && (val + GET_MODE_SIZE (mode)) <= 128
3690 && (val & 3) == 0);
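/* For illustration, these ranges correspond to the 5-bit scaled offset
   fields of the Thumb-1 load/store encodings:

     ldrb r0, [r1, #31]		@ bytes:     0..31
     ldrh r0, [r1, #62]		@ halfwords: 0..62, even
     ldr  r0, [r1, #124]	@ words:     0..124, multiple of 4  */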
3694 /* Try machine-dependent ways of modifying an illegitimate address
3695 to be legitimate. If we find one, return the new, valid address. */
3697 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3699 if (GET_CODE (x) == PLUS)
3701 rtx xop0 = XEXP (x, 0);
3702 rtx xop1 = XEXP (x, 1);
3704 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3705 xop0 = force_reg (SImode, xop0);
3707 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3708 xop1 = force_reg (SImode, xop1);
3710 if (ARM_BASE_REGISTER_RTX_P (xop0)
3711 && GET_CODE (xop1) == CONST_INT)
3713 HOST_WIDE_INT n, low_n;
3714 rtx base_reg, val;
3715 n = INTVAL (xop1);
3717 /* VFP addressing modes actually allow greater offsets, but for
3718 now we just stick with the lowest common denominator. */
3719 if (mode == DImode
3720 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3722 low_n = n & 0x0f;
3723 n &= ~0x0f;
3724 if (low_n > 4)
3726 n += 16;
3727 low_n -= 16;
3730 else
3732 low_n = ((mode) == TImode ? 0
3733 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3734 n -= low_n;
3737 base_reg = gen_reg_rtx (SImode);
3738 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3739 GEN_INT (n)), NULL_RTX);
3740 emit_move_insn (base_reg, val);
3741 x = (low_n == 0 ? base_reg
3742 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3744 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3745 x = gen_rtx_PLUS (SImode, xop0, xop1);
3748 /* XXX We don't allow MINUS any more -- see comment in
3749 arm_legitimate_address_p (). */
3750 else if (GET_CODE (x) == MINUS)
3752 rtx xop0 = XEXP (x, 0);
3753 rtx xop1 = XEXP (x, 1);
3755 if (CONSTANT_P (xop0))
3756 xop0 = force_reg (SImode, xop0);
3758 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3759 xop1 = force_reg (SImode, xop1);
3761 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3762 x = gen_rtx_MINUS (SImode, xop0, xop1);
3765 if (flag_pic)
3767 /* We need to find and carefully transform any SYMBOL and LABEL
3768 references; so go back to the original address expression. */
3769 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3771 if (new_x != orig_x)
3772 x = new_x;
3775 return x;
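/* A worked example of the splitting above (values chosen for illustration
   only): legitimizing (plus r0 (const_int 0x12345)) in SImode takes the
   else branch, giving low_n = 0x345 and n = 0x12000.  0x12000 is a valid
   ARM immediate (0x12 rotated), so a single
       add rT, r0, #0x12000
   is emitted into a fresh pseudo (shown here as rT) and the returned
   address is (plus rT (const_int 0x345)), whose offset fits the 12-bit
   range.  */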
3779 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3780 to be legitimate. If we find one, return the new, valid address. */
3782 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3784 if (GET_CODE (x) == PLUS
3785 && GET_CODE (XEXP (x, 1)) == CONST_INT
3786 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3787 || INTVAL (XEXP (x, 1)) < 0))
3789 rtx xop0 = XEXP (x, 0);
3790 rtx xop1 = XEXP (x, 1);
3791 HOST_WIDE_INT offset = INTVAL (xop1);
3793 /* Try and fold the offset into a biasing of the base register and
3794 then offsetting that. Don't do this when optimizing for space
3795 since it can cause too many CSEs. */
3796 if (optimize_size && offset >= 0
3797 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3799 HOST_WIDE_INT delta;
3801 if (offset >= 256)
3802 delta = offset - (256 - GET_MODE_SIZE (mode));
3803 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3804 delta = 31 * GET_MODE_SIZE (mode);
3805 else
3806 delta = offset & (~31 * GET_MODE_SIZE (mode));
3808 xop0 = force_operand (plus_constant (xop0, offset - delta),
3809 NULL_RTX);
3810 x = plus_constant (xop0, delta);
3812 else if (offset < 0 && offset > -256)
3813 /* Small negative offsets are best done with a subtract before the
3814 dereference, since forcing these into a register normally takes two

3815 instructions. */
3816 x = force_operand (x, NULL_RTX);
3817 else
3819 /* For the remaining cases, force the constant into a register. */
3820 xop1 = force_reg (SImode, xop1);
3821 x = gen_rtx_PLUS (SImode, xop0, xop1);
3824 else if (GET_CODE (x) == PLUS
3825 && s_register_operand (XEXP (x, 1), SImode)
3826 && !s_register_operand (XEXP (x, 0), SImode))
3828 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3830 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3833 if (flag_pic)
3835 /* We need to find and carefully transform any SYMBOL and LABEL
3836 references; so go back to the original address expression. */
3837 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3839 if (new_x != orig_x)
3840 x = new_x;
3843 return x;
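/* A worked example of the rebiasing above (illustrative values only): for
   (plus r3 (const_int 260)) in SImode, 260 is out of range, so
   delta = 260 - (256 - 4) = 8 and the address is rewritten as
   (plus (plus r3 (const_int 252)) (const_int 8)); the inner sum is forced
   into a register and the remaining offset of 8 is a legal SImode
   offset.  */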
3848 #define REG_OR_SUBREG_REG(X) \
3849 (GET_CODE (X) == REG \
3850 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3852 #define REG_OR_SUBREG_RTX(X) \
3853 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3855 #ifndef COSTS_N_INSNS
3856 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3857 #endif
3858 static inline int
3859 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3861 enum machine_mode mode = GET_MODE (x);
3863 switch (code)
3865 case ASHIFT:
3866 case ASHIFTRT:
3867 case LSHIFTRT:
3868 case ROTATERT:
3869 case PLUS:
3870 case MINUS:
3871 case COMPARE:
3872 case NEG:
3873 case NOT:
3874 return COSTS_N_INSNS (1);
3876 case MULT:
3877 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3879 int cycles = 0;
3880 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3882 while (i)
3884 i >>= 2;
3885 cycles++;
3887 return COSTS_N_INSNS (2) + cycles;
3889 return COSTS_N_INSNS (1) + 16;
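      /* For the constant-multiplier path above, roughly one cycle is
	 charged per two significant bits of the constant.  For example
	 (illustrative only), a multiply by 0x55 needs four iterations of
	 the loop, so it is costed at COSTS_N_INSNS (2) + 4.  */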
3891 case SET:
3892 return (COSTS_N_INSNS (1)
3893 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3894 + (GET_CODE (SET_DEST (x)) == MEM)));
3896 case CONST_INT:
3897 if (outer == SET)
3899 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3900 return 0;
3901 if (thumb_shiftable_const (INTVAL (x)))
3902 return COSTS_N_INSNS (2);
3903 return COSTS_N_INSNS (3);
3905 else if ((outer == PLUS || outer == COMPARE)
3906 && INTVAL (x) < 256 && INTVAL (x) > -256)
3907 return 0;
3908 else if (outer == AND
3909 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3910 return COSTS_N_INSNS (1);
3911 else if (outer == ASHIFT || outer == ASHIFTRT
3912 || outer == LSHIFTRT)
3913 return 0;
3914 return COSTS_N_INSNS (2);
3916 case CONST:
3917 case CONST_DOUBLE:
3918 case LABEL_REF:
3919 case SYMBOL_REF:
3920 return COSTS_N_INSNS (3);
3922 case UDIV:
3923 case UMOD:
3924 case DIV:
3925 case MOD:
3926 return 100;
3928 case TRUNCATE:
3929 return 99;
3931 case AND:
3932 case XOR:
3933 case IOR:
3934 /* XXX guess. */
3935 return 8;
3937 case MEM:
3938 /* XXX another guess. */
3939 /* Memory costs quite a lot for the first word, but subsequent words
3940 load at the equivalent of a single insn each. */
3941 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3942 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3943 ? 4 : 0));
3945 case IF_THEN_ELSE:
3946 /* XXX a guess. */
3947 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3948 return 14;
3949 return 2;
3951 case ZERO_EXTEND:
3952 /* XXX still guessing. */
3953 switch (GET_MODE (XEXP (x, 0)))
3955 case QImode:
3956 return (1 + (mode == DImode ? 4 : 0)
3957 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3959 case HImode:
3960 return (4 + (mode == DImode ? 4 : 0)
3961 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3963 case SImode:
3964 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3966 default:
3967 return 99;
3970 default:
3971 return 99;
3976 /* Worker routine for arm_rtx_costs. */
3977 static inline int
3978 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3980 enum machine_mode mode = GET_MODE (x);
3981 enum rtx_code subcode;
3982 int extra_cost;
3984 switch (code)
3986 case MEM:
3987 /* Memory costs quite a lot for the first word, but subsequent words
3988 load at the equivalent of a single insn each. */
3989 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3990 + (GET_CODE (x) == SYMBOL_REF
3991 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3993 case DIV:
3994 case MOD:
3995 case UDIV:
3996 case UMOD:
3997 return optimize_size ? COSTS_N_INSNS (2) : 100;
3999 case ROTATE:
4000 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4001 return 4;
4002 /* Fall through */
4003 case ROTATERT:
4004 if (mode != SImode)
4005 return 8;
4006 /* Fall through */
4007 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4008 if (mode == DImode)
4009 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4010 + ((GET_CODE (XEXP (x, 0)) == REG
4011 || (GET_CODE (XEXP (x, 0)) == SUBREG
4012 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4013 ? 0 : 8));
4014 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4015 || (GET_CODE (XEXP (x, 0)) == SUBREG
4016 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4017 ? 0 : 4)
4018 + ((GET_CODE (XEXP (x, 1)) == REG
4019 || (GET_CODE (XEXP (x, 1)) == SUBREG
4020 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4021 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4022 ? 0 : 4));
4024 case MINUS:
4025 if (mode == DImode)
4026 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4027 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4028 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4029 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4030 ? 0 : 8));
4032 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4033 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4034 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4035 && arm_const_double_rtx (XEXP (x, 1))))
4036 ? 0 : 8)
4037 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4038 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4039 && arm_const_double_rtx (XEXP (x, 0))))
4040 ? 0 : 8));
4042 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4043 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4044 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4045 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4046 || subcode == ASHIFTRT || subcode == LSHIFTRT
4047 || subcode == ROTATE || subcode == ROTATERT
4048 || (subcode == MULT
4049 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4050 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4051 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4052 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4053 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4054 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4055 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4056 return 1;
4057 /* Fall through */
4059 case PLUS:
4060 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4061 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4062 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4063 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4064 && arm_const_double_rtx (XEXP (x, 1))))
4065 ? 0 : 8));
4067 /* Fall through */
4068 case AND: case XOR: case IOR:
4069 extra_cost = 0;
4071 /* Normally the frame registers will be split into reg+const during
4072 reload, so it is a bad idea to combine them with other instructions,
4073 since then they might not be moved outside of loops. As a compromise
4074 we allow integration with ops that have a constant as their second
4075 operand. */
4076 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4077 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4078 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4079 || (REG_OR_SUBREG_REG (XEXP (x, 0))
4080 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
4081 extra_cost = 4;
4083 if (mode == DImode)
4084 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4085 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4086 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4087 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4088 ? 0 : 8));
4090 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4091 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4092 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4093 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4094 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4095 ? 0 : 4));
4097 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4098 return (1 + extra_cost
4099 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4100 || subcode == LSHIFTRT || subcode == ASHIFTRT
4101 || subcode == ROTATE || subcode == ROTATERT
4102 || (subcode == MULT
4103 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4104 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4105 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4106 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4107 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4108 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4109 ? 0 : 4));
4111 return 8;
4113 case MULT:
4114 /* This should have been handled by the CPU specific routines. */
4115 gcc_unreachable ();
4117 case TRUNCATE:
4118 if (arm_arch3m && mode == SImode
4119 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4120 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4121 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4122 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4123 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4124 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4125 return 8;
4126 return 99;
4128 case NEG:
4129 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4130 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4131 /* Fall through */
4132 case NOT:
4133 if (mode == DImode)
4134 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4136 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4138 case IF_THEN_ELSE:
4139 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4140 return 14;
4141 return 2;
4143 case COMPARE:
4144 return 1;
4146 case ABS:
4147 return 4 + (mode == DImode ? 4 : 0);
4149 case SIGN_EXTEND:
4150 if (GET_MODE (XEXP (x, 0)) == QImode)
4151 return (4 + (mode == DImode ? 4 : 0)
4152 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4153 /* Fall through */
4154 case ZERO_EXTEND:
4155 switch (GET_MODE (XEXP (x, 0)))
4157 case QImode:
4158 return (1 + (mode == DImode ? 4 : 0)
4159 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4161 case HImode:
4162 return (4 + (mode == DImode ? 4 : 0)
4163 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4165 case SImode:
4166 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4168 case V8QImode:
4169 case V4HImode:
4170 case V2SImode:
4171 case V4QImode:
4172 case V2HImode:
4173 return 1;
4175 default:
4176 gcc_unreachable ();
4178 gcc_unreachable ();
4180 case CONST_INT:
4181 if (const_ok_for_arm (INTVAL (x)))
4182 return outer == SET ? 2 : -1;
4183 else if (outer == AND
4184 && const_ok_for_arm (~INTVAL (x)))
4185 return -1;
4186 else if ((outer == COMPARE
4187 || outer == PLUS || outer == MINUS)
4188 && const_ok_for_arm (-INTVAL (x)))
4189 return -1;
4190 else
4191 return 5;
4193 case CONST:
4194 case LABEL_REF:
4195 case SYMBOL_REF:
4196 return 6;
4198 case CONST_DOUBLE:
4199 if (arm_const_double_rtx (x))
4200 return outer == SET ? 2 : -1;
4201 else if ((outer == COMPARE || outer == PLUS)
4202 && neg_const_double_rtx_ok_for_fpa (x))
4203 return -1;
4204 return 7;
4206 default:
4207 return 99;
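/* To illustrate the CONST_INT rules above (examples only, relying on
   const_ok_for_arm accepting an 8-bit value rotated right by an even
   amount): 0x3fc00 is directly encodable, so it costs 2 in a SET and
   folds into the surrounding insn (-1) otherwise; 0xffffff00 is not, but
   its complement 0xff is, so under an AND it is free (a BIC can be used);
   -10 is not encodable but 10 is, so under PLUS, MINUS or COMPARE it is
   free; a constant like 0x12345678 matches none of these and costs 5.  */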
4211 /* RTX costs when optimizing for size. */
4212 static bool
4213 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4215 enum machine_mode mode = GET_MODE (x);
4217 if (TARGET_THUMB)
4219 /* XXX TBD. For now, use the standard costs. */
4220 *total = thumb_rtx_costs (x, code, outer_code);
4221 return true;
4224 switch (code)
4226 case MEM:
4227 /* A memory access costs 1 insn if the mode is small, or the address is
4228 a single register, otherwise it costs one insn per word. */
4229 if (REG_P (XEXP (x, 0)))
4230 *total = COSTS_N_INSNS (1);
4231 else
4232 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4233 return true;
4235 case DIV:
4236 case MOD:
4237 case UDIV:
4238 case UMOD:
4239 /* Needs a libcall, so it costs about this. */
4240 *total = COSTS_N_INSNS (2);
4241 return false;
4243 case ROTATE:
4244 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4246 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4247 return true;
4249 /* Fall through */
4250 case ROTATERT:
4251 case ASHIFT:
4252 case LSHIFTRT:
4253 case ASHIFTRT:
4254 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4256 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4257 return true;
4259 else if (mode == SImode)
4261 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4262 /* Slightly disparage register shifts, but not by much. */
4263 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4264 *total += 1 + rtx_cost (XEXP (x, 1), code);
4265 return true;
4268 /* Needs a libcall. */
4269 *total = COSTS_N_INSNS (2);
4270 return false;
4272 case MINUS:
4273 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4275 *total = COSTS_N_INSNS (1);
4276 return false;
4279 if (mode == SImode)
4281 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4282 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4284 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4285 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4286 || subcode1 == ROTATE || subcode1 == ROTATERT
4287 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4288 || subcode1 == ASHIFTRT)
4290 /* It's just the cost of the two operands. */
4291 *total = 0;
4292 return false;
4295 *total = COSTS_N_INSNS (1);
4296 return false;
4299 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4300 return false;
4302 case PLUS:
4303 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4305 *total = COSTS_N_INSNS (1);
4306 return false;
4309 /* Fall through */
4310 case AND: case XOR: case IOR:
4311 if (mode == SImode)
4313 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4315 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4316 || subcode == LSHIFTRT || subcode == ASHIFTRT
4317 || (code == AND && subcode == NOT))
4319 /* It's just the cost of the two operands. */
4320 *total = 0;
4321 return false;
4325 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4326 return false;
4328 case MULT:
4329 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4330 return false;
4332 case NEG:
4333 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4334 *total = COSTS_N_INSNS (1);
4335 /* Fall through */
4336 case NOT:
4337 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4339 return false;
4341 case IF_THEN_ELSE:
4342 *total = 0;
4343 return false;
4345 case COMPARE:
4346 if (cc_register (XEXP (x, 0), VOIDmode))
4347 *total = 0;
4348 else
4349 *total = COSTS_N_INSNS (1);
4350 return false;
4352 case ABS:
4353 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4354 *total = COSTS_N_INSNS (1);
4355 else
4356 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4357 return false;
4359 case SIGN_EXTEND:
4360 *total = 0;
4361 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4363 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4364 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4366 if (mode == DImode)
4367 *total += COSTS_N_INSNS (1);
4368 return false;
4370 case ZERO_EXTEND:
4371 *total = 0;
4372 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4374 switch (GET_MODE (XEXP (x, 0)))
4376 case QImode:
4377 *total += COSTS_N_INSNS (1);
4378 break;
4380 case HImode:
4381 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4383 case SImode:
4384 break;
4386 default:
4387 *total += COSTS_N_INSNS (2);
4391 if (mode == DImode)
4392 *total += COSTS_N_INSNS (1);
4394 return false;
4396 case CONST_INT:
4397 if (const_ok_for_arm (INTVAL (x)))
4398 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4399 else if (const_ok_for_arm (~INTVAL (x)))
4400 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4401 else if (const_ok_for_arm (-INTVAL (x)))
4403 if (outer_code == COMPARE || outer_code == PLUS
4404 || outer_code == MINUS)
4405 *total = 0;
4406 else
4407 *total = COSTS_N_INSNS (1);
4409 else
4410 *total = COSTS_N_INSNS (2);
4411 return true;
4413 case CONST:
4414 case LABEL_REF:
4415 case SYMBOL_REF:
4416 *total = COSTS_N_INSNS (2);
4417 return true;
4419 case CONST_DOUBLE:
4420 *total = COSTS_N_INSNS (4);
4421 return true;
4423 default:
4424 if (mode != VOIDmode)
4425 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4426 else
4427 *total = COSTS_N_INSNS (4); /* Who knows? */
4428 return false;
4432 /* RTX costs for cores with a slow MUL implementation. */
4434 static bool
4435 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4437 enum machine_mode mode = GET_MODE (x);
4439 if (TARGET_THUMB)
4441 *total = thumb_rtx_costs (x, code, outer_code);
4442 return true;
4445 switch (code)
4447 case MULT:
4448 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4449 || mode == DImode)
4451 *total = 30;
4452 return true;
4455 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4457 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4458 & (unsigned HOST_WIDE_INT) 0xffffffff);
4459 int cost, const_ok = const_ok_for_arm (i);
4460 int j, booth_unit_size;
4462 /* Tune as appropriate. */
4463 cost = const_ok ? 4 : 8;
4464 booth_unit_size = 2;
4465 for (j = 0; i && j < 32; j += booth_unit_size)
4467 i >>= booth_unit_size;
4468 cost += 2;
4471 *total = cost;
4472 return true;
4475 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4476 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4477 return true;
4479 default:
4480 *total = arm_rtx_costs_1 (x, code, outer_code);
4481 return true;
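/* A worked example of the Booth-style costing above (illustrative only):
   for a multiply by 100 (0x64), const_ok_for_arm holds so the base cost
   is 4; the loop shifts the constant right two bits at a time and runs
   four times before it reaches zero, adding 2 each time, so *total
   becomes 12.  */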
4486 /* RTX cost for cores with a fast multiply unit (M variants). */
4488 static bool
4489 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4491 enum machine_mode mode = GET_MODE (x);
4493 if (TARGET_THUMB)
4495 *total = thumb_rtx_costs (x, code, outer_code);
4496 return true;
4499 switch (code)
4501 case MULT:
4502 /* There is no point basing this on the tuning, since it is always the
4503 fast variant if it exists at all. */
4504 if (mode == DImode
4505 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4506 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4507 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4509 *total = 8;
4510 return true;
4514 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4515 || mode == DImode)
4517 *total = 30;
4518 return true;
4521 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4523 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4524 & (unsigned HOST_WIDE_INT) 0xffffffff);
4525 int cost, const_ok = const_ok_for_arm (i);
4526 int j, booth_unit_size;
4528 /* Tune as appropriate. */
4529 cost = const_ok ? 4 : 8;
4530 booth_unit_size = 8;
4531 for (j = 0; i && j < 32; j += booth_unit_size)
4533 i >>= booth_unit_size;
4534 cost += 2;
4537 *total = cost;
4538 return true;
4541 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4542 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4543 return true;
4545 default:
4546 *total = arm_rtx_costs_1 (x, code, outer_code);
4547 return true;
4552 /* RTX cost for XScale CPUs. */
4554 static bool
4555 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4557 enum machine_mode mode = GET_MODE (x);
4559 if (TARGET_THUMB)
4561 *total = thumb_rtx_costs (x, code, outer_code);
4562 return true;
4565 switch (code)
4567 case MULT:
4568 /* There is no point basing this on the tuning, since it is always the
4569 fast variant if it exists at all. */
4570 if (mode == DImode
4571 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4572 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4573 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4575 *total = 8;
4576 return true;
4580 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4581 || mode == DImode)
4583 *total = 30;
4584 return true;
4587 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4589 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4590 & (unsigned HOST_WIDE_INT) 0xffffffff);
4591 int cost, const_ok = const_ok_for_arm (i);
4592 unsigned HOST_WIDE_INT masked_const;
4594 /* The cost will be related to two insns.
4595 First a load of the constant (MOV or LDR), then a multiply. */
4596 cost = 2;
4597 if (! const_ok)
4598 cost += 1; /* LDR is probably more expensive because
4599 of longer result latency. */
4600 masked_const = i & 0xffff8000;
4601 if (masked_const != 0 && masked_const != 0xffff8000)
4603 masked_const = i & 0xf8000000;
4604 if (masked_const == 0 || masked_const == 0xf8000000)
4605 cost += 1;
4606 else
4607 cost += 2;
4609 *total = cost;
4610 return true;
4613 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4614 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4615 return true;
4617 case COMPARE:
4618 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4619 will stall until the multiplication is complete. */
4620 if (GET_CODE (XEXP (x, 0)) == MULT)
4621 *total = 4 + rtx_cost (XEXP (x, 0), code);
4622 else
4623 *total = arm_rtx_costs_1 (x, code, outer_code);
4624 return true;
4626 default:
4627 *total = arm_rtx_costs_1 (x, code, outer_code);
4628 return true;
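/* A worked example of the XScale multiply costing above (illustrative
   only): for a multiply by 0x12345678, the constant is not a valid ARM
   immediate, so the cost starts at 2 + 1; it does not fit in a signed
   16-bit field (i & 0xffff8000 is neither 0 nor 0xffff8000) and its top
   five bits are not all equal either (the i & 0xf8000000 test), so a
   further 2 is added, giving *total = 5.  */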
4633 /* RTX costs for 9e (and later) cores. */
4635 static bool
4636 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4638 enum machine_mode mode = GET_MODE (x);
4639 int nonreg_cost;
4640 int cost;
4642 if (TARGET_THUMB)
4644 switch (code)
4646 case MULT:
4647 *total = COSTS_N_INSNS (3);
4648 return true;
4650 default:
4651 *total = thumb_rtx_costs (x, code, outer_code);
4652 return true;
4656 switch (code)
4658 case MULT:
4659 /* There is no point basing this on the tuning, since it is always the
4660 fast variant if it exists at all. */
4661 if (mode == DImode
4662 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4663 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4664 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4666 *total = 3;
4667 return true;
4671 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4673 *total = 30;
4674 return true;
4676 if (mode == DImode)
4678 cost = 7;
4679 nonreg_cost = 8;
4681 else
4683 cost = 2;
4684 nonreg_cost = 4;
4688 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4689 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4690 return true;
4692 default:
4693 *total = arm_rtx_costs_1 (x, code, outer_code);
4694 return true;
4697 /* All address computations that can be done are free, but rtx cost returns
4698 the same for practically all of them. So we weight the different types
4699 of address here in the order (most pref first):
4700 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4701 static inline int
4702 arm_arm_address_cost (rtx x)
4704 enum rtx_code c = GET_CODE (x);
4706 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4707 return 0;
4708 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4709 return 10;
4711 if (c == PLUS || c == MINUS)
4713 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4714 return 2;
4716 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4717 return 3;
4719 return 4;
4722 return 6;
4725 static inline int
4726 arm_thumb_address_cost (rtx x)
4728 enum rtx_code c = GET_CODE (x);
4730 if (c == REG)
4731 return 1;
4732 if (c == PLUS
4733 && GET_CODE (XEXP (x, 0)) == REG
4734 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4735 return 1;
4737 return 2;
4740 static int
4741 arm_address_cost (rtx x)
4743 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4746 static int
4747 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4749 rtx i_pat, d_pat;
4751 /* Some true dependencies can have a higher cost depending
4752 on precisely how certain input operands are used. */
4753 if (arm_tune_xscale
4754 && REG_NOTE_KIND (link) == 0
4755 && recog_memoized (insn) >= 0
4756 && recog_memoized (dep) >= 0)
4758 int shift_opnum = get_attr_shift (insn);
4759 enum attr_type attr_type = get_attr_type (dep);
4761 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4762 operand for INSN. If we have a shifted input operand and the
4763 instruction we depend on is another ALU instruction, then we may
4764 have to account for an additional stall. */
4765 if (shift_opnum != 0
4766 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4768 rtx shifted_operand;
4769 int opno;
4771 /* Get the shifted operand. */
4772 extract_insn (insn);
4773 shifted_operand = recog_data.operand[shift_opnum];
4775 /* Iterate over all the operands in DEP. If we write an operand
4776 that overlaps with SHIFTED_OPERAND, then we have to increase the
4777 cost of this dependency. */
4778 extract_insn (dep);
4779 preprocess_constraints ();
4780 for (opno = 0; opno < recog_data.n_operands; opno++)
4782 /* We can ignore strict inputs. */
4783 if (recog_data.operand_type[opno] == OP_IN)
4784 continue;
4786 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4787 shifted_operand))
4788 return 2;
4793 /* XXX This is not strictly true for the FPA. */
4794 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4795 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4796 return 0;
4798 /* Call insns don't incur a stall, even if they follow a load. */
4799 if (REG_NOTE_KIND (link) == 0
4800 && GET_CODE (insn) == CALL_INSN)
4801 return 1;
4803 if ((i_pat = single_set (insn)) != NULL
4804 && GET_CODE (SET_SRC (i_pat)) == MEM
4805 && (d_pat = single_set (dep)) != NULL
4806 && GET_CODE (SET_DEST (d_pat)) == MEM)
4808 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4809 /* This is a load after a store; there is no conflict if the load reads
4810 from a cached area. Assume that loads from the stack, and from the
4811 constant pool are cached, and that others will miss. This is a
4812 hack. */
4814 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4815 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4816 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4817 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4818 return 1;
4821 return cost;
4824 static int fp_consts_inited = 0;
4826 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4827 static const char * const strings_fp[8] =
4829 "0", "1", "2", "3",
4830 "4", "5", "0.5", "10"
4833 static REAL_VALUE_TYPE values_fp[8];
4835 static void
4836 init_fp_table (void)
4838 int i;
4839 REAL_VALUE_TYPE r;
4841 if (TARGET_VFP)
4842 fp_consts_inited = 1;
4843 else
4844 fp_consts_inited = 8;
4846 for (i = 0; i < fp_consts_inited; i++)
4848 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4849 values_fp[i] = r;
4853 /* Return TRUE if rtx X is a valid immediate FP constant. */
4855 arm_const_double_rtx (rtx x)
4857 REAL_VALUE_TYPE r;
4858 int i;
4860 if (!fp_consts_inited)
4861 init_fp_table ();
4863 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4864 if (REAL_VALUE_MINUS_ZERO (r))
4865 return 0;
4867 for (i = 0; i < fp_consts_inited; i++)
4868 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4869 return 1;
4871 return 0;
4874 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4876 neg_const_double_rtx_ok_for_fpa (rtx x)
4878 REAL_VALUE_TYPE r;
4879 int i;
4881 if (!fp_consts_inited)
4882 init_fp_table ();
4884 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4885 r = REAL_VALUE_NEGATE (r);
4886 if (REAL_VALUE_MINUS_ZERO (r))
4887 return 0;
4889 for (i = 0; i < 8; i++)
4890 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4891 return 1;
4893 return 0;
4896 /* Predicates for `match_operand' and `match_operator'. */
4898 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4900 cirrus_memory_offset (rtx op)
4902 /* Reject eliminable registers. */
4903 if (! (reload_in_progress || reload_completed)
4904 && ( reg_mentioned_p (frame_pointer_rtx, op)
4905 || reg_mentioned_p (arg_pointer_rtx, op)
4906 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4907 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4908 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4909 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4910 return 0;
4912 if (GET_CODE (op) == MEM)
4914 rtx ind;
4916 ind = XEXP (op, 0);
4918 /* Match: (mem (reg)). */
4919 if (GET_CODE (ind) == REG)
4920 return 1;
4922 /* Match:
4923 (mem (plus (reg)
4924 (const))). */
4925 if (GET_CODE (ind) == PLUS
4926 && GET_CODE (XEXP (ind, 0)) == REG
4927 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4928 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4929 return 1;
4932 return 0;
4935 /* Return TRUE if OP is a valid VFP memory address pattern.
4936 WB is true if writeback address modes are allowed. */
4939 arm_coproc_mem_operand (rtx op, bool wb)
4941 rtx ind;
4943 /* Reject eliminable registers. */
4944 if (! (reload_in_progress || reload_completed)
4945 && ( reg_mentioned_p (frame_pointer_rtx, op)
4946 || reg_mentioned_p (arg_pointer_rtx, op)
4947 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4948 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4949 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4950 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4951 return FALSE;
4953 /* Constants are converted into offsets from labels. */
4954 if (GET_CODE (op) != MEM)
4955 return FALSE;
4957 ind = XEXP (op, 0);
4959 if (reload_completed
4960 && (GET_CODE (ind) == LABEL_REF
4961 || (GET_CODE (ind) == CONST
4962 && GET_CODE (XEXP (ind, 0)) == PLUS
4963 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4964 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4965 return TRUE;
4967 /* Match: (mem (reg)). */
4968 if (GET_CODE (ind) == REG)
4969 return arm_address_register_rtx_p (ind, 0);
4971 /* Autoincrement addressing modes. */
4972 if (wb
4973 && (GET_CODE (ind) == PRE_INC
4974 || GET_CODE (ind) == POST_INC
4975 || GET_CODE (ind) == PRE_DEC
4976 || GET_CODE (ind) == POST_DEC))
4977 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4979 if (wb
4980 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4981 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4982 && GET_CODE (XEXP (ind, 1)) == PLUS
4983 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4984 ind = XEXP (ind, 1);
4986 /* Match:
4987 (plus (reg)
4988 (const)). */
4989 if (GET_CODE (ind) == PLUS
4990 && GET_CODE (XEXP (ind, 0)) == REG
4991 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4992 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4993 && INTVAL (XEXP (ind, 1)) > -1024
4994 && INTVAL (XEXP (ind, 1)) < 1024
4995 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4996 return TRUE;
4998 return FALSE;
5001 /* Return true if X is a register that will be eliminated later on. */
5003 arm_eliminable_register (rtx x)
5005 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5006 || REGNO (x) == ARG_POINTER_REGNUM
5007 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5008 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5011 /* Return GENERAL_REGS if a scratch register required to reload x to/from
5012 VFP registers. Otherwise return NO_REGS. */
5014 enum reg_class
5015 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5017 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5018 return NO_REGS;
5020 return GENERAL_REGS;
5023 /* Values which must be returned in the most-significant end of the return
5024 register. */
5026 static bool
5027 arm_return_in_msb (tree valtype)
5029 return (TARGET_AAPCS_BASED
5030 && BYTES_BIG_ENDIAN
5031 && (AGGREGATE_TYPE_P (valtype)
5032 || TREE_CODE (valtype) == COMPLEX_TYPE));
5035 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5036 Used by the Cirrus Maverick code, which has to work around
5037 a hardware bug triggered by such instructions. */
5038 static bool
5039 arm_memory_load_p (rtx insn)
5041 rtx body, lhs, rhs;
5043 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5044 return false;
5046 body = PATTERN (insn);
5048 if (GET_CODE (body) != SET)
5049 return false;
5051 lhs = XEXP (body, 0);
5052 rhs = XEXP (body, 1);
5054 lhs = REG_OR_SUBREG_RTX (lhs);
5056 /* If the destination is not a general purpose
5057 register we do not have to worry. */
5058 if (GET_CODE (lhs) != REG
5059 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5060 return false;
5062 /* As well as loads from memory we also have to react
5063 to loads of invalid constants which will be turned
5064 into loads from the minipool. */
5065 return (GET_CODE (rhs) == MEM
5066 || GET_CODE (rhs) == SYMBOL_REF
5067 || note_invalid_constants (insn, -1, false));
5070 /* Return TRUE if INSN is a Cirrus instruction. */
5071 static bool
5072 arm_cirrus_insn_p (rtx insn)
5074 enum attr_cirrus attr;
5076 /* get_attr cannot accept USE or CLOBBER. */
5077 if (!insn
5078 || GET_CODE (insn) != INSN
5079 || GET_CODE (PATTERN (insn)) == USE
5080 || GET_CODE (PATTERN (insn)) == CLOBBER)
5081 return 0;
5083 attr = get_attr_cirrus (insn);
5085 return attr != CIRRUS_NOT;
5088 /* Cirrus reorg for invalid instruction combinations. */
5089 static void
5090 cirrus_reorg (rtx first)
5092 enum attr_cirrus attr;
5093 rtx body = PATTERN (first);
5094 rtx t;
5095 int nops;
5097 /* Any branch must be followed by 2 non Cirrus instructions. */
5098 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5100 nops = 0;
5101 t = next_nonnote_insn (first);
5103 if (arm_cirrus_insn_p (t))
5104 ++ nops;
5106 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5107 ++ nops;
5109 while (nops --)
5110 emit_insn_after (gen_nop (), first);
5112 return;
5115 /* (float (blah)) is in parallel with a clobber. */
5116 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5117 body = XVECEXP (body, 0, 0);
5119 if (GET_CODE (body) == SET)
5121 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5123 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5124 be followed by a non Cirrus insn. */
5125 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5127 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5128 emit_insn_after (gen_nop (), first);
5130 return;
5132 else if (arm_memory_load_p (first))
5134 unsigned int arm_regno;
5136 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5137 ldr/cfmv64hr combination where the Rd field is the same
5138 in both instructions must be split with a non Cirrus
5139 insn. Example:
5141 ldr r0, blah
5143 cfmvsr mvf0, r0. */
5145 /* Get Arm register number for ldr insn. */
5146 if (GET_CODE (lhs) == REG)
5147 arm_regno = REGNO (lhs);
5148 else
5150 gcc_assert (GET_CODE (rhs) == REG);
5151 arm_regno = REGNO (rhs);
5154 /* Next insn. */
5155 first = next_nonnote_insn (first);
5157 if (! arm_cirrus_insn_p (first))
5158 return;
5160 body = PATTERN (first);
5162 /* (float (blah)) is in parallel with a clobber. */
5163 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5164 body = XVECEXP (body, 0, 0);
5166 if (GET_CODE (body) == FLOAT)
5167 body = XEXP (body, 0);
5169 if (get_attr_cirrus (first) == CIRRUS_MOVE
5170 && GET_CODE (XEXP (body, 1)) == REG
5171 && arm_regno == REGNO (XEXP (body, 1)))
5172 emit_insn_after (gen_nop (), first);
5174 return;
5178 /* get_attr cannot accept USE or CLOBBER. */
5179 if (!first
5180 || GET_CODE (first) != INSN
5181 || GET_CODE (PATTERN (first)) == USE
5182 || GET_CODE (PATTERN (first)) == CLOBBER)
5183 return;
5185 attr = get_attr_cirrus (first);
5187 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5188 must be followed by a non-coprocessor instruction. */
5189 if (attr == CIRRUS_COMPARE)
5191 nops = 0;
5193 t = next_nonnote_insn (first);
5195 if (arm_cirrus_insn_p (t))
5196 ++ nops;
5198 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5199 ++ nops;
5201 while (nops --)
5202 emit_insn_after (gen_nop (), first);
5204 return;
5208 /* Return TRUE if X references a SYMBOL_REF. */
5210 symbol_mentioned_p (rtx x)
5212 const char * fmt;
5213 int i;
5215 if (GET_CODE (x) == SYMBOL_REF)
5216 return 1;
5218 fmt = GET_RTX_FORMAT (GET_CODE (x));
5220 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5222 if (fmt[i] == 'E')
5224 int j;
5226 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5227 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5228 return 1;
5230 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5231 return 1;
5234 return 0;
5237 /* Return TRUE if X references a LABEL_REF. */
5239 label_mentioned_p (rtx x)
5241 const char * fmt;
5242 int i;
5244 if (GET_CODE (x) == LABEL_REF)
5245 return 1;
5247 fmt = GET_RTX_FORMAT (GET_CODE (x));
5248 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5250 if (fmt[i] == 'E')
5252 int j;
5254 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5255 if (label_mentioned_p (XVECEXP (x, i, j)))
5256 return 1;
5258 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5259 return 1;
5262 return 0;
5265 enum rtx_code
5266 minmax_code (rtx x)
5268 enum rtx_code code = GET_CODE (x);
5270 switch (code)
5272 case SMAX:
5273 return GE;
5274 case SMIN:
5275 return LE;
5276 case UMIN:
5277 return LEU;
5278 case UMAX:
5279 return GEU;
5280 default:
5281 gcc_unreachable ();
5285 /* Return 1 if memory locations are adjacent. */
5287 adjacent_mem_locations (rtx a, rtx b)
5289 /* We don't guarantee to preserve the order of these memory refs. */
5290 if (volatile_refs_p (a) || volatile_refs_p (b))
5291 return 0;
5293 if ((GET_CODE (XEXP (a, 0)) == REG
5294 || (GET_CODE (XEXP (a, 0)) == PLUS
5295 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5296 && (GET_CODE (XEXP (b, 0)) == REG
5297 || (GET_CODE (XEXP (b, 0)) == PLUS
5298 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5300 HOST_WIDE_INT val0 = 0, val1 = 0;
5301 rtx reg0, reg1;
5302 int val_diff;
5304 if (GET_CODE (XEXP (a, 0)) == PLUS)
5306 reg0 = XEXP (XEXP (a, 0), 0);
5307 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5309 else
5310 reg0 = XEXP (a, 0);
5312 if (GET_CODE (XEXP (b, 0)) == PLUS)
5314 reg1 = XEXP (XEXP (b, 0), 0);
5315 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5317 else
5318 reg1 = XEXP (b, 0);
5320 /* Don't accept any offset that will require multiple
5321 instructions to handle, since this would cause the
5322 arith_adjacentmem pattern to output an overlong sequence. */
5323 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5324 return 0;
5326 /* Don't allow an eliminable register: register elimination can make
5327 the offset too large. */
5328 if (arm_eliminable_register (reg0))
5329 return 0;
5331 val_diff = val1 - val0;
5333 if (arm_ld_sched)
5335 /* If the target has load delay slots, then there's no benefit
5336 to using an ldm instruction unless the offset is zero and
5337 we are optimizing for size. */
5338 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5339 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5340 && (val_diff == 4 || val_diff == -4));
5343 return ((REGNO (reg0) == REGNO (reg1))
5344 && (val_diff == 4 || val_diff == -4));
5347 return 0;
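/* For illustration (hypothetical operands): (mem:SI (reg r4)) and
   (mem:SI (plus (reg r4) (const_int 4))) use the same base register with
   offsets 0 and 4, so they count as adjacent; on a core with load
   scheduling (arm_ld_sched) they are only treated as adjacent when
   optimizing for size, per the test above.  */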
5351 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5352 HOST_WIDE_INT *load_offset)
5354 int unsorted_regs[4];
5355 HOST_WIDE_INT unsorted_offsets[4];
5356 int order[4];
5357 int base_reg = -1;
5358 int i;
5360 /* Can only handle 2, 3, or 4 insns at present,
5361 though could be easily extended if required. */
5362 gcc_assert (nops >= 2 && nops <= 4);
5364 /* Loop over the operands and check that the memory references are
5365 suitable (i.e. immediate offsets from the same base register). At
5366 the same time, extract the target register, and the memory
5367 offsets. */
5368 for (i = 0; i < nops; i++)
5370 rtx reg;
5371 rtx offset;
5373 /* Convert a subreg of a mem into the mem itself. */
5374 if (GET_CODE (operands[nops + i]) == SUBREG)
5375 operands[nops + i] = alter_subreg (operands + (nops + i));
5377 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5379 /* Don't reorder volatile memory references; it doesn't seem worth
5380 looking for the case where the order is ok anyway. */
5381 if (MEM_VOLATILE_P (operands[nops + i]))
5382 return 0;
5384 offset = const0_rtx;
5386 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5387 || (GET_CODE (reg) == SUBREG
5388 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5389 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5390 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5391 == REG)
5392 || (GET_CODE (reg) == SUBREG
5393 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5394 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5395 == CONST_INT)))
5397 if (i == 0)
5399 base_reg = REGNO (reg);
5400 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5401 ? REGNO (operands[i])
5402 : REGNO (SUBREG_REG (operands[i])));
5403 order[0] = 0;
5405 else
5407 if (base_reg != (int) REGNO (reg))
5408 /* Not addressed from the same base register. */
5409 return 0;
5411 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5412 ? REGNO (operands[i])
5413 : REGNO (SUBREG_REG (operands[i])));
5414 if (unsorted_regs[i] < unsorted_regs[order[0]])
5415 order[0] = i;
5418 /* If it isn't an integer register, or if it overwrites the
5419 base register but isn't the last insn in the list, then
5420 we can't do this. */
5421 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5422 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5423 return 0;
5425 unsorted_offsets[i] = INTVAL (offset);
5427 else
5428 /* Not a suitable memory address. */
5429 return 0;
5432 /* All the useful information has now been extracted from the
5433 operands into unsorted_regs and unsorted_offsets; additionally,
5434 order[0] has been set to the lowest numbered register in the
5435 list. Sort the registers into order, and check that the memory
5436 offsets are ascending and adjacent. */
5438 for (i = 1; i < nops; i++)
5440 int j;
5442 order[i] = order[i - 1];
5443 for (j = 0; j < nops; j++)
5444 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5445 && (order[i] == order[i - 1]
5446 || unsorted_regs[j] < unsorted_regs[order[i]]))
5447 order[i] = j;
5449 /* Have we found a suitable register? if not, one must be used more
5450 than once. */
5451 if (order[i] == order[i - 1])
5452 return 0;
5454 /* Is the memory address adjacent and ascending? */
5455 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5456 return 0;
5459 if (base)
5461 *base = base_reg;
5463 for (i = 0; i < nops; i++)
5464 regs[i] = unsorted_regs[order[i]];
5466 *load_offset = unsorted_offsets[order[0]];
5469 if (unsorted_offsets[order[0]] == 0)
5470 return 1; /* ldmia */
5472 if (unsorted_offsets[order[0]] == 4)
5473 return 2; /* ldmib */
5475 if (unsorted_offsets[order[nops - 1]] == 0)
5476 return 3; /* ldmda */
5478 if (unsorted_offsets[order[nops - 1]] == -4)
5479 return 4; /* ldmdb */
5481 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5482 if the offset isn't small enough. The reason 2 ldrs are faster
5483 is because these ARMs are able to do more than one cache access
5484 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5485 whilst the ARM8 has a double bandwidth cache. This means that
5486 these cores can do both an instruction fetch and a data fetch in
5487 a single cycle, so the trick of calculating the address into a
5488 scratch register (one of the result regs) and then doing a load
5489 multiple actually becomes slower (and no smaller in code size).
5490 That is the transformation
5492 ldr rd1, [rbase + offset]
5493 ldr rd2, [rbase + offset + 4]
5497 add rd1, rbase, offset
5498 ldmia rd1, {rd1, rd2}
5500 produces worse code -- '3 cycles + any stalls on rd2' instead of
5501 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5502 access per cycle, the first sequence could never complete in less
5503 than 6 cycles, whereas the ldm sequence would only take 5 and
5504 would make better use of sequential accesses if not hitting the
5505 cache.
5507 We cheat here and test 'arm_ld_sched' which we currently know to
5508 only be true for the ARM8, ARM9 and StrongARM. If this ever
5509 changes, then the test below needs to be reworked. */
5510 if (nops == 2 && arm_ld_sched)
5511 return 0;
5513 /* Can't do it without setting up the offset, only do this if it takes
5514 no more than one insn. */
5515 return (const_ok_for_arm (unsorted_offsets[order[0]])
5516 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
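/* The return value maps onto the ldm variant eventually emitted by
   emit_ldm_seq; for example (illustrative operands), loads from
   [r5, #0]/[r5, #4]/[r5, #8] give 1 (ldmia r5), the same offsets starting
   at #4 give 2 (ldmib), offsets -8/-4/0 give 3 (ldmda), offsets
   -12/-8/-4 give 4 (ldmdb), and anything else whose lowest offset can be
   reached with a single add or sub of the base gives 5.  */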
5519 const char *
5520 emit_ldm_seq (rtx *operands, int nops)
5522 int regs[4];
5523 int base_reg;
5524 HOST_WIDE_INT offset;
5525 char buf[100];
5526 int i;
5528 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5530 case 1:
5531 strcpy (buf, "ldm%?ia\t");
5532 break;
5534 case 2:
5535 strcpy (buf, "ldm%?ib\t");
5536 break;
5538 case 3:
5539 strcpy (buf, "ldm%?da\t");
5540 break;
5542 case 4:
5543 strcpy (buf, "ldm%?db\t");
5544 break;
5546 case 5:
5547 if (offset >= 0)
5548 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5549 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5550 (long) offset);
5551 else
5552 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5553 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5554 (long) -offset);
5555 output_asm_insn (buf, operands);
5556 base_reg = regs[0];
5557 strcpy (buf, "ldm%?ia\t");
5558 break;
5560 default:
5561 gcc_unreachable ();
5564 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5565 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5567 for (i = 1; i < nops; i++)
5568 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5569 reg_names[regs[i]]);
5571 strcat (buf, "}\t%@ phole ldm");
5573 output_asm_insn (buf, operands);
5574 return "";
5578 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5579 HOST_WIDE_INT * load_offset)
5581 int unsorted_regs[4];
5582 HOST_WIDE_INT unsorted_offsets[4];
5583 int order[4];
5584 int base_reg = -1;
5585 int i;
5587 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5588 extended if required. */
5589 gcc_assert (nops >= 2 && nops <= 4);
5591 /* Loop over the operands and check that the memory references are
5592 suitable (i.e. immediate offsets from the same base register). At
5593 the same time, extract the target register, and the memory
5594 offsets. */
5595 for (i = 0; i < nops; i++)
5597 rtx reg;
5598 rtx offset;
5600 /* Convert a subreg of a mem into the mem itself. */
5601 if (GET_CODE (operands[nops + i]) == SUBREG)
5602 operands[nops + i] = alter_subreg (operands + (nops + i));
5604 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5606 /* Don't reorder volatile memory references; it doesn't seem worth
5607 looking for the case where the order is ok anyway. */
5608 if (MEM_VOLATILE_P (operands[nops + i]))
5609 return 0;
5611 offset = const0_rtx;
5613 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5614 || (GET_CODE (reg) == SUBREG
5615 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5616 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5617 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5618 == REG)
5619 || (GET_CODE (reg) == SUBREG
5620 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5621 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5622 == CONST_INT)))
5624 if (i == 0)
5626 base_reg = REGNO (reg);
5627 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5628 ? REGNO (operands[i])
5629 : REGNO (SUBREG_REG (operands[i])));
5630 order[0] = 0;
5632 else
5634 if (base_reg != (int) REGNO (reg))
5635 /* Not addressed from the same base register. */
5636 return 0;
5638 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5639 ? REGNO (operands[i])
5640 : REGNO (SUBREG_REG (operands[i])));
5641 if (unsorted_regs[i] < unsorted_regs[order[0]])
5642 order[0] = i;
5645 /* If it isn't an integer register, then we can't do this. */
5646 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5647 return 0;
5649 unsorted_offsets[i] = INTVAL (offset);
5651 else
5652 /* Not a suitable memory address. */
5653 return 0;
5656 /* All the useful information has now been extracted from the
5657 operands into unsorted_regs and unsorted_offsets; additionally,
5658 order[0] has been set to the lowest numbered register in the
5659 list. Sort the registers into order, and check that the memory
5660 offsets are ascending and adjacent. */
5662 for (i = 1; i < nops; i++)
5664 int j;
5666 order[i] = order[i - 1];
5667 for (j = 0; j < nops; j++)
5668 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5669 && (order[i] == order[i - 1]
5670 || unsorted_regs[j] < unsorted_regs[order[i]]))
5671 order[i] = j;
5673 /* Have we found a suitable register? if not, one must be used more
5674 than once. */
5675 if (order[i] == order[i - 1])
5676 return 0;
5678 /* Is the memory address adjacent and ascending? */
5679 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5680 return 0;
5683 if (base)
5685 *base = base_reg;
5687 for (i = 0; i < nops; i++)
5688 regs[i] = unsorted_regs[order[i]];
5690 *load_offset = unsorted_offsets[order[0]];
5693 if (unsorted_offsets[order[0]] == 0)
5694 return 1; /* stmia */
5696 if (unsorted_offsets[order[0]] == 4)
5697 return 2; /* stmib */
5699 if (unsorted_offsets[order[nops - 1]] == 0)
5700 return 3; /* stmda */
5702 if (unsorted_offsets[order[nops - 1]] == -4)
5703 return 4; /* stmdb */
5705 return 0;
5708 const char *
5709 emit_stm_seq (rtx *operands, int nops)
5711 int regs[4];
5712 int base_reg;
5713 HOST_WIDE_INT offset;
5714 char buf[100];
5715 int i;
5717 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5719 case 1:
5720 strcpy (buf, "stm%?ia\t");
5721 break;
5723 case 2:
5724 strcpy (buf, "stm%?ib\t");
5725 break;
5727 case 3:
5728 strcpy (buf, "stm%?da\t");
5729 break;
5731 case 4:
5732 strcpy (buf, "stm%?db\t");
5733 break;
5735 default:
5736 gcc_unreachable ();
5739 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5740 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5742 for (i = 1; i < nops; i++)
5743 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5744 reg_names[regs[i]]);
5746 strcat (buf, "}\t%@ phole stm");
5748 output_asm_insn (buf, operands);
5749 return "";
5753 /* Routines for use in generating RTL. */
5756 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5757 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5759 HOST_WIDE_INT offset = *offsetp;
5760 int i = 0, j;
5761 rtx result;
5762 int sign = up ? 1 : -1;
5763 rtx mem, addr;
5765 /* XScale has load-store double instructions, but they have stricter
5766 alignment requirements than load-store multiple, so we cannot
5767 use them.
5769 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5770 the pipeline until completion.
5772 NREGS CYCLES
1 3
2 4
3 5
4 6
5778 An ldr instruction takes 1-3 cycles, but does not block the
5779 pipeline.
5781 NREGS CYCLES
5782 1 1-3
5783 2 2-6
5784 3 3-9
5785 4 4-12
5787 Best case ldr will always win. However, the more ldr instructions
5788 we issue, the less likely we are to be able to schedule them well.
5789 Using ldr instructions also increases code size.
5791 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5792 for counts of 3 or 4 regs. */
5793 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5795 rtx seq;
5797 start_sequence ();
5799 for (i = 0; i < count; i++)
5801 addr = plus_constant (from, i * 4 * sign);
5802 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5803 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5804 offset += 4 * sign;
5807 if (write_back)
5809 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5810 *offsetp = offset;
5813 seq = get_insns ();
5814 end_sequence ();
5816 return seq;
5819 result = gen_rtx_PARALLEL (VOIDmode,
5820 rtvec_alloc (count + (write_back ? 1 : 0)));
5821 if (write_back)
5823 XVECEXP (result, 0, 0)
5824 = gen_rtx_SET (GET_MODE (from), from,
5825 plus_constant (from, count * 4 * sign));
5826 i = 1;
5827 count++;
5830 for (j = 0; i < count; i++, j++)
5832 addr = plus_constant (from, j * 4 * sign);
5833 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5834 XVECEXP (result, 0, i)
5835 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5836 offset += 4 * sign;
5839 if (write_back)
5840 *offsetp = offset;
5842 return result;
5846 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5847 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5849 HOST_WIDE_INT offset = *offsetp;
5850 int i = 0, j;
5851 rtx result;
5852 int sign = up ? 1 : -1;
5853 rtx mem, addr;
5855 /* See arm_gen_load_multiple for discussion of
5856 the pros/cons of ldm/stm usage for XScale. */
5857 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5859 rtx seq;
5861 start_sequence ();
5863 for (i = 0; i < count; i++)
5865 addr = plus_constant (to, i * 4 * sign);
5866 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5867 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5868 offset += 4 * sign;
5871 if (write_back)
5873 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5874 *offsetp = offset;
5877 seq = get_insns ();
5878 end_sequence ();
5880 return seq;
5883 result = gen_rtx_PARALLEL (VOIDmode,
5884 rtvec_alloc (count + (write_back ? 1 : 0)));
5885 if (write_back)
5887 XVECEXP (result, 0, 0)
5888 = gen_rtx_SET (GET_MODE (to), to,
5889 plus_constant (to, count * 4 * sign));
5890 i = 1;
5891 count++;
5894 for (j = 0; i < count; i++, j++)
5896 addr = plus_constant (to, j * 4 * sign);
5897 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5898 XVECEXP (result, 0, i)
5899 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5900 offset += 4 * sign;
5903 if (write_back)
5904 *offsetp = offset;
5906 return result;
5910 arm_gen_movmemqi (rtx *operands)
5912 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5913 HOST_WIDE_INT srcoffset, dstoffset;
5914 int i;
5915 rtx src, dst, srcbase, dstbase;
5916 rtx part_bytes_reg = NULL;
5917 rtx mem;
5919 if (GET_CODE (operands[2]) != CONST_INT
5920 || GET_CODE (operands[3]) != CONST_INT
5921 || INTVAL (operands[2]) > 64
5922 || INTVAL (operands[3]) & 3)
5923 return 0;
5925 dstbase = operands[0];
5926 srcbase = operands[1];
5928 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5929 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5931 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5932 out_words_to_go = INTVAL (operands[2]) / 4;
5933 last_bytes = INTVAL (operands[2]) & 3;
5934 dstoffset = srcoffset = 0;
5936 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5937 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5939 for (i = 0; in_words_to_go >= 2; i+=4)
5941 if (in_words_to_go > 4)
5942 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5943 srcbase, &srcoffset));
5944 else
5945 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5946 FALSE, srcbase, &srcoffset));
5948 if (out_words_to_go)
5950 if (out_words_to_go > 4)
5951 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5952 dstbase, &dstoffset));
5953 else if (out_words_to_go != 1)
5954 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5955 dst, TRUE,
5956 (last_bytes == 0
5957 ? FALSE : TRUE),
5958 dstbase, &dstoffset));
5959 else
5961 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5962 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5963 if (last_bytes != 0)
5965 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5966 dstoffset += 4;
5971 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5972 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5975 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5976 if (out_words_to_go)
5978 rtx sreg;
5980 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5981 sreg = copy_to_reg (mem);
5983 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5984 emit_move_insn (mem, sreg);
5985 in_words_to_go--;
5987 gcc_assert (!in_words_to_go); /* Sanity check */
5990 if (in_words_to_go)
5992 gcc_assert (in_words_to_go > 0);
5994 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5995 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5998 gcc_assert (!last_bytes || part_bytes_reg);
6000 if (BYTES_BIG_ENDIAN && last_bytes)
6002 rtx tmp = gen_reg_rtx (SImode);
6004 /* The bytes we want are in the top end of the word. */
6005 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6006 GEN_INT (8 * (4 - last_bytes))));
6007 part_bytes_reg = tmp;
6009 while (last_bytes)
6011 mem = adjust_automodify_address (dstbase, QImode,
6012 plus_constant (dst, last_bytes - 1),
6013 dstoffset + last_bytes - 1);
6014 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6016 if (--last_bytes)
6018 tmp = gen_reg_rtx (SImode);
6019 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6020 part_bytes_reg = tmp;
6025 else
6027 if (last_bytes > 1)
6029 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6030 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6031 last_bytes -= 2;
6032 if (last_bytes)
6034 rtx tmp = gen_reg_rtx (SImode);
6035 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6036 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6037 part_bytes_reg = tmp;
6038 dstoffset += 2;
6042 if (last_bytes)
6044 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6045 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6049 return 1;
6052 /* Generate a memory reference for a half word, such that it will be loaded
6053 into the top 16 bits of the word. We can assume that the address is
6054 known to be alignable and of the form reg, or plus (reg, const). */
6057 arm_gen_rotated_half_load (rtx memref)
6059 HOST_WIDE_INT offset = 0;
6060 rtx base = XEXP (memref, 0);
6062 if (GET_CODE (base) == PLUS)
6064 offset = INTVAL (XEXP (base, 1));
6065 base = XEXP (base, 0);
6068 /* If we aren't allowed to generate unaligned addresses, then fail. */
6069 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
6070 return NULL;
6072 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
6074 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
6075 return base;
6077 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
6080 /* Select a dominance comparison mode if possible for a test of the general
6081 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6082 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6083 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6084 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6085 In all cases OP will be either EQ or NE, but we don't need to know which
6086 here. If we are unable to support a dominance comparison we return
6087 CC mode. This will then fail to match for the RTL expressions that
6088 generate this call. */
6089 enum machine_mode
6090 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6092 enum rtx_code cond1, cond2;
6093 int swapped = 0;
6095 /* Currently we will probably get the wrong result if the individual
6096 comparisons are not simple. This also ensures that it is safe to
6097 reverse a comparison if necessary. */
6098 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6099 != CCmode)
6100 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6101 != CCmode))
6102 return CCmode;
6104 /* The if_then_else variant of this tests the second condition if the
6105 first passes, but is true if the first fails. Reverse the first
6106 condition to get a true "inclusive-or" expression. */
6107 if (cond_or == DOM_CC_NX_OR_Y)
6108 cond1 = reverse_condition (cond1);
6110 /* If the comparisons are not equal, and one doesn't dominate the other,
6111 then we can't do this. */
6112 if (cond1 != cond2
6113 && !comparison_dominates_p (cond1, cond2)
6114 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6115 return CCmode;
6117 if (swapped)
6119 enum rtx_code temp = cond1;
6120 cond1 = cond2;
6121 cond2 = temp;
6124 switch (cond1)
6126 case EQ:
6127 if (cond_or == DOM_CC_X_AND_Y)
6128 return CC_DEQmode;
6130 switch (cond2)
6132 case EQ: return CC_DEQmode;
6133 case LE: return CC_DLEmode;
6134 case LEU: return CC_DLEUmode;
6135 case GE: return CC_DGEmode;
6136 case GEU: return CC_DGEUmode;
6137 default: gcc_unreachable ();
6140 case LT:
6141 if (cond_or == DOM_CC_X_AND_Y)
6142 return CC_DLTmode;
6144 switch (cond2)
6146 case LT:
6147 return CC_DLTmode;
6148 case LE:
6149 return CC_DLEmode;
6150 case NE:
6151 return CC_DNEmode;
6152 default:
6153 gcc_unreachable ();
6156 case GT:
6157 if (cond_or == DOM_CC_X_AND_Y)
6158 return CC_DGTmode;
6160 switch (cond2)
6162 case GT:
6163 return CC_DGTmode;
6164 case GE:
6165 return CC_DGEmode;
6166 case NE:
6167 return CC_DNEmode;
6168 default:
6169 gcc_unreachable ();
6172 case LTU:
6173 if (cond_or == DOM_CC_X_AND_Y)
6174 return CC_DLTUmode;
6176 switch (cond2)
6178 case LTU:
6179 return CC_DLTUmode;
6180 case LEU:
6181 return CC_DLEUmode;
6182 case NE:
6183 return CC_DNEmode;
6184 default:
6185 gcc_unreachable ();
6188 case GTU:
6189 if (cond_or == DOM_CC_X_AND_Y)
6190 return CC_DGTUmode;
6192 switch (cond2)
6194 case GTU:
6195 return CC_DGTUmode;
6196 case GEU:
6197 return CC_DGEUmode;
6198 case NE:
6199 return CC_DNEmode;
6200 default:
6201 gcc_unreachable ();
6204 /* The remaining cases only occur when both comparisons are the
6205 same. */
6206 case NE:
6207 gcc_assert (cond1 == cond2);
6208 return CC_DNEmode;
6210 case LE:
6211 gcc_assert (cond1 == cond2);
6212 return CC_DLEmode;
6214 case GE:
6215 gcc_assert (cond1 == cond2);
6216 return CC_DGEmode;
6218 case LEU:
6219 gcc_assert (cond1 == cond2);
6220 return CC_DLEUmode;
6222 case GEU:
6223 gcc_assert (cond1 == cond2);
6224 return CC_DGEUmode;
6226 default:
6227 gcc_unreachable ();
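/* Return the CC mode that should be used when comparing X and Y using
   operator OP.  A specialized mode (CC_SWP, CC_Z, CC_NOOV, the
   dominance modes, etc.) is chosen when the flags can be produced by
   another operation or when the comparison must be emitted swapped;
   otherwise plain CCmode is returned.  */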
6231 enum machine_mode
6232 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6234 /* All floating point compares return CCFP if it is an equality
6235 comparison, and CCFPE otherwise. */
6236 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6238 switch (op)
6240 case EQ:
6241 case NE:
6242 case UNORDERED:
6243 case ORDERED:
6244 case UNLT:
6245 case UNLE:
6246 case UNGT:
6247 case UNGE:
6248 case UNEQ:
6249 case LTGT:
6250 return CCFPmode;
6252 case LT:
6253 case LE:
6254 case GT:
6255 case GE:
6256 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6257 return CCFPmode;
6258 return CCFPEmode;
6260 default:
6261 gcc_unreachable ();
6265 /* A compare with a shifted operand. Because of canonicalization, the
6266 comparison will have to be swapped when we emit the assembler. */
6267 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6268 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6269 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6270 || GET_CODE (x) == ROTATERT))
6271 return CC_SWPmode;
6273 /* This operation is performed swapped, but since we only rely on the Z
6274 flag we don't need an additional mode. */
6275 if (GET_MODE (y) == SImode && REG_P (y)
6276 && GET_CODE (x) == NEG
6277 && (op == EQ || op == NE))
6278 return CC_Zmode;
6280 /* This is a special case that is used by combine to allow a
6281 comparison of a shifted byte load to be split into a zero-extend
6282 followed by a comparison of the shifted integer (only valid for
6283 equalities and unsigned inequalities). */
6284 if (GET_MODE (x) == SImode
6285 && GET_CODE (x) == ASHIFT
6286 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6287 && GET_CODE (XEXP (x, 0)) == SUBREG
6288 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6289 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6290 && (op == EQ || op == NE
6291 || op == GEU || op == GTU || op == LTU || op == LEU)
6292 && GET_CODE (y) == CONST_INT)
6293 return CC_Zmode;
6295 /* A construct for a conditional compare: if the false arm contains
6296 0, then both conditions must be true; otherwise either condition
6297 must be true. Not all conditions are possible, so CCmode is
6298 returned if it can't be done. */
6299 if (GET_CODE (x) == IF_THEN_ELSE
6300 && (XEXP (x, 2) == const0_rtx
6301 || XEXP (x, 2) == const1_rtx)
6302 && COMPARISON_P (XEXP (x, 0))
6303 && COMPARISON_P (XEXP (x, 1)))
6304 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6305 INTVAL (XEXP (x, 2)));
6307 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6308 if (GET_CODE (x) == AND
6309 && COMPARISON_P (XEXP (x, 0))
6310 && COMPARISON_P (XEXP (x, 1)))
6311 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6312 DOM_CC_X_AND_Y);
6314 if (GET_CODE (x) == IOR
6315 && COMPARISON_P (XEXP (x, 0))
6316 && COMPARISON_P (XEXP (x, 1)))
6317 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6318 DOM_CC_X_OR_Y);
6320 /* An operation (on Thumb) where we want to test for a single bit.
6321 This is done by shifting that bit up into the top bit of a
6322 scratch register; we can then branch on the sign bit. */
6323 if (TARGET_THUMB
6324 && GET_MODE (x) == SImode
6325 && (op == EQ || op == NE)
6326 && (GET_CODE (x) == ZERO_EXTRACT))
6327 return CC_Nmode;
6329 /* An operation that sets the condition codes as a side-effect; the
6330 V flag is not set correctly, so we can only use comparisons where
6331 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6332 instead.) */
6333 if (GET_MODE (x) == SImode
6334 && y == const0_rtx
6335 && (op == EQ || op == NE || op == LT || op == GE)
6336 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6337 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6338 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6339 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6340 || GET_CODE (x) == LSHIFTRT
6341 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6342 || GET_CODE (x) == ROTATERT
6343 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6344 return CC_NOOVmode;
6346 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6347 return CC_Zmode;
6349 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6350 && GET_CODE (x) == PLUS
6351 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6352 return CC_Cmode;
6354 return CCmode;
6357 /* X and Y are two things to compare using CODE. Emit the compare insn and
6358 return the rtx for register 0 in the proper mode. FP means this is a
6359 floating-point compare: I don't think that it is needed on the ARM. */
6361 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6363 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6364 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6366 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6367 gen_rtx_COMPARE (mode, x, y)));
6369 return cc_reg;
6372 /* Generate a sequence of insns that will generate the correct return
6373 address mask depending on the physical architecture that the program
6374 is running on. */
6376 arm_gen_return_addr_mask (void)
6378 rtx reg = gen_reg_rtx (Pmode);
6380 emit_insn (gen_return_addr_mask (reg));
6381 return reg;
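/* Handle loading a half-word from memory during reload by synthesizing
   it as two byte loads which are then merged with a shift and IOR.
   OPERANDS[0] is the destination, OPERANDS[1] the memory reference and
   OPERANDS[2] a DImode scratch register.  */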
6384 void
6385 arm_reload_in_hi (rtx *operands)
6387 rtx ref = operands[1];
6388 rtx base, scratch;
6389 HOST_WIDE_INT offset = 0;
6391 if (GET_CODE (ref) == SUBREG)
6393 offset = SUBREG_BYTE (ref);
6394 ref = SUBREG_REG (ref);
6397 if (GET_CODE (ref) == REG)
6399 /* We have a pseudo which has been spilt onto the stack; there
6400 are two cases here: the first where there is a simple
6401 stack-slot replacement and a second where the stack-slot is
6402 out of range, or is used as a subreg. */
6403 if (reg_equiv_mem[REGNO (ref)])
6405 ref = reg_equiv_mem[REGNO (ref)];
6406 base = find_replacement (&XEXP (ref, 0));
6408 else
6409 /* The slot is out of range, or was dressed up in a SUBREG. */
6410 base = reg_equiv_address[REGNO (ref)];
6412 else
6413 base = find_replacement (&XEXP (ref, 0));
6415 /* Handle the case where the address is too complex to be offset by 1. */
6416 if (GET_CODE (base) == MINUS
6417 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6419 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6421 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6422 base = base_plus;
6424 else if (GET_CODE (base) == PLUS)
6426 /* The addend must be CONST_INT, or we would have dealt with it above. */
6427 HOST_WIDE_INT hi, lo;
6429 offset += INTVAL (XEXP (base, 1));
6430 base = XEXP (base, 0);
6432 /* Rework the address into a legal sequence of insns. */
6433 /* Valid range for lo is -4095 -> 4095 */
6434 lo = (offset >= 0
6435 ? (offset & 0xfff)
6436 : -((-offset) & 0xfff));
6438 /* Corner case: if lo is the max offset then we would be out of range
6439 once we have added the additional 1 below, so bump the msb into the
6440 pre-loading insn(s). */
6441 if (lo == 4095)
6442 lo &= 0x7ff;
6444 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6445 ^ (HOST_WIDE_INT) 0x80000000)
6446 - (HOST_WIDE_INT) 0x80000000);
6448 gcc_assert (hi + lo == offset);
6450 if (hi != 0)
6452 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6454 /* Get the base address; addsi3 knows how to handle constants
6455 that require more than one insn. */
6456 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6457 base = base_plus;
6458 offset = lo;
6462 /* Operands[2] may overlap operands[0] (though it won't overlap
6463 operands[1]), that's why we asked for a DImode reg -- so we can
6464 use the bit that does not overlap. */
6465 if (REGNO (operands[2]) == REGNO (operands[0]))
6466 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6467 else
6468 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6470 emit_insn (gen_zero_extendqisi2 (scratch,
6471 gen_rtx_MEM (QImode,
6472 plus_constant (base,
6473 offset))));
6474 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6475 gen_rtx_MEM (QImode,
6476 plus_constant (base,
6477 offset + 1))));
6478 if (!BYTES_BIG_ENDIAN)
6479 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6480 gen_rtx_IOR (SImode,
6481 gen_rtx_ASHIFT
6482 (SImode,
6483 gen_rtx_SUBREG (SImode, operands[0], 0),
6484 GEN_INT (8)),
6485 scratch)));
6486 else
6487 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6488 gen_rtx_IOR (SImode,
6489 gen_rtx_ASHIFT (SImode, scratch,
6490 GEN_INT (8)),
6491 gen_rtx_SUBREG (SImode, operands[0],
6492 0))));
6495 /* Handle storing a half-word to memory during reload by synthesizing as two
6496 byte stores. Take care not to clobber the input values until after we
6497 have moved them somewhere safe. This code assumes that if the DImode
6498 scratch in operands[2] overlaps either the input value or output address
6499 in some way, then that value must die in this insn (we absolutely need
6500 two scratch registers for some corner cases). */
6501 void
6502 arm_reload_out_hi (rtx *operands)
6504 rtx ref = operands[0];
6505 rtx outval = operands[1];
6506 rtx base, scratch;
6507 HOST_WIDE_INT offset = 0;
6509 if (GET_CODE (ref) == SUBREG)
6511 offset = SUBREG_BYTE (ref);
6512 ref = SUBREG_REG (ref);
6515 if (GET_CODE (ref) == REG)
6517 /* We have a pseudo which has been spilt onto the stack; there
6518 are two cases here: the first where there is a simple
6519 stack-slot replacement and a second where the stack-slot is
6520 out of range, or is used as a subreg. */
6521 if (reg_equiv_mem[REGNO (ref)])
6523 ref = reg_equiv_mem[REGNO (ref)];
6524 base = find_replacement (&XEXP (ref, 0));
6526 else
6527 /* The slot is out of range, or was dressed up in a SUBREG. */
6528 base = reg_equiv_address[REGNO (ref)];
6530 else
6531 base = find_replacement (&XEXP (ref, 0));
6533 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6535 /* Handle the case where the address is too complex to be offset by 1. */
6536 if (GET_CODE (base) == MINUS
6537 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6539 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6541 /* Be careful not to destroy OUTVAL. */
6542 if (reg_overlap_mentioned_p (base_plus, outval))
6544 /* Updating base_plus might destroy outval, see if we can
6545 swap the scratch and base_plus. */
6546 if (!reg_overlap_mentioned_p (scratch, outval))
6548 rtx tmp = scratch;
6549 scratch = base_plus;
6550 base_plus = tmp;
6552 else
6554 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6556 /* Be conservative and copy OUTVAL into the scratch now,
6557 this should only be necessary if outval is a subreg
6558 of something larger than a word. */
6559 /* XXX Might this clobber base? I can't see how it can,
6560 since scratch is known to overlap with OUTVAL, and
6561 must be wider than a word. */
6562 emit_insn (gen_movhi (scratch_hi, outval));
6563 outval = scratch_hi;
6567 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6568 base = base_plus;
6570 else if (GET_CODE (base) == PLUS)
6572 /* The addend must be CONST_INT, or we would have dealt with it above. */
6573 HOST_WIDE_INT hi, lo;
6575 offset += INTVAL (XEXP (base, 1));
6576 base = XEXP (base, 0);
6578 /* Rework the address into a legal sequence of insns. */
6579 /* Valid range for lo is -4095 -> 4095 */
6580 lo = (offset >= 0
6581 ? (offset & 0xfff)
6582 : -((-offset) & 0xfff));
6584 /* Corner case: if lo is the max offset then we would be out of range
6585 once we have added the additional 1 below, so bump the msb into the
6586 pre-loading insn(s). */
6587 if (lo == 4095)
6588 lo &= 0x7ff;
6590 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6591 ^ (HOST_WIDE_INT) 0x80000000)
6592 - (HOST_WIDE_INT) 0x80000000);
6594 gcc_assert (hi + lo == offset);
6596 if (hi != 0)
6598 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6600 /* Be careful not to destroy OUTVAL. */
6601 if (reg_overlap_mentioned_p (base_plus, outval))
6603 /* Updating base_plus might destroy outval, see if we
6604 can swap the scratch and base_plus. */
6605 if (!reg_overlap_mentioned_p (scratch, outval))
6607 rtx tmp = scratch;
6608 scratch = base_plus;
6609 base_plus = tmp;
6611 else
6613 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6615 /* Be conservative and copy outval into scratch now,
6616 this should only be necessary if outval is a
6617 subreg of something larger than a word. */
6618 /* XXX Might this clobber base? I can't see how it
6619 can, since scratch is known to overlap with
6620 outval. */
6621 emit_insn (gen_movhi (scratch_hi, outval));
6622 outval = scratch_hi;
6626 /* Get the base address; addsi3 knows how to handle constants
6627 that require more than one insn. */
6628 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6629 base = base_plus;
6630 offset = lo;
6634 if (BYTES_BIG_ENDIAN)
6636 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6637 plus_constant (base, offset + 1)),
6638 gen_lowpart (QImode, outval)));
6639 emit_insn (gen_lshrsi3 (scratch,
6640 gen_rtx_SUBREG (SImode, outval, 0),
6641 GEN_INT (8)));
6642 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6643 gen_lowpart (QImode, scratch)));
6645 else
6647 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6648 gen_lowpart (QImode, outval)));
6649 emit_insn (gen_lshrsi3 (scratch,
6650 gen_rtx_SUBREG (SImode, outval, 0),
6651 GEN_INT (8)));
6652 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6653 plus_constant (base, offset + 1)),
6654 gen_lowpart (QImode, scratch)));
6658 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
6659 (padded to the size of a word) should be passed in a register. */
6661 static bool
6662 arm_must_pass_in_stack (enum machine_mode mode, tree type)
6664 if (TARGET_AAPCS_BASED)
6665 return must_pass_in_stack_var_size (mode, type);
6666 else
6667 return must_pass_in_stack_var_size_or_pad (mode, type);
6671 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
6672 Return true if an argument passed on the stack should be padded upwards,
6673 i.e. if the least-significant byte has useful data. */
6675 bool
6676 arm_pad_arg_upward (enum machine_mode mode, tree type)
6678 if (!TARGET_AAPCS_BASED)
6679 return DEFAULT_FUNCTION_ARG_PADDING(mode, type);
6681 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
6682 return false;
6684 return true;
6688 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
6689 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
6690 byte of the register has useful data, and return the opposite if the
6691 most significant byte does.
6692 For AAPCS, small aggregates and small complex types are always padded
6693 upwards. */
6695 bool
6696 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
6697 tree type, int first ATTRIBUTE_UNUSED)
6699 if (TARGET_AAPCS_BASED
6700 && BYTES_BIG_ENDIAN
6701 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
6702 && int_size_in_bytes (type) <= 4)
6703 return true;
6705 /* Otherwise, use default padding. */
6706 return !BYTES_BIG_ENDIAN;
6711 /* Print a symbolic form of X to the debug file, F. */
6712 static void
6713 arm_print_value (FILE *f, rtx x)
6715 switch (GET_CODE (x))
6717 case CONST_INT:
6718 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6719 return;
6721 case CONST_DOUBLE:
6722 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6723 return;
6725 case CONST_VECTOR:
6727 int i;
6729 fprintf (f, "<");
6730 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6732 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6733 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6734 fputc (',', f);
6736 fprintf (f, ">");
6738 return;
6740 case CONST_STRING:
6741 fprintf (f, "\"%s\"", XSTR (x, 0));
6742 return;
6744 case SYMBOL_REF:
6745 fprintf (f, "`%s'", XSTR (x, 0));
6746 return;
6748 case LABEL_REF:
6749 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6750 return;
6752 case CONST:
6753 arm_print_value (f, XEXP (x, 0));
6754 return;
6756 case PLUS:
6757 arm_print_value (f, XEXP (x, 0));
6758 fprintf (f, "+");
6759 arm_print_value (f, XEXP (x, 1));
6760 return;
6762 case PC:
6763 fprintf (f, "pc");
6764 return;
6766 default:
6767 fprintf (f, "????");
6768 return;
6772 /* Routines for manipulation of the constant pool. */
6774 /* ARM instructions cannot load a large constant directly into a
6775 register; they have to come from a pc relative load. The constant
6776 must therefore be placed in the addressable range of the pc
6777 relative load. Depending on the precise pc relative load
6778 instruction the range is somewhere between 256 bytes and 4k. This
6779 means that we often have to dump a constant inside a function, and
6780 generate code to branch around it.
6782 It is important to minimize this, since the branches will slow
6783 things down and make the code larger.
6785 Normally we can hide the table after an existing unconditional
6786 branch so that there is no interruption of the flow, but in the
6787 worst case the code looks like this:
6789 ldr rn, L1
6791 b L2
6792 align
6793 L1: .long value
6797 ldr rn, L3
6799 b L4
6800 align
6801 L3: .long value
6805 We fix this by performing a scan after scheduling, which notices
6806 which instructions need to have their operands fetched from the
6807 constant table and builds the table.
6809 The algorithm starts by building a table of all the constants that
6810 need fixing up and all the natural barriers in the function (places
6811 where a constant table can be dropped without breaking the flow).
6812 For each fixup we note how far the pc-relative replacement will be
6813 able to reach and the offset of the instruction into the function.
6815 Having built the table we then group the fixes together to form
6816 tables that are as large as possible (subject to addressing
6817 constraints) and emit each table of constants after the last
6818 barrier that is within range of all the instructions in the group.
6819 If a group does not contain a barrier, then we forcibly create one
6820 by inserting a jump instruction into the flow. Once the table has
6821 been inserted, the insns are then modified to reference the
6822 relevant entry in the pool.
6824 Possible enhancements to the algorithm (not implemented) are:
6826 1) For some processors and object formats, there may be benefit in
6827 aligning the pools to the start of cache lines; this alignment
6828 would need to be taken into account when calculating addressability
6829 of a pool. */
6831 /* These typedefs are located at the start of this file, so that
6832 they can be used in the prototypes there. This comment is to
6833 remind readers of that fact so that the following structures
6834 can be understood more easily.
6836 typedef struct minipool_node Mnode;
6837 typedef struct minipool_fixup Mfix; */
6839 struct minipool_node
6841 /* Doubly linked chain of entries. */
6842 Mnode * next;
6843 Mnode * prev;
6844 /* The maximum offset into the code at which this entry can be placed. While
6845 pushing fixes for forward references, all entries are sorted in order
6846 of increasing max_address. */
6847 HOST_WIDE_INT max_address;
6848 /* Similarly for an entry inserted for a backwards ref. */
6849 HOST_WIDE_INT min_address;
6850 /* The number of fixes referencing this entry. This can become zero
6851 if we "unpush" an entry. In this case we ignore the entry when we
6852 come to emit the code. */
6853 int refcount;
6854 /* The offset from the start of the minipool. */
6855 HOST_WIDE_INT offset;
6856 /* The value in the table. */
6857 rtx value;
6858 /* The mode of value. */
6859 enum machine_mode mode;
6860 /* The size of the value. With iWMMXt enabled,
6861 sizes > 4 also imply an alignment of 8 bytes. */
6862 int fix_size;
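/* A fixup describes one instruction whose constant operand must be
   rewritten into a minipool load: the insn itself, its address within
   the function, the location and mode of the operand, the required
   value, the pool entry eventually assigned, and how far forwards and
   backwards the pc-relative load can reach.  */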
6865 struct minipool_fixup
6867 Mfix * next;
6868 rtx insn;
6869 HOST_WIDE_INT address;
6870 rtx * loc;
6871 enum machine_mode mode;
6872 int fix_size;
6873 rtx value;
6874 Mnode * minipool;
6875 HOST_WIDE_INT forwards;
6876 HOST_WIDE_INT backwards;
6879 /* Fixes less than a word need padding out to a word boundary. */
6880 #define MINIPOOL_FIX_SIZE(mode) \
6881 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
6883 static Mnode * minipool_vector_head;
6884 static Mnode * minipool_vector_tail;
6885 static rtx minipool_vector_label;
6887 /* The linked list of all minipool fixes required for this function. */
6888 Mfix * minipool_fix_head;
6889 Mfix * minipool_fix_tail;
6890 /* The fix entry for the current minipool, once it has been placed. */
6891 Mfix * minipool_barrier;
6893 /* Determines if INSN is the start of a jump table. Returns the end
6894 of the TABLE or NULL_RTX. */
6895 static rtx
6896 is_jump_table (rtx insn)
6898 rtx table;
6900 if (GET_CODE (insn) == JUMP_INSN
6901 && JUMP_LABEL (insn) != NULL
6902 && ((table = next_real_insn (JUMP_LABEL (insn)))
6903 == next_real_insn (insn))
6904 && table != NULL
6905 && GET_CODE (table) == JUMP_INSN
6906 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6907 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6908 return table;
6910 return NULL_RTX;
6913 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6914 #define JUMP_TABLES_IN_TEXT_SECTION 0
6915 #endif
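/* Return the number of bytes that jump table INSN occupies in the
   instruction stream, or zero if jump tables are emitted into a
   separate read-only data section.  */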
6917 static HOST_WIDE_INT
6918 get_jump_table_size (rtx insn)
6920 /* ADDR_VECs only take room if read-only data goes into the text
6921 section. */
6922 if (JUMP_TABLES_IN_TEXT_SECTION
6923 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6924 || 1
6925 #endif
6928 rtx body = PATTERN (insn);
6929 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6931 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6934 return 0;
6937 /* Move a minipool fix MP from its current location to before MAX_MP.
6938 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6939 constraints may need updating. */
6940 static Mnode *
6941 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6942 HOST_WIDE_INT max_address)
6944 /* The code below assumes these are different. */
6945 gcc_assert (mp != max_mp);
6947 if (max_mp == NULL)
6949 if (max_address < mp->max_address)
6950 mp->max_address = max_address;
6952 else
6954 if (max_address > max_mp->max_address - mp->fix_size)
6955 mp->max_address = max_mp->max_address - mp->fix_size;
6956 else
6957 mp->max_address = max_address;
6959 /* Unlink MP from its current position. Since max_mp is non-null,
6960 mp->prev must be non-null. */
6961 mp->prev->next = mp->next;
6962 if (mp->next != NULL)
6963 mp->next->prev = mp->prev;
6964 else
6965 minipool_vector_tail = mp->prev;
6967 /* Re-insert it before MAX_MP. */
6968 mp->next = max_mp;
6969 mp->prev = max_mp->prev;
6970 max_mp->prev = mp;
6972 if (mp->prev != NULL)
6973 mp->prev->next = mp;
6974 else
6975 minipool_vector_head = mp;
6978 /* Save the new entry. */
6979 max_mp = mp;
6981 /* Scan over the preceding entries and adjust their addresses as
6982 required. */
6983 while (mp->prev != NULL
6984 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6986 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6987 mp = mp->prev;
6990 return max_mp;
6993 /* Add a constant to the minipool for a forward reference. Returns the
6994 node added or NULL if the constant will not fit in this pool. */
6995 static Mnode *
6996 add_minipool_forward_ref (Mfix *fix)
6998 /* If set, max_mp is the first pool_entry that has a lower
6999 constraint than the one we are trying to add. */
7000 Mnode * max_mp = NULL;
7001 HOST_WIDE_INT max_address = fix->address + fix->forwards;
7002 Mnode * mp;
7004 /* If this fix's address is greater than the address of the first
7005 entry, then we can't put the fix in this pool. We subtract the
7006 size of the current fix to ensure that if the table is fully
7007 packed we still have enough room to insert this value by shuffling
7008 the other fixes forwards. */
7009 if (minipool_vector_head &&
7010 fix->address >= minipool_vector_head->max_address - fix->fix_size)
7011 return NULL;
7013 /* Scan the pool to see if a constant with the same value has
7014 already been added. While we are doing this, also note the
7015 location where we must insert the constant if it doesn't already
7016 exist. */
7017 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7019 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7020 && fix->mode == mp->mode
7021 && (GET_CODE (fix->value) != CODE_LABEL
7022 || (CODE_LABEL_NUMBER (fix->value)
7023 == CODE_LABEL_NUMBER (mp->value)))
7024 && rtx_equal_p (fix->value, mp->value))
7026 /* More than one fix references this entry. */
7027 mp->refcount++;
7028 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7031 /* Note the insertion point if necessary. */
7032 if (max_mp == NULL
7033 && mp->max_address > max_address)
7034 max_mp = mp;
7036 /* If we are inserting an 8-byte aligned quantity and
7037 we have not already found an insertion point, then
7038 make sure that all such 8-byte aligned quantities are
7039 placed at the start of the pool. */
7040 if (ARM_DOUBLEWORD_ALIGN
7041 && max_mp == NULL
7042 && fix->fix_size == 8
7043 && mp->fix_size != 8)
7045 max_mp = mp;
7046 max_address = mp->max_address;
7050 /* The value is not currently in the minipool, so we need to create
7051 a new entry for it. If MAX_MP is NULL, the entry will be put on
7052 the end of the list since the placement is less constrained than
7053 any existing entry. Otherwise, we insert the new fix before
7054 MAX_MP and, if necessary, adjust the constraints on the other
7055 entries. */
7056 mp = xmalloc (sizeof (* mp));
7057 mp->fix_size = fix->fix_size;
7058 mp->mode = fix->mode;
7059 mp->value = fix->value;
7060 mp->refcount = 1;
7061 /* Not yet required for a backwards ref. */
7062 mp->min_address = -65536;
7064 if (max_mp == NULL)
7066 mp->max_address = max_address;
7067 mp->next = NULL;
7068 mp->prev = minipool_vector_tail;
7070 if (mp->prev == NULL)
7072 minipool_vector_head = mp;
7073 minipool_vector_label = gen_label_rtx ();
7075 else
7076 mp->prev->next = mp;
7078 minipool_vector_tail = mp;
7080 else
7082 if (max_address > max_mp->max_address - mp->fix_size)
7083 mp->max_address = max_mp->max_address - mp->fix_size;
7084 else
7085 mp->max_address = max_address;
7087 mp->next = max_mp;
7088 mp->prev = max_mp->prev;
7089 max_mp->prev = mp;
7090 if (mp->prev != NULL)
7091 mp->prev->next = mp;
7092 else
7093 minipool_vector_head = mp;
7096 /* Save the new entry. */
7097 max_mp = mp;
7099 /* Scan over the preceding entries and adjust their addresses as
7100 required. */
7101 while (mp->prev != NULL
7102 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7104 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7105 mp = mp->prev;
7108 return max_mp;
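/* Move a minipool fix MP from its current location to after MIN_MP.
   If MIN_MP is NULL then MP doesn't need moving, but its minimum
   address may still need tightening.  In either case the offsets and
   minimum addresses of the following entries are updated.  Returns the
   new MIN_MP.  */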
7111 static Mnode *
7112 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7113 HOST_WIDE_INT min_address)
7115 HOST_WIDE_INT offset;
7117 /* The code below assumes these are different. */
7118 gcc_assert (mp != min_mp);
7120 if (min_mp == NULL)
7122 if (min_address > mp->min_address)
7123 mp->min_address = min_address;
7125 else
7127 /* We will adjust this below if it is too loose. */
7128 mp->min_address = min_address;
7130 /* Unlink MP from its current position. Since min_mp is non-null,
7131 mp->next must be non-null. */
7132 mp->next->prev = mp->prev;
7133 if (mp->prev != NULL)
7134 mp->prev->next = mp->next;
7135 else
7136 minipool_vector_head = mp->next;
7138 /* Reinsert it after MIN_MP. */
7139 mp->prev = min_mp;
7140 mp->next = min_mp->next;
7141 min_mp->next = mp;
7142 if (mp->next != NULL)
7143 mp->next->prev = mp;
7144 else
7145 minipool_vector_tail = mp;
7148 min_mp = mp;
7150 offset = 0;
7151 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7153 mp->offset = offset;
7154 if (mp->refcount > 0)
7155 offset += mp->fix_size;
7157 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7158 mp->next->min_address = mp->min_address + mp->fix_size;
7161 return min_mp;
7164 /* Add a constant to the minipool for a backward reference. Returns the
7165 node added or NULL if the constant will not fit in this pool.
7167 Note that the code for insertion for a backwards reference can be
7168 somewhat confusing because the calculated offsets for each fix do
7169 not take into account the size of the pool (which is still under
7170 construction). */
7171 static Mnode *
7172 add_minipool_backward_ref (Mfix *fix)
7174 /* If set, min_mp is the last pool_entry that has a lower constraint
7175 than the one we are trying to add. */
7176 Mnode *min_mp = NULL;
7177 /* This can be negative, since it is only a constraint. */
7178 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7179 Mnode *mp;
7181 /* If we can't reach the current pool from this insn, or if we can't
7182 insert this entry at the end of the pool without pushing other
7183 fixes out of range, then we don't try. This ensures that we
7184 can't fail later on. */
7185 if (min_address >= minipool_barrier->address
7186 || (minipool_vector_tail->min_address + fix->fix_size
7187 >= minipool_barrier->address))
7188 return NULL;
7190 /* Scan the pool to see if a constant with the same value has
7191 already been added. While we are doing this, also note the
7192 location where we must insert the constant if it doesn't already
7193 exist. */
7194 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7196 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7197 && fix->mode == mp->mode
7198 && (GET_CODE (fix->value) != CODE_LABEL
7199 || (CODE_LABEL_NUMBER (fix->value)
7200 == CODE_LABEL_NUMBER (mp->value)))
7201 && rtx_equal_p (fix->value, mp->value)
7202 /* Check that there is enough slack to move this entry to the
7203 end of the table (this is conservative). */
7204 && (mp->max_address
7205 > (minipool_barrier->address
7206 + minipool_vector_tail->offset
7207 + minipool_vector_tail->fix_size)))
7209 mp->refcount++;
7210 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7213 if (min_mp != NULL)
7214 mp->min_address += fix->fix_size;
7215 else
7217 /* Note the insertion point if necessary. */
7218 if (mp->min_address < min_address)
7220 /* For now, we do not allow the insertion of nodes requiring 8-byte
7221 alignment anywhere but at the start of the pool. */
7222 if (ARM_DOUBLEWORD_ALIGN
7223 && fix->fix_size == 8 && mp->fix_size != 8)
7224 return NULL;
7225 else
7226 min_mp = mp;
7228 else if (mp->max_address
7229 < minipool_barrier->address + mp->offset + fix->fix_size)
7231 /* Inserting before this entry would push the fix beyond
7232 its maximum address (which can happen if we have
7233 re-located a forwards fix); force the new fix to come
7234 after it. */
7235 min_mp = mp;
7236 min_address = mp->min_address + fix->fix_size;
7238 /* If we are inserting an 8-byte aligned quantity and
7239 we have not already found an insertion point, then
7240 make sure that all such 8-byte aligned quantities are
7241 placed at the start of the pool. */
7242 else if (ARM_DOUBLEWORD_ALIGN
7243 && min_mp == NULL
7244 && fix->fix_size == 8
7245 && mp->fix_size < 8)
7247 min_mp = mp;
7248 min_address = mp->min_address + fix->fix_size;
7253 /* We need to create a new entry. */
7254 mp = xmalloc (sizeof (* mp));
7255 mp->fix_size = fix->fix_size;
7256 mp->mode = fix->mode;
7257 mp->value = fix->value;
7258 mp->refcount = 1;
7259 mp->max_address = minipool_barrier->address + 65536;
7261 mp->min_address = min_address;
7263 if (min_mp == NULL)
7265 mp->prev = NULL;
7266 mp->next = minipool_vector_head;
7268 if (mp->next == NULL)
7270 minipool_vector_tail = mp;
7271 minipool_vector_label = gen_label_rtx ();
7273 else
7274 mp->next->prev = mp;
7276 minipool_vector_head = mp;
7278 else
7280 mp->next = min_mp->next;
7281 mp->prev = min_mp;
7282 min_mp->next = mp;
7284 if (mp->next != NULL)
7285 mp->next->prev = mp;
7286 else
7287 minipool_vector_tail = mp;
7290 /* Save the new entry. */
7291 min_mp = mp;
7293 if (mp->prev)
7294 mp = mp->prev;
7295 else
7296 mp->offset = 0;
7298 /* Scan over the following entries and adjust their offsets. */
7299 while (mp->next != NULL)
7301 if (mp->next->min_address < mp->min_address + mp->fix_size)
7302 mp->next->min_address = mp->min_address + mp->fix_size;
7304 if (mp->refcount)
7305 mp->next->offset = mp->offset + mp->fix_size;
7306 else
7307 mp->next->offset = mp->offset;
7309 mp = mp->next;
7312 return min_mp;
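/* Record BARRIER as the barrier for the minipool that is about to be
   emitted, and assign a final offset to every entry in the pool;
   entries whose refcount has dropped to zero occupy no space.  */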
7315 static void
7316 assign_minipool_offsets (Mfix *barrier)
7318 HOST_WIDE_INT offset = 0;
7319 Mnode *mp;
7321 minipool_barrier = barrier;
7323 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7325 mp->offset = offset;
7327 if (mp->refcount > 0)
7328 offset += mp->fix_size;
7332 /* Output the literal table */
7333 static void
7334 dump_minipool (rtx scan)
7336 Mnode * mp;
7337 Mnode * nmp;
7338 int align64 = 0;
7340 if (ARM_DOUBLEWORD_ALIGN)
7341 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7342 if (mp->refcount > 0 && mp->fix_size == 8)
7344 align64 = 1;
7345 break;
7348 if (dump_file)
7349 fprintf (dump_file,
7350 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7351 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7353 scan = emit_label_after (gen_label_rtx (), scan);
7354 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7355 scan = emit_label_after (minipool_vector_label, scan);
7357 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7359 if (mp->refcount > 0)
7361 if (dump_file)
7363 fprintf (dump_file,
7364 ";; Offset %u, min %ld, max %ld ",
7365 (unsigned) mp->offset, (unsigned long) mp->min_address,
7366 (unsigned long) mp->max_address);
7367 arm_print_value (dump_file, mp->value);
7368 fputc ('\n', dump_file);
7371 switch (mp->fix_size)
7373 #ifdef HAVE_consttable_1
7374 case 1:
7375 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7376 break;
7378 #endif
7379 #ifdef HAVE_consttable_2
7380 case 2:
7381 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7382 break;
7384 #endif
7385 #ifdef HAVE_consttable_4
7386 case 4:
7387 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7388 break;
7390 #endif
7391 #ifdef HAVE_consttable_8
7392 case 8:
7393 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7394 break;
7396 #endif
7397 default:
7398 gcc_unreachable ();
7402 nmp = mp->next;
7403 free (mp);
7406 minipool_vector_head = minipool_vector_tail = NULL;
7407 scan = emit_insn_after (gen_consttable_end (), scan);
7408 scan = emit_barrier_after (scan);
7411 /* Return the cost of forcibly inserting a barrier after INSN. */
7412 static int
7413 arm_barrier_cost (rtx insn)
7415 /* Basing the location of the pool on the loop depth is preferable,
7416 but at the moment, the basic block information seems to be
7417 corrupted by this stage of the compilation. */
7418 int base_cost = 50;
7419 rtx next = next_nonnote_insn (insn);
7421 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7422 base_cost -= 20;
7424 switch (GET_CODE (insn))
7426 case CODE_LABEL:
7427 /* It will always be better to place the table before the label, rather
7428 than after it. */
7429 return 50;
7431 case INSN:
7432 case CALL_INSN:
7433 return base_cost;
7435 case JUMP_INSN:
7436 return base_cost - 10;
7438 default:
7439 return base_cost + 10;
7443 /* Find the best place in the insn stream in the range
7444 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7445 Create the barrier by inserting a jump and add a new fix entry for
7446 it. */
7447 static Mfix *
7448 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7450 HOST_WIDE_INT count = 0;
7451 rtx barrier;
7452 rtx from = fix->insn;
7453 rtx selected = from;
7454 int selected_cost;
7455 HOST_WIDE_INT selected_address;
7456 Mfix * new_fix;
7457 HOST_WIDE_INT max_count = max_address - fix->address;
7458 rtx label = gen_label_rtx ();
7460 selected_cost = arm_barrier_cost (from);
7461 selected_address = fix->address;
7463 while (from && count < max_count)
7465 rtx tmp;
7466 int new_cost;
7468 /* This code shouldn't have been called if there was a natural barrier
7469 within range. */
7470 gcc_assert (GET_CODE (from) != BARRIER);
7472 /* Count the length of this insn. */
7473 count += get_attr_length (from);
7475 /* If there is a jump table, add its length. */
7476 tmp = is_jump_table (from);
7477 if (tmp != NULL)
7479 count += get_jump_table_size (tmp);
7481 /* Jump tables aren't in a basic block, so base the cost on
7482 the dispatch insn. If we select this location, we will
7483 still put the pool after the table. */
7484 new_cost = arm_barrier_cost (from);
7486 if (count < max_count && new_cost <= selected_cost)
7488 selected = tmp;
7489 selected_cost = new_cost;
7490 selected_address = fix->address + count;
7493 /* Continue after the dispatch table. */
7494 from = NEXT_INSN (tmp);
7495 continue;
7498 new_cost = arm_barrier_cost (from);
7500 if (count < max_count && new_cost <= selected_cost)
7502 selected = from;
7503 selected_cost = new_cost;
7504 selected_address = fix->address + count;
7507 from = NEXT_INSN (from);
7510 /* Create a new JUMP_INSN that branches around a barrier. */
7511 from = emit_jump_insn_after (gen_jump (label), selected);
7512 JUMP_LABEL (from) = label;
7513 barrier = emit_barrier_after (from);
7514 emit_label_after (label, barrier);
7516 /* Create a minipool barrier entry for the new barrier. */
7517 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7518 new_fix->insn = barrier;
7519 new_fix->address = selected_address;
7520 new_fix->next = fix->next;
7521 fix->next = new_fix;
7523 return new_fix;
7526 /* Record that there is a natural barrier in the insn stream at
7527 ADDRESS. */
7528 static void
7529 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7531 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7533 fix->insn = insn;
7534 fix->address = address;
7536 fix->next = NULL;
7537 if (minipool_fix_head != NULL)
7538 minipool_fix_tail->next = fix;
7539 else
7540 minipool_fix_head = fix;
7542 minipool_fix_tail = fix;
7545 /* Record INSN, which will need fixing up to load a value from the
7546 minipool. ADDRESS is the offset of the insn since the start of the
7547 function; LOC is a pointer to the part of the insn which requires
7548 fixing; VALUE is the constant that must be loaded, which is of type
7549 MODE. */
7550 static void
7551 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7552 enum machine_mode mode, rtx value)
7554 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7556 #ifdef AOF_ASSEMBLER
7557 /* PIC symbol references need to be converted into offsets into the
7558 based area. */
7559 /* XXX This shouldn't be done here. */
7560 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7561 value = aof_pic_entry (value);
7562 #endif /* AOF_ASSEMBLER */
7564 fix->insn = insn;
7565 fix->address = address;
7566 fix->loc = loc;
7567 fix->mode = mode;
7568 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7569 fix->value = value;
7570 fix->forwards = get_attr_pool_range (insn);
7571 fix->backwards = get_attr_neg_pool_range (insn);
7572 fix->minipool = NULL;
7574 /* If an insn doesn't have a range defined for it, then it isn't
7575 expecting to be reworked by this code. Better to stop now than
7576 to generate duff assembly code. */
7577 gcc_assert (fix->forwards || fix->backwards);
7579 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7580 So there might be an empty word before the start of the pool.
7581 Hence we reduce the forward range by 4 to allow for this
7582 possibility. */
7583 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7584 fix->forwards -= 4;
7586 if (dump_file)
7588 fprintf (dump_file,
7589 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7590 GET_MODE_NAME (mode),
7591 INSN_UID (insn), (unsigned long) address,
7592 -1 * (long)fix->backwards, (long)fix->forwards);
7593 arm_print_value (dump_file, fix->value);
7594 fprintf (dump_file, "\n");
7597 /* Add it to the chain of fixes. */
7598 fix->next = NULL;
7600 if (minipool_fix_head != NULL)
7601 minipool_fix_tail->next = fix;
7602 else
7603 minipool_fix_head = fix;
7605 minipool_fix_tail = fix;
7608 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7609 Returns the number of insns needed, or 99 if we don't know how to
7610 do it. */
7612 arm_const_double_inline_cost (rtx val)
7614 rtx lowpart, highpart;
7615 enum machine_mode mode;
7617 mode = GET_MODE (val);
7619 if (mode == VOIDmode)
7620 mode = DImode;
7622 gcc_assert (GET_MODE_SIZE (mode) == 8);
7624 lowpart = gen_lowpart (SImode, val);
7625 highpart = gen_highpart_mode (SImode, mode, val);
7627 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7628 gcc_assert (GET_CODE (highpart) == CONST_INT);
7630 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7631 NULL_RTX, NULL_RTX, 0, 0)
7632 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7633 NULL_RTX, NULL_RTX, 0, 0));
7636 /* Return true if it is worthwhile to split a 64-bit constant into two
7637 32-bit operations. This is the case if optimizing for size, or
7638 if we have load delay slots, or if one 32-bit part can be done with
7639 a single data operation. */
7640 bool
7641 arm_const_double_by_parts (rtx val)
7643 enum machine_mode mode = GET_MODE (val);
7644 rtx part;
7646 if (optimize_size || arm_ld_sched)
7647 return true;
7649 if (mode == VOIDmode)
7650 mode = DImode;
7652 part = gen_highpart_mode (SImode, mode, val);
7654 gcc_assert (GET_CODE (part) == CONST_INT);
7656 if (const_ok_for_arm (INTVAL (part))
7657 || const_ok_for_arm (~INTVAL (part)))
7658 return true;
7660 part = gen_lowpart (SImode, val);
7662 gcc_assert (GET_CODE (part) == CONST_INT);
7664 if (const_ok_for_arm (INTVAL (part))
7665 || const_ok_for_arm (~INTVAL (part)))
7666 return true;
7668 return false;
7671 /* Scan INSN and note any of its operands that need fixing.
7672 If DO_PUSHES is false we do not actually push any of the fixups
7673 needed. The function returns TRUE if any fixups were needed/pushed.
7674 This is used by arm_memory_load_p() which needs to know about loads
7675 of constants that will be converted into minipool loads. */
7676 static bool
7677 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7679 bool result = false;
7680 int opno;
7682 extract_insn (insn);
7684 if (!constrain_operands (1))
7685 fatal_insn_not_found (insn);
7687 if (recog_data.n_alternatives == 0)
7688 return false;
7690 /* Fill in recog_op_alt with information about the constraints of
7691 this insn. */
7692 preprocess_constraints ();
7694 for (opno = 0; opno < recog_data.n_operands; opno++)
7696 /* Things we need to fix can only occur in inputs. */
7697 if (recog_data.operand_type[opno] != OP_IN)
7698 continue;
7700 /* If this alternative is a memory reference, then any mention
7701 of constants in this alternative is really to fool reload
7702 into allowing us to accept one there. We need to fix them up
7703 now so that we output the right code. */
7704 if (recog_op_alt[opno][which_alternative].memory_ok)
7706 rtx op = recog_data.operand[opno];
7708 if (CONSTANT_P (op))
7710 if (do_pushes)
7711 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7712 recog_data.operand_mode[opno], op);
7713 result = true;
7715 else if (GET_CODE (op) == MEM
7716 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7717 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7719 if (do_pushes)
7721 rtx cop = avoid_constant_pool_reference (op);
7723 /* Casting the address of something to a mode narrower
7724 than a word can cause avoid_constant_pool_reference()
7725 to return the pool reference itself. That's no good to
7726 us here. Let's just hope that we can use the
7727 constant pool value directly. */
7728 if (op == cop)
7729 cop = get_pool_constant (XEXP (op, 0));
7731 push_minipool_fix (insn, address,
7732 recog_data.operand_loc[opno],
7733 recog_data.operand_mode[opno], cop);
7736 result = true;
7741 return result;
7744 /* GCC puts the pool in the wrong place for ARM, since we can only
7745 load addresses a limited distance around the pc. We do some
7746 special munging to move the constant pool values to the correct
7747 point in the code. */
7748 static void
7749 arm_reorg (void)
7751 rtx insn;
7752 HOST_WIDE_INT address = 0;
7753 Mfix * fix;
7755 minipool_fix_head = minipool_fix_tail = NULL;
7757 /* The first insn must always be a note, or the code below won't
7758 scan it properly. */
7759 insn = get_insns ();
7760 gcc_assert (GET_CODE (insn) == NOTE);
7762 /* Scan all the insns and record the operands that will need fixing. */
7763 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7765 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7766 && (arm_cirrus_insn_p (insn)
7767 || GET_CODE (insn) == JUMP_INSN
7768 || arm_memory_load_p (insn)))
7769 cirrus_reorg (insn);
7771 if (GET_CODE (insn) == BARRIER)
7772 push_minipool_barrier (insn, address);
7773 else if (INSN_P (insn))
7775 rtx table;
7777 note_invalid_constants (insn, address, true);
7778 address += get_attr_length (insn);
7780 /* If the insn is a vector jump, add the size of the table
7781 and skip the table. */
7782 if ((table = is_jump_table (insn)) != NULL)
7784 address += get_jump_table_size (table);
7785 insn = table;
7790 fix = minipool_fix_head;
7792 /* Now scan the fixups and perform the required changes. */
7793 while (fix)
7795 Mfix * ftmp;
7796 Mfix * fdel;
7797 Mfix * last_added_fix;
7798 Mfix * last_barrier = NULL;
7799 Mfix * this_fix;
7801 /* Skip any further barriers before the next fix. */
7802 while (fix && GET_CODE (fix->insn) == BARRIER)
7803 fix = fix->next;
7805 /* No more fixes. */
7806 if (fix == NULL)
7807 break;
7809 last_added_fix = NULL;
7811 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7813 if (GET_CODE (ftmp->insn) == BARRIER)
7815 if (ftmp->address >= minipool_vector_head->max_address)
7816 break;
7818 last_barrier = ftmp;
7820 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7821 break;
7823 last_added_fix = ftmp; /* Keep track of the last fix added. */
7826 /* If we found a barrier, drop back to that; any fixes that we
7827 could have reached but come after the barrier will now go in
7828 the next mini-pool. */
7829 if (last_barrier != NULL)
7831 /* Reduce the refcount for those fixes that won't go into this
7832 pool after all. */
7833 for (fdel = last_barrier->next;
7834 fdel && fdel != ftmp;
7835 fdel = fdel->next)
7837 fdel->minipool->refcount--;
7838 fdel->minipool = NULL;
7841 ftmp = last_barrier;
7843 else
7845 /* ftmp is the first fix that we can't fit into this pool and
7846 there are no natural barriers that we could use. Insert a
7847 new barrier in the code somewhere between the previous
7848 fix and this one, and arrange to jump around it. */
7849 HOST_WIDE_INT max_address;
7851 /* The last item on the list of fixes must be a barrier, so
7852 we can never run off the end of the list of fixes without
7853 last_barrier being set. */
7854 gcc_assert (ftmp);
7856 max_address = minipool_vector_head->max_address;
7857 /* Check that there isn't another fix that is in range that
7858 we couldn't fit into this pool because the pool was
7859 already too large: we need to put the pool before such an
7860 instruction. */
7861 if (ftmp->address < max_address)
7862 max_address = ftmp->address;
7864 last_barrier = create_fix_barrier (last_added_fix, max_address);
7867 assign_minipool_offsets (last_barrier);
7869 while (ftmp)
7871 if (GET_CODE (ftmp->insn) != BARRIER
7872 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7873 == NULL))
7874 break;
7876 ftmp = ftmp->next;
7879 /* Scan over the fixes we have identified for this pool, fixing them
7880 up and adding the constants to the pool itself. */
7881 for (this_fix = fix; this_fix && ftmp != this_fix;
7882 this_fix = this_fix->next)
7883 if (GET_CODE (this_fix->insn) != BARRIER)
7885 rtx addr
7886 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7887 minipool_vector_label),
7888 this_fix->minipool->offset);
7889 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7892 dump_minipool (last_barrier->insn);
7893 fix = ftmp;
7896 /* From now on we must synthesize any constants that we can't handle
7897 directly. This can happen if the RTL gets split during final
7898 instruction generation. */
7899 after_arm_reorg = 1;
7901 /* Free the minipool memory. */
7902 obstack_free (&minipool_obstack, minipool_startobj);
7905 /* Routines to output assembly language. */
7907 /* If the rtx is the correct value then return the string of the number.
7908 In this way we can ensure that valid double constants are generated even
7909 when cross compiling. */
7910 const char *
7911 fp_immediate_constant (rtx x)
7913 REAL_VALUE_TYPE r;
7914 int i;
7916 if (!fp_consts_inited)
7917 init_fp_table ();
7919 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7920 for (i = 0; i < 8; i++)
7921 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7922 return strings_fp[i];
7924 gcc_unreachable ();
7927 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7928 static const char *
7929 fp_const_from_val (REAL_VALUE_TYPE *r)
7931 int i;
7933 if (!fp_consts_inited)
7934 init_fp_table ();
7936 for (i = 0; i < 8; i++)
7937 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7938 return strings_fp[i];
7940 gcc_unreachable ();
7943 /* Output the operands of a LDM/STM instruction to STREAM.
7944 MASK is the ARM register set mask of which only bits 0-15 are important.
7945 REG is the base register, either the frame pointer or the stack pointer;
7946 INSTR is the possibly suffixed load or store instruction. */
7948 static void
7949 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7950 unsigned long mask)
7952 unsigned i;
7953 bool not_first = FALSE;
7955 fputc ('\t', stream);
7956 asm_fprintf (stream, instr, reg);
7957 fputs (", {", stream);
7959 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7960 if (mask & (1 << i))
7962 if (not_first)
7963 fprintf (stream, ", ");
7965 asm_fprintf (stream, "%r", i);
7966 not_first = TRUE;
7969 fprintf (stream, "}\n");
7973 /* Output a FLDMX instruction to STREAM.
7974 BASE is the register containing the address.
7975 REG and COUNT specify the register range.
7976 Extra registers may be added to avoid hardware bugs. */
7978 static void
7979 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7981 int i;
7983 /* Workaround ARM10 VFPr1 bug. */
7984 if (count == 2 && !arm_arch6)
7986 if (reg == 15)
7987 reg--;
7988 count++;
7991 fputc ('\t', stream);
7992 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7994 for (i = reg; i < reg + count; i++)
7996 if (i > reg)
7997 fputs (", ", stream);
7998 asm_fprintf (stream, "d%d", i);
8000 fputs ("}\n", stream);
8005 /* Output the assembly for a store multiple. */
8007 const char *
8008 vfp_output_fstmx (rtx * operands)
8010 char pattern[100];
8011 int p;
8012 int base;
8013 int i;
8015 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8016 p = strlen (pattern);
8018 gcc_assert (GET_CODE (operands[1]) == REG);
8020 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8021 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8023 p += sprintf (&pattern[p], ", d%d", base + i);
8025 strcpy (&pattern[p], "}");
8027 output_asm_insn (pattern, operands);
8028 return "";
8032 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
8033 number of bytes pushed. */
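/* For example, pushing three register pairs (with no ARM10 workaround
   applied) returns 3 * 8 + 4 = 28: eight bytes per pair plus the extra
   word that the FSTMX format reserves.  */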
8035 static int
8036 vfp_emit_fstmx (int base_reg, int count)
8038 rtx par;
8039 rtx dwarf;
8040 rtx tmp, reg;
8041 int i;
8043 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8044 register pairs are stored by a store multiple insn. We avoid this
8045 by pushing an extra pair. */
8046 if (count == 2 && !arm_arch6)
8048 if (base_reg == LAST_VFP_REGNUM - 3)
8049 base_reg -= 2;
8050 count++;
8053 /* ??? The frame layout is implementation defined. We describe
8054 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8055 We really need some way of representing the whole block so that the
8056 unwinder can figure it out at runtime. */
8057 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8058 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8060 reg = gen_rtx_REG (DFmode, base_reg);
8061 base_reg += 2;
8063 XVECEXP (par, 0, 0)
8064 = gen_rtx_SET (VOIDmode,
8065 gen_rtx_MEM (BLKmode,
8066 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8067 gen_rtx_UNSPEC (BLKmode,
8068 gen_rtvec (1, reg),
8069 UNSPEC_PUSH_MULT));
8071 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8072 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8073 GEN_INT (-(count * 8 + 4))));
8074 RTX_FRAME_RELATED_P (tmp) = 1;
8075 XVECEXP (dwarf, 0, 0) = tmp;
8077 tmp = gen_rtx_SET (VOIDmode,
8078 gen_rtx_MEM (DFmode, stack_pointer_rtx),
8079 reg);
8080 RTX_FRAME_RELATED_P (tmp) = 1;
8081 XVECEXP (dwarf, 0, 1) = tmp;
8083 for (i = 1; i < count; i++)
8085 reg = gen_rtx_REG (DFmode, base_reg);
8086 base_reg += 2;
8087 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8089 tmp = gen_rtx_SET (VOIDmode,
8090 gen_rtx_MEM (DFmode,
8091 gen_rtx_PLUS (SImode,
8092 stack_pointer_rtx,
8093 GEN_INT (i * 8))),
8094 reg);
8095 RTX_FRAME_RELATED_P (tmp) = 1;
8096 XVECEXP (dwarf, 0, i + 1) = tmp;
8099 par = emit_insn (par);
8100 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8101 REG_NOTES (par));
8102 RTX_FRAME_RELATED_P (par) = 1;
8104 return count * 8 + 4;
8108 /* Output a 'call' insn. */
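/* A rough picture of the sequences produced below, for a call through r2
   on a pre-ARMv5 target: with interworking or ARMv4T this is

	mov	lr, pc
	bx	r2

   and on older cores the final branch becomes "mov pc, r2".  */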
8109 const char *
8110 output_call (rtx *operands)
8112 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8114 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8115 if (REGNO (operands[0]) == LR_REGNUM)
8117 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8118 output_asm_insn ("mov%?\t%0, %|lr", operands);
8121 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8123 if (TARGET_INTERWORK || arm_arch4t)
8124 output_asm_insn ("bx%?\t%0", operands);
8125 else
8126 output_asm_insn ("mov%?\t%|pc, %0", operands);
8128 return "";
8131 /* Output a 'call' insn that is a reference in memory. */
8132 const char *
8133 output_call_mem (rtx *operands)
8135 if (TARGET_INTERWORK && !arm_arch5)
8137 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8138 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8139 output_asm_insn ("bx%?\t%|ip", operands);
8141 else if (regno_use_in (LR_REGNUM, operands[0]))
8143 /* LR is used in the memory address. We load the address in the
8144 first instruction. It's safe to use IP as the target of the
8145 load since the call will kill it anyway. */
8146 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8147 if (arm_arch5)
8148 output_asm_insn ("blx%?\t%|ip", operands);
8149 else
8151 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8152 if (arm_arch4t)
8153 output_asm_insn ("bx%?\t%|ip", operands);
8154 else
8155 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8158 else
8160 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8161 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8164 return "";
8168 /* Output a move from arm registers to an fpa register.
8169 OPERANDS[0] is an fpa register.
8170 OPERANDS[1] is the first register of an arm register pair. */
8171 const char *
8172 output_mov_long_double_fpa_from_arm (rtx *operands)
8174 int arm_reg0 = REGNO (operands[1]);
8175 rtx ops[3];
8177 gcc_assert (arm_reg0 != IP_REGNUM);
8179 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8180 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8181 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8183 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8184 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8186 return "";
8189 /* Output a move from an fpa register to arm registers.
8190 OPERANDS[0] is the first register of an arm register pair.
8191 OPERANDS[1] is an fpa register. */
8192 const char *
8193 output_mov_long_double_arm_from_fpa (rtx *operands)
8195 int arm_reg0 = REGNO (operands[0]);
8196 rtx ops[3];
8198 gcc_assert (arm_reg0 != IP_REGNUM);
8200 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8201 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8202 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8204 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8205 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8206 return "";
8209 /* Output a move of a long double from arm registers to arm registers.
8210 OPERANDS[0] is the destination.
8211 OPERANDS[1] is the source. */
8212 const char *
8213 output_mov_long_double_arm_from_arm (rtx *operands)
8215 /* We have to be careful here because the two might overlap. */
8216 int dest_start = REGNO (operands[0]);
8217 int src_start = REGNO (operands[1]);
8218 rtx ops[2];
8219 int i;
8221 if (dest_start < src_start)
8223 for (i = 0; i < 3; i++)
8225 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8226 ops[1] = gen_rtx_REG (SImode, src_start + i);
8227 output_asm_insn ("mov%?\t%0, %1", ops);
8230 else
8232 for (i = 2; i >= 0; i--)
8234 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8235 ops[1] = gen_rtx_REG (SImode, src_start + i);
8236 output_asm_insn ("mov%?\t%0, %1", ops);
8240 return "";
8244 /* Output a move from arm registers to an fpa register.
8245 OPERANDS[0] is an fpa register.
8246 OPERANDS[1] is the first register of an arm register pair. */
8247 const char *
8248 output_mov_double_fpa_from_arm (rtx *operands)
8250 int arm_reg0 = REGNO (operands[1]);
8251 rtx ops[2];
8253 gcc_assert (arm_reg0 != IP_REGNUM);
8255 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8256 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8257 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8258 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8259 return "";
8262 /* Output a move from an fpa register to arm registers.
8263 OPERANDS[0] is the first register of an arm register pair.
8264 OPERANDS[1] is an fpa register. */
8265 const char *
8266 output_mov_double_arm_from_fpa (rtx *operands)
8268 int arm_reg0 = REGNO (operands[0]);
8269 rtx ops[2];
8271 gcc_assert (arm_reg0 != IP_REGNUM);
8273 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8274 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8275 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8276 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8277 return "";
8280 /* Output a move between double words.
8281 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8282 or MEM<-REG and all MEMs must be offsettable addresses. */
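/* Two illustrative cases of what the REG <- MEM paths below emit for a
   DImode load into r0 (with r1 implicitly holding the second word):

	(mem:DI (reg:SI r2))                ->  ldmia	r2, {r0-r1}
	(mem:DI (post_inc:SI (reg:SI r2)))  ->  ldmia	r2!, {r0-r1}

   Targets with LDRD use ldrd/strd for the pre/post-indexed forms.  */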
8283 const char *
8284 output_move_double (rtx *operands)
8286 enum rtx_code code0 = GET_CODE (operands[0]);
8287 enum rtx_code code1 = GET_CODE (operands[1]);
8288 rtx otherops[3];
8290 if (code0 == REG)
8292 int reg0 = REGNO (operands[0]);
8294 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8296 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8298 switch (GET_CODE (XEXP (operands[1], 0)))
8300 case REG:
8301 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8302 break;
8304 case PRE_INC:
8305 gcc_assert (TARGET_LDRD);
8306 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8307 break;
8309 case PRE_DEC:
8310 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8311 break;
8313 case POST_INC:
8314 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8315 break;
8317 case POST_DEC:
8318 gcc_assert (TARGET_LDRD);
8319 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8320 break;
8322 case PRE_MODIFY:
8323 case POST_MODIFY:
8324 otherops[0] = operands[0];
8325 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8326 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8328 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8330 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8332 /* Registers overlap so split out the increment. */
8333 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8334 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8336 else
8337 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8339 else
8341 /* We only allow constant increments, so this is safe. */
8342 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8344 break;
8346 case LABEL_REF:
8347 case CONST:
8348 output_asm_insn ("adr%?\t%0, %1", operands);
8349 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8350 break;
8352 default:
8353 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8354 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8356 otherops[0] = operands[0];
8357 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8358 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8360 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8362 if (GET_CODE (otherops[2]) == CONST_INT)
8364 switch ((int) INTVAL (otherops[2]))
8366 case -8:
8367 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8368 return "";
8369 case -4:
8370 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8371 return "";
8372 case 4:
8373 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8374 return "";
8377 if (TARGET_LDRD
8378 && (GET_CODE (otherops[2]) == REG
8379 || (GET_CODE (otherops[2]) == CONST_INT
8380 && INTVAL (otherops[2]) > -256
8381 && INTVAL (otherops[2]) < 256)))
8383 if (reg_overlap_mentioned_p (otherops[0],
8384 otherops[2]))
8386 /* Swap base and index registers over to
8387 avoid a conflict. */
8388 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8389 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8392 /* If both registers conflict, it will usually
8393 have been fixed by a splitter. */
8394 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8396 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8397 output_asm_insn ("ldr%?d\t%0, [%1]",
8398 otherops);
8400 else
8401 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8402 return "";
8405 if (GET_CODE (otherops[2]) == CONST_INT)
8407 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8408 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8409 else
8410 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8412 else
8413 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8415 else
8416 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8418 return "ldm%?ia\t%0, %M0";
8420 else
8422 otherops[1] = adjust_address (operands[1], SImode, 4);
8423 /* Take care of overlapping base/data reg. */
8424 if (reg_mentioned_p (operands[0], operands[1]))
8426 output_asm_insn ("ldr%?\t%0, %1", otherops);
8427 output_asm_insn ("ldr%?\t%0, %1", operands);
8429 else
8431 output_asm_insn ("ldr%?\t%0, %1", operands);
8432 output_asm_insn ("ldr%?\t%0, %1", otherops);
8437 else
8439 /* Constraints should ensure this. */
8440 gcc_assert (code0 == MEM && code1 == REG);
8441 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8443 switch (GET_CODE (XEXP (operands[0], 0)))
8445 case REG:
8446 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8447 break;
8449 case PRE_INC:
8450 gcc_assert (TARGET_LDRD);
8451 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8452 break;
8454 case PRE_DEC:
8455 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8456 break;
8458 case POST_INC:
8459 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8460 break;
8462 case POST_DEC:
8463 gcc_assert (TARGET_LDRD);
8464 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8465 break;
8467 case PRE_MODIFY:
8468 case POST_MODIFY:
8469 otherops[0] = operands[1];
8470 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8471 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8473 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8474 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8475 else
8476 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8477 break;
8479 case PLUS:
8480 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8481 if (GET_CODE (otherops[2]) == CONST_INT)
8483 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8485 case -8:
8486 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8487 return "";
8489 case -4:
8490 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8491 return "";
8493 case 4:
8494 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8495 return "";
8498 if (TARGET_LDRD
8499 && (GET_CODE (otherops[2]) == REG
8500 || (GET_CODE (otherops[2]) == CONST_INT
8501 && INTVAL (otherops[2]) > -256
8502 && INTVAL (otherops[2]) < 256)))
8504 otherops[0] = operands[1];
8505 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8506 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8507 return "";
8509 /* Fall through */
8511 default:
8512 otherops[0] = adjust_address (operands[0], SImode, 4);
8513 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8514 output_asm_insn ("str%?\t%1, %0", operands);
8515 output_asm_insn ("str%?\t%1, %0", otherops);
8519 return "";
8522 /* Output an ADD r, s, #n where n may be too big for one instruction.
8523 If adding zero to one register, output nothing. */
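/* For instance, adding 0x1234 (not a valid ARM immediate) to r1 with r0
   as the destination would be split along the lines of

	add	r0, r1, #0x234
	add	r0, r0, #0x1000

   i.e. into 8-bit chunks that each fit the rotated-immediate encoding
   (the operand printer actually emits the constants in decimal).  */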
8524 const char *
8525 output_add_immediate (rtx *operands)
8527 HOST_WIDE_INT n = INTVAL (operands[2]);
8529 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8531 if (n < 0)
8532 output_multi_immediate (operands,
8533 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8534 -n);
8535 else
8536 output_multi_immediate (operands,
8537 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8541 return "";
8544 /* Output a multiple immediate operation.
8545 OPERANDS is the vector of operands referred to in the output patterns.
8546 INSTR1 is the output pattern to use for the first constant.
8547 INSTR2 is the output pattern to use for subsequent constants.
8548 IMMED_OP is the index of the constant slot in OPERANDS.
8549 N is the constant value. */
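/* A note on the loop below: it scans the constant two bits at a time and,
   on finding a non-zero pair, peels off the 8-bit field that starts at
   that even bit position, so any 32-bit value is handled in at most four
   instructions.  */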
8550 static const char *
8551 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8552 int immed_op, HOST_WIDE_INT n)
8554 #if HOST_BITS_PER_WIDE_INT > 32
8555 n &= 0xffffffff;
8556 #endif
8558 if (n == 0)
8560 /* Quick and easy output. */
8561 operands[immed_op] = const0_rtx;
8562 output_asm_insn (instr1, operands);
8564 else
8566 int i;
8567 const char * instr = instr1;
8569 /* Note that n is never zero here (which would give no output). */
8570 for (i = 0; i < 32; i += 2)
8572 if (n & (3 << i))
8574 operands[immed_op] = GEN_INT (n & (255 << i));
8575 output_asm_insn (instr, operands);
8576 instr = instr2;
8577 i += 6;
8582 return "";
8585 /* Return the appropriate ARM instruction for the operation code.
8586 The returned result should not be overwritten. OP is the rtx of the
8587 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8588 was shifted. */
8589 const char *
8590 arithmetic_instr (rtx op, int shift_first_arg)
8592 switch (GET_CODE (op))
8594 case PLUS:
8595 return "add";
8597 case MINUS:
8598 return shift_first_arg ? "rsb" : "sub";
8600 case IOR:
8601 return "orr";
8603 case XOR:
8604 return "eor";
8606 case AND:
8607 return "and";
8609 default:
8610 gcc_unreachable ();
8614 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8615 for the operation code. The returned result should not be overwritten.
8616 OP is the rtx code of the shift.
8617 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8618 constant amount of the shift otherwise. */
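/* By way of example, (ashiftrt x (const_int 3)) yields "asr" with
   *AMOUNTP set to 3, while (mult x (const_int 8)) is converted below
   into an "asl" by int_log2 (8) == 3.  */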
8619 static const char *
8620 shift_op (rtx op, HOST_WIDE_INT *amountp)
8622 const char * mnem;
8623 enum rtx_code code = GET_CODE (op);
8625 switch (GET_CODE (XEXP (op, 1)))
8627 case REG:
8628 case SUBREG:
8629 *amountp = -1;
8630 break;
8632 case CONST_INT:
8633 *amountp = INTVAL (XEXP (op, 1));
8634 break;
8636 default:
8637 gcc_unreachable ();
8640 switch (code)
8642 case ASHIFT:
8643 mnem = "asl";
8644 break;
8646 case ASHIFTRT:
8647 mnem = "asr";
8648 break;
8650 case LSHIFTRT:
8651 mnem = "lsr";
8652 break;
8654 case ROTATE:
8655 gcc_assert (*amountp != -1);
8656 *amountp = 32 - *amountp;
8658 /* Fall through. */
8660 case ROTATERT:
8661 mnem = "ror";
8662 break;
8664 case MULT:
8665 /* We never have to worry about the amount being other than a
8666 power of 2, since this case can never be reloaded from a reg. */
8667 gcc_assert (*amountp != -1);
8668 *amountp = int_log2 (*amountp);
8669 return "asl";
8671 default:
8672 gcc_unreachable ();
8675 if (*amountp != -1)
8677 /* This is not 100% correct, but follows from the desire to merge
8678 multiplication by a power of 2 with the recognizer for a
8679 shift. >=32 is not a valid shift for "asl", so we must try and
8680 output a shift that produces the correct arithmetical result.
8681 Using lsr #32 is identical except for the fact that the carry bit
8682 is not set correctly if we set the flags; but we never use the
8683 carry bit from such an operation, so we can ignore that. */
8684 if (code == ROTATERT)
8685 /* Rotate is just modulo 32. */
8686 *amountp &= 31;
8687 else if (*amountp != (*amountp & 31))
8689 if (code == ASHIFT)
8690 mnem = "lsr";
8691 *amountp = 32;
8694 /* Shifts of 0 are no-ops. */
8695 if (*amountp == 0)
8696 return NULL;
8699 return mnem;
8702 /* Obtain the shift count from the power of two POWER. */
8704 static HOST_WIDE_INT
8705 int_log2 (HOST_WIDE_INT power)
8707 HOST_WIDE_INT shift = 0;
8709 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8711 gcc_assert (shift <= 31);
8712 shift++;
8715 return shift;
8718 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8719 because /bin/as is horribly restrictive. The judgement about
8720 whether or not each character is 'printable' (and can be output as
8721 is) or not (and must be printed with an octal escape) must be made
8722 with reference to the *host* character set -- the situation is
8723 similar to that discussed in the comments above pp_c_char in
8724 c-pretty-print.c. */
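/* A small example of the output: the three input bytes 'h', '"', '\n'
   would be emitted as

	.ascii	"h\"\012"

   with the directive restarted on a new line once MAX_ASCII_LEN
   characters have been written.  */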
8726 #define MAX_ASCII_LEN 51
8728 void
8729 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8731 int i;
8732 int len_so_far = 0;
8734 fputs ("\t.ascii\t\"", stream);
8736 for (i = 0; i < len; i++)
8738 int c = p[i];
8740 if (len_so_far >= MAX_ASCII_LEN)
8742 fputs ("\"\n\t.ascii\t\"", stream);
8743 len_so_far = 0;
8746 if (ISPRINT (c))
8748 if (c == '\\' || c == '\"')
8750 putc ('\\', stream);
8751 len_so_far++;
8753 putc (c, stream);
8754 len_so_far++;
8756 else
8758 fprintf (stream, "\\%03o", c);
8759 len_so_far += 4;
8763 fputs ("\"\n", stream);
8766 /* Compute the register save mask for registers 0 through 12
8767 inclusive. This code is used by arm_compute_save_reg_mask. */
8769 static unsigned long
8770 arm_compute_save_reg0_reg12_mask (void)
8772 unsigned long func_type = arm_current_func_type ();
8773 unsigned long save_reg_mask = 0;
8774 unsigned int reg;
8776 if (IS_INTERRUPT (func_type))
8778 unsigned int max_reg;
8779 /* Interrupt functions must not corrupt any registers,
8780 even call clobbered ones. If this is a leaf function
8781 we can just examine the registers used by the RTL, but
8782 otherwise we have to assume that whatever function is
8783 called might clobber anything, and so we have to save
8784 all the call-clobbered registers as well. */
8785 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8786 /* FIQ handlers have registers r8 - r12 banked, so
8787 we only need to check r0 - r7. Normal ISRs only
8788 bank r14 and r15, so we must check up to r12.
8789 r13 is the stack pointer which is always preserved,
8790 so we do not need to consider it here. */
8791 max_reg = 7;
8792 else
8793 max_reg = 12;
8795 for (reg = 0; reg <= max_reg; reg++)
8796 if (regs_ever_live[reg]
8797 || (! current_function_is_leaf && call_used_regs [reg]))
8798 save_reg_mask |= (1 << reg);
8800 /* Also save the pic base register if necessary. */
8801 if (flag_pic
8802 && !TARGET_SINGLE_PIC_BASE
8803 && current_function_uses_pic_offset_table)
8804 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8806 else
8808 /* In the normal case we only need to save those registers
8809 which are call saved and which are used by this function. */
8810 for (reg = 0; reg <= 10; reg++)
8811 if (regs_ever_live[reg] && ! call_used_regs [reg])
8812 save_reg_mask |= (1 << reg);
8814 /* Handle the frame pointer as a special case. */
8815 if (! TARGET_APCS_FRAME
8816 && ! frame_pointer_needed
8817 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8818 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8819 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8821 /* If we aren't loading the PIC register,
8822 don't stack it even though it may be live. */
8823 if (flag_pic
8824 && !TARGET_SINGLE_PIC_BASE
8825 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8826 || current_function_uses_pic_offset_table))
8827 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8830 /* Save registers so the exception handler can modify them. */
8831 if (current_function_calls_eh_return)
8833 unsigned int i;
8835 for (i = 0; ; i++)
8837 reg = EH_RETURN_DATA_REGNO (i);
8838 if (reg == INVALID_REGNUM)
8839 break;
8840 save_reg_mask |= 1 << reg;
8844 return save_reg_mask;
8847 /* Compute a bit mask of which registers need to be
8848 saved on the stack for the current function. */
8850 static unsigned long
8851 arm_compute_save_reg_mask (void)
8853 unsigned int save_reg_mask = 0;
8854 unsigned long func_type = arm_current_func_type ();
8856 if (IS_NAKED (func_type))
8857 /* This should never really happen. */
8858 return 0;
8860 /* If we are creating a stack frame, then we must save the frame pointer,
8861 IP (which will hold the old stack pointer), LR and the PC. */
8862 if (frame_pointer_needed)
8863 save_reg_mask |=
8864 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8865 | (1 << IP_REGNUM)
8866 | (1 << LR_REGNUM)
8867 | (1 << PC_REGNUM);
8869 /* Volatile functions do not return, so there
8870 is no need to save any other registers. */
8871 if (IS_VOLATILE (func_type))
8872 return save_reg_mask;
8874 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8876 /* Decide if we need to save the link register.
8877 Interrupt routines have their own banked link register,
8878 so they never need to save it.
8879 Otherwise if we do not use the link register we do not need to save
8880 it. If we are pushing other registers onto the stack however, we
8881 can save an instruction in the epilogue by pushing the link register
8882 now and then popping it back into the PC. This incurs extra memory
8883 accesses though, so we only do it when optimizing for size, and only
8884 if we know that we will not need a fancy return sequence. */
8885 if (regs_ever_live [LR_REGNUM]
8886 || (save_reg_mask
8887 && optimize_size
8888 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8889 && !current_function_calls_eh_return))
8890 save_reg_mask |= 1 << LR_REGNUM;
8892 if (cfun->machine->lr_save_eliminated)
8893 save_reg_mask &= ~ (1 << LR_REGNUM);
8895 if (TARGET_REALLY_IWMMXT
8896 && ((bit_count (save_reg_mask)
8897 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8899 unsigned int reg;
8901 /* The total number of registers that are going to be pushed
8902 onto the stack is odd. We need to ensure that the stack
8903 is 64-bit aligned before we start to save iWMMXt registers,
8904 and also before we start to create locals. (A local variable
8905 might be a double or long long which we will load/store using
8906 an iWMMXt instruction). Therefore we need to push another
8907 ARM register, so that the stack will be 64-bit aligned. We
8908 try to avoid using the arg registers (r0 - r3) as they might be
8909 used to pass values in a tail call. */
8910 for (reg = 4; reg <= 12; reg++)
8911 if ((save_reg_mask & (1 << reg)) == 0)
8912 break;
8914 if (reg <= 12)
8915 save_reg_mask |= (1 << reg);
8916 else
8918 cfun->machine->sibcall_blocked = 1;
8919 save_reg_mask |= (1 << 3);
8923 return save_reg_mask;
8927 /* Compute a bit mask of which registers need to be
8928 saved on the stack for the current function. */
8929 static unsigned long
8930 thumb_compute_save_reg_mask (void)
8932 unsigned long mask;
8933 unsigned reg;
8935 mask = 0;
8936 for (reg = 0; reg < 12; reg ++)
8937 if (regs_ever_live[reg] && !call_used_regs[reg])
8938 mask |= 1 << reg;
8940 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8941 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8943 if (TARGET_SINGLE_PIC_BASE)
8944 mask &= ~(1 << arm_pic_register);
8946 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8947 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8948 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8950 /* LR will also be pushed if any lo regs are pushed. */
8951 if (mask & 0xff || thumb_force_lr_save ())
8952 mask |= (1 << LR_REGNUM);
8954 /* Make sure we have a low work register if we need one.
8955 We will need one if we are going to push a high register,
8956 but we are not currently intending to push a low register. */
8957 if ((mask & 0xff) == 0
8958 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8960 /* Use thumb_find_work_register to choose which register
8961 we will use. If the register is live then we will
8962 have to push it. Use LAST_LO_REGNUM as our fallback
8963 choice for the register to select. */
8964 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8966 if (! call_used_regs[reg])
8967 mask |= 1 << reg;
8970 return mask;
8974 /* Return the number of bytes required to save VFP registers. */
8975 static int
8976 arm_get_vfp_saved_size (void)
8978 unsigned int regno;
8979 int count;
8980 int saved;
8982 saved = 0;
8983 /* Space for saved VFP registers. */
8984 if (TARGET_HARD_FLOAT && TARGET_VFP)
8986 count = 0;
8987 for (regno = FIRST_VFP_REGNUM;
8988 regno < LAST_VFP_REGNUM;
8989 regno += 2)
8991 if ((!regs_ever_live[regno] || call_used_regs[regno])
8992 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8994 if (count > 0)
8996 /* Workaround ARM10 VFPr1 bug. */
8997 if (count == 2 && !arm_arch6)
8998 count++;
8999 saved += count * 8 + 4;
9001 count = 0;
9003 else
9004 count++;
9006 if (count > 0)
9008 if (count == 2 && !arm_arch6)
9009 count++;
9010 saved += count * 8 + 4;
9013 return saved;
9017 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9018 everything bar the final return instruction. */
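/* Typical output for a simple ARM function that saved {r4, r5, lr} and
   has no special exit requirements is the single instruction

	ldmfd	sp!, {r4, r5, pc}

   with the saved lr popped straight into pc; interworking, interrupt and
   exception returns take the longer paths below.  */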
9019 const char *
9020 output_return_instruction (rtx operand, int really_return, int reverse)
9022 char conditional[10];
9023 char instr[100];
9024 unsigned reg;
9025 unsigned long live_regs_mask;
9026 unsigned long func_type;
9027 arm_stack_offsets *offsets;
9029 func_type = arm_current_func_type ();
9031 if (IS_NAKED (func_type))
9032 return "";
9034 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9036 /* If this function was declared non-returning, and we have
9037 found a tail call, then we have to trust that the called
9038 function won't return. */
9039 if (really_return)
9041 rtx ops[2];
9043 /* Otherwise, trap an attempted return by aborting. */
9044 ops[0] = operand;
9045 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9046 : "abort");
9047 assemble_external_libcall (ops[1]);
9048 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9051 return "";
9054 gcc_assert (!current_function_calls_alloca || really_return);
9056 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9058 return_used_this_function = 1;
9060 live_regs_mask = arm_compute_save_reg_mask ();
9062 if (live_regs_mask)
9064 const char * return_reg;
9066 /* If we do not have any special requirements for function exit
9067 (e.g. interworking, or ISR) then we can load the return address
9068 directly into the PC. Otherwise we must load it into LR. */
9069 if (really_return
9070 && ! TARGET_INTERWORK)
9071 return_reg = reg_names[PC_REGNUM];
9072 else
9073 return_reg = reg_names[LR_REGNUM];
9075 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9077 /* There are three possible reasons for the IP register
9078 being saved. 1) a stack frame was created, in which case
9079 IP contains the old stack pointer, or 2) an ISR routine
9080 corrupted it, or 3) it was saved to align the stack on
9081 iWMMXt. In case 1, restore IP into SP, otherwise just
9082 restore IP. */
9083 if (frame_pointer_needed)
9085 live_regs_mask &= ~ (1 << IP_REGNUM);
9086 live_regs_mask |= (1 << SP_REGNUM);
9088 else
9089 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9092 /* On some ARM architectures it is faster to use LDR rather than
9093 LDM to load a single register. On other architectures, the
9094 cost is the same. In 26 bit mode, or for exception handlers,
9095 we have to use LDM to load the PC so that the CPSR is also
9096 restored. */
9097 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9098 if (live_regs_mask == (1U << reg))
9099 break;
9101 if (reg <= LAST_ARM_REGNUM
9102 && (reg != LR_REGNUM
9103 || ! really_return
9104 || ! IS_INTERRUPT (func_type)))
9106 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9107 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9109 else
9111 char *p;
9112 int first = 1;
9114 /* Generate the load multiple instruction to restore the
9115 registers. Note we can get here, even if
9116 frame_pointer_needed is true, but only if sp already
9117 points to the base of the saved core registers. */
9118 if (live_regs_mask & (1 << SP_REGNUM))
9120 unsigned HOST_WIDE_INT stack_adjust;
9122 offsets = arm_get_frame_offsets ();
9123 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9124 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9126 if (stack_adjust && arm_arch5)
9127 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9128 else
9130 /* If we can't use ldmib (SA110 bug),
9131 then try to pop r3 instead. */
9132 if (stack_adjust)
9133 live_regs_mask |= 1 << 3;
9134 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9137 else
9138 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9140 p = instr + strlen (instr);
9142 for (reg = 0; reg <= SP_REGNUM; reg++)
9143 if (live_regs_mask & (1 << reg))
9145 int l = strlen (reg_names[reg]);
9147 if (first)
9148 first = 0;
9149 else
9151 memcpy (p, ", ", 2);
9152 p += 2;
9155 memcpy (p, "%|", 2);
9156 memcpy (p + 2, reg_names[reg], l);
9157 p += l + 2;
9160 if (live_regs_mask & (1 << LR_REGNUM))
9162 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9163 /* If returning from an interrupt, restore the CPSR. */
9164 if (IS_INTERRUPT (func_type))
9165 strcat (p, "^");
9167 else
9168 strcpy (p, "}");
9171 output_asm_insn (instr, & operand);
9173 /* See if we need to generate an extra instruction to
9174 perform the actual function return. */
9175 if (really_return
9176 && func_type != ARM_FT_INTERWORKED
9177 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9179 /* The return has already been handled
9180 by loading the LR into the PC. */
9181 really_return = 0;
9185 if (really_return)
9187 switch ((int) ARM_FUNC_TYPE (func_type))
9189 case ARM_FT_ISR:
9190 case ARM_FT_FIQ:
9191 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9192 break;
9194 case ARM_FT_INTERWORKED:
9195 sprintf (instr, "bx%s\t%%|lr", conditional);
9196 break;
9198 case ARM_FT_EXCEPTION:
9199 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9200 break;
9202 default:
9203 /* Use bx if it's available. */
9204 if (arm_arch5 || arm_arch4t)
9205 sprintf (instr, "bx%s\t%%|lr", conditional);
9206 else
9207 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9208 break;
9211 output_asm_insn (instr, & operand);
9214 return "";
9217 /* Write the function name into the code section, directly preceding
9218 the function prologue.
9220 Code will be output similar to this:
9222 .ascii "arm_poke_function_name", 0
9223 .align
9225 .word 0xff000000 + (t1 - t0)
9226 arm_poke_function_name
9227 mov ip, sp
9228 stmfd sp!, {fp, ip, lr, pc}
9229 sub fp, ip, #4
9231 When performing a stack backtrace, code can inspect the value
9232 of 'pc' stored at 'fp' + 0. If the trace function then looks
9233 at location pc - 12 and the top 8 bits are set, then we know
9234 that there is a function name embedded immediately preceding this
9235 location, whose length is ((pc[-3]) & ~0xff000000).
9237 We assume that pc is declared as a pointer to an unsigned long.
9239 It is of no benefit to output the function name if we are assembling
9240 a leaf function. These function types will not contain a stack
9241 backtrace structure, therefore it is not possible to determine the
9242 function name. */
9243 void
9244 arm_poke_function_name (FILE *stream, const char *name)
9246 unsigned long alignlength;
9247 unsigned long length;
9248 rtx x;
9250 length = strlen (name) + 1;
9251 alignlength = ROUND_UP_WORD (length);
9253 ASM_OUTPUT_ASCII (stream, name, length);
9254 ASM_OUTPUT_ALIGN (stream, 2);
9255 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9256 assemble_aligned_integer (UNITS_PER_WORD, x);
9259 /* Place some comments into the assembler stream
9260 describing the current function. */
9261 static void
9262 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9264 unsigned long func_type;
9266 if (!TARGET_ARM)
9268 thumb_output_function_prologue (f, frame_size);
9269 return;
9272 /* Sanity check. */
9273 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9275 func_type = arm_current_func_type ();
9277 switch ((int) ARM_FUNC_TYPE (func_type))
9279 default:
9280 case ARM_FT_NORMAL:
9281 break;
9282 case ARM_FT_INTERWORKED:
9283 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9284 break;
9285 case ARM_FT_ISR:
9286 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9287 break;
9288 case ARM_FT_FIQ:
9289 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9290 break;
9291 case ARM_FT_EXCEPTION:
9292 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9293 break;
9296 if (IS_NAKED (func_type))
9297 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9299 if (IS_VOLATILE (func_type))
9300 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9302 if (IS_NESTED (func_type))
9303 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9305 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9306 current_function_args_size,
9307 current_function_pretend_args_size, frame_size);
9309 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9310 frame_pointer_needed,
9311 cfun->machine->uses_anonymous_args);
9313 if (cfun->machine->lr_save_eliminated)
9314 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9316 if (current_function_calls_eh_return)
9317 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9319 #ifdef AOF_ASSEMBLER
9320 if (flag_pic)
9321 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9322 #endif
9324 return_used_this_function = 0;
9327 const char *
9328 arm_output_epilogue (rtx sibling)
9330 int reg;
9331 unsigned long saved_regs_mask;
9332 unsigned long func_type;
9333 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9334 frame that is $fp + 4 for a non-variadic function. */
9335 int floats_offset = 0;
9336 rtx operands[3];
9337 FILE * f = asm_out_file;
9338 unsigned int lrm_count = 0;
9339 int really_return = (sibling == NULL);
9340 int start_reg;
9341 arm_stack_offsets *offsets;
9343 /* If we have already generated the return instruction
9344 then it is futile to generate anything else. */
9345 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9346 return "";
9348 func_type = arm_current_func_type ();
9350 if (IS_NAKED (func_type))
9351 /* Naked functions don't have epilogues. */
9352 return "";
9354 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9356 rtx op;
9358 /* A volatile function should never return. Call abort. */
9359 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9360 assemble_external_libcall (op);
9361 output_asm_insn ("bl\t%a0", &op);
9363 return "";
9366 /* If we are throwing an exception, then we really must be doing a
9367 return, so we can't tail-call. */
9368 gcc_assert (!current_function_calls_eh_return || really_return);
9370 offsets = arm_get_frame_offsets ();
9371 saved_regs_mask = arm_compute_save_reg_mask ();
9373 if (TARGET_IWMMXT)
9374 lrm_count = bit_count (saved_regs_mask);
9376 floats_offset = offsets->saved_args;
9377 /* Compute how far away the floats will be. */
9378 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9379 if (saved_regs_mask & (1 << reg))
9380 floats_offset += 4;
9382 if (frame_pointer_needed)
9384 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9385 int vfp_offset = offsets->frame;
9387 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9389 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9390 if (regs_ever_live[reg] && !call_used_regs[reg])
9392 floats_offset += 12;
9393 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9394 reg, FP_REGNUM, floats_offset - vfp_offset);
9397 else
9399 start_reg = LAST_FPA_REGNUM;
9401 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9403 if (regs_ever_live[reg] && !call_used_regs[reg])
9405 floats_offset += 12;
9407 /* We can't unstack more than four registers at once. */
9408 if (start_reg - reg == 3)
9410 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9411 reg, FP_REGNUM, floats_offset - vfp_offset);
9412 start_reg = reg - 1;
9415 else
9417 if (reg != start_reg)
9418 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9419 reg + 1, start_reg - reg,
9420 FP_REGNUM, floats_offset - vfp_offset);
9421 start_reg = reg - 1;
9425 /* Just in case the last register checked also needs unstacking. */
9426 if (reg != start_reg)
9427 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9428 reg + 1, start_reg - reg,
9429 FP_REGNUM, floats_offset - vfp_offset);
9432 if (TARGET_HARD_FLOAT && TARGET_VFP)
9434 int saved_size;
9436 /* The fldmx insn does not have base+offset addressing modes,
9437 so we use IP to hold the address. */
9438 saved_size = arm_get_vfp_saved_size ();
9440 if (saved_size > 0)
9442 floats_offset += saved_size;
9443 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9444 FP_REGNUM, floats_offset - vfp_offset);
9446 start_reg = FIRST_VFP_REGNUM;
9447 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9449 if ((!regs_ever_live[reg] || call_used_regs[reg])
9450 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9452 if (start_reg != reg)
9453 arm_output_fldmx (f, IP_REGNUM,
9454 (start_reg - FIRST_VFP_REGNUM) / 2,
9455 (reg - start_reg) / 2);
9456 start_reg = reg + 2;
9459 if (start_reg != reg)
9460 arm_output_fldmx (f, IP_REGNUM,
9461 (start_reg - FIRST_VFP_REGNUM) / 2,
9462 (reg - start_reg) / 2);
9465 if (TARGET_IWMMXT)
9467 /* The frame pointer is guaranteed to be non-double-word aligned.
9468 This is because it is set to (old_stack_pointer - 4) and the
9469 old_stack_pointer was double word aligned. Thus the offset to
9470 the iWMMXt registers to be loaded must also be non-double-word
9471 sized, so that the resultant address *is* double-word aligned.
9472 We can ignore floats_offset since that was already included in
9473 the live_regs_mask. */
9474 lrm_count += (lrm_count % 2 ? 2 : 1);
9476 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9477 if (regs_ever_live[reg] && !call_used_regs[reg])
9479 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9480 reg, FP_REGNUM, lrm_count * 4);
9481 lrm_count += 2;
9485 /* saved_regs_mask should contain the IP, which at the time of stack
9486 frame generation actually contains the old stack pointer. So a
9487 quick way to unwind the stack is just pop the IP register directly
9488 into the stack pointer. */
9489 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9490 saved_regs_mask &= ~ (1 << IP_REGNUM);
9491 saved_regs_mask |= (1 << SP_REGNUM);
9493 /* There are two registers left in saved_regs_mask - LR and PC. We
9494 only need to restore the LR register (the return address), but to
9495 save time we can load it directly into the PC, unless we need a
9496 special function exit sequence, or we are not really returning. */
9497 if (really_return
9498 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9499 && !current_function_calls_eh_return)
9500 /* Delete the LR from the register mask, so that the LR on
9501 the stack is loaded into the PC in the register mask. */
9502 saved_regs_mask &= ~ (1 << LR_REGNUM);
9503 else
9504 saved_regs_mask &= ~ (1 << PC_REGNUM);
9506 /* We must use SP as the base register, because SP is one of the
9507 registers being restored. If an interrupt or page fault
9508 happens in the ldm instruction, the SP might or might not
9509 have been restored. That would be bad, as then SP will no
9510 longer indicate the safe area of stack, and we can get stack
9511 corruption. Using SP as the base register means that it will
9512 be reset correctly to the original value, should an interrupt
9513 occur. If the stack pointer already points at the right
9514 place, then omit the subtraction. */
9515 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9516 || current_function_calls_alloca)
9517 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9518 4 * bit_count (saved_regs_mask));
9519 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9521 if (IS_INTERRUPT (func_type))
9522 /* Interrupt handlers will have pushed the
9523 IP onto the stack, so restore it now. */
9524 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9526 else
9528 /* Restore stack pointer if necessary. */
9529 if (offsets->outgoing_args != offsets->saved_regs)
9531 operands[0] = operands[1] = stack_pointer_rtx;
9532 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9533 output_add_immediate (operands);
9536 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9538 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9539 if (regs_ever_live[reg] && !call_used_regs[reg])
9540 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9541 reg, SP_REGNUM);
9543 else
9545 start_reg = FIRST_FPA_REGNUM;
9547 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9549 if (regs_ever_live[reg] && !call_used_regs[reg])
9551 if (reg - start_reg == 3)
9553 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9554 start_reg, SP_REGNUM);
9555 start_reg = reg + 1;
9558 else
9560 if (reg != start_reg)
9561 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9562 start_reg, reg - start_reg,
9563 SP_REGNUM);
9565 start_reg = reg + 1;
9569 /* Just in case the last register checked also needs unstacking. */
9570 if (reg != start_reg)
9571 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9572 start_reg, reg - start_reg, SP_REGNUM);
9575 if (TARGET_HARD_FLOAT && TARGET_VFP)
9577 start_reg = FIRST_VFP_REGNUM;
9578 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9580 if ((!regs_ever_live[reg] || call_used_regs[reg])
9581 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9583 if (start_reg != reg)
9584 arm_output_fldmx (f, SP_REGNUM,
9585 (start_reg - FIRST_VFP_REGNUM) / 2,
9586 (reg - start_reg) / 2);
9587 start_reg = reg + 2;
9590 if (start_reg != reg)
9591 arm_output_fldmx (f, SP_REGNUM,
9592 (start_reg - FIRST_VFP_REGNUM) / 2,
9593 (reg - start_reg) / 2);
9595 if (TARGET_IWMMXT)
9596 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9597 if (regs_ever_live[reg] && !call_used_regs[reg])
9598 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9600 /* If we can, restore the LR into the PC. */
9601 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9602 && really_return
9603 && current_function_pretend_args_size == 0
9604 && saved_regs_mask & (1 << LR_REGNUM)
9605 && !current_function_calls_eh_return)
9607 saved_regs_mask &= ~ (1 << LR_REGNUM);
9608 saved_regs_mask |= (1 << PC_REGNUM);
9611 /* Load the registers off the stack. If we only have one register
9612 to load use the LDR instruction - it is faster. */
9613 if (saved_regs_mask == (1 << LR_REGNUM))
9615 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9617 else if (saved_regs_mask)
9619 if (saved_regs_mask & (1 << SP_REGNUM))
9620 /* Note - write back to the stack register is not enabled
9621 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9622 in the list of registers and if we add writeback the
9623 instruction becomes UNPREDICTABLE. */
9624 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9625 else
9626 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9629 if (current_function_pretend_args_size)
9631 /* Unwind the pre-pushed regs. */
9632 operands[0] = operands[1] = stack_pointer_rtx;
9633 operands[2] = GEN_INT (current_function_pretend_args_size);
9634 output_add_immediate (operands);
9638 /* We may have already restored PC directly from the stack. */
9639 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9640 return "";
9642 /* Stack adjustment for exception handler. */
9643 if (current_function_calls_eh_return)
9644 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9645 ARM_EH_STACKADJ_REGNUM);
9647 /* Generate the return instruction. */
9648 switch ((int) ARM_FUNC_TYPE (func_type))
9650 case ARM_FT_ISR:
9651 case ARM_FT_FIQ:
9652 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9653 break;
9655 case ARM_FT_EXCEPTION:
9656 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9657 break;
9659 case ARM_FT_INTERWORKED:
9660 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9661 break;
9663 default:
9664 if (arm_arch5 || arm_arch4t)
9665 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9666 else
9667 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9668 break;
9671 return "";
9674 static void
9675 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9676 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9678 arm_stack_offsets *offsets;
9680 if (TARGET_THUMB)
9682 int regno;
9684 /* Emit any call-via-reg trampolines that are needed for v4t support
9685 of call_reg and call_value_reg type insns. */
9686 for (regno = 0; regno < LR_REGNUM; regno++)
9688 rtx label = cfun->machine->call_via[regno];
9690 if (label != NULL)
9692 function_section (current_function_decl);
9693 targetm.asm_out.internal_label (asm_out_file, "L",
9694 CODE_LABEL_NUMBER (label));
9695 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9699 /* ??? Probably not safe to set this here, since it assumes that a
9700 function will be emitted as assembly immediately after we generate
9701 RTL for it. This does not happen for inline functions. */
9702 return_used_this_function = 0;
9704 else
9706 /* We need to take into account any stack-frame rounding. */
9707 offsets = arm_get_frame_offsets ();
9709 gcc_assert (!use_return_insn (FALSE, NULL)
9710 || !return_used_this_function
9711 || offsets->saved_regs == offsets->outgoing_args
9712 || frame_pointer_needed);
9714 /* Reset the ARM-specific per-function variables. */
9715 after_arm_reorg = 0;
9719 /* Generate and emit an insn that we will recognize as a push_multi.
9720 Unfortunately, since this insn does not reflect the actual semantics of
9721 the operation very well, we need to annotate the insn for the benefit
9722 of DWARF2 frame unwind information. */
9723 static rtx
9724 emit_multi_reg_push (unsigned long mask)
9726 int num_regs = 0;
9727 int num_dwarf_regs;
9728 int i, j;
9729 rtx par;
9730 rtx dwarf;
9731 int dwarf_par_index;
9732 rtx tmp, reg;
9734 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9735 if (mask & (1 << i))
9736 num_regs++;
9738 gcc_assert (num_regs && num_regs <= 16);
9740 /* We don't record the PC in the dwarf frame information. */
9741 num_dwarf_regs = num_regs;
9742 if (mask & (1 << PC_REGNUM))
9743 num_dwarf_regs--;
9745 /* For the body of the insn we are going to generate an UNSPEC in
9746 parallel with several USEs. This allows the insn to be recognized
9747 by the push_multi pattern in the arm.md file. The insn looks
9748 something like this:
9750 (parallel [
9751 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9752 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9753 (use (reg:SI 11 fp))
9754 (use (reg:SI 12 ip))
9755 (use (reg:SI 14 lr))
9756 (use (reg:SI 15 pc))
9759 For the frame note however, we try to be more explicit and actually
9760 show each register being stored into the stack frame, plus a (single)
9761 decrement of the stack pointer. We do it this way in order to be
9762 friendly to the stack unwinding code, which only wants to see a single
9763 stack decrement per instruction. The RTL we generate for the note looks
9764 something like this:
9766 (sequence [
9767 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9768 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9769 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9770 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9771 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9774 This sequence is used both by the code to support stack unwinding for
9775 exceptions handlers and the code to generate dwarf2 frame debugging. */
9777 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9778 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9779 dwarf_par_index = 1;
9781 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9783 if (mask & (1 << i))
9785 reg = gen_rtx_REG (SImode, i);
9787 XVECEXP (par, 0, 0)
9788 = gen_rtx_SET (VOIDmode,
9789 gen_rtx_MEM (BLKmode,
9790 gen_rtx_PRE_DEC (BLKmode,
9791 stack_pointer_rtx)),
9792 gen_rtx_UNSPEC (BLKmode,
9793 gen_rtvec (1, reg),
9794 UNSPEC_PUSH_MULT));
9796 if (i != PC_REGNUM)
9798 tmp = gen_rtx_SET (VOIDmode,
9799 gen_rtx_MEM (SImode, stack_pointer_rtx),
9800 reg);
9801 RTX_FRAME_RELATED_P (tmp) = 1;
9802 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9803 dwarf_par_index++;
9806 break;
9810 for (j = 1, i++; j < num_regs; i++)
9812 if (mask & (1 << i))
9814 reg = gen_rtx_REG (SImode, i);
9816 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9818 if (i != PC_REGNUM)
9820 tmp = gen_rtx_SET (VOIDmode,
9821 gen_rtx_MEM (SImode,
9822 plus_constant (stack_pointer_rtx,
9823 4 * j)),
9824 reg);
9825 RTX_FRAME_RELATED_P (tmp) = 1;
9826 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9829 j++;
9833 par = emit_insn (par);
9835 tmp = gen_rtx_SET (SImode,
9836 stack_pointer_rtx,
9837 gen_rtx_PLUS (SImode,
9838 stack_pointer_rtx,
9839 GEN_INT (-4 * num_regs)));
9840 RTX_FRAME_RELATED_P (tmp) = 1;
9841 XVECEXP (dwarf, 0, 0) = tmp;
9843 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9844 REG_NOTES (par));
9845 return par;
9848 static rtx
9849 emit_sfm (int base_reg, int count)
9851 rtx par;
9852 rtx dwarf;
9853 rtx tmp, reg;
9854 int i;
9856 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9857 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9859 reg = gen_rtx_REG (XFmode, base_reg++);
9861 XVECEXP (par, 0, 0)
9862 = gen_rtx_SET (VOIDmode,
9863 gen_rtx_MEM (BLKmode,
9864 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9865 gen_rtx_UNSPEC (BLKmode,
9866 gen_rtvec (1, reg),
9867 UNSPEC_PUSH_MULT));
9868 tmp = gen_rtx_SET (VOIDmode,
9869 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9870 RTX_FRAME_RELATED_P (tmp) = 1;
9871 XVECEXP (dwarf, 0, 1) = tmp;
9873 for (i = 1; i < count; i++)
9875 reg = gen_rtx_REG (XFmode, base_reg++);
9876 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9878 tmp = gen_rtx_SET (VOIDmode,
9879 gen_rtx_MEM (XFmode,
9880 plus_constant (stack_pointer_rtx,
9881 i * 12)),
9882 reg);
9883 RTX_FRAME_RELATED_P (tmp) = 1;
9884 XVECEXP (dwarf, 0, i + 1) = tmp;
9887 tmp = gen_rtx_SET (VOIDmode,
9888 stack_pointer_rtx,
9889 gen_rtx_PLUS (SImode,
9890 stack_pointer_rtx,
9891 GEN_INT (-12 * count)));
9892 RTX_FRAME_RELATED_P (tmp) = 1;
9893 XVECEXP (dwarf, 0, 0) = tmp;
9895 par = emit_insn (par);
9896 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9897 REG_NOTES (par));
9898 return par;
9902 /* Return true if the current function needs to save/restore LR. */
9904 static bool
9905 thumb_force_lr_save (void)
9907 return !cfun->machine->lr_save_eliminated
9908 && (!leaf_function_p ()
9909 || thumb_far_jump_used_p ()
9910 || regs_ever_live [LR_REGNUM]);
9914 /* Compute the distance from register FROM to register TO.
9915 These can be the arg pointer (26), the soft frame pointer (25),
9916 the stack pointer (13) or the hard frame pointer (11).
9917 In thumb mode r7 is used as the soft frame pointer, if needed.
9918 Typical stack layout looks like this:
9920 old stack pointer -> | |
9921 ----
9922 | | \
9923 | | saved arguments for
9924 | | vararg functions
9925 | | /
9927 hard FP & arg pointer -> | | \
9928 | | stack
9929 | | frame
9930 | | /
9932 | | \
9933 | | call saved
9934 | | registers
9935 soft frame pointer -> | | /
9937 | | \
9938 | | local
9939 | | variables
9940 | | /
9942 | | \
9943 | | outgoing
9944 | | arguments
9945 current stack pointer -> | | /
9948 For a given function some or all of these stack components
9949 may not be needed, giving rise to the possibility of
9950 eliminating some of the registers.
9952 The values returned by this function must reflect the behavior
9953 of arm_expand_prologue() and arm_compute_save_reg_mask().
9955 The sign of the number returned reflects the direction of stack
9956 growth, so the values are positive for all eliminations except
9957 from the soft frame pointer to the hard frame pointer.
9959 SFP may point just inside the local variables block to ensure correct
9960 alignment. */
9963 /* Calculate stack offsets. These are used to calculate register elimination
9964 offsets and in prologue/epilogue code. */
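/* A worked example, assuming an ARM (not Thumb) function with a frame
   pointer, no pretend args, {r4, r5, fp, ip, lr, pc} in the save mask,
   8 bytes of locals, no outgoing arguments and no FPA/VFP/iWMMXt saves:
   saved_args = 0, frame = 4, saved_regs = 24, soft_frame = 24 and
   outgoing_args = 32.  The exact numbers depend on the target
   configuration (e.g. CALLER_INTERWORKING_SLOT_SIZE and doubleword
   alignment), so treat them as illustrative only.  */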
9966 static arm_stack_offsets *
9967 arm_get_frame_offsets (void)
9969 struct arm_stack_offsets *offsets;
9970 unsigned long func_type;
9971 int leaf;
9972 int saved;
9973 HOST_WIDE_INT frame_size;
9975 offsets = &cfun->machine->stack_offsets;
9977 /* We need to know if we are a leaf function. Unfortunately, it
9978 is possible to be called after start_sequence has been called,
9979 which causes get_insns to return the insns for the sequence,
9980 not the function, which will cause leaf_function_p to return
9981 the incorrect result.  However, we only really need
9983 to know about leaf functions once reload has completed, and the
9984 frame size cannot be changed after that time, so we can safely
9985 use the cached value. */
9987 if (reload_completed)
9988 return offsets;
9990 /* Initially this is the size of the local variables. It will be translated
9991 into an offset once we have determined the size of preceding data. */
9992 frame_size = ROUND_UP_WORD (get_frame_size ());
9994 leaf = leaf_function_p ();
9996 /* Space for variadic functions. */
9997 offsets->saved_args = current_function_pretend_args_size;
9999 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10001 if (TARGET_ARM)
10003 unsigned int regno;
10005 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10007 /* We know that SP will be doubleword aligned on entry, and we must
10008 preserve that condition at any subroutine call. We also require the
10009 soft frame pointer to be doubleword aligned. */
10011 if (TARGET_REALLY_IWMMXT)
10013 /* Check for the call-saved iWMMXt registers. */
10014 for (regno = FIRST_IWMMXT_REGNUM;
10015 regno <= LAST_IWMMXT_REGNUM;
10016 regno++)
10017 if (regs_ever_live [regno] && ! call_used_regs [regno])
10018 saved += 8;
10021 func_type = arm_current_func_type ();
10022 if (! IS_VOLATILE (func_type))
10024 /* Space for saved FPA registers. */
10025 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10026 if (regs_ever_live[regno] && ! call_used_regs[regno])
10027 saved += 12;
10029 /* Space for saved VFP registers. */
10030 if (TARGET_HARD_FLOAT && TARGET_VFP)
10031 saved += arm_get_vfp_saved_size ();
10034 else /* TARGET_THUMB */
10036 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10037 if (TARGET_BACKTRACE)
10038 saved += 16;
10041 /* Saved registers include the stack frame. */
10042 offsets->saved_regs = offsets->saved_args + saved;
10043 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10044 /* A leaf function does not need any stack alignment if it has nothing
10045 on the stack. */
10046 if (leaf && frame_size == 0)
10048 offsets->outgoing_args = offsets->soft_frame;
10049 return offsets;
10052 /* Ensure SFP has the correct alignment. */
10053 if (ARM_DOUBLEWORD_ALIGN
10054 && (offsets->soft_frame & 7))
10055 offsets->soft_frame += 4;
10057 offsets->outgoing_args = offsets->soft_frame + frame_size
10058 + current_function_outgoing_args_size;
10060 if (ARM_DOUBLEWORD_ALIGN)
10062 /* Ensure SP remains doubleword aligned. */
10063 if (offsets->outgoing_args & 7)
10064 offsets->outgoing_args += 4;
10065 gcc_assert (!(offsets->outgoing_args & 7));
10068 return offsets;
10072 /* Calculate the relative offsets for the different stack pointers. Positive
10073 offsets are in the direction of stack growth. */
10075 HOST_WIDE_INT
10076 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10078 arm_stack_offsets *offsets;
10080 offsets = arm_get_frame_offsets ();
10082 /* OK, now we have enough information to compute the distances.
10083 There must be an entry in these switch tables for each pair
10084 of registers in ELIMINABLE_REGS, even if some of the entries
10085 seem to be redundant or useless. */
10086 switch (from)
10088 case ARG_POINTER_REGNUM:
10089 switch (to)
10091 case THUMB_HARD_FRAME_POINTER_REGNUM:
10092 return 0;
10094 case FRAME_POINTER_REGNUM:
10095 /* This is the reverse of the soft frame pointer
10096 to hard frame pointer elimination below. */
10097 return offsets->soft_frame - offsets->saved_args;
10099 case ARM_HARD_FRAME_POINTER_REGNUM:
10100 /* If there is no stack frame then the hard
10101 frame pointer and the arg pointer coincide. */
10102 if (offsets->frame == offsets->saved_regs)
10103 return 0;
10104 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10105 return (frame_pointer_needed
10106 && cfun->static_chain_decl != NULL
10107 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10109 case STACK_POINTER_REGNUM:
10110 /* If nothing has been pushed on the stack at all
10111 then this will return -4. This *is* correct! */
10112 return offsets->outgoing_args - (offsets->saved_args + 4);
10114 default:
10115 gcc_unreachable ();
10117 gcc_unreachable ();
10119 case FRAME_POINTER_REGNUM:
10120 switch (to)
10122 case THUMB_HARD_FRAME_POINTER_REGNUM:
10123 return 0;
10125 case ARM_HARD_FRAME_POINTER_REGNUM:
10126 /* The hard frame pointer points to the top entry in the
10127 stack frame.  The soft frame pointer points to the bottom entry
10128 in the stack frame. If there is no stack frame at all,
10129 then they are identical. */
10131 return offsets->frame - offsets->soft_frame;
10133 case STACK_POINTER_REGNUM:
10134 return offsets->outgoing_args - offsets->soft_frame;
10136 default:
10137 gcc_unreachable ();
10139 gcc_unreachable ();
10141 default:
10142 /* You cannot eliminate from the stack pointer.
10143 In theory you could eliminate from the hard frame
10144 pointer to the stack pointer, but this will never
10145 happen, since if a stack frame is not needed the
10146 hard frame pointer will never be used. */
10147 gcc_unreachable ();
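/* Continuing the hypothetical layout sketched above (16 bytes of locals,
   no outgoing arguments, so outgoing_args = 40), eliminating the arg
   pointer to the stack pointer yields

       outgoing_args - (saved_args + 4) = 40 - (8 + 4) = 28,

   while a leaf function that pushes nothing at all gives
   0 - (0 + 4) = -4, the case noted in the comment above.  */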
10152 /* Generate the prologue instructions for entry into an ARM function. */
10153 void
10154 arm_expand_prologue (void)
10156 int reg;
10157 rtx amount;
10158 rtx insn;
10159 rtx ip_rtx;
10160 unsigned long live_regs_mask;
10161 unsigned long func_type;
10162 int fp_offset = 0;
10163 int saved_pretend_args = 0;
10164 int saved_regs = 0;
10165 unsigned HOST_WIDE_INT args_to_push;
10166 arm_stack_offsets *offsets;
10168 func_type = arm_current_func_type ();
10170 /* Naked functions don't have prologues. */
10171 if (IS_NAKED (func_type))
10172 return;
10174 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10175 args_to_push = current_function_pretend_args_size;
10177 /* Compute which registers we will have to save onto the stack.  */
10178 live_regs_mask = arm_compute_save_reg_mask ();
10180 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10182 if (frame_pointer_needed)
10184 if (IS_INTERRUPT (func_type))
10186 /* Interrupt functions must not corrupt any registers.
10187 Creating a frame pointer, however, corrupts the IP
10188 register, so we must push it first. */
10189 insn = emit_multi_reg_push (1 << IP_REGNUM);
10191 /* Do not set RTX_FRAME_RELATED_P on this insn.
10192 The dwarf stack unwinding code only wants to see one
10193 stack decrement per function, and this is not it. If
10194 this instruction is labeled as being part of the frame
10195 creation sequence then dwarf2out_frame_debug_expr will
10196 die when it encounters the assignment of IP to FP
10197 later on, since the use of SP here establishes SP as
10198 the CFA register and not IP.
10200 Anyway this instruction is not really part of the stack
10201 frame creation although it is part of the prologue. */
10203 else if (IS_NESTED (func_type))
10205 /* The static chain register is the same as the IP register,
10206 which is used as a scratch register during stack frame creation.
10207 To get around this we need to find somewhere to store IP
10208 whilst the frame is being created. We try the following
10209 places in order:
10211 1. The last argument register.
10212 2. A slot on the stack above the frame. (This only
10213 works if the function is not a varargs function).
10214 3. Register r3, after pushing the argument registers
10215 onto the stack.
10217 Note - we only need to tell the dwarf2 backend about the SP
10218 adjustment in the second variant; the static chain register
10219 doesn't need to be unwound, as it doesn't contain a value
10220 inherited from the caller. */
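/* A rough sketch of the three alternatives (illustrative assembly; the
   exact registers and offsets depend on the function being compiled):

     1.  mov   r3, ip              @ r3 (the last argument reg) is free
     2.  str   ip, [sp, #-4]!      @ slot on the stack above the frame
     3.  sub   sp, sp, #<pretend>  @ push/reserve the argument registers,
         mov   r3, ip              @ then reuse r3 to hold IP  */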
10222 if (regs_ever_live[3] == 0)
10224 insn = gen_rtx_REG (SImode, 3);
10225 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10226 insn = emit_insn (insn);
10228 else if (args_to_push == 0)
10230 rtx dwarf;
10231 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10232 insn = gen_rtx_MEM (SImode, insn);
10233 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10234 insn = emit_insn (insn);
10236 fp_offset = 4;
10238 /* Just tell the dwarf backend that we adjusted SP. */
10239 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10240 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10241 GEN_INT (-fp_offset)));
10242 RTX_FRAME_RELATED_P (insn) = 1;
10243 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10244 dwarf, REG_NOTES (insn));
10246 else
10248 /* Store the args on the stack. */
10249 if (cfun->machine->uses_anonymous_args)
10250 insn = emit_multi_reg_push
10251 ((0xf0 >> (args_to_push / 4)) & 0xf);
10252 else
10253 insn = emit_insn
10254 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10255 GEN_INT (- args_to_push)));
10257 RTX_FRAME_RELATED_P (insn) = 1;
10259 saved_pretend_args = 1;
10260 fp_offset = args_to_push;
10261 args_to_push = 0;
10263 /* Now reuse r3 to preserve IP. */
10264 insn = gen_rtx_REG (SImode, 3);
10265 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10266 (void) emit_insn (insn);
10270 if (fp_offset)
10272 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10273 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10275 else
10276 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10278 insn = emit_insn (insn);
10279 RTX_FRAME_RELATED_P (insn) = 1;
10282 if (args_to_push)
10284 /* Push the argument registers, or reserve space for them. */
10285 if (cfun->machine->uses_anonymous_args)
10286 insn = emit_multi_reg_push
10287 ((0xf0 >> (args_to_push / 4)) & 0xf);
10288 else
10289 insn = emit_insn
10290 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10291 GEN_INT (- args_to_push)));
10292 RTX_FRAME_RELATED_P (insn) = 1;
10295 /* If this is an interrupt service routine, and the link register
10296 is going to be pushed, and we are not creating a stack frame,
10297 (which would involve an extra push of IP and a pop in the epilogue)
10298 subtracting four from LR now will mean that the function return
10299 can be done with a single instruction. */
10300 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10301 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10302 && ! frame_pointer_needed)
10303 emit_insn (gen_rtx_SET (SImode,
10304 gen_rtx_REG (SImode, LR_REGNUM),
10305 gen_rtx_PLUS (SImode,
10306 gen_rtx_REG (SImode, LR_REGNUM),
10307 GEN_INT (-4))));
10309 if (live_regs_mask)
10311 insn = emit_multi_reg_push (live_regs_mask);
10312 saved_regs += bit_count (live_regs_mask) * 4;
10313 RTX_FRAME_RELATED_P (insn) = 1;
10316 if (TARGET_IWMMXT)
10317 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10318 if (regs_ever_live[reg] && ! call_used_regs [reg])
10320 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10321 insn = gen_rtx_MEM (V2SImode, insn);
10322 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10323 gen_rtx_REG (V2SImode, reg)));
10324 RTX_FRAME_RELATED_P (insn) = 1;
10325 saved_regs += 8;
10328 if (! IS_VOLATILE (func_type))
10330 int start_reg;
10332 /* Save any floating point call-saved registers used by this
10333 function. */
10334 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10336 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10337 if (regs_ever_live[reg] && !call_used_regs[reg])
10339 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10340 insn = gen_rtx_MEM (XFmode, insn);
10341 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10342 gen_rtx_REG (XFmode, reg)));
10343 RTX_FRAME_RELATED_P (insn) = 1;
10344 saved_regs += 12;
10347 else
10349 start_reg = LAST_FPA_REGNUM;
10351 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10353 if (regs_ever_live[reg] && !call_used_regs[reg])
10355 if (start_reg - reg == 3)
10357 insn = emit_sfm (reg, 4);
10358 RTX_FRAME_RELATED_P (insn) = 1;
10359 saved_regs += 48;
10360 start_reg = reg - 1;
10363 else
10365 if (start_reg != reg)
10367 insn = emit_sfm (reg + 1, start_reg - reg);
10368 RTX_FRAME_RELATED_P (insn) = 1;
10369 saved_regs += (start_reg - reg) * 12;
10371 start_reg = reg - 1;
10375 if (start_reg != reg)
10377 insn = emit_sfm (reg + 1, start_reg - reg);
10378 saved_regs += (start_reg - reg) * 12;
10379 RTX_FRAME_RELATED_P (insn) = 1;
10382 if (TARGET_HARD_FLOAT && TARGET_VFP)
10384 start_reg = FIRST_VFP_REGNUM;
10386 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10388 if ((!regs_ever_live[reg] || call_used_regs[reg])
10389 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10391 if (start_reg != reg)
10392 saved_regs += vfp_emit_fstmx (start_reg,
10393 (reg - start_reg) / 2);
10394 start_reg = reg + 2;
10397 if (start_reg != reg)
10398 saved_regs += vfp_emit_fstmx (start_reg,
10399 (reg - start_reg) / 2);
10403 if (frame_pointer_needed)
10405 /* Create the new frame pointer. */
10406 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10407 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10408 RTX_FRAME_RELATED_P (insn) = 1;
10410 if (IS_NESTED (func_type))
10412 /* Recover the static chain register. */
10413 if (regs_ever_live [3] == 0
10414 || saved_pretend_args)
10415 insn = gen_rtx_REG (SImode, 3);
10416 else /* if (current_function_pretend_args_size == 0) */
10418 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10419 GEN_INT (4));
10420 insn = gen_rtx_MEM (SImode, insn);
10423 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10424 /* Add a USE to stop propagate_one_insn() from barfing. */
10425 emit_insn (gen_prologue_use (ip_rtx));
10429 offsets = arm_get_frame_offsets ();
10430 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10432 /* This add can produce multiple insns for a large constant, so we
10433 need to get tricky. */
10434 rtx last = get_last_insn ();
10436 amount = GEN_INT (offsets->saved_args + saved_regs
10437 - offsets->outgoing_args);
10439 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10440 amount));
10443 last = last ? NEXT_INSN (last) : get_insns ();
10444 RTX_FRAME_RELATED_P (last) = 1;
10446 while (last != insn);
10448 /* If the frame pointer is needed, emit a special barrier that
10449 will prevent the scheduler from moving stores to the frame
10450 before the stack adjustment. */
10451 if (frame_pointer_needed)
10452 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10453 hard_frame_pointer_rtx));
10457 if (flag_pic)
10458 arm_load_pic_register (INVALID_REGNUM);
10460 /* If we are profiling, make sure no instructions are scheduled before
10461 the call to mcount. Similarly if the user has requested no
10462 scheduling in the prolog. */
10463 if (current_function_profile || !TARGET_SCHED_PROLOG)
10464 emit_insn (gen_blockage ());
10466 /* If the link register is being kept alive, with the return address in it,
10467 then make sure that it does not get reused by the ce2 pass. */
10468 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10470 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10471 cfun->machine->lr_save_eliminated = 1;
10475 /* If CODE is 'd', then X is a condition operand and the instruction
10476 should only be executed if the condition is true.
10477 If CODE is 'D', then X is a condition operand and the instruction
10478 should only be executed if the condition is false: however, if the mode
10479 of the comparison is CCFPEmode, then always execute the instruction -- we
10480 do this because in these circumstances !GE does not necessarily imply LT;
10481 in these cases the instruction pattern will take care to make sure that
10482 an instruction containing %d will follow, thereby undoing the effects of
10483 doing this instruction unconditionally.
10484 If CODE is 'N' then X is a floating point operand that must be negated
10485 before output.
10486 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10487 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
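/* As a hypothetical example of 'd' and 'D': an output template such as
   "mov%d3\t%0, %1\n\tmov%D3\t%0, %2", given operand 3 of
   (ge (reg:CC CC_REGNUM) (const_int 0)), would emit a "movge"
   followed by a "movlt".  */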
10488 void
10489 arm_print_operand (FILE *stream, rtx x, int code)
10491 switch (code)
10493 case '@':
10494 fputs (ASM_COMMENT_START, stream);
10495 return;
10497 case '_':
10498 fputs (user_label_prefix, stream);
10499 return;
10501 case '|':
10502 fputs (REGISTER_PREFIX, stream);
10503 return;
10505 case '?':
10506 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10508 if (TARGET_THUMB)
10510 output_operand_lossage ("predicated Thumb instruction");
10511 break;
10513 if (current_insn_predicate != NULL)
10515 output_operand_lossage
10516 ("predicated instruction in conditional sequence");
10517 break;
10520 fputs (arm_condition_codes[arm_current_cc], stream);
10522 else if (current_insn_predicate)
10524 enum arm_cond_code code;
10526 if (TARGET_THUMB)
10528 output_operand_lossage ("predicated Thumb instruction");
10529 break;
10532 code = get_arm_condition_code (current_insn_predicate);
10533 fputs (arm_condition_codes[code], stream);
10535 return;
10537 case 'N':
10539 REAL_VALUE_TYPE r;
10540 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10541 r = REAL_VALUE_NEGATE (r);
10542 fprintf (stream, "%s", fp_const_from_val (&r));
10544 return;
10546 case 'B':
10547 if (GET_CODE (x) == CONST_INT)
10549 HOST_WIDE_INT val;
10550 val = ARM_SIGN_EXTEND (~INTVAL (x));
10551 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10553 else
10555 putc ('~', stream);
10556 output_addr_const (stream, x);
10558 return;
10560 case 'i':
10561 fprintf (stream, "%s", arithmetic_instr (x, 1));
10562 return;
10564 /* Truncate Cirrus shift counts. */
10565 case 's':
10566 if (GET_CODE (x) == CONST_INT)
10568 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10569 return;
10571 arm_print_operand (stream, x, 0);
10572 return;
10574 case 'I':
10575 fprintf (stream, "%s", arithmetic_instr (x, 0));
10576 return;
10578 case 'S':
10580 HOST_WIDE_INT val;
10581 const char * shift = shift_op (x, &val);
10583 if (shift)
10585 fprintf (stream, ", %s ", shift_op (x, &val));
10586 if (val == -1)
10587 arm_print_operand (stream, XEXP (x, 1), 0);
10588 else
10589 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10592 return;
10594 /* An explanation of the 'Q', 'R' and 'H' register operands:
10596 In a pair of registers containing a DI or DF value the 'Q'
10597 operand returns the register number of the register containing
10598 the least significant part of the value. The 'R' operand returns
10599 the register number of the register containing the most
10600 significant part of the value.
10602 The 'H' operand returns the higher of the two register numbers.
10603 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10604 same as the 'Q' operand, since the most significant part of the
10605 value is held in the lower number register. The reverse is true
10606 on systems where WORDS_BIG_ENDIAN is false.
10608 The purpose of these operands is to distinguish between cases
10609 where the endian-ness of the values is important (for example
10610 when they are added together), and cases where the endian-ness
10611 is irrelevant, but the order of register operations is important.
10612 For example when loading a value from memory into a register
10613 pair, the endian-ness does not matter. Provided that the value
10614 from the lower memory address is put into the lower numbered
10615 register, and the value from the higher address is put into the
10616 higher numbered register, the load will work regardless of whether
10617 the value being loaded is big-wordian or little-wordian. The
10618 order of the two register loads can matter however, if the address
10619 of the memory location is actually held in one of the registers
10620 being overwritten by the load. */
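/* For instance (hypothetical operands): a DImode value held in the
   pair {r0, r1} prints as r0 for %Q, r1 for %R and r1 for %H when
   WORDS_BIG_ENDIAN is false; when it is true, %Q gives r1, %R gives
   r0 and %H is still r1.  */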
10621 case 'Q':
10622 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10624 output_operand_lossage ("invalid operand for code '%c'", code);
10625 return;
10628 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10629 return;
10631 case 'R':
10632 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10634 output_operand_lossage ("invalid operand for code '%c'", code);
10635 return;
10638 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10639 return;
10641 case 'H':
10642 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10644 output_operand_lossage ("invalid operand for code '%c'", code);
10645 return;
10648 asm_fprintf (stream, "%r", REGNO (x) + 1);
10649 return;
10651 case 'm':
10652 asm_fprintf (stream, "%r",
10653 GET_CODE (XEXP (x, 0)) == REG
10654 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10655 return;
10657 case 'M':
10658 asm_fprintf (stream, "{%r-%r}",
10659 REGNO (x),
10660 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10661 return;
10663 case 'd':
10664 /* CONST_TRUE_RTX means always -- that's the default. */
10665 if (x == const_true_rtx)
10666 return;
10668 if (!COMPARISON_P (x))
10670 output_operand_lossage ("invalid operand for code '%c'", code);
10671 return;
10674 fputs (arm_condition_codes[get_arm_condition_code (x)],
10675 stream);
10676 return;
10678 case 'D':
10679 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10680 want to do that. */
10681 if (x == const_true_rtx)
10683 output_operand_lossage ("instruction never executed");
10684 return;
10686 if (!COMPARISON_P (x))
10688 output_operand_lossage ("invalid operand for code '%c'", code);
10689 return;
10692 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10693 (get_arm_condition_code (x))],
10694 stream);
10695 return;
10697 /* Cirrus registers can be accessed in a variety of ways:
10698 single floating point (f)
10699 double floating point (d)
10700 32bit integer (fx)
10701 64bit integer (dx). */
10702 case 'W': /* Cirrus register in F mode. */
10703 case 'X': /* Cirrus register in D mode. */
10704 case 'Y': /* Cirrus register in FX mode. */
10705 case 'Z': /* Cirrus register in DX mode. */
10706 gcc_assert (GET_CODE (x) == REG
10707 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
10709 fprintf (stream, "mv%s%s",
10710 code == 'W' ? "f"
10711 : code == 'X' ? "d"
10712 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10714 return;
10716 /* Print a Cirrus register, using the mode of the register itself.  */
10717 case 'V':
10719 int mode = GET_MODE (x);
10721 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10723 output_operand_lossage ("invalid operand for code '%c'", code);
10724 return;
10727 fprintf (stream, "mv%s%s",
10728 mode == DFmode ? "d"
10729 : mode == SImode ? "fx"
10730 : mode == DImode ? "dx"
10731 : "f", reg_names[REGNO (x)] + 2);
10733 return;
10736 case 'U':
10737 if (GET_CODE (x) != REG
10738 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10739 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10740 /* Bad value for wCG register number. */
10742 output_operand_lossage ("invalid operand for code '%c'", code);
10743 return;
10746 else
10747 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10748 return;
10750 /* Print an iWMMXt control register name. */
10751 case 'w':
10752 if (GET_CODE (x) != CONST_INT
10753 || INTVAL (x) < 0
10754 || INTVAL (x) >= 16)
10755 /* Bad value for wC register number. */
10757 output_operand_lossage ("invalid operand for code '%c'", code);
10758 return;
10761 else
10763 static const char * wc_reg_names [16] =
10765 "wCID", "wCon", "wCSSF", "wCASF",
10766 "wC4", "wC5", "wC6", "wC7",
10767 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10768 "wC12", "wC13", "wC14", "wC15"
10771 fprintf (stream, wc_reg_names [INTVAL (x)]);
10773 return;
10775 /* Print a VFP double precision register name. */
10776 case 'P':
10778 int mode = GET_MODE (x);
10779 int num;
10781 if (mode != DImode && mode != DFmode)
10783 output_operand_lossage ("invalid operand for code '%c'", code);
10784 return;
10787 if (GET_CODE (x) != REG
10788 || !IS_VFP_REGNUM (REGNO (x)))
10790 output_operand_lossage ("invalid operand for code '%c'", code);
10791 return;
10794 num = REGNO(x) - FIRST_VFP_REGNUM;
10795 if (num & 1)
10797 output_operand_lossage ("invalid operand for code '%c'", code);
10798 return;
10801 fprintf (stream, "d%d", num >> 1);
10803 return;
10805 default:
10806 if (x == 0)
10808 output_operand_lossage ("missing operand");
10809 return;
10812 switch (GET_CODE (x))
10814 case REG:
10815 asm_fprintf (stream, "%r", REGNO (x));
10816 break;
10818 case MEM:
10819 output_memory_reference_mode = GET_MODE (x);
10820 output_address (XEXP (x, 0));
10821 break;
10823 case CONST_DOUBLE:
10824 fprintf (stream, "#%s", fp_immediate_constant (x));
10825 break;
10827 default:
10828 gcc_assert (GET_CODE (x) != NEG);
10829 fputc ('#', stream);
10830 output_addr_const (stream, x);
10831 break;
10836 #ifndef AOF_ASSEMBLER
10837 /* Target hook for assembling integer objects. The ARM version needs to
10838 handle word-sized values specially. */
10839 static bool
10840 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10842 if (size == UNITS_PER_WORD && aligned_p)
10844 fputs ("\t.word\t", asm_out_file);
10845 output_addr_const (asm_out_file, x);
10847 /* Mark symbols as position independent. We only do this in the
10848 .text segment, not in the .data segment. */
10849 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10850 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10852 if (GET_CODE (x) == SYMBOL_REF
10853 && (CONSTANT_POOL_ADDRESS_P (x)
10854 || SYMBOL_REF_LOCAL_P (x)))
10855 fputs ("(GOTOFF)", asm_out_file);
10856 else if (GET_CODE (x) == LABEL_REF)
10857 fputs ("(GOTOFF)", asm_out_file);
10858 else
10859 fputs ("(GOT)", asm_out_file);
10861 fputc ('\n', asm_out_file);
10862 return true;
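/* For example (hypothetical symbols): under -fPIC a constant-table
   reference to a local symbol might be emitted as

       .word   .LC0(GOTOFF)

   whereas a reference to an ordinary global would come out as

       .word   foo(GOT)

   leaving the final address to be resolved through the GOT.  */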
10865 if (arm_vector_mode_supported_p (GET_MODE (x)))
10867 int i, units;
10869 gcc_assert (GET_CODE (x) == CONST_VECTOR);
10871 units = CONST_VECTOR_NUNITS (x);
10873 switch (GET_MODE (x))
10875 case V2SImode: size = 4; break;
10876 case V4HImode: size = 2; break;
10877 case V8QImode: size = 1; break;
10878 default:
10879 gcc_unreachable ();
10882 for (i = 0; i < units; i++)
10884 rtx elt;
10886 elt = CONST_VECTOR_ELT (x, i);
10887 assemble_integer
10888 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10891 return true;
10894 return default_assemble_integer (x, size, aligned_p);
10898 /* Add a function to the list of static constructors. */
10900 static void
10901 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10903 if (!TARGET_AAPCS_BASED)
10905 default_named_section_asm_out_constructor (symbol, priority);
10906 return;
10909 /* Put these in the .init_array section, using a special relocation. */
10910 ctors_section ();
10911 assemble_align (POINTER_SIZE);
10912 fputs ("\t.word\t", asm_out_file);
10913 output_addr_const (asm_out_file, symbol);
10914 fputs ("(target1)\n", asm_out_file);
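/* On an AAPCS target the output might look roughly like this
   (hypothetical symbol name):

       .align  2
       .word   _GLOBAL__I_foo(target1)

   where the (target1) relocation leaves it to the linker to decide
   whether the entry is encoded as an absolute or a relative address.  */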
10916 #endif
10918 /* A finite state machine takes care of noticing whether or not instructions
10919 can be conditionally executed, and thus decrease execution time and code
10920 size by deleting branch instructions. The fsm is controlled by
10921 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10923 /* The states of the fsm controlling condition codes are:
10924 0: normal, do nothing special
10925 1: make ASM_OUTPUT_OPCODE not output this instruction
10926 2: make ASM_OUTPUT_OPCODE not output this instruction
10927 3: make instructions conditional
10928 4: make instructions conditional
10930 State transitions (state->state by whom under condition):
10931 0 -> 1 final_prescan_insn if the `target' is a label
10932 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10933 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10934 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10935 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10936 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10937 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10938 (the target insn is arm_target_insn).
10940 If the jump clobbers the conditions then we use states 2 and 4.
10942 A similar thing can be done with conditional return insns.
10944 XXX In case the `target' is an unconditional branch, this conditionalising
10945 of the instructions always reduces code size, but not always execution
10946 time. But then, I want to reduce the code size to somewhere near what
10947 /bin/cc produces. */
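/* A hypothetical example of the transformation the FSM enables:

       cmp     r0, #0
       beq     .L1
       add     r1, r1, #1
   .L1:

   becomes

       cmp     r0, #0
       addne   r1, r1, #1

   final_prescan_insn sees the conditional branch around a single insn
   (state 0 -> 1), ASM_OUTPUT_OPCODE suppresses the branch (1 -> 3),
   the skipped insn is emitted with the opposite condition, and
   reaching .L1 returns the FSM to state 0.  */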
10949 /* Returns the index of the ARM condition code string in
10950 `arm_condition_codes'. COMPARISON should be an rtx like
10951 `(eq (...) (...))'. */
10952 static enum arm_cond_code
10953 get_arm_condition_code (rtx comparison)
10955 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10956 int code;
10957 enum rtx_code comp_code = GET_CODE (comparison);
10959 if (GET_MODE_CLASS (mode) != MODE_CC)
10960 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10961 XEXP (comparison, 1));
10963 switch (mode)
10965 case CC_DNEmode: code = ARM_NE; goto dominance;
10966 case CC_DEQmode: code = ARM_EQ; goto dominance;
10967 case CC_DGEmode: code = ARM_GE; goto dominance;
10968 case CC_DGTmode: code = ARM_GT; goto dominance;
10969 case CC_DLEmode: code = ARM_LE; goto dominance;
10970 case CC_DLTmode: code = ARM_LT; goto dominance;
10971 case CC_DGEUmode: code = ARM_CS; goto dominance;
10972 case CC_DGTUmode: code = ARM_HI; goto dominance;
10973 case CC_DLEUmode: code = ARM_LS; goto dominance;
10974 case CC_DLTUmode: code = ARM_CC;
10976 dominance:
10977 gcc_assert (comp_code == EQ || comp_code == NE);
10979 if (comp_code == EQ)
10980 return ARM_INVERSE_CONDITION_CODE (code);
10981 return code;
10983 case CC_NOOVmode:
10984 switch (comp_code)
10986 case NE: return ARM_NE;
10987 case EQ: return ARM_EQ;
10988 case GE: return ARM_PL;
10989 case LT: return ARM_MI;
10990 default: gcc_unreachable ();
10993 case CC_Zmode:
10994 switch (comp_code)
10996 case NE: return ARM_NE;
10997 case EQ: return ARM_EQ;
10998 default: gcc_unreachable ();
11001 case CC_Nmode:
11002 switch (comp_code)
11004 case NE: return ARM_MI;
11005 case EQ: return ARM_PL;
11006 default: gcc_unreachable ();
11009 case CCFPEmode:
11010 case CCFPmode:
11011 /* These encodings assume that AC=1 in the FPA system control
11012 byte. This allows us to handle all cases except UNEQ and
11013 LTGT. */
11014 switch (comp_code)
11016 case GE: return ARM_GE;
11017 case GT: return ARM_GT;
11018 case LE: return ARM_LS;
11019 case LT: return ARM_MI;
11020 case NE: return ARM_NE;
11021 case EQ: return ARM_EQ;
11022 case ORDERED: return ARM_VC;
11023 case UNORDERED: return ARM_VS;
11024 case UNLT: return ARM_LT;
11025 case UNLE: return ARM_LE;
11026 case UNGT: return ARM_HI;
11027 case UNGE: return ARM_PL;
11028 /* UNEQ and LTGT do not have a representation. */
11029 case UNEQ: /* Fall through. */
11030 case LTGT: /* Fall through. */
11031 default: gcc_unreachable ();
11034 case CC_SWPmode:
11035 switch (comp_code)
11037 case NE: return ARM_NE;
11038 case EQ: return ARM_EQ;
11039 case GE: return ARM_LE;
11040 case GT: return ARM_LT;
11041 case LE: return ARM_GE;
11042 case LT: return ARM_GT;
11043 case GEU: return ARM_LS;
11044 case GTU: return ARM_CC;
11045 case LEU: return ARM_CS;
11046 case LTU: return ARM_HI;
11047 default: gcc_unreachable ();
11050 case CC_Cmode:
11051 switch (comp_code)
11053 case LTU: return ARM_CS;
11054 case GEU: return ARM_CC;
11055 default: gcc_unreachable ();
11058 case CCmode:
11059 switch (comp_code)
11061 case NE: return ARM_NE;
11062 case EQ: return ARM_EQ;
11063 case GE: return ARM_GE;
11064 case GT: return ARM_GT;
11065 case LE: return ARM_LE;
11066 case LT: return ARM_LT;
11067 case GEU: return ARM_CS;
11068 case GTU: return ARM_HI;
11069 case LEU: return ARM_LS;
11070 case LTU: return ARM_CC;
11071 default: gcc_unreachable ();
11074 default: gcc_unreachable ();
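/* For instance (hypothetical rtx): given
   (gt (reg:CC_SWP CC_REGNUM) (const_int 0)), the operands of the
   original comparison were swapped when the flags were set, so the
   condition that must actually be tested is LT and ARM_LT is
   returned.  */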
11078 void
11079 arm_final_prescan_insn (rtx insn)
11081 /* BODY will hold the body of INSN. */
11082 rtx body = PATTERN (insn);
11084 /* This will be 1 if trying to repeat the trick, and things need to be
11085 reversed if it appears to fail. */
11086 int reverse = 0;
11088 /* A nonzero JUMP_CLOBBERS implies that the conditions, if a branch is
11089 taken, are clobbered, even if the rtl suggests otherwise.  It also
11090 means that we have to grub around within the jump expression to find
11091 out what the conditions are when the jump isn't taken. */
11092 int jump_clobbers = 0;
11094 /* If we start with a return insn, we only succeed if we find another one. */
11095 int seeking_return = 0;
11097 /* START_INSN will hold the insn from where we start looking. This is the
11098 first insn after the following code_label if REVERSE is true. */
11099 rtx start_insn = insn;
11101 /* If in state 4, check if the target branch is reached, in order to
11102 change back to state 0. */
11103 if (arm_ccfsm_state == 4)
11105 if (insn == arm_target_insn)
11107 arm_target_insn = NULL;
11108 arm_ccfsm_state = 0;
11110 return;
11113 /* If in state 3, it is possible to repeat the trick, if this insn is an
11114 unconditional branch to a label, and immediately following this branch
11115 is the previous target label which is only used once, and the label this
11116 branch jumps to is not too far off. */
11117 if (arm_ccfsm_state == 3)
11119 if (simplejump_p (insn))
11121 start_insn = next_nonnote_insn (start_insn);
11122 if (GET_CODE (start_insn) == BARRIER)
11124 /* XXX Isn't this always a barrier? */
11125 start_insn = next_nonnote_insn (start_insn);
11127 if (GET_CODE (start_insn) == CODE_LABEL
11128 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11129 && LABEL_NUSES (start_insn) == 1)
11130 reverse = TRUE;
11131 else
11132 return;
11134 else if (GET_CODE (body) == RETURN)
11136 start_insn = next_nonnote_insn (start_insn);
11137 if (GET_CODE (start_insn) == BARRIER)
11138 start_insn = next_nonnote_insn (start_insn);
11139 if (GET_CODE (start_insn) == CODE_LABEL
11140 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11141 && LABEL_NUSES (start_insn) == 1)
11143 reverse = TRUE;
11144 seeking_return = 1;
11146 else
11147 return;
11149 else
11150 return;
11153 gcc_assert (!arm_ccfsm_state || reverse);
11154 if (GET_CODE (insn) != JUMP_INSN)
11155 return;
11157 /* This jump might be paralleled with a clobber of the condition codes;
11158 the jump should always come first.  */
11159 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11160 body = XVECEXP (body, 0, 0);
11162 if (reverse
11163 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11164 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11166 int insns_skipped;
11167 int fail = FALSE, succeed = FALSE;
11168 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11169 int then_not_else = TRUE;
11170 rtx this_insn = start_insn, label = 0;
11172 /* If the jump cannot be done with one instruction, we cannot
11173 conditionally execute the instruction in the inverse case. */
11174 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11176 jump_clobbers = 1;
11177 return;
11180 /* Register the insn jumped to. */
11181 if (reverse)
11183 if (!seeking_return)
11184 label = XEXP (SET_SRC (body), 0);
11186 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11187 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11188 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11190 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11191 then_not_else = FALSE;
11193 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11194 seeking_return = 1;
11195 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11197 seeking_return = 1;
11198 then_not_else = FALSE;
11200 else
11201 gcc_unreachable ();
11203 /* See how many insns this branch skips, and what kind of insns. If all
11204 insns are okay, and the label or unconditional branch to the same
11205 label is not too far away, succeed. */
11206 for (insns_skipped = 0;
11207 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11209 rtx scanbody;
11211 this_insn = next_nonnote_insn (this_insn);
11212 if (!this_insn)
11213 break;
11215 switch (GET_CODE (this_insn))
11217 case CODE_LABEL:
11218 /* Succeed if it is the target label, otherwise fail since
11219 control falls in from somewhere else. */
11220 if (this_insn == label)
11222 if (jump_clobbers)
11224 arm_ccfsm_state = 2;
11225 this_insn = next_nonnote_insn (this_insn);
11227 else
11228 arm_ccfsm_state = 1;
11229 succeed = TRUE;
11231 else
11232 fail = TRUE;
11233 break;
11235 case BARRIER:
11236 /* Succeed if the following insn is the target label.
11237 Otherwise fail.
11238 If return insns are used then the last insn in a function
11239 will be a barrier. */
11240 this_insn = next_nonnote_insn (this_insn);
11241 if (this_insn && this_insn == label)
11243 if (jump_clobbers)
11245 arm_ccfsm_state = 2;
11246 this_insn = next_nonnote_insn (this_insn);
11248 else
11249 arm_ccfsm_state = 1;
11250 succeed = TRUE;
11252 else
11253 fail = TRUE;
11254 break;
11256 case CALL_INSN:
11257 /* The AAPCS says that conditional calls should not be
11258 used since they make interworking inefficient (the
11259 linker can't transform BL<cond> into BLX). That's
11260 only a problem if the machine has BLX. */
11261 if (arm_arch5)
11263 fail = TRUE;
11264 break;
11267 /* Succeed if the following insn is the target label, or
11268 if the following two insns are a barrier and the
11269 target label. */
11270 this_insn = next_nonnote_insn (this_insn);
11271 if (this_insn && GET_CODE (this_insn) == BARRIER)
11272 this_insn = next_nonnote_insn (this_insn);
11274 if (this_insn && this_insn == label
11275 && insns_skipped < max_insns_skipped)
11277 if (jump_clobbers)
11279 arm_ccfsm_state = 2;
11280 this_insn = next_nonnote_insn (this_insn);
11282 else
11283 arm_ccfsm_state = 1;
11284 succeed = TRUE;
11286 else
11287 fail = TRUE;
11288 break;
11290 case JUMP_INSN:
11291 /* If this is an unconditional branch to the same label, succeed.
11292 If it is to another label, do nothing. If it is conditional,
11293 fail. */
11294 /* XXX Probably, the tests for SET and the PC are
11295 unnecessary. */
11297 scanbody = PATTERN (this_insn);
11298 if (GET_CODE (scanbody) == SET
11299 && GET_CODE (SET_DEST (scanbody)) == PC)
11301 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11302 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11304 arm_ccfsm_state = 2;
11305 succeed = TRUE;
11307 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11308 fail = TRUE;
11310 /* Fail if a conditional return is undesirable (e.g. on a
11311 StrongARM), but still allow this if optimizing for size. */
11312 else if (GET_CODE (scanbody) == RETURN
11313 && !use_return_insn (TRUE, NULL)
11314 && !optimize_size)
11315 fail = TRUE;
11316 else if (GET_CODE (scanbody) == RETURN
11317 && seeking_return)
11319 arm_ccfsm_state = 2;
11320 succeed = TRUE;
11322 else if (GET_CODE (scanbody) == PARALLEL)
11324 switch (get_attr_conds (this_insn))
11326 case CONDS_NOCOND:
11327 break;
11328 default:
11329 fail = TRUE;
11330 break;
11333 else
11334 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11336 break;
11338 case INSN:
11339 /* Instructions using or affecting the condition codes make it
11340 fail. */
11341 scanbody = PATTERN (this_insn);
11342 if (!(GET_CODE (scanbody) == SET
11343 || GET_CODE (scanbody) == PARALLEL)
11344 || get_attr_conds (this_insn) != CONDS_NOCOND)
11345 fail = TRUE;
11347 /* A conditional cirrus instruction must be followed by
11348 a non-Cirrus instruction.  However, since we
11349 conditionalize instructions in this function and by
11350 the time we get here we can't add instructions
11351 (nops), because shorten_branches() has already been
11352 called, we will disable conditionalizing Cirrus
11353 instructions to be safe. */
11354 if (GET_CODE (scanbody) != USE
11355 && GET_CODE (scanbody) != CLOBBER
11356 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11357 fail = TRUE;
11358 break;
11360 default:
11361 break;
11364 if (succeed)
11366 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11367 arm_target_label = CODE_LABEL_NUMBER (label);
11368 else
11370 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11372 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11374 this_insn = next_nonnote_insn (this_insn);
11375 gcc_assert (!this_insn
11376 || (GET_CODE (this_insn) != BARRIER
11377 && GET_CODE (this_insn) != CODE_LABEL));
11379 if (!this_insn)
11381 /* Oh dear!  We ran off the end... give up.  */
11382 recog (PATTERN (insn), insn, NULL);
11383 arm_ccfsm_state = 0;
11384 arm_target_insn = NULL;
11385 return;
11387 arm_target_insn = this_insn;
11389 if (jump_clobbers)
11391 gcc_assert (!reverse);
11392 arm_current_cc =
11393 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11394 0), 0), 1));
11395 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11396 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11397 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11398 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11400 else
11402 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11403 what it was. */
11404 if (!reverse)
11405 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11406 0));
11409 if (reverse || then_not_else)
11410 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11413 /* Restore recog_data (getting the attributes of other insns can
11414 destroy this array, but final.c assumes that it remains intact
11415 across this call; since the insn has been recognized already we
11416 call recog directly).  */
11417 recog (PATTERN (insn), insn, NULL);
11421 /* Returns true if REGNO is a valid register
11422 for holding a quantity of type MODE. */
11424 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11426 if (GET_MODE_CLASS (mode) == MODE_CC)
11427 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11429 if (TARGET_THUMB)
11430 /* For the Thumb we only allow values bigger than SImode in
11431 registers 0 - 6, so that there is always a second low
11432 register available to hold the upper part of the value.
11433 We probably ought to ensure that the register is the
11434 start of an even numbered register pair. */
11435 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11437 if (IS_CIRRUS_REGNUM (regno))
11438 /* We have outlawed SI values in Cirrus registers because they
11439 reside in the lower 32 bits, but SF values reside in the
11440 upper 32 bits. This causes gcc all sorts of grief. We can't
11441 even split the registers into pairs because Cirrus SI values
11442 get sign extended to 64bits-- aldyh. */
11443 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11445 if (IS_VFP_REGNUM (regno))
11447 if (mode == SFmode || mode == SImode)
11448 return TRUE;
11450 /* DFmode values are only valid in even register pairs. */
11451 if (mode == DFmode)
11452 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11453 return FALSE;
11456 if (IS_IWMMXT_GR_REGNUM (regno))
11457 return mode == SImode;
11459 if (IS_IWMMXT_REGNUM (regno))
11460 return VALID_IWMMXT_REG_MODE (mode);
11462 /* We allow any value to be stored in the general registers.
11463 Restrict doubleword quantities to even register pairs so that we can
11464 use ldrd. */
11465 if (regno <= LAST_ARM_REGNUM)
11466 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11468 if ( regno == FRAME_POINTER_REGNUM
11469 || regno == ARG_POINTER_REGNUM)
11470 /* We only allow integers in the fake hard registers. */
11471 return GET_MODE_CLASS (mode) == MODE_INT;
11473 /* The only registers left are the FPA registers
11474 which we only allow to hold FP values. */
11475 return GET_MODE_CLASS (mode) == MODE_FLOAT
11476 && regno >= FIRST_FPA_REGNUM
11477 && regno <= LAST_FPA_REGNUM;
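/* Some illustrative consequences of the rules above (assuming an ARM
   target with VFP and TARGET_LDRD): DFmode is allowed in s0 (an even
   VFP register, addressed as d0) but not in s1; DImode is allowed in
   r0 but not in r1, so that ldrd/strd can always be used; and SImode
   is never allowed in an FPA register.  */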
11481 arm_regno_class (int regno)
11483 if (TARGET_THUMB)
11485 if (regno == STACK_POINTER_REGNUM)
11486 return STACK_REG;
11487 if (regno == CC_REGNUM)
11488 return CC_REG;
11489 if (regno < 8)
11490 return LO_REGS;
11491 return HI_REGS;
11494 if ( regno <= LAST_ARM_REGNUM
11495 || regno == FRAME_POINTER_REGNUM
11496 || regno == ARG_POINTER_REGNUM)
11497 return GENERAL_REGS;
11499 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11500 return NO_REGS;
11502 if (IS_CIRRUS_REGNUM (regno))
11503 return CIRRUS_REGS;
11505 if (IS_VFP_REGNUM (regno))
11506 return VFP_REGS;
11508 if (IS_IWMMXT_REGNUM (regno))
11509 return IWMMXT_REGS;
11511 if (IS_IWMMXT_GR_REGNUM (regno))
11512 return IWMMXT_GR_REGS;
11514 return FPA_REGS;
11517 /* Handle a special case when computing the offset
11518 of an argument from the frame pointer. */
11520 arm_debugger_arg_offset (int value, rtx addr)
11522 rtx insn;
11524 /* We are only interested if dbxout_parms() failed to compute the offset. */
11525 if (value != 0)
11526 return 0;
11528 /* We can only cope with the case where the address is held in a register. */
11529 if (GET_CODE (addr) != REG)
11530 return 0;
11532 /* If we are using the frame pointer to point at the argument, then
11533 an offset of 0 is correct. */
11534 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11535 return 0;
11537 /* If we are using the stack pointer to point at the
11538 argument, then an offset of 0 is correct. */
11539 if ((TARGET_THUMB || !frame_pointer_needed)
11540 && REGNO (addr) == SP_REGNUM)
11541 return 0;
11543 /* Oh dear. The argument is pointed to by a register rather
11544 than being held in a register, or being stored at a known
11545 offset from the frame pointer. Since GDB only understands
11546 those two kinds of argument we must translate the address
11547 held in the register into an offset from the frame pointer.
11548 We do this by searching through the insns for the function
11549 looking to see where this register gets its value. If the
11550 register is initialized from the frame pointer plus an offset
11551 then we are in luck and we can continue, otherwise we give up.
11553 This code is exercised by producing debugging information
11554 for a function with arguments like this:
11556 double func (double a, double b, int c, double d) {return d;}
11558 Without this code the stab for parameter 'd' will be set to
11559 an offset of 0 from the frame pointer, rather than 8. */
11561 /* The if() statement says:
11563 If the insn is a normal instruction
11564 and if the insn is setting the value in a register
11565 and if the register being set is the register holding the address of the argument
11566 and if the address is computed by an addition
11567 that involves adding to a register
11568 which is the frame pointer
11569 a constant integer
11571 then... */
11573 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11575 if ( GET_CODE (insn) == INSN
11576 && GET_CODE (PATTERN (insn)) == SET
11577 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11578 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11579 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11580 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11581 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11584 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11586 break;
11590 if (value == 0)
11592 debug_rtx (addr);
11593 warning (0, "unable to compute real location of stacked parameter");
11594 value = 8; /* XXX magic hack */
11597 return value;
11600 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11601 do \
11603 if ((MASK) & insn_flags) \
11604 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11605 BUILT_IN_MD, NULL, NULL_TREE); \
11607 while (0)
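/* A sketch of how this macro is used (the call below also appears
   later in this file):

       def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void,
                     ARM_BUILTIN_WZERO);

   The builtin is only registered with the language hooks when the
   selected architecture actually sets FL_IWMMXT in insn_flags.  */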
11609 struct builtin_description
11611 const unsigned int mask;
11612 const enum insn_code icode;
11613 const char * const name;
11614 const enum arm_builtins code;
11615 const enum rtx_code comparison;
11616 const unsigned int flag;
11619 static const struct builtin_description bdesc_2arg[] =
11621 #define IWMMXT_BUILTIN(code, string, builtin) \
11622 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11623 ARM_BUILTIN_##builtin, 0, 0 },
11625 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11626 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11627 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11628 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11629 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11630 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11631 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11632 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11633 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11634 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11635 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11636 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11637 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11638 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11639 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11640 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11641 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11642 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11643 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11644 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11645 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11646 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11647 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11648 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11649 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11650 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11651 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11652 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11653 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11654 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11655 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11656 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11657 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11658 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11659 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11660 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11661 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11662 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11663 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11664 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11665 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11666 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11667 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11668 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11669 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11670 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11671 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11672 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11673 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11674 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11675 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11676 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11677 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11678 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11679 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11680 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11681 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11682 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11684 #define IWMMXT_BUILTIN2(code, builtin) \
11685 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11687 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11688 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11689 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11690 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11691 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11692 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11693 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11694 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11695 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11696 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11697 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11698 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11699 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11700 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11701 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11702 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11703 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11704 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11705 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11706 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11707 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11708 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11709 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11710 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11711 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11712 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11713 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11714 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11715 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11716 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11717 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11718 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11721 static const struct builtin_description bdesc_1arg[] =
11723 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11724 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11725 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11726 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11727 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11728 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11729 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11730 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11731 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11732 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11733 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11734 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11735 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11736 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11737 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11738 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11739 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11740 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11743 /* Set up all the iWMMXt builtins. This is
11744 not called if TARGET_IWMMXT is zero. */
11746 static void
11747 arm_init_iwmmxt_builtins (void)
11749 const struct builtin_description * d;
11750 size_t i;
11751 tree endlink = void_list_node;
11753 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11754 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11755 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11757 tree int_ftype_int
11758 = build_function_type (integer_type_node,
11759 tree_cons (NULL_TREE, integer_type_node, endlink));
11760 tree v8qi_ftype_v8qi_v8qi_int
11761 = build_function_type (V8QI_type_node,
11762 tree_cons (NULL_TREE, V8QI_type_node,
11763 tree_cons (NULL_TREE, V8QI_type_node,
11764 tree_cons (NULL_TREE,
11765 integer_type_node,
11766 endlink))));
11767 tree v4hi_ftype_v4hi_int
11768 = build_function_type (V4HI_type_node,
11769 tree_cons (NULL_TREE, V4HI_type_node,
11770 tree_cons (NULL_TREE, integer_type_node,
11771 endlink)));
11772 tree v2si_ftype_v2si_int
11773 = build_function_type (V2SI_type_node,
11774 tree_cons (NULL_TREE, V2SI_type_node,
11775 tree_cons (NULL_TREE, integer_type_node,
11776 endlink)));
11777 tree v2si_ftype_di_di
11778 = build_function_type (V2SI_type_node,
11779 tree_cons (NULL_TREE, long_long_integer_type_node,
11780 tree_cons (NULL_TREE, long_long_integer_type_node,
11781 endlink)));
11782 tree di_ftype_di_int
11783 = build_function_type (long_long_integer_type_node,
11784 tree_cons (NULL_TREE, long_long_integer_type_node,
11785 tree_cons (NULL_TREE, integer_type_node,
11786 endlink)));
11787 tree di_ftype_di_int_int
11788 = build_function_type (long_long_integer_type_node,
11789 tree_cons (NULL_TREE, long_long_integer_type_node,
11790 tree_cons (NULL_TREE, integer_type_node,
11791 tree_cons (NULL_TREE,
11792 integer_type_node,
11793 endlink))));
11794 tree int_ftype_v8qi
11795 = build_function_type (integer_type_node,
11796 tree_cons (NULL_TREE, V8QI_type_node,
11797 endlink));
11798 tree int_ftype_v4hi
11799 = build_function_type (integer_type_node,
11800 tree_cons (NULL_TREE, V4HI_type_node,
11801 endlink));
11802 tree int_ftype_v2si
11803 = build_function_type (integer_type_node,
11804 tree_cons (NULL_TREE, V2SI_type_node,
11805 endlink));
11806 tree int_ftype_v8qi_int
11807 = build_function_type (integer_type_node,
11808 tree_cons (NULL_TREE, V8QI_type_node,
11809 tree_cons (NULL_TREE, integer_type_node,
11810 endlink)));
11811 tree int_ftype_v4hi_int
11812 = build_function_type (integer_type_node,
11813 tree_cons (NULL_TREE, V4HI_type_node,
11814 tree_cons (NULL_TREE, integer_type_node,
11815 endlink)));
11816 tree int_ftype_v2si_int
11817 = build_function_type (integer_type_node,
11818 tree_cons (NULL_TREE, V2SI_type_node,
11819 tree_cons (NULL_TREE, integer_type_node,
11820 endlink)));
11821 tree v8qi_ftype_v8qi_int_int
11822 = build_function_type (V8QI_type_node,
11823 tree_cons (NULL_TREE, V8QI_type_node,
11824 tree_cons (NULL_TREE, integer_type_node,
11825 tree_cons (NULL_TREE,
11826 integer_type_node,
11827 endlink))));
11828 tree v4hi_ftype_v4hi_int_int
11829 = build_function_type (V4HI_type_node,
11830 tree_cons (NULL_TREE, V4HI_type_node,
11831 tree_cons (NULL_TREE, integer_type_node,
11832 tree_cons (NULL_TREE,
11833 integer_type_node,
11834 endlink))));
11835 tree v2si_ftype_v2si_int_int
11836 = build_function_type (V2SI_type_node,
11837 tree_cons (NULL_TREE, V2SI_type_node,
11838 tree_cons (NULL_TREE, integer_type_node,
11839 tree_cons (NULL_TREE,
11840 integer_type_node,
11841 endlink))));
11842 /* Miscellaneous. */
11843 tree v8qi_ftype_v4hi_v4hi
11844 = build_function_type (V8QI_type_node,
11845 tree_cons (NULL_TREE, V4HI_type_node,
11846 tree_cons (NULL_TREE, V4HI_type_node,
11847 endlink)));
11848 tree v4hi_ftype_v2si_v2si
11849 = build_function_type (V4HI_type_node,
11850 tree_cons (NULL_TREE, V2SI_type_node,
11851 tree_cons (NULL_TREE, V2SI_type_node,
11852 endlink)));
11853 tree v2si_ftype_v4hi_v4hi
11854 = build_function_type (V2SI_type_node,
11855 tree_cons (NULL_TREE, V4HI_type_node,
11856 tree_cons (NULL_TREE, V4HI_type_node,
11857 endlink)));
11858 tree v2si_ftype_v8qi_v8qi
11859 = build_function_type (V2SI_type_node,
11860 tree_cons (NULL_TREE, V8QI_type_node,
11861 tree_cons (NULL_TREE, V8QI_type_node,
11862 endlink)));
11863 tree v4hi_ftype_v4hi_di
11864 = build_function_type (V4HI_type_node,
11865 tree_cons (NULL_TREE, V4HI_type_node,
11866 tree_cons (NULL_TREE,
11867 long_long_integer_type_node,
11868 endlink)));
11869 tree v2si_ftype_v2si_di
11870 = build_function_type (V2SI_type_node,
11871 tree_cons (NULL_TREE, V2SI_type_node,
11872 tree_cons (NULL_TREE,
11873 long_long_integer_type_node,
11874 endlink)));
11875 tree void_ftype_int_int
11876 = build_function_type (void_type_node,
11877 tree_cons (NULL_TREE, integer_type_node,
11878 tree_cons (NULL_TREE, integer_type_node,
11879 endlink)));
11880 tree di_ftype_void
11881 = build_function_type (long_long_unsigned_type_node, endlink);
11882 tree di_ftype_v8qi
11883 = build_function_type (long_long_integer_type_node,
11884 tree_cons (NULL_TREE, V8QI_type_node,
11885 endlink));
11886 tree di_ftype_v4hi
11887 = build_function_type (long_long_integer_type_node,
11888 tree_cons (NULL_TREE, V4HI_type_node,
11889 endlink));
11890 tree di_ftype_v2si
11891 = build_function_type (long_long_integer_type_node,
11892 tree_cons (NULL_TREE, V2SI_type_node,
11893 endlink));
11894 tree v2si_ftype_v4hi
11895 = build_function_type (V2SI_type_node,
11896 tree_cons (NULL_TREE, V4HI_type_node,
11897 endlink));
11898 tree v4hi_ftype_v8qi
11899 = build_function_type (V4HI_type_node,
11900 tree_cons (NULL_TREE, V8QI_type_node,
11901 endlink));
11903 tree di_ftype_di_v4hi_v4hi
11904 = build_function_type (long_long_unsigned_type_node,
11905 tree_cons (NULL_TREE,
11906 long_long_unsigned_type_node,
11907 tree_cons (NULL_TREE, V4HI_type_node,
11908 tree_cons (NULL_TREE,
11909 V4HI_type_node,
11910 endlink))));
11912 tree di_ftype_v4hi_v4hi
11913 = build_function_type (long_long_unsigned_type_node,
11914 tree_cons (NULL_TREE, V4HI_type_node,
11915 tree_cons (NULL_TREE, V4HI_type_node,
11916 endlink)));
11918 /* Normal vector binops. */
11919 tree v8qi_ftype_v8qi_v8qi
11920 = build_function_type (V8QI_type_node,
11921 tree_cons (NULL_TREE, V8QI_type_node,
11922 tree_cons (NULL_TREE, V8QI_type_node,
11923 endlink)));
11924 tree v4hi_ftype_v4hi_v4hi
11925 = build_function_type (V4HI_type_node,
11926 tree_cons (NULL_TREE, V4HI_type_node,
11927 tree_cons (NULL_TREE, V4HI_type_node,
11928 endlink)));
11929 tree v2si_ftype_v2si_v2si
11930 = build_function_type (V2SI_type_node,
11931 tree_cons (NULL_TREE, V2SI_type_node,
11932 tree_cons (NULL_TREE, V2SI_type_node,
11933 endlink)));
11934 tree di_ftype_di_di
11935 = build_function_type (long_long_unsigned_type_node,
11936 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11937 tree_cons (NULL_TREE,
11938 long_long_unsigned_type_node,
11939 endlink)));
11941 /* Add all builtins that are more or less simple operations on two
11942 operands. */
11943 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11945 /* Use one of the operands; the target can have a different mode for
11946 mask-generating compares. */
11947 enum machine_mode mode;
11948 tree type;
11950 if (d->name == 0)
11951 continue;
11953 mode = insn_data[d->icode].operand[1].mode;
11955 switch (mode)
11957 case V8QImode:
11958 type = v8qi_ftype_v8qi_v8qi;
11959 break;
11960 case V4HImode:
11961 type = v4hi_ftype_v4hi_v4hi;
11962 break;
11963 case V2SImode:
11964 type = v2si_ftype_v2si_v2si;
11965 break;
11966 case DImode:
11967 type = di_ftype_di_di;
11968 break;
11970 default:
11971 gcc_unreachable ();
11974 def_mbuiltin (d->mask, d->name, type, d->code);
11977 /* Add the remaining MMX insns with somewhat more complicated types. */
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11990 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11991 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11997 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11999 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12005 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12006 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12010 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12013 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12015 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12019 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12020 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12022 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12023 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12024 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12025 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12027 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12028 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12029 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12031 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12032 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12033 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12035 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12036 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12037 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12038 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12039 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12040 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12042 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12043 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12044 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12045 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12046 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12047 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12048 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12049 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12050 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12051 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12052 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12053 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12055 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12056 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12057 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12058 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12060 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12061 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12062 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12063 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12064 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12065 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12066 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
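/* A minimal usage sketch, illustrative only: with the builtins registered
   above, user code compiled for iWMMXt can call, e.g., __builtin_arm_wzero
   (no arguments, di_ftype_void) and __builtin_arm_tmovmskb (one V8QI
   argument, int_ftype_v8qi):

       typedef char v8qi __attribute__ ((vector_size (8)));

       unsigned long long zero_acc (void) { return __builtin_arm_wzero (); }
       int byte_mask (v8qi x) { return __builtin_arm_tmovmskb (x); }

   The typedef is an assumption of the example, not something this file
   provides.  */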
12069 static void
12070 arm_init_builtins (void)
12072 if (TARGET_REALLY_IWMMXT)
12073 arm_init_iwmmxt_builtins ();
12076 /* Errors in the source file can cause expand_expr to return const0_rtx
12077 where we expect a vector. To avoid crashing, use one of the vector
12078 clear instructions. */
12080 static rtx
12081 safe_vector_operand (rtx x, enum machine_mode mode)
12083 if (x != const0_rtx)
12084 return x;
12085 x = gen_reg_rtx (mode);
12087 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12088 : gen_rtx_SUBREG (DImode, x, 0)));
12089 return x;
12092 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12094 static rtx
12095 arm_expand_binop_builtin (enum insn_code icode,
12096 tree arglist, rtx target)
12098 rtx pat;
12099 tree arg0 = TREE_VALUE (arglist);
12100 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12101 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12102 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12103 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12104 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12105 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12107 if (VECTOR_MODE_P (mode0))
12108 op0 = safe_vector_operand (op0, mode0);
12109 if (VECTOR_MODE_P (mode1))
12110 op1 = safe_vector_operand (op1, mode1);
12112 if (! target
12113 || GET_MODE (target) != tmode
12114 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12115 target = gen_reg_rtx (tmode);
12117 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12119 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12120 op0 = copy_to_mode_reg (mode0, op0);
12121 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12122 op1 = copy_to_mode_reg (mode1, op1);
12124 pat = GEN_FCN (icode) (target, op0, op1);
12125 if (! pat)
12126 return 0;
12127 emit_insn (pat);
12128 return target;
12131 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12133 static rtx
12134 arm_expand_unop_builtin (enum insn_code icode,
12135 tree arglist, rtx target, int do_load)
12137 rtx pat;
12138 tree arg0 = TREE_VALUE (arglist);
12139 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12140 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12141 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12143 if (! target
12144 || GET_MODE (target) != tmode
12145 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12146 target = gen_reg_rtx (tmode);
12147 if (do_load)
12148 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12149 else
12151 if (VECTOR_MODE_P (mode0))
12152 op0 = safe_vector_operand (op0, mode0);
12154 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12155 op0 = copy_to_mode_reg (mode0, op0);
12158 pat = GEN_FCN (icode) (target, op0);
12159 if (! pat)
12160 return 0;
12161 emit_insn (pat);
12162 return target;
12165 /* Expand an expression EXP that calls a built-in function,
12166 with result going to TARGET if that's convenient
12167 (and in mode MODE if that's convenient).
12168 SUBTARGET may be used as the target for computing one of EXP's operands.
12169 IGNORE is nonzero if the value is to be ignored. */
12171 static rtx
12172 arm_expand_builtin (tree exp,
12173 rtx target,
12174 rtx subtarget ATTRIBUTE_UNUSED,
12175 enum machine_mode mode ATTRIBUTE_UNUSED,
12176 int ignore ATTRIBUTE_UNUSED)
12178 const struct builtin_description * d;
12179 enum insn_code icode;
12180 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12181 tree arglist = TREE_OPERAND (exp, 1);
12182 tree arg0;
12183 tree arg1;
12184 tree arg2;
12185 rtx op0;
12186 rtx op1;
12187 rtx op2;
12188 rtx pat;
12189 int fcode = DECL_FUNCTION_CODE (fndecl);
12190 size_t i;
12191 enum machine_mode tmode;
12192 enum machine_mode mode0;
12193 enum machine_mode mode1;
12194 enum machine_mode mode2;
12196 switch (fcode)
12198 case ARM_BUILTIN_TEXTRMSB:
12199 case ARM_BUILTIN_TEXTRMUB:
12200 case ARM_BUILTIN_TEXTRMSH:
12201 case ARM_BUILTIN_TEXTRMUH:
12202 case ARM_BUILTIN_TEXTRMSW:
12203 case ARM_BUILTIN_TEXTRMUW:
12204 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12205 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12206 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12207 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12208 : CODE_FOR_iwmmxt_textrmw);
12210 arg0 = TREE_VALUE (arglist);
12211 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12212 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12213 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12214 tmode = insn_data[icode].operand[0].mode;
12215 mode0 = insn_data[icode].operand[1].mode;
12216 mode1 = insn_data[icode].operand[2].mode;
12218 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12219 op0 = copy_to_mode_reg (mode0, op0);
12220 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12222 /* @@@ better error message */
12223 error ("selector must be an immediate");
12224 return gen_reg_rtx (tmode);
12226 if (target == 0
12227 || GET_MODE (target) != tmode
12228 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12229 target = gen_reg_rtx (tmode);
12230 pat = GEN_FCN (icode) (target, op0, op1);
12231 if (! pat)
12232 return 0;
12233 emit_insn (pat);
12234 return target;
12236 case ARM_BUILTIN_TINSRB:
12237 case ARM_BUILTIN_TINSRH:
12238 case ARM_BUILTIN_TINSRW:
12239 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12240 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12241 : CODE_FOR_iwmmxt_tinsrw);
12242 arg0 = TREE_VALUE (arglist);
12243 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12244 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12245 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12246 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12247 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12248 tmode = insn_data[icode].operand[0].mode;
12249 mode0 = insn_data[icode].operand[1].mode;
12250 mode1 = insn_data[icode].operand[2].mode;
12251 mode2 = insn_data[icode].operand[3].mode;
12253 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12254 op0 = copy_to_mode_reg (mode0, op0);
12255 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12256 op1 = copy_to_mode_reg (mode1, op1);
12257 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12259 /* @@@ better error message */
12260 error ("selector must be an immediate");
12261 return const0_rtx;
12263 if (target == 0
12264 || GET_MODE (target) != tmode
12265 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12266 target = gen_reg_rtx (tmode);
12267 pat = GEN_FCN (icode) (target, op0, op1, op2);
12268 if (! pat)
12269 return 0;
12270 emit_insn (pat);
12271 return target;
12273 case ARM_BUILTIN_SETWCX:
12274 arg0 = TREE_VALUE (arglist);
12275 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12276 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12277 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12278 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12279 return 0;
12281 case ARM_BUILTIN_GETWCX:
12282 arg0 = TREE_VALUE (arglist);
12283 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12284 target = gen_reg_rtx (SImode);
12285 emit_insn (gen_iwmmxt_tmrc (target, op0));
12286 return target;
12288 case ARM_BUILTIN_WSHUFH:
12289 icode = CODE_FOR_iwmmxt_wshufh;
12290 arg0 = TREE_VALUE (arglist);
12291 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12292 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12293 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12294 tmode = insn_data[icode].operand[0].mode;
12295 mode1 = insn_data[icode].operand[1].mode;
12296 mode2 = insn_data[icode].operand[2].mode;
12298 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12299 op0 = copy_to_mode_reg (mode1, op0);
12300 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12302 /* @@@ better error message */
12303 error ("mask must be an immediate");
12304 return const0_rtx;
12306 if (target == 0
12307 || GET_MODE (target) != tmode
12308 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12309 target = gen_reg_rtx (tmode);
12310 pat = GEN_FCN (icode) (target, op0, op1);
12311 if (! pat)
12312 return 0;
12313 emit_insn (pat);
12314 return target;
12316 case ARM_BUILTIN_WSADB:
12317 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12318 case ARM_BUILTIN_WSADH:
12319 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12320 case ARM_BUILTIN_WSADBZ:
12321 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12322 case ARM_BUILTIN_WSADHZ:
12323 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12325 /* Several three-argument builtins. */
12326 case ARM_BUILTIN_WMACS:
12327 case ARM_BUILTIN_WMACU:
12328 case ARM_BUILTIN_WALIGN:
12329 case ARM_BUILTIN_TMIA:
12330 case ARM_BUILTIN_TMIAPH:
12331 case ARM_BUILTIN_TMIATT:
12332 case ARM_BUILTIN_TMIATB:
12333 case ARM_BUILTIN_TMIABT:
12334 case ARM_BUILTIN_TMIABB:
12335 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12336 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12337 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12338 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12339 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12340 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12341 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12342 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12343 : CODE_FOR_iwmmxt_walign);
12344 arg0 = TREE_VALUE (arglist);
12345 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12346 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12347 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12348 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12349 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12350 tmode = insn_data[icode].operand[0].mode;
12351 mode0 = insn_data[icode].operand[1].mode;
12352 mode1 = insn_data[icode].operand[2].mode;
12353 mode2 = insn_data[icode].operand[3].mode;
12355 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12356 op0 = copy_to_mode_reg (mode0, op0);
12357 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12358 op1 = copy_to_mode_reg (mode1, op1);
12359 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12360 op2 = copy_to_mode_reg (mode2, op2);
12361 if (target == 0
12362 || GET_MODE (target) != tmode
12363 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12364 target = gen_reg_rtx (tmode);
12365 pat = GEN_FCN (icode) (target, op0, op1, op2);
12366 if (! pat)
12367 return 0;
12368 emit_insn (pat);
12369 return target;
12371 case ARM_BUILTIN_WZERO:
12372 target = gen_reg_rtx (DImode);
12373 emit_insn (gen_iwmmxt_clrdi (target));
12374 return target;
12376 default:
12377 break;
12380 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12381 if (d->code == (const enum arm_builtins) fcode)
12382 return arm_expand_binop_builtin (d->icode, arglist, target);
12384 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12385 if (d->code == (const enum arm_builtins) fcode)
12386 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12388 /* @@@ Should really do something sensible here. */
12389 return NULL_RTX;
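/* Dispatch sketch for the fall-through tables above (the path described is
   schematic): a two-operand builtin whose descriptor lives in bdesc_2arg is
   not matched by any of the explicit cases, so it reaches the first loop,
   which hands the table's insn code to arm_expand_binop_builtin; that
   routine forces the operands into registers accepted by the insn's
   predicates and emits GEN_FCN (icode) (target, op0, op1).  One-operand
   builtins take the same route through bdesc_1arg and
   arm_expand_unop_builtin.  */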
12392 /* Return the number (counting from 0) of
12393 the least significant set bit in MASK. */
12395 inline static int
12396 number_of_first_bit_set (unsigned mask)
12398 int bit;
12400 for (bit = 0;
12401 (mask & (1 << bit)) == 0;
12402 ++bit)
12403 continue;
12405 return bit;
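/* For example, number_of_first_bit_set (0x18) returns 3, since bit 3 is the
   lowest bit set in the mask.  For nonzero MASK this is equivalent to the
   GCC builtin __builtin_ctz (mask); the plain loop above simply has no such
   dependency.  */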
12408 /* Emit code to push or pop registers to or from the stack. F is the
12409 assembly file. MASK is the registers to push or pop. PUSH is
12410 nonzero if we should push, and zero if we should pop. For debugging
12411 output, if pushing, adjust CFA_OFFSET by the amount of space added
12412 to the stack. REAL_REGS should have the same number of bits set as
12413 MASK, and will be used instead (in the same order) to describe which
12414 registers were saved - this is used to mark the save slots when we
12415 push high registers after moving them to low registers. */
12416 static void
12417 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12418 unsigned long real_regs)
12420 int regno;
12421 int lo_mask = mask & 0xFF;
12422 int pushed_words = 0;
12424 gcc_assert (mask);
12426 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12428 /* Special case. Do not generate a POP PC statement here, do it in
12429 thumb_exit() */
12430 thumb_exit (f, -1);
12431 return;
12434 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12436 /* Look at the low registers first. */
12437 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12439 if (lo_mask & 1)
12441 asm_fprintf (f, "%r", regno);
12443 if ((lo_mask & ~1) != 0)
12444 fprintf (f, ", ");
12446 pushed_words++;
12450 if (push && (mask & (1 << LR_REGNUM)))
12452 /* Catch pushing the LR. */
12453 if (mask & 0xFF)
12454 fprintf (f, ", ");
12456 asm_fprintf (f, "%r", LR_REGNUM);
12458 pushed_words++;
12460 else if (!push && (mask & (1 << PC_REGNUM)))
12462 /* Catch popping the PC. */
12463 if (TARGET_INTERWORK || TARGET_BACKTRACE
12464 || current_function_calls_eh_return)
12466 /* The PC is never popped directly; instead
12467 it is popped into r3 and then BX is used. */
12468 fprintf (f, "}\n");
12470 thumb_exit (f, -1);
12472 return;
12474 else
12476 if (mask & 0xFF)
12477 fprintf (f, ", ");
12479 asm_fprintf (f, "%r", PC_REGNUM);
12483 fprintf (f, "}\n");
12485 if (push && pushed_words && dwarf2out_do_frame ())
12487 char *l = dwarf2out_cfi_label ();
12488 int pushed_mask = real_regs;
12490 *cfa_offset += pushed_words * 4;
12491 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12493 pushed_words = 0;
12494 pushed_mask = real_regs;
12495 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12497 if (pushed_mask & 1)
12498 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
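/* As an illustration (assembly shown schematically, not quoted from any
   particular compilation), thumb_pushpop (f, 0x40f0, 1, &cfa_offset, 0x40f0)
   writes

       push	{r4, r5, r6, r7, lr}

   to the assembly file, advances *CFA_OFFSET by 20 bytes, and, when DWARF
   frame output is enabled, records one save slot per register named in
   REAL_REGS.  */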
12503 /* Generate code to return from a thumb function.
12504 If 'reg_containing_return_addr' is -1, then the return address is
12505 actually on the stack, at the stack pointer. */
12506 static void
12507 thumb_exit (FILE *f, int reg_containing_return_addr)
12509 unsigned regs_available_for_popping;
12510 unsigned regs_to_pop;
12511 int pops_needed;
12512 unsigned available;
12513 unsigned required;
12514 int mode;
12515 int size;
12516 int restore_a4 = FALSE;
12518 /* Compute the registers we need to pop. */
12519 regs_to_pop = 0;
12520 pops_needed = 0;
12522 if (reg_containing_return_addr == -1)
12524 regs_to_pop |= 1 << LR_REGNUM;
12525 ++pops_needed;
12528 if (TARGET_BACKTRACE)
12530 /* Restore the (ARM) frame pointer and stack pointer. */
12531 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12532 pops_needed += 2;
12535 /* If there is nothing to pop then just emit the BX instruction and
12536 return. */
12537 if (pops_needed == 0)
12539 if (current_function_calls_eh_return)
12540 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12542 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12543 return;
12545 /* Otherwise if we are not supporting interworking and we have not created
12546 a backtrace structure and the function was not entered in ARM mode then
12547 just pop the return address straight into the PC. */
12548 else if (!TARGET_INTERWORK
12549 && !TARGET_BACKTRACE
12550 && !is_called_in_ARM_mode (current_function_decl)
12551 && !current_function_calls_eh_return)
12553 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12554 return;
12557 /* Find out how many of the (return) argument registers we can corrupt. */
12558 regs_available_for_popping = 0;
12560 /* If returning via __builtin_eh_return, the bottom three registers
12561 all contain information needed for the return. */
12562 if (current_function_calls_eh_return)
12563 size = 12;
12564 else
12566 /* We can deduce the registers used from the function's
12567 return value.  This is more reliable than examining
12568 regs_ever_live[] because that will be set if the register is
12569 ever used in the function, not just if the register is used
12570 to hold a return value. */
12572 if (current_function_return_rtx != 0)
12573 mode = GET_MODE (current_function_return_rtx);
12574 else
12575 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12577 size = GET_MODE_SIZE (mode);
12579 if (size == 0)
12581 /* In a void function we can use any argument register.
12582 In a function that returns a structure on the stack
12583 we can use the second and third argument registers. */
12584 if (mode == VOIDmode)
12585 regs_available_for_popping =
12586 (1 << ARG_REGISTER (1))
12587 | (1 << ARG_REGISTER (2))
12588 | (1 << ARG_REGISTER (3));
12589 else
12590 regs_available_for_popping =
12591 (1 << ARG_REGISTER (2))
12592 | (1 << ARG_REGISTER (3));
12594 else if (size <= 4)
12595 regs_available_for_popping =
12596 (1 << ARG_REGISTER (2))
12597 | (1 << ARG_REGISTER (3));
12598 else if (size <= 8)
12599 regs_available_for_popping =
12600 (1 << ARG_REGISTER (3));
12603 /* Match registers to be popped with registers into which we pop them. */
12604 for (available = regs_available_for_popping,
12605 required = regs_to_pop;
12606 required != 0 && available != 0;
12607 available &= ~(available & - available),
12608 required &= ~(required & - required))
12609 -- pops_needed;
12611 /* If we have any popping registers left over, remove them. */
12612 if (available > 0)
12613 regs_available_for_popping &= ~available;
12615 /* Otherwise if we need another popping register we can use
12616 the fourth argument register. */
12617 else if (pops_needed)
12619 /* If we have not found any free argument registers and
12620 reg a4 contains the return address, we must move it. */
12621 if (regs_available_for_popping == 0
12622 && reg_containing_return_addr == LAST_ARG_REGNUM)
12624 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12625 reg_containing_return_addr = LR_REGNUM;
12627 else if (size > 12)
12629 /* Register a4 is being used to hold part of the return value,
12630 but we have dire need of a free, low register. */
12631 restore_a4 = TRUE;
12633 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12636 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12638 /* The fourth argument register is available. */
12639 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12641 --pops_needed;
12645 /* Pop as many registers as we can. */
12646 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12647 regs_available_for_popping);
12649 /* Process the registers we popped. */
12650 if (reg_containing_return_addr == -1)
12652 /* The return address was popped into the lowest numbered register. */
12653 regs_to_pop &= ~(1 << LR_REGNUM);
12655 reg_containing_return_addr =
12656 number_of_first_bit_set (regs_available_for_popping);
12658 /* Remove this register from the mask of available registers, so that
12659 the return address will not be corrupted by further pops. */
12660 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12663 /* If we popped other registers then handle them here. */
12664 if (regs_available_for_popping)
12666 int frame_pointer;
12668 /* Work out which register currently contains the frame pointer. */
12669 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12671 /* Move it into the correct place. */
12672 asm_fprintf (f, "\tmov\t%r, %r\n",
12673 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12675 /* (Temporarily) remove it from the mask of popped registers. */
12676 regs_available_for_popping &= ~(1 << frame_pointer);
12677 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12679 if (regs_available_for_popping)
12681 int stack_pointer;
12683 /* We popped the stack pointer as well;
12684 find the register that contains it. */
12685 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12687 /* Move it into the stack register. */
12688 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12690 /* At this point we have popped all necessary registers, so
12691 do not worry about restoring regs_available_for_popping
12692 to its correct value:
12694 assert (pops_needed == 0)
12695 assert (regs_available_for_popping == (1 << frame_pointer))
12696 assert (regs_to_pop == (1 << STACK_POINTER)) */
12698 else
12700 /* Since we have just moved the popped value into the frame
12701 pointer, the popping register is available for reuse, and
12702 we know that we still have the stack pointer left to pop. */
12703 regs_available_for_popping |= (1 << frame_pointer);
12707 /* If we still have registers left on the stack, but we no longer have
12708 any registers into which we can pop them, then we must move the return
12709 address into the link register and make available the register that
12710 contained it. */
12711 if (regs_available_for_popping == 0 && pops_needed > 0)
12713 regs_available_for_popping |= 1 << reg_containing_return_addr;
12715 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12716 reg_containing_return_addr);
12718 reg_containing_return_addr = LR_REGNUM;
12721 /* If we have registers left on the stack then pop some more.
12722 We know that at most we will want to pop FP and SP. */
12723 if (pops_needed > 0)
12725 int popped_into;
12726 int move_to;
12728 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12729 regs_available_for_popping);
12731 /* We have popped either FP or SP.
12732 Move whichever one it is into the correct register. */
12733 popped_into = number_of_first_bit_set (regs_available_for_popping);
12734 move_to = number_of_first_bit_set (regs_to_pop);
12736 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12738 regs_to_pop &= ~(1 << move_to);
12740 --pops_needed;
12743 /* If we still have not popped everything then we must have only
12744 had one register available to us and we are now popping the SP. */
12745 if (pops_needed > 0)
12747 int popped_into;
12749 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12750 regs_available_for_popping);
12752 popped_into = number_of_first_bit_set (regs_available_for_popping);
12754 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12756 assert (regs_to_pop == (1 << STACK_POINTER))
12757 assert (pops_needed == 1)
12761 /* If necessary restore the a4 register. */
12762 if (restore_a4)
12764 if (reg_containing_return_addr != LR_REGNUM)
12766 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12767 reg_containing_return_addr = LR_REGNUM;
12770 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12773 if (current_function_calls_eh_return)
12774 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12776 /* Return to caller. */
12777 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
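/* Two schematic outcomes of the logic above: a void function with no
   interworking, no backtrace structure and no eh_return, whose return
   address is still on the stack, exits with just

       pop	{pc}

   while the same function compiled for interworking pops the return address
   into a free argument register first, roughly

       pop	{r0}
       bx	r0
*/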
12781 void
12782 thumb_final_prescan_insn (rtx insn)
12784 if (flag_print_asm_name)
12785 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12786 INSN_ADDRESSES (INSN_UID (insn)));
12789 int
12790 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12792 unsigned HOST_WIDE_INT mask = 0xff;
12793 int i;
12795 if (val == 0) /* XXX */
12796 return 0;
12798 for (i = 0; i < 25; i++)
12799 if ((val & (mask << i)) == val)
12800 return 1;
12802 return 0;
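/* For example, 0x00ff0000 (0xff << 16) and 0x00003fc0 (0xff << 6) are
   accepted, while 0x00000101 is rejected: its set bits span nine bit
   positions, so no 8-bit value shifted left by 0..24 bits can produce it.  */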
12805 /* Returns nonzero if the current function contains,
12806 or might contain a far jump. */
12807 static int
12808 thumb_far_jump_used_p (void)
12810 rtx insn;
12812 /* This test is only important for leaf functions. */
12813 /* assert (!leaf_function_p ()); */
12815 /* If we have already decided that far jumps may be used,
12816 do not bother checking again, and always return true even if
12817 it turns out that they are not being used. Once we have made
12818 the decision that far jumps are present (and that hence the link
12819 register will be pushed onto the stack) we cannot go back on it. */
12820 if (cfun->machine->far_jump_used)
12821 return 1;
12823 /* If this function is not being called from the prologue/epilogue
12824 generation code then it must be being called from the
12825 INITIAL_ELIMINATION_OFFSET macro. */
12826 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12828 /* In this case we know that we are being asked about the elimination
12829 of the arg pointer register. If that register is not being used,
12830 then there are no arguments on the stack, and we do not have to
12831 worry that a far jump might force the prologue to push the link
12832 register, changing the stack offsets. In this case we can just
12833 return false, since the presence of far jumps in the function will
12834 not affect stack offsets.
12836 If the arg pointer is live (or if it was live, but has now been
12837 eliminated and so set to dead) then we do have to test to see if
12838 the function might contain a far jump. This test can lead to some
12839 false negatives, since before reload is completed the length of
12840 branch instructions is not known, so gcc defaults to returning their
12841 longest length, which in turn sets the far jump attribute to true.
12843 A false negative will not result in bad code being generated, but it
12844 will result in a needless push and pop of the link register. We
12845 hope that this does not occur too often.
12847 If we need doubleword stack alignment this could affect the other
12848 elimination offsets so we can't risk getting it wrong. */
12849 if (regs_ever_live [ARG_POINTER_REGNUM])
12850 cfun->machine->arg_pointer_live = 1;
12851 else if (!cfun->machine->arg_pointer_live)
12852 return 0;
12855 /* Check to see if the function contains a branch
12856 insn with the far jump attribute set. */
12857 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12859 if (GET_CODE (insn) == JUMP_INSN
12860 /* Ignore tablejump patterns. */
12861 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12862 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12863 && get_attr_far_jump (insn) == FAR_JUMP_YES
12866 /* Record the fact that we have decided that
12867 the function does use far jumps. */
12868 cfun->machine->far_jump_used = 1;
12869 return 1;
12873 return 0;
12876 /* Return nonzero if FUNC must be entered in ARM mode. */
12877 int
12878 is_called_in_ARM_mode (tree func)
12880 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
12882 /* Ignore the problem about functions whose address is taken. */
12883 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12884 return TRUE;
12886 #ifdef ARM_PE
12887 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12888 #else
12889 return FALSE;
12890 #endif
12893 /* The bits which aren't usefully expanded as rtl. */
12894 const char *
12895 thumb_unexpanded_epilogue (void)
12897 int regno;
12898 unsigned long live_regs_mask = 0;
12899 int high_regs_pushed = 0;
12900 int had_to_push_lr;
12901 int size;
12902 int mode;
12904 if (return_used_this_function)
12905 return "";
12907 if (IS_NAKED (arm_current_func_type ()))
12908 return "";
12910 live_regs_mask = thumb_compute_save_reg_mask ();
12911 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12913 /* We can deduce the registers used from the function's return value.
12914 This is more reliable than examining regs_ever_live[] because that
12915 will be set if the register is ever used in the function, not just if
12916 the register is used to hold a return value. */
12918 if (current_function_return_rtx != 0)
12919 mode = GET_MODE (current_function_return_rtx);
12920 else
12921 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12923 size = GET_MODE_SIZE (mode);
12925 /* The prologue may have pushed some high registers to use as
12926 work registers, e.g. the testsuite file:
12927 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12928 compiles to produce:
12929 push {r4, r5, r6, r7, lr}
12930 mov r7, r9
12931 mov r6, r8
12932 push {r6, r7}
12933 as part of the prologue.  We have to undo that pushing here. */
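  /* For that example the undoing emitted below comes out roughly as

	 pop	{r2, r3}
	 mov	r8, r2
	 mov	r9, r3
	 pop	{r4, r5, r6, r7, pc}

     though the exact low registers used depend on the size of the return
     value, which determines which of them are free to be clobbered.  */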
12935 if (high_regs_pushed)
12937 unsigned long mask = live_regs_mask & 0xff;
12938 int next_hi_reg;
12940 /* The available low registers depend on the size of the value we are
12941 returning. */
12942 if (size <= 12)
12943 mask |= 1 << 3;
12944 if (size <= 8)
12945 mask |= 1 << 2;
12947 if (mask == 0)
12948 /* Oh dear! We have no low registers into which we can pop
12949 high registers! */
12950 internal_error
12951 ("no low registers available for popping high registers");
12953 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12954 if (live_regs_mask & (1 << next_hi_reg))
12955 break;
12957 while (high_regs_pushed)
12959 /* Find lo register(s) into which the high register(s) can
12960 be popped. */
12961 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12963 if (mask & (1 << regno))
12964 high_regs_pushed--;
12965 if (high_regs_pushed == 0)
12966 break;
12969 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12971 /* Pop the values into the low register(s). */
12972 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12974 /* Move the value(s) into the high registers. */
12975 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12977 if (mask & (1 << regno))
12979 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12980 regno);
12982 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12983 if (live_regs_mask & (1 << next_hi_reg))
12984 break;
12988 live_regs_mask &= ~0x0f00;
12991 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12992 live_regs_mask &= 0xff;
12994 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12996 /* Pop the return address into the PC. */
12997 if (had_to_push_lr)
12998 live_regs_mask |= 1 << PC_REGNUM;
13000 /* Either no argument registers were pushed or a backtrace
13001 structure was created which includes an adjusted stack
13002 pointer, so just pop everything. */
13003 if (live_regs_mask)
13004 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13005 live_regs_mask);
13007 /* We have either just popped the return address into the
13008 PC, or it was kept in LR for the entire function. */
13009 if (!had_to_push_lr)
13010 thumb_exit (asm_out_file, LR_REGNUM);
13012 else
13014 /* Pop everything but the return address. */
13015 if (live_regs_mask)
13016 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13017 live_regs_mask);
13019 if (had_to_push_lr)
13021 if (size > 12)
13023 /* We have no free low regs, so save one. */
13024 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13025 LAST_ARG_REGNUM);
13028 /* Get the return address into a temporary register. */
13029 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13030 1 << LAST_ARG_REGNUM);
13032 if (size > 12)
13034 /* Move the return address to lr. */
13035 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13036 LAST_ARG_REGNUM);
13037 /* Restore the low register. */
13038 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13039 IP_REGNUM);
13040 regno = LR_REGNUM;
13042 else
13043 regno = LAST_ARG_REGNUM;
13045 else
13046 regno = LR_REGNUM;
13048 /* Remove the argument registers that were pushed onto the stack. */
13049 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13050 SP_REGNUM, SP_REGNUM,
13051 current_function_pretend_args_size);
13053 thumb_exit (asm_out_file, regno);
13056 return "";
13059 /* Functions to save and restore machine-specific function data. */
13060 static struct machine_function *
13061 arm_init_machine_status (void)
13063 struct machine_function *machine;
13064 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13066 #if ARM_FT_UNKNOWN != 0
13067 machine->func_type = ARM_FT_UNKNOWN;
13068 #endif
13069 return machine;
13072 /* Return an RTX indicating where the return address to the
13073 calling function can be found. */
13074 rtx
13075 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13077 if (count != 0)
13078 return NULL_RTX;
13080 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13083 /* Do anything needed before RTL is emitted for each function. */
13084 void
13085 arm_init_expanders (void)
13087 /* Arrange to initialize and mark the machine per-function status. */
13088 init_machine_status = arm_init_machine_status;
13090 /* This is to stop the combine pass optimizing away the alignment
13091 adjustment of va_arg. */
13092 /* ??? It is claimed that this should not be necessary. */
13093 if (cfun)
13094 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13098 /* Like arm_compute_initial_elimination_offset.  Simpler because
13099 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13101 HOST_WIDE_INT
13102 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13104 arm_stack_offsets *offsets;
13106 offsets = arm_get_frame_offsets ();
13108 switch (from)
13110 case ARG_POINTER_REGNUM:
13111 switch (to)
13113 case STACK_POINTER_REGNUM:
13114 return offsets->outgoing_args - offsets->saved_args;
13116 case FRAME_POINTER_REGNUM:
13117 return offsets->soft_frame - offsets->saved_args;
13119 case THUMB_HARD_FRAME_POINTER_REGNUM:
13120 case ARM_HARD_FRAME_POINTER_REGNUM:
13121 return offsets->saved_regs - offsets->saved_args;
13123 default:
13124 gcc_unreachable ();
13126 break;
13128 case FRAME_POINTER_REGNUM:
13129 switch (to)
13131 case STACK_POINTER_REGNUM:
13132 return offsets->outgoing_args - offsets->soft_frame;
13134 case THUMB_HARD_FRAME_POINTER_REGNUM:
13135 case ARM_HARD_FRAME_POINTER_REGNUM:
13136 return offsets->saved_regs - offsets->soft_frame;
13138 default:
13139 gcc_unreachable ();
13141 break;
13143 default:
13144 gcc_unreachable ();
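/* Worked example with hypothetical frame offsets saved_args = 0,
   saved_regs = 24, soft_frame = 24 and outgoing_args = 40: eliminating
   ARG_POINTER_REGNUM to STACK_POINTER_REGNUM gives 40 - 0 = 40, while
   eliminating it to the hard frame pointer gives 24 - 0 = 24.  */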
13149 /* Generate the rest of a function's prologue. */
13150 void
13151 thumb_expand_prologue (void)
13153 rtx insn, dwarf;
13155 HOST_WIDE_INT amount;
13156 arm_stack_offsets *offsets;
13157 unsigned long func_type;
13158 int regno;
13159 unsigned long live_regs_mask;
13161 func_type = arm_current_func_type ();
13163 /* Naked functions don't have prologues. */
13164 if (IS_NAKED (func_type))
13165 return;
13167 if (IS_INTERRUPT (func_type))
13169 error ("interrupt Service Routines cannot be coded in Thumb mode");
13170 return;
13173 live_regs_mask = thumb_compute_save_reg_mask ();
13174 /* Load the pic register before setting the frame pointer,
13175 so we can use r7 as a temporary work register. */
13176 if (flag_pic)
13177 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13179 offsets = arm_get_frame_offsets ();
13181 if (frame_pointer_needed)
13183 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13184 stack_pointer_rtx));
13185 RTX_FRAME_RELATED_P (insn) = 1;
13187 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13188 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13189 stack_pointer_rtx);
13191 amount = offsets->outgoing_args - offsets->saved_regs;
13192 if (amount)
13194 if (amount < 512)
13196 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13197 GEN_INT (- amount)));
13198 RTX_FRAME_RELATED_P (insn) = 1;
13200 else
13202 rtx reg;
13204 /* The stack decrement is too big for an immediate value in a single
13205 insn. In theory we could issue multiple subtracts, but after
13206 three of them it becomes more space efficient to place the full
13207 value in the constant pool and load into a register. (Also the
13208 ARM debugger really likes to see only one stack decrement per
13209 function). So instead we look for a scratch register into which
13210 we can load the decrement, and then we subtract this from the
13211 stack pointer. Unfortunately on the thumb the only available
13212 scratch registers are the argument registers, and we cannot use
13213 these as they may hold arguments to the function. Instead we
13214 attempt to locate a call preserved register which is used by this
13215 function. If we can find one, then we know that it will have
13216 been pushed at the start of the prologue and so we can corrupt
13217 it now. */
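  /* So, for a hypothetical 1024 byte decrement in a function that has
     already saved r4, the sequence built below comes out roughly as

	 ldr	r4, .Lpool	@ .Lpool: .word -1024
	 add	sp, r4

     with the fallback path (temporarily spilling r7 to IP) used only when
     no suitable call-saved low register was pushed by this function.  */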
13218 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13219 if (live_regs_mask & (1 << regno)
13220 && !(frame_pointer_needed
13221 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13222 break;
13224 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13226 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13228 /* Choose an arbitrary, non-argument low register. */
13229 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13231 /* Save it by copying it into a high, scratch register. */
13232 emit_insn (gen_movsi (spare, reg));
13233 /* Add a USE to stop propagate_one_insn() from barfing. */
13234 emit_insn (gen_prologue_use (spare));
13236 /* Decrement the stack. */
13237 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13238 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13239 stack_pointer_rtx, reg));
13240 RTX_FRAME_RELATED_P (insn) = 1;
13241 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13242 plus_constant (stack_pointer_rtx,
13243 -amount));
13244 RTX_FRAME_RELATED_P (dwarf) = 1;
13245 REG_NOTES (insn)
13246 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13247 REG_NOTES (insn));
13249 /* Restore the low register's original value. */
13250 emit_insn (gen_movsi (reg, spare));
13252 /* Emit a USE of the restored scratch register, so that flow
13253 analysis will not consider the restore redundant. The
13254 register won't be used again in this function and isn't
13255 restored by the epilogue. */
13256 emit_insn (gen_prologue_use (reg));
13258 else
13260 reg = gen_rtx_REG (SImode, regno);
13262 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13264 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13265 stack_pointer_rtx, reg));
13266 RTX_FRAME_RELATED_P (insn) = 1;
13267 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13268 plus_constant (stack_pointer_rtx,
13269 -amount));
13270 RTX_FRAME_RELATED_P (dwarf) = 1;
13271 REG_NOTES (insn)
13272 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13273 REG_NOTES (insn));
13276 /* If the frame pointer is needed, emit a special barrier that
13277 will prevent the scheduler from moving stores to the frame
13278 before the stack adjustment. */
13279 if (frame_pointer_needed)
13280 emit_insn (gen_stack_tie (stack_pointer_rtx,
13281 hard_frame_pointer_rtx));
13284 if (current_function_profile || !TARGET_SCHED_PROLOG)
13285 emit_insn (gen_blockage ());
13287 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13288 if (live_regs_mask & 0xff)
13289 cfun->machine->lr_save_eliminated = 0;
13291 /* If the link register is being kept alive, with the return address in it,
13292 then make sure that it does not get reused by the ce2 pass. */
13293 if (cfun->machine->lr_save_eliminated)
13294 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13298 void
13299 thumb_expand_epilogue (void)
13301 HOST_WIDE_INT amount;
13302 arm_stack_offsets *offsets;
13303 int regno;
13305 /* Naked functions don't have epilogues. */
13306 if (IS_NAKED (arm_current_func_type ()))
13307 return;
13309 offsets = arm_get_frame_offsets ();
13310 amount = offsets->outgoing_args - offsets->saved_regs;
13312 if (frame_pointer_needed)
13313 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13314 else if (amount)
13316 if (amount < 512)
13317 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13318 GEN_INT (amount)));
13319 else
13321 /* r3 is always free in the epilogue. */
13322 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13324 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13325 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13329 /* Emit a USE (stack_pointer_rtx), so that
13330 the stack adjustment will not be deleted. */
13331 emit_insn (gen_prologue_use (stack_pointer_rtx));
13333 if (current_function_profile || !TARGET_SCHED_PROLOG)
13334 emit_insn (gen_blockage ());
13336 /* Emit a clobber for each register that will be restored in the epilogue,
13337 so that flow2 will get register lifetimes correct. */
13338 for (regno = 0; regno < 13; regno++)
13339 if (regs_ever_live[regno] && !call_used_regs[regno])
13340 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13342 if (! regs_ever_live[LR_REGNUM])
13343 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13346 static void
13347 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13349 unsigned long live_regs_mask = 0;
13350 unsigned long l_mask;
13351 unsigned high_regs_pushed = 0;
13352 int cfa_offset = 0;
13353 int regno;
13355 if (IS_NAKED (arm_current_func_type ()))
13356 return;
13358 if (is_called_in_ARM_mode (current_function_decl))
13360 const char * name;
13362 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13363 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13364 == SYMBOL_REF);
13365 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13367 /* Generate code sequence to switch us into Thumb mode. */
13368 /* The .code 32 directive has already been emitted by
13369 ASM_DECLARE_FUNCTION_NAME. */
13370 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13371 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13373 /* Generate a label, so that the debugger will notice the
13374 change in instruction sets. This label is also used by
13375 the assembler to bypass the ARM code when this function
13376 is called from a Thumb encoded function elsewhere in the
13377 same file. Hence the definition of STUB_NAME here must
13378 agree with the definition in gas/config/tc-arm.c. */
13380 #define STUB_NAME ".real_start_of"
13382 fprintf (f, "\t.code\t16\n");
13383 #ifdef ARM_PE
13384 if (arm_dllexport_name_p (name))
13385 name = arm_strip_name_encoding (name);
13386 #endif
13387 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13388 fprintf (f, "\t.thumb_func\n");
13389 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
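  /* Schematically, for a function "foo" (an illustrative name), the stub
     assembled by ASM_DECLARE_FUNCTION_NAME together with the code above is

	 .code	32
     foo:
	 orr	ip, pc, #1
	 bx	ip
	 .code	16
	 .globl	.real_start_of<foo>
	 .thumb_func
     .real_start_of<foo>:

     where <foo> stands for the function's assembler name after the %U
     user-label prefix has been applied.  */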
13392 if (current_function_pretend_args_size)
13394 if (cfun->machine->uses_anonymous_args)
13396 int num_pushes;
13398 fprintf (f, "\tpush\t{");
13400 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13402 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13403 regno <= LAST_ARG_REGNUM;
13404 regno++)
13405 asm_fprintf (f, "%r%s", regno,
13406 regno == LAST_ARG_REGNUM ? "" : ", ");
13408 fprintf (f, "}\n");
13410 else
13411 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13412 SP_REGNUM, SP_REGNUM,
13413 current_function_pretend_args_size);
13415 /* We don't need to record the stores for unwinding (would it
13416 help the debugger any if we did?), but record the change in
13417 the stack pointer. */
13418 if (dwarf2out_do_frame ())
13420 char *l = dwarf2out_cfi_label ();
13422 cfa_offset = cfa_offset + current_function_pretend_args_size;
13423 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13427 /* Get the registers we are going to push. */
13428 live_regs_mask = thumb_compute_save_reg_mask ();
13429 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13430 l_mask = live_regs_mask & 0x40ff;
13431 /* Then count how many other high registers will need to be pushed. */
13432 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13434 if (TARGET_BACKTRACE)
13436 unsigned offset;
13437 unsigned work_register;
13439 /* We have been asked to create a stack backtrace structure.
13440 The code looks like this:
13442 0 .align 2
13443 0 func:
13444 0 sub SP, #16 Reserve space for 4 registers.
13445 2 push {R7} Push low registers.
13446 4 add R7, SP, #20 Get the stack pointer before the push.
13447 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13448 8 mov R7, PC Get hold of the start of this code plus 12.
13449 10 str R7, [SP, #16] Store it.
13450 12 mov R7, FP Get hold of the current frame pointer.
13451 14 str R7, [SP, #4] Store it.
13452 16 mov R7, LR Get hold of the current return address.
13453 18 str R7, [SP, #12] Store it.
13454 20 add R7, SP, #16 Point at the start of the backtrace structure.
13455 22 mov FP, R7 Put this value into the frame pointer. */
13457 work_register = thumb_find_work_register (live_regs_mask);
13459 asm_fprintf
13460 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13461 SP_REGNUM, SP_REGNUM);
13463 if (dwarf2out_do_frame ())
13465 char *l = dwarf2out_cfi_label ();
13467 cfa_offset = cfa_offset + 16;
13468 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13471 if (l_mask)
13473 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13474 offset = bit_count (l_mask);
13476 else
13477 offset = 0;
13479 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13480 offset + 16 + current_function_pretend_args_size);
13482 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13483 offset + 4);
13485 /* Make sure that the instruction fetching the PC is in the right place
13486 to calculate "start of backtrace creation code + 12". */
13487 if (l_mask)
13489 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13490 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13491 offset + 12);
13492 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13493 ARM_HARD_FRAME_POINTER_REGNUM);
13494 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13495 offset);
13497 else
13499 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13500 ARM_HARD_FRAME_POINTER_REGNUM);
13501 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13502 offset);
13503 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13504 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13505 offset + 12);
13508 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13509 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13510 offset + 8);
13511 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13512 offset + 12);
13513 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13514 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13516 /* Optimisation: If we are not pushing any low registers but we are going
13517 to push some high registers then delay our first push. This will just
13518 be a push of LR and we can combine it with the push of the first high
13519 register. */
13520 else if ((l_mask & 0xff) != 0
13521 || (high_regs_pushed == 0 && l_mask))
13522 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13524 if (high_regs_pushed)
13526 unsigned pushable_regs;
13527 unsigned next_hi_reg;
13529 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13530 if (live_regs_mask & (1 << next_hi_reg))
13531 break;
13533 pushable_regs = l_mask & 0xff;
13535 if (pushable_regs == 0)
13536 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13538 while (high_regs_pushed > 0)
13540 unsigned long real_regs_mask = 0;
13542 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13544 if (pushable_regs & (1 << regno))
13546 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13548 high_regs_pushed --;
13549 real_regs_mask |= (1 << next_hi_reg);
13551 if (high_regs_pushed)
13553 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13554 next_hi_reg --)
13555 if (live_regs_mask & (1 << next_hi_reg))
13556 break;
13558 else
13560 pushable_regs &= ~((1 << regno) - 1);
13561 break;
13566 /* If we had to find a work register and we have not yet
13567 saved the LR then add it to the list of regs to push. */
13568 if (l_mask == (1 << LR_REGNUM))
13570 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13571 1, &cfa_offset,
13572 real_regs_mask | (1 << LR_REGNUM));
13573 l_mask = 0;
13575 else
13576 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13581 /* Handle the case of a double word load into a low register from
13582 a computed memory address. The computed address may involve a
13583 register which is overwritten by the load. */
13584 const char *
13585 thumb_load_double_from_address (rtx *operands)
13587 rtx addr;
13588 rtx base;
13589 rtx offset;
13590 rtx arg1;
13591 rtx arg2;
13593 gcc_assert (GET_CODE (operands[0]) == REG);
13594 gcc_assert (GET_CODE (operands[1]) == MEM);
13596 /* Get the memory address. */
13597 addr = XEXP (operands[1], 0);
13599 /* Work out how the memory address is computed. */
13600 switch (GET_CODE (addr))
13602 case REG:
13603 operands[2] = gen_rtx_MEM (SImode,
13604 plus_constant (XEXP (operands[1], 0), 4));
13606 if (REGNO (operands[0]) == REGNO (addr))
13608 output_asm_insn ("ldr\t%H0, %2", operands);
13609 output_asm_insn ("ldr\t%0, %1", operands);
13611 else
13613 output_asm_insn ("ldr\t%0, %1", operands);
13614 output_asm_insn ("ldr\t%H0, %2", operands);
13616 break;
13618 case CONST:
13619 /* Compute <address> + 4 for the high order load. */
13620 operands[2] = gen_rtx_MEM (SImode,
13621 plus_constant (XEXP (operands[1], 0), 4));
13623 output_asm_insn ("ldr\t%0, %1", operands);
13624 output_asm_insn ("ldr\t%H0, %2", operands);
13625 break;
13627 case PLUS:
13628 arg1 = XEXP (addr, 0);
13629 arg2 = XEXP (addr, 1);
13631 if (CONSTANT_P (arg1))
13632 base = arg2, offset = arg1;
13633 else
13634 base = arg1, offset = arg2;
13636 gcc_assert (GET_CODE (base) == REG);
13638 /* Catch the case of <address> = <reg> + <reg>. */
13639 if (GET_CODE (offset) == REG)
13641 int reg_offset = REGNO (offset);
13642 int reg_base = REGNO (base);
13643 int reg_dest = REGNO (operands[0]);
13645 /* Add the base and offset registers together into the
13646 higher destination register. */
13647 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13648 reg_dest + 1, reg_base, reg_offset);
13650 /* Load the lower destination register from the address in
13651 the higher destination register. */
13652 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13653 reg_dest, reg_dest + 1);
13655 /* Load the higher destination register from its own address
13656 plus 4. */
13657 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13658 reg_dest + 1, reg_dest + 1);
13660 else
13662 /* Compute <address> + 4 for the high order load. */
13663 operands[2] = gen_rtx_MEM (SImode,
13664 plus_constant (XEXP (operands[1], 0), 4));
13666 /* If the computed address is held in the low order register
13667 then load the high order register first, otherwise always
13668 load the low order register first. */
13669 if (REGNO (operands[0]) == REGNO (base))
13671 output_asm_insn ("ldr\t%H0, %2", operands);
13672 output_asm_insn ("ldr\t%0, %1", operands);
13674 else
13676 output_asm_insn ("ldr\t%0, %1", operands);
13677 output_asm_insn ("ldr\t%H0, %2", operands);
13680 break;
13682 case LABEL_REF:
13683 /* With no registers to worry about we can just load the value
13684 directly. */
13685 operands[2] = gen_rtx_MEM (SImode,
13686 plus_constant (XEXP (operands[1], 0), 4));
13688 output_asm_insn ("ldr\t%H0, %2", operands);
13689 output_asm_insn ("ldr\t%0, %1", operands);
13690 break;
13692 default:
13693 gcc_unreachable ();
13696 return "";
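/* Copy a block of two or three words using a single ldmia/stmia pair.
   The register operands are sorted into ascending order first, since
   load/store-multiple register lists are written in ascending register
   order. */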
13699 const char *
13700 thumb_output_move_mem_multiple (int n, rtx *operands)
13702 rtx tmp;
13704 switch (n)
13706 case 2:
13707 if (REGNO (operands[4]) > REGNO (operands[5]))
13709 tmp = operands[4];
13710 operands[4] = operands[5];
13711 operands[5] = tmp;
13713 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13714 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13715 break;
13717 case 3:
13718 if (REGNO (operands[4]) > REGNO (operands[5]))
13720 tmp = operands[4];
13721 operands[4] = operands[5];
13722 operands[5] = tmp;
13724 if (REGNO (operands[5]) > REGNO (operands[6]))
13726 tmp = operands[5];
13727 operands[5] = operands[6];
13728 operands[6] = tmp;
13730 if (REGNO (operands[4]) > REGNO (operands[5]))
13732 tmp = operands[4];
13733 operands[4] = operands[5];
13734 operands[5] = tmp;
13737 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13738 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13739 break;
13741 default:
13742 gcc_unreachable ();
13745 return "";
13748 /* Output a call-via instruction for thumb state. */
13749 const char *
13750 thumb_call_via_reg (rtx reg)
13752 int regno = REGNO (reg);
13753 rtx *labelp;
13755 gcc_assert (regno < LR_REGNUM);
13757 /* If we are in the normal text section we can use a single instance
13758 per compilation unit. If we are doing function sections, then we need
13759 an entry per section, since we can't rely on reachability. */
13760 if (in_text_section ())
13762 thumb_call_reg_needed = 1;
13764 if (thumb_call_via_label[regno] == NULL)
13765 thumb_call_via_label[regno] = gen_label_rtx ();
13766 labelp = thumb_call_via_label + regno;
13768 else
13770 if (cfun->machine->call_via[regno] == NULL)
13771 cfun->machine->call_via[regno] = gen_label_rtx ();
13772 labelp = cfun->machine->call_via + regno;
13775 output_asm_insn ("bl\t%a0", labelp);
13776 return "";
13779 /* Routines for generating rtl. */
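/* Expand a Thumb block copy (movmemqi).  Use the movmem12b and movmem8b
   patterns for the bulk of the copy, then finish with word, halfword and
   byte moves for any remainder. */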
13780 void
13781 thumb_expand_movmemqi (rtx *operands)
13783 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13784 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13785 HOST_WIDE_INT len = INTVAL (operands[2]);
13786 HOST_WIDE_INT offset = 0;
13788 while (len >= 12)
13790 emit_insn (gen_movmem12b (out, in, out, in));
13791 len -= 12;
13794 if (len >= 8)
13796 emit_insn (gen_movmem8b (out, in, out, in));
13797 len -= 8;
13800 if (len >= 4)
13802 rtx reg = gen_reg_rtx (SImode);
13803 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13804 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13805 len -= 4;
13806 offset += 4;
13809 if (len >= 2)
13811 rtx reg = gen_reg_rtx (HImode);
13812 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13813 plus_constant (in, offset))));
13814 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13815 reg));
13816 len -= 2;
13817 offset += 2;
13820 if (len)
13822 rtx reg = gen_reg_rtx (QImode);
13823 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13824 plus_constant (in, offset))));
13825 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13826 reg));
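/* Handle storing a half-word to memory during reload by expanding the
   thumb_movhi_clobber pattern. */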
13830 void
13831 thumb_reload_out_hi (rtx *operands)
13833 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13836 /* Handle reading a half-word from memory during reload. */
13837 void
13838 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13840 gcc_unreachable ();
13843 /* Return the length of a function name prefix
13844 that starts with the character 'c'. */
13845 static int
13846 arm_get_strip_length (int c)
13848 switch (c)
13850 ARM_NAME_ENCODING_LENGTHS
13851 default: return 0;
13855 /* Return a pointer to a function's name with any
13856 and all prefix encodings stripped from it. */
13857 const char *
13858 arm_strip_name_encoding (const char *name)
13860 int skip;
13862 while ((skip = arm_get_strip_length (* name)))
13863 name += skip;
13865 return name;
13868 /* If there is a '*' anywhere in the name's prefix, then
13869 emit the stripped name verbatim, otherwise prepend an
13870 underscore if leading underscores are being used. */
13871 void
13872 arm_asm_output_labelref (FILE *stream, const char *name)
13874 int skip;
13875 int verbatim = 0;
13877 while ((skip = arm_get_strip_length (* name)))
13879 verbatim |= (*name == '*');
13880 name += skip;
13883 if (verbatim)
13884 fputs (name, stream);
13885 else
13886 asm_fprintf (stream, "%U%s", name);
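/* At the end of assembly output, emit the Thumb call-via-register stubs
   (an internal label followed by a "bx" instruction) that were requested
   for the normal text section while compiling this unit. */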
13889 static void
13890 arm_file_end (void)
13892 int regno;
13894 if (! thumb_call_reg_needed)
13895 return;
13897 text_section ();
13898 asm_fprintf (asm_out_file, "\t.code 16\n");
13899 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13901 for (regno = 0; regno < LR_REGNUM; regno++)
13903 rtx label = thumb_call_via_label[regno];
13905 if (label != 0)
13907 targetm.asm_out.internal_label (asm_out_file, "L",
13908 CODE_LABEL_NUMBER (label));
13909 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
13914 rtx aof_pic_label;
13916 #ifdef AOF_ASSEMBLER
13917 /* Special functions only needed when producing AOF syntax assembler. */
13919 struct pic_chain
13921 struct pic_chain * next;
13922 const char * symname;
13925 static struct pic_chain * aof_pic_chain = NULL;
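/* Return the address of X's entry in the AOF PIC address table (the
   x$adcons area), adding a new entry to the chain if X has not been
   seen before. */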
13928 aof_pic_entry (rtx x)
13930 struct pic_chain ** chainp;
13931 int offset;
13933 if (aof_pic_label == NULL_RTX)
13935 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13938 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13939 offset += 4, chainp = &(*chainp)->next)
13940 if ((*chainp)->symname == XSTR (x, 0))
13941 return plus_constant (aof_pic_label, offset);
13943 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13944 (*chainp)->next = NULL;
13945 (*chainp)->symname = XSTR (x, 0);
13946 return plus_constant (aof_pic_label, offset);
13949 void
13950 aof_dump_pic_table (FILE *f)
13952 struct pic_chain * chain;
13954 if (aof_pic_chain == NULL)
13955 return;
13957 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13958 PIC_OFFSET_TABLE_REGNUM,
13959 PIC_OFFSET_TABLE_REGNUM);
13960 fputs ("|x$adcons|\n", f);
13962 for (chain = aof_pic_chain; chain; chain = chain->next)
13964 fputs ("\tDCD\t", f);
13965 assemble_name (f, chain->symname);
13966 fputs ("\n", f);
13970 int arm_text_section_count = 1;
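/* Return the AREA directive for a new numbered code section; mark it
   PIC and REENTRANT when compiling position-independent code. */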
13972 char *
13973 aof_text_section (void)
13975 static char buf[100];
13976 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13977 arm_text_section_count++);
13978 if (flag_pic)
13979 strcat (buf, ", PIC, REENTRANT");
13980 return buf;
13983 static int arm_data_section_count = 1;
13985 char *
13986 aof_data_section (void)
13988 static char buf[100];
13989 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13990 return buf;
13993 /* The AOF assembler is religiously strict about declarations of
13994 imported and exported symbols, so that it is impossible to declare
13995 a function as imported near the beginning of the file, and then to
13996 export it later on. It is, however, possible to delay the decision
13997 until all the functions in the file have been compiled. To get
13998 around this, we maintain a list of the imports and exports, and
13999 delete from it any that are subsequently defined. At the end of
14000 compilation we spit the remainder of the list out before the END
14001 directive. */
14003 struct import
14005 struct import * next;
14006 const char * name;
14009 static struct import * imports_list = NULL;
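/* Add NAME to the list of symbols that must be IMPORTed, unless it is
   already on the list. */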
14011 void
14012 aof_add_import (const char *name)
14014 struct import * new;
14016 for (new = imports_list; new; new = new->next)
14017 if (new->name == name)
14018 return;
14020 new = (struct import *) xmalloc (sizeof (struct import));
14021 new->next = imports_list;
14022 imports_list = new;
14023 new->name = name;
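/* Remove NAME from the pending import list, typically because the
   symbol has now been defined in this compilation unit. */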
14026 void
14027 aof_delete_import (const char *name)
14029 struct import ** old;
14031 for (old = &imports_list; *old; old = & (*old)->next)
14033 if ((*old)->name == name)
14035 *old = (*old)->next;
14036 return;
14041 int arm_main_function = 0;
14043 static void
14044 aof_dump_imports (FILE *f)
14046 /* The AOF assembler needs this to cause the startup code to be extracted
14047 from the library. Bringing in __main causes the whole thing to work
14048 automagically. */
14049 if (arm_main_function)
14051 text_section ();
14052 fputs ("\tIMPORT __main\n", f);
14053 fputs ("\tDCD __main\n", f);
14056 /* Now dump the remaining imports. */
14057 while (imports_list)
14059 fprintf (f, "\tIMPORT\t");
14060 assemble_name (f, imports_list->name);
14061 fputc ('\n', f);
14062 imports_list = imports_list->next;
14066 static void
14067 aof_globalize_label (FILE *stream, const char *name)
14069 default_globalize_label (stream, name);
14070 if (! strcmp (name, "main"))
14071 arm_main_function = 1;
14074 static void
14075 aof_file_start (void)
14077 fputs ("__r0\tRN\t0\n", asm_out_file);
14078 fputs ("__a1\tRN\t0\n", asm_out_file);
14079 fputs ("__a2\tRN\t1\n", asm_out_file);
14080 fputs ("__a3\tRN\t2\n", asm_out_file);
14081 fputs ("__a4\tRN\t3\n", asm_out_file);
14082 fputs ("__v1\tRN\t4\n", asm_out_file);
14083 fputs ("__v2\tRN\t5\n", asm_out_file);
14084 fputs ("__v3\tRN\t6\n", asm_out_file);
14085 fputs ("__v4\tRN\t7\n", asm_out_file);
14086 fputs ("__v5\tRN\t8\n", asm_out_file);
14087 fputs ("__v6\tRN\t9\n", asm_out_file);
14088 fputs ("__sl\tRN\t10\n", asm_out_file);
14089 fputs ("__fp\tRN\t11\n", asm_out_file);
14090 fputs ("__ip\tRN\t12\n", asm_out_file);
14091 fputs ("__sp\tRN\t13\n", asm_out_file);
14092 fputs ("__lr\tRN\t14\n", asm_out_file);
14093 fputs ("__pc\tRN\t15\n", asm_out_file);
14094 fputs ("__f0\tFN\t0\n", asm_out_file);
14095 fputs ("__f1\tFN\t1\n", asm_out_file);
14096 fputs ("__f2\tFN\t2\n", asm_out_file);
14097 fputs ("__f3\tFN\t3\n", asm_out_file);
14098 fputs ("__f4\tFN\t4\n", asm_out_file);
14099 fputs ("__f5\tFN\t5\n", asm_out_file);
14100 fputs ("__f6\tFN\t6\n", asm_out_file);
14101 fputs ("__f7\tFN\t7\n", asm_out_file);
14102 text_section ();
14105 static void
14106 aof_file_end (void)
14108 if (flag_pic)
14109 aof_dump_pic_table (asm_out_file);
14110 arm_file_end ();
14111 aof_dump_imports (asm_out_file);
14112 fputs ("\tEND\n", asm_out_file);
14114 #endif /* AOF_ASSEMBLER */
14116 #ifndef ARM_PE
14117 /* Symbols in the text segment can be accessed without indirecting via the
14118 constant pool; it may take an extra binary operation, but this is still
14119 faster than indirecting via memory. Don't do this when not optimizing,
14120 since we won't be calculating all of the offsets necessary to do this
14121 simplification. */
14123 static void
14124 arm_encode_section_info (tree decl, rtx rtl, int first)
14126 /* This doesn't work with AOF syntax, since the string table may be in
14127 a different AREA. */
14128 #ifndef AOF_ASSEMBLER
14129 if (optimize > 0 && TREE_CONSTANT (decl))
14130 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14131 #endif
14133 /* If we are referencing a function that is weak then encode a long call
14134 flag in the function name, otherwise if the function is static or
14135 known to be defined in this file then encode a short call flag. */
14136 if (first && DECL_P (decl))
14138 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14139 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14140 else if (! TREE_PUBLIC (decl))
14141 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14144 #endif /* !ARM_PE */
14146 static void
14147 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14149 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14150 && !strcmp (prefix, "L"))
14152 arm_ccfsm_state = 0;
14153 arm_target_insn = NULL;
14155 default_internal_label (stream, prefix, labelno);
14158 /* Output code to add DELTA to the first argument, and then jump
14159 to FUNCTION. Used for C++ multiple inheritance. */
14160 static void
14161 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14162 HOST_WIDE_INT delta,
14163 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14164 tree function)
14166 static int thunk_label = 0;
14167 char label[256];
14168 int mi_delta = delta;
14169 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14170 int shift = 0;
14171 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14172 ? 1 : 0);
14173 if (mi_delta < 0)
14174 mi_delta = - mi_delta;
14175 if (TARGET_THUMB)
14177 int labelno = thunk_label++;
14178 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14179 fputs ("\tldr\tr12, ", file);
14180 assemble_name (file, label);
14181 fputc ('\n', file);
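/* Emit the DELTA adjustment as a series of add/sub instructions,
   peeling off at most eight bits at a time from an even bit position
   so that each instruction uses a small immediate. */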
14183 while (mi_delta != 0)
14185 if ((mi_delta & (3 << shift)) == 0)
14186 shift += 2;
14187 else
14189 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14190 mi_op, this_regno, this_regno,
14191 mi_delta & (0xff << shift));
14192 mi_delta &= ~(0xff << shift);
14193 shift += 8;
14196 if (TARGET_THUMB)
14198 fprintf (file, "\tbx\tr12\n");
14199 ASM_OUTPUT_ALIGN (file, 2);
14200 assemble_name (file, label);
14201 fputs (":\n", file);
14202 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14204 else
14206 fputs ("\tb\t", file);
14207 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14208 if (NEED_PLT_RELOC)
14209 fputs ("(PLT)", file);
14210 fputc ('\n', file);
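/* Output the CONST_VECTOR X as a single hexadecimal literal, printing
   its elements starting from the highest-numbered one. */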
14215 arm_emit_vector_const (FILE *file, rtx x)
14217 int i;
14218 const char * pattern;
14220 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14222 switch (GET_MODE (x))
14224 case V2SImode: pattern = "%08x"; break;
14225 case V4HImode: pattern = "%04x"; break;
14226 case V8QImode: pattern = "%02x"; break;
14227 default: gcc_unreachable ();
14230 fprintf (file, "0x");
14231 for (i = CONST_VECTOR_NUNITS (x); i--;)
14233 rtx element;
14235 element = CONST_VECTOR_ELT (x, i);
14236 fprintf (file, pattern, INTVAL (element));
14239 return 1;
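/* Output the assembly for loading an iWMMXt GR register from memory.
   A single wldrw suffices when the address is a register plus a small
   offset; otherwise a core register is spilled, the value is loaded
   into it and then transferred with tmcr. */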
14242 const char *
14243 arm_output_load_gr (rtx *operands)
14245 rtx reg;
14246 rtx offset;
14247 rtx wcgr;
14248 rtx sum;
14250 if (GET_CODE (operands [1]) != MEM
14251 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14252 || GET_CODE (reg = XEXP (sum, 0)) != REG
14253 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14254 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14255 return "wldrw%?\t%0, %1";
14257 /* Fix up an out-of-range load of a GR register. */
14258 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14259 wcgr = operands[0];
14260 operands[0] = reg;
14261 output_asm_insn ("ldr%?\t%0, %1", operands);
14263 operands[0] = wcgr;
14264 operands[1] = reg;
14265 output_asm_insn ("tmcr%?\t%0, %1", operands);
14266 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14268 return "";
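/* Worker for the struct-value-rtx target hook: return the register in
   which the address of a returned aggregate is passed.  See the FIXME
   in the body before changing this. */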
14271 static rtx
14272 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14273 int incoming ATTRIBUTE_UNUSED)
14275 #if 0
14276 /* FIXME: The ARM backend has special code to handle structure
14277 returns, and will reserve its own hidden first argument. So
14278 if this macro is enabled a *second* hidden argument will be
14279 reserved, which will break binary compatibility with old
14280 toolchains and also thunk handling. One day this should be
14281 fixed. */
14282 return 0;
14283 #else
14284 /* Register in which address to store a structure value
14285 is passed to a function. */
14286 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14287 #endif
14290 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14292 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14293 named arg and all anonymous args onto the stack.
14294 XXX I know the prologue shouldn't be pushing registers, but it is faster
14295 that way. */
14297 static void
14298 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14299 enum machine_mode mode ATTRIBUTE_UNUSED,
14300 tree type ATTRIBUTE_UNUSED,
14301 int *pretend_size,
14302 int second_time ATTRIBUTE_UNUSED)
14304 cfun->machine->uses_anonymous_args = 1;
14305 if (cum->nregs < NUM_ARG_REGS)
14306 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14309 /* Return nonzero if the CONSUMER instruction (a store) does not need
14310 PRODUCER's value to calculate the address. */
14313 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14315 rtx value = PATTERN (producer);
14316 rtx addr = PATTERN (consumer);
14318 if (GET_CODE (value) == COND_EXEC)
14319 value = COND_EXEC_CODE (value);
14320 if (GET_CODE (value) == PARALLEL)
14321 value = XVECEXP (value, 0, 0);
14322 value = XEXP (value, 0);
14323 if (GET_CODE (addr) == COND_EXEC)
14324 addr = COND_EXEC_CODE (addr);
14325 if (GET_CODE (addr) == PARALLEL)
14326 addr = XVECEXP (addr, 0, 0);
14327 addr = XEXP (addr, 0);
14329 return !reg_overlap_mentioned_p (value, addr);
14332 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14333 have an early register shift value or amount dependency on the
14334 result of PRODUCER. */
14337 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14339 rtx value = PATTERN (producer);
14340 rtx op = PATTERN (consumer);
14341 rtx early_op;
14343 if (GET_CODE (value) == COND_EXEC)
14344 value = COND_EXEC_CODE (value);
14345 if (GET_CODE (value) == PARALLEL)
14346 value = XVECEXP (value, 0, 0);
14347 value = XEXP (value, 0);
14348 if (GET_CODE (op) == COND_EXEC)
14349 op = COND_EXEC_CODE (op);
14350 if (GET_CODE (op) == PARALLEL)
14351 op = XVECEXP (op, 0, 0);
14352 op = XEXP (op, 1);
14354 early_op = XEXP (op, 0);
14355 /* This is either an actual independent shift, or a shift applied to
14356 the first operand of another operation. We want the whole shift
14357 operation. */
14358 if (GET_CODE (early_op) == REG)
14359 early_op = op;
14361 return !reg_overlap_mentioned_p (value, early_op);
14364 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14365 have an early register shift value dependency on the result of
14366 PRODUCER. */
14369 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14371 rtx value = PATTERN (producer);
14372 rtx op = PATTERN (consumer);
14373 rtx early_op;
14375 if (GET_CODE (value) == COND_EXEC)
14376 value = COND_EXEC_CODE (value);
14377 if (GET_CODE (value) == PARALLEL)
14378 value = XVECEXP (value, 0, 0);
14379 value = XEXP (value, 0);
14380 if (GET_CODE (op) == COND_EXEC)
14381 op = COND_EXEC_CODE (op);
14382 if (GET_CODE (op) == PARALLEL)
14383 op = XVECEXP (op, 0, 0);
14384 op = XEXP (op, 1);
14386 early_op = XEXP (op, 0);
14388 /* This is either an actual independent shift, or a shift applied to
14389 the first operand of another operation. We want the value being
14390 shifted, in either case. */
14391 if (GET_CODE (early_op) != REG)
14392 early_op = XEXP (early_op, 0);
14394 return !reg_overlap_mentioned_p (value, early_op);
14397 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14398 have an early register mult dependency on the result of
14399 PRODUCER. */
14402 arm_no_early_mul_dep (rtx producer, rtx consumer)
14404 rtx value = PATTERN (producer);
14405 rtx op = PATTERN (consumer);
14407 if (GET_CODE (value) == COND_EXEC)
14408 value = COND_EXEC_CODE (value);
14409 if (GET_CODE (value) == PARALLEL)
14410 value = XVECEXP (value, 0, 0);
14411 value = XEXP (value, 0);
14412 if (GET_CODE (op) == COND_EXEC)
14413 op = COND_EXEC_CODE (op);
14414 if (GET_CODE (op) == PARALLEL)
14415 op = XVECEXP (op, 0, 0);
14416 op = XEXP (op, 1);
14418 return (GET_CODE (op) == PLUS
14419 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14423 /* We can't rely on the caller doing the proper promotion when
14424 using APCS or ATPCS. */
14426 static bool
14427 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14429 return !TARGET_AAPCS_BASED;
14433 /* AAPCS based ABIs use short enums by default. */
14435 static bool
14436 arm_default_short_enums (void)
14438 return TARGET_AAPCS_BASED;
14442 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14444 static bool
14445 arm_align_anon_bitfield (void)
14447 return TARGET_AAPCS_BASED;
14451 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14453 static tree
14454 arm_cxx_guard_type (void)
14456 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14460 /* The EABI says test the least significant bit of a guard variable. */
14462 static bool
14463 arm_cxx_guard_mask_bit (void)
14465 return TARGET_AAPCS_BASED;
14469 /* The EABI specifies that all array cookies are 8 bytes long. */
14471 static tree
14472 arm_get_cookie_size (tree type)
14474 tree size;
14476 if (!TARGET_AAPCS_BASED)
14477 return default_cxx_get_cookie_size (type);
14479 size = build_int_cst (sizetype, 8);
14480 return size;
14484 /* The EABI says that array cookies should also contain the element size. */
14486 static bool
14487 arm_cookie_has_size (void)
14489 return TARGET_AAPCS_BASED;
14493 /* The EABI says constructors and destructors should return a pointer to
14494 the object constructed/destroyed. */
14496 static bool
14497 arm_cxx_cdtor_returns_this (void)
14499 return TARGET_AAPCS_BASED;
14502 /* The EABI says that an inline function may never be the key
14503 method. */
14505 static bool
14506 arm_cxx_key_method_may_be_inline (void)
14508 return !TARGET_AAPCS_BASED;
14511 static void
14512 arm_cxx_determine_class_data_visibility (tree decl)
14514 if (!TARGET_AAPCS_BASED)
14515 return;
14517 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
14518 be exported. However, on systems without dynamic vague linkage,
14519 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
14520 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
14521 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
14522 else
14523 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
14524 DECL_VISIBILITY_SPECIFIED (decl) = 1;
14527 static bool
14528 arm_cxx_class_data_always_comdat (void)
14530 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
14531 vague linkage if the class has no key function. */
14532 return !TARGET_AAPCS_BASED;
14536 /* The EABI says __aeabi_atexit should be used to register static
14537 destructors. */
14539 static bool
14540 arm_cxx_use_aeabi_atexit (void)
14542 return TARGET_AAPCS_BASED;
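/* Store SOURCE as the return address of the current function.  If LR
   was not saved on the stack, just set the register; otherwise write
   SOURCE into LR's save slot, using SCRATCH to form the address when
   the offset is too large for an addressing-mode immediate. */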
14546 void
14547 arm_set_return_address (rtx source, rtx scratch)
14549 arm_stack_offsets *offsets;
14550 HOST_WIDE_INT delta;
14551 rtx addr;
14552 unsigned long saved_regs;
14554 saved_regs = arm_compute_save_reg_mask ();
14556 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14557 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14558 else
14560 if (frame_pointer_needed)
14561 addr = plus_constant(hard_frame_pointer_rtx, -4);
14562 else
14564 /* LR will be the first saved register. */
14565 offsets = arm_get_frame_offsets ();
14566 delta = offsets->outgoing_args - (offsets->frame + 4);
14569 if (delta >= 4096)
14571 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14572 GEN_INT (delta & ~4095)));
14573 addr = scratch;
14574 delta &= 4095;
14576 else
14577 addr = stack_pointer_rtx;
14579 addr = plus_constant (addr, delta);
14581 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
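/* Thumb counterpart of arm_set_return_address: store SOURCE either
   directly into LR or into LR's save slot, which is always the first
   saved register, allowing for the backtrace structure when
   TARGET_BACKTRACE is set. */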
14586 void
14587 thumb_set_return_address (rtx source, rtx scratch)
14589 arm_stack_offsets *offsets;
14590 HOST_WIDE_INT delta;
14591 int reg;
14592 rtx addr;
14593 unsigned long mask;
14595 emit_insn (gen_rtx_USE (VOIDmode, source));
14597 mask = thumb_compute_save_reg_mask ();
14598 if (mask & (1 << LR_REGNUM))
14600 offsets = arm_get_frame_offsets ();
14602 /* Find the saved regs. */
14603 if (frame_pointer_needed)
14605 delta = offsets->soft_frame - offsets->saved_args;
14606 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14608 else
14610 delta = offsets->outgoing_args - offsets->saved_args;
14611 reg = SP_REGNUM;
14613 /* Allow for the stack frame. */
14614 if (TARGET_BACKTRACE)
14615 delta -= 16;
14616 /* The link register is always the first saved register. */
14617 delta -= 4;
14619 /* Construct the address. */
14620 addr = gen_rtx_REG (SImode, reg);
14621 if ((reg != SP_REGNUM && delta >= 128)
14622 || delta >= 1024)
14624 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14625 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14626 addr = scratch;
14628 else
14629 addr = plus_constant (addr, delta);
14631 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14633 else
14634 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14637 /* Implements target hook vector_mode_supported_p. */
14638 bool
14639 arm_vector_mode_supported_p (enum machine_mode mode)
14641 if ((mode == V2SImode)
14642 || (mode == V4HImode)
14643 || (mode == V8QImode))
14644 return true;
14646 return false;
14649 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14650 ARM insns and therefore guarantee that the shift count is modulo 256.
14651 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14652 guarantee no particular behavior for out-of-range counts. */
14654 static unsigned HOST_WIDE_INT
14655 arm_shift_truncation_mask (enum machine_mode mode)
14657 return mode == SImode ? 255 : 0;
14661 /* Map internal gcc register numbers to DWARF2 register numbers. */
14663 unsigned int
14664 arm_dbx_register_number (unsigned int regno)
14666 if (regno < 16)
14667 return regno;
14669 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14670 compatibility. The EABI defines them as registers 96-103. */
14671 if (IS_FPA_REGNUM (regno))
14672 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14674 if (IS_VFP_REGNUM (regno))
14675 return 64 + regno - FIRST_VFP_REGNUM;
14677 if (IS_IWMMXT_GR_REGNUM (regno))
14678 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14680 if (IS_IWMMXT_REGNUM (regno))
14681 return 112 + regno - FIRST_IWMMXT_REGNUM;
14683 gcc_unreachable ();