/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];
/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static rtx is_jump_table (rtx);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef  TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO  arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO  arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS  arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef  TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef  TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef  TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef  TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef  TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef  TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;
/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   FL_FOR_ARCH6
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6
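
/* As an illustration, FL_FOR_ARCH5TE expands to
   FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB:
   each FL_FOR_ARCH macro inherits every capability bit of its
   predecessors and adds its own, so testing (insn_flags & FL_ARCH4)
   succeeds for any v4 or later selection.  */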
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)
/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};
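
/* To see how the ARM_CORE macro expands, take a hypothetical
   arm-cores.def entry such as

     ARM_CORE("arm7tdmi", arm7tdmi, 4T, FL_CO_PROC, fastmul)

   (the real entries and their exact flags live in arm-cores.def); it
   becomes the initializer

     {"arm7tdmi", arm_none, "4T", FL_CO_PROC | FL_FOR_ARCH4T,
      arm_fastmul_rtx_costs},

   i.e. the ARCH argument is both stringized and pasted into an
   FL_FOR_ARCH macro, and the COSTS argument is pasted into the name of
   an rtx_costs function.  */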
static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};
struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2
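
/* For instance, compiling with "-mcpu=arm926ej-s -mtune=xscale" causes
   arm_handle_option (below) to leave arm_select[ARM_OPT_SET_CPU].string
   pointing at "arm926ej-s" and arm_select[ARM_OPT_SET_TUNE].string at
   "xscale"; arm_override_options then matches those strings against the
   tables named in the 'processors' fields.  */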
/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";
struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};


/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};
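
/* This table feeds the one below: e.g. -mfpu=vfp selects FPUTYPE_VFP
   here, and fp_model_for_fpu[FPUTYPE_VFP] then yields ARM_FP_MODEL_VFP
   when arm_override_options fills in arm_fp_model.  */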
/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum arm_fp_model fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};


struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};


struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};


/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",    ARM_ABI_APCS},
  {"atpcs",       ARM_ABI_ATPCS},
  {"aapcs",       ARM_ABI_AAPCS},
  {"iwmmxt",      ARM_ABI_IWMMXT},
  {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
};
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
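
/* bit_count illustrated: bit_count (0x2c) == 3, since 0b101100 becomes
   0b101000, then 0b100000, then 0.  The value &= value - 1 step clears
   exactly one set bit per iteration, so the loop runs once per set bit
   rather than once per bit position.  */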
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
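
/* Concretely, a C expression such as "q = a / b" on SImode operands
   becomes a call to __aeabi_idivmod: the quotient comes back in r0 and
   the remainder in r1, and plain division simply ignores r1.  The same
   routine therefore serves both the sdiv_optab and sdivmod_optab
   entries registered above.  */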
/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }
  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              gcc_assert (best_fit);
              sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);
  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking");
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");
  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");
  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
         if ((insn_flags & FL_VFP) != 0)
           arm_fpu_arch = FPUTYPE_VFP;
         else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;
  /* Override the default structure alignment for AAPCS ABI.  */
  if (TARGET_AAPCS_BASED)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}
static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
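
/* These strings are matched against the argument of the "isr" (or
   "interrupt") function attribute, e.g.:

     void handler (void) __attribute__ ((interrupt ("IRQ")));

   Omitting the argument defaults to ARM_FT_ISR; see arm_isr_value
   below.  */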
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}
/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ...  */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
           && ((i & ~0xc000003f) == 0
               || (i & ~0xf000000f) == 0
               || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}
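
/* The test mirrors the ARM data-processing immediate encoding: an 8-bit
   value rotated right by an even amount within the 32-bit word.  So
   0x0000ff00 (0xff << 8) is a valid immediate, and so is 0xf000000f
   (0xff rotated right by 4, caught by the wrap-around masks above),
   while 0x00000101 spans nine bits and must be synthesized from two
   instructions.  */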
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are dyadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source,
                                                        temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
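
/* Worked example: with CODE == SET and VAL == 0x00ff00ff, no single
   rotated 8-bit immediate matches, so the constant is synthesized as
   two instructions, e.g.

       mov     rd, #0x000000ff
       orr     rd, rd, #0x00ff0000

   arm_split_constant compares the count returned by arm_gen_constant
   against arm_constant_limit to decide between in-line synthesis and
   leaving the constant to be loaded from a minipool.  */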
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
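
/* The loop above greedily packs the set bits of REMAINDER into 8-bit
   windows aligned to even bit positions, counting one instruction per
   window: e.g. remainder == 0x00ff00ff needs two windows (bits 0-7 and
   bits 16-23), hence two instructions, matching the mov/orr sequence
   shown earlier.  */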
/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}
1681 /* As above, but extra parameter GENERATE which, if clear, suppresses
1682 RTL generation. */
1684 static int
1685 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1686 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1687 int generate)
1689 int can_invert = 0;
1690 int can_negate = 0;
1691 int can_negate_initial = 0;
1692 int can_shift = 0;
1693 int i;
1694 int num_bits_set = 0;
1695 int set_sign_bit_copies = 0;
1696 int clear_sign_bit_copies = 0;
1697 int clear_zero_bit_copies = 0;
1698 int set_zero_bit_copies = 0;
1699 int insns = 0;
1700 unsigned HOST_WIDE_INT temp1, temp2;
1701 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1703 /* Find out which operations are safe for a given CODE. Also do a quick
1704 check for degenerate cases; these can occur when DImode operations
1705 are split. */
1706 switch (code)
1708 case SET:
1709 can_invert = 1;
1710 can_shift = 1;
1711 can_negate = 1;
1712 break;
1714 case PLUS:
1715 can_negate = 1;
1716 can_negate_initial = 1;
1717 break;
1719 case IOR:
1720 if (remainder == 0xffffffff)
1722 if (generate)
1723 emit_constant_insn (cond,
1724 gen_rtx_SET (VOIDmode, target,
1725 GEN_INT (ARM_SIGN_EXTEND (val))));
1726 return 1;
1728 if (remainder == 0)
1730 if (reload_completed && rtx_equal_p (target, source))
1731 return 0;
1732 if (generate)
1733 emit_constant_insn (cond,
1734 gen_rtx_SET (VOIDmode, target, source));
1735 return 1;
1737 break;
1739 case AND:
1740 if (remainder == 0)
1742 if (generate)
1743 emit_constant_insn (cond,
1744 gen_rtx_SET (VOIDmode, target, const0_rtx));
1745 return 1;
1747 if (remainder == 0xffffffff)
1749 if (reload_completed && rtx_equal_p (target, source))
1750 return 0;
1751 if (generate)
1752 emit_constant_insn (cond,
1753 gen_rtx_SET (VOIDmode, target, source));
1754 return 1;
1756 can_invert = 1;
1757 break;
1759 case XOR:
1760 if (remainder == 0)
1762 if (reload_completed && rtx_equal_p (target, source))
1763 return 0;
1764 if (generate)
1765 emit_constant_insn (cond,
1766 gen_rtx_SET (VOIDmode, target, source));
1767 return 1;
1770 /* We don't know how to handle other cases yet. */
1771 gcc_assert (remainder == 0xffffffff);
1773 if (generate)
1774 emit_constant_insn (cond,
1775 gen_rtx_SET (VOIDmode, target,
1776 gen_rtx_NOT (mode, source)));
1777 return 1;
1779 case MINUS:
1780 /* We treat MINUS as (val - source), since (source - val) is always
1781 passed as (source + (-val)). */
1782 if (remainder == 0)
1784 if (generate)
1785 emit_constant_insn (cond,
1786 gen_rtx_SET (VOIDmode, target,
1787 gen_rtx_NEG (mode, source)));
1788 return 1;
1790 if (const_ok_for_arm (val))
1792 if (generate)
1793 emit_constant_insn (cond,
1794 gen_rtx_SET (VOIDmode, target,
1795 gen_rtx_MINUS (mode, GEN_INT (val),
1796 source)));
1797 return 1;
1799 can_negate = 1;
1801 break;
1803 default:
1804 gcc_unreachable ();
1807 /* If we can do it in one insn get out quickly. */
1808 if (const_ok_for_arm (val)
1809 || (can_negate_initial && const_ok_for_arm (-val))
1810 || (can_invert && const_ok_for_arm (~val)))
1812 if (generate)
1813 emit_constant_insn (cond,
1814 gen_rtx_SET (VOIDmode, target,
1815 (source
1816 ? gen_rtx_fmt_ee (code, mode, source,
1817 GEN_INT (val))
1818 : GEN_INT (val))));
1819 return 1;
1822 /* Calculate a few attributes that may be useful for specific
1823 optimizations. */
1824 for (i = 31; i >= 0; i--)
1826 if ((remainder & (1 << i)) == 0)
1827 clear_sign_bit_copies++;
1828 else
1829 break;
1832 for (i = 31; i >= 0; i--)
1834 if ((remainder & (1 << i)) != 0)
1835 set_sign_bit_copies++;
1836 else
1837 break;
1840 for (i = 0; i <= 31; i++)
1842 if ((remainder & (1 << i)) == 0)
1843 clear_zero_bit_copies++;
1844 else
1845 break;
1848 for (i = 0; i <= 31; i++)
1850 if ((remainder & (1 << i)) != 0)
1851 set_zero_bit_copies++;
1852 else
1853 break;
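/* For example, remainder == 0x0000ff00 yields clear_sign_bit_copies == 16,
   set_sign_bit_copies == 0, clear_zero_bit_copies == 8 and
   set_zero_bit_copies == 0.  */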
1856 switch (code)
1858 case SET:
1859 /* See if we can do this by sign_extending a constant that is known
1860 to be negative. This is a good way of doing it, since the shift
1861 may well merge into a subsequent insn. */
1862 if (set_sign_bit_copies > 1)
1864 if (const_ok_for_arm
1865 (temp1 = ARM_SIGN_EXTEND (remainder
1866 << (set_sign_bit_copies - 1))))
1868 if (generate)
1870 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1871 emit_constant_insn (cond,
1872 gen_rtx_SET (VOIDmode, new_src,
1873 GEN_INT (temp1)));
1874 emit_constant_insn (cond,
1875 gen_ashrsi3 (target, new_src,
1876 GEN_INT (set_sign_bit_copies - 1)));
1878 return 2;
1880 /* For an inverted constant, we will need to set the low bits;
1881 these will be shifted out of harm's way. */
1882 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1883 if (const_ok_for_arm (~temp1))
1885 if (generate)
1887 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1888 emit_constant_insn (cond,
1889 gen_rtx_SET (VOIDmode, new_src,
1890 GEN_INT (temp1)));
1891 emit_constant_insn (cond,
1892 gen_ashrsi3 (target, new_src,
1893 GEN_INT (set_sign_bit_copies - 1)));
1895 return 2;
1899 /* See if we can calculate the value as the difference between two
1900 valid immediates. */
1901 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1903 int topshift = clear_sign_bit_copies & ~1;
1905 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1906 & (0xff000000 >> topshift));
1908 /* If temp1 is zero, then that means the 9 most significant
1909 bits of remainder were 1 and we've caused it to overflow.
1910 When topshift is 0 we don't need to do anything since we
1911 can borrow from 'bit 32'. */
1912 if (temp1 == 0 && topshift != 0)
1913 temp1 = 0x80000000 >> (topshift - 1);
1915 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
1917 if (const_ok_for_arm (temp2))
1919 if (generate)
1921 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1922 emit_constant_insn (cond,
1923 gen_rtx_SET (VOIDmode, new_src,
1924 GEN_INT (temp1)));
1925 emit_constant_insn (cond,
1926 gen_addsi3 (target, new_src,
1927 GEN_INT (-temp2)));
1930 return 2;
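/* For example, remainder == 0xfff gives temp1 == 0x1000 and
   temp2 == 1, so we emit (roughly) mov rT, #0x1000 followed by
   sub target, rT, #1.  */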
1934 /* See if we can generate this by setting the bottom (or the top)
1935 16 bits, and then shifting these into the other half of the
1936 word. We only look for the simplest cases; to do more would cost
1937 too much. Be careful, however, not to generate this when the
1938 alternative would take fewer insns. */
1939 if (val & 0xffff0000)
1941 temp1 = remainder & 0xffff0000;
1942 temp2 = remainder & 0x0000ffff;
1944 /* Overlaps outside this range are best done using other methods. */
1945 for (i = 9; i < 24; i++)
1947 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1948 && !const_ok_for_arm (temp2))
1950 rtx new_src = (subtargets
1951 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1952 : target);
1953 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1954 source, subtargets, generate);
1955 source = new_src;
1956 if (generate)
1957 emit_constant_insn
1958 (cond,
1959 gen_rtx_SET
1960 (VOIDmode, target,
1961 gen_rtx_IOR (mode,
1962 gen_rtx_ASHIFT (mode, source,
1963 GEN_INT (i)),
1964 source)));
1965 return insns + 1;
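/* For example, 0x01010101 is built by synthesizing 0x0101 into a
   temporary rT and then emitting orr target, rT, rT, lsl #16.  */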
1969 /* Don't duplicate cases already considered. */
1970 for (i = 17; i < 24; i++)
1972 if (((temp1 | (temp1 >> i)) == remainder)
1973 && !const_ok_for_arm (temp1))
1975 rtx new_src = (subtargets
1976 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1977 : target);
1978 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1979 source, subtargets, generate);
1980 source = new_src;
1981 if (generate)
1982 emit_constant_insn
1983 (cond,
1984 gen_rtx_SET (VOIDmode, target,
1985 gen_rtx_IOR
1986 (mode,
1987 gen_rtx_LSHIFTRT (mode, source,
1988 GEN_INT (i)),
1989 source)));
1990 return insns + 1;
1994 break;
1996 case IOR:
1997 case XOR:
1998 /* If we have IOR or XOR, and the constant can be loaded in a
1999 single instruction, and we can find a temporary to put it in,
2000 then this can be done in two instructions instead of 3-4. */
2001 if (subtargets
2002 /* TARGET can't be NULL if SUBTARGETS is 0. */
2003 || (reload_completed && !reg_mentioned_p (target, source)))
2005 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2007 if (generate)
2009 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2011 emit_constant_insn (cond,
2012 gen_rtx_SET (VOIDmode, sub,
2013 GEN_INT (val)));
2014 emit_constant_insn (cond,
2015 gen_rtx_SET (VOIDmode, target,
2016 gen_rtx_fmt_ee (code, mode,
2017 source, sub)));
2019 return 2;
2023 if (code == XOR)
2024 break;
2026 if (set_sign_bit_copies > 8
2027 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2029 if (generate)
2031 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2032 rtx shift = GEN_INT (set_sign_bit_copies);
2034 emit_constant_insn
2035 (cond,
2036 gen_rtx_SET (VOIDmode, sub,
2037 gen_rtx_NOT (mode,
2038 gen_rtx_ASHIFT (mode,
2039 source,
2040 shift))));
2041 emit_constant_insn
2042 (cond,
2043 gen_rtx_SET (VOIDmode, target,
2044 gen_rtx_NOT (mode,
2045 gen_rtx_LSHIFTRT (mode, sub,
2046 shift))));
2048 return 2;
2051 if (set_zero_bit_copies > 8
2052 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2054 if (generate)
2056 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2057 rtx shift = GEN_INT (set_zero_bit_copies);
2059 emit_constant_insn
2060 (cond,
2061 gen_rtx_SET (VOIDmode, sub,
2062 gen_rtx_NOT (mode,
2063 gen_rtx_LSHIFTRT (mode,
2064 source,
2065 shift))));
2066 emit_constant_insn
2067 (cond,
2068 gen_rtx_SET (VOIDmode, target,
2069 gen_rtx_NOT (mode,
2070 gen_rtx_ASHIFT (mode, sub,
2071 shift))));
2073 return 2;
2076 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2078 if (generate)
2080 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2081 emit_constant_insn (cond,
2082 gen_rtx_SET (VOIDmode, sub,
2083 gen_rtx_NOT (mode, source)));
2084 source = sub;
2085 if (subtargets)
2086 sub = gen_reg_rtx (mode);
2087 emit_constant_insn (cond,
2088 gen_rtx_SET (VOIDmode, sub,
2089 gen_rtx_AND (mode, source,
2090 GEN_INT (temp1))));
2091 emit_constant_insn (cond,
2092 gen_rtx_SET (VOIDmode, target,
2093 gen_rtx_NOT (mode, sub)));
2095 return 3;
2097 break;
2099 case AND:
2100 /* See if two shifts will do 2 or more insns' worth of work. */
2101 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2103 HOST_WIDE_INT shift_mask = ((0xffffffff
2104 << (32 - clear_sign_bit_copies))
2105 & 0xffffffff);
2107 if ((remainder | shift_mask) != 0xffffffff)
2109 if (generate)
2111 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2112 insns = arm_gen_constant (AND, mode, cond,
2113 remainder | shift_mask,
2114 new_src, source, subtargets, 1);
2115 source = new_src;
2117 else
2119 rtx targ = subtargets ? NULL_RTX : target;
2120 insns = arm_gen_constant (AND, mode, cond,
2121 remainder | shift_mask,
2122 targ, source, subtargets, 0);
2126 if (generate)
2128 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2129 rtx shift = GEN_INT (clear_sign_bit_copies);
2131 emit_insn (gen_ashlsi3 (new_src, source, shift));
2132 emit_insn (gen_lshrsi3 (target, new_src, shift));
2135 return insns + 2;
2138 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2140 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2142 if ((remainder | shift_mask) != 0xffffffff)
2144 if (generate)
2146 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2148 insns = arm_gen_constant (AND, mode, cond,
2149 remainder | shift_mask,
2150 new_src, source, subtargets, 1);
2151 source = new_src;
2153 else
2155 rtx targ = subtargets ? NULL_RTX : target;
2157 insns = arm_gen_constant (AND, mode, cond,
2158 remainder | shift_mask,
2159 targ, source, subtargets, 0);
2163 if (generate)
2165 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2166 rtx shift = GEN_INT (clear_zero_bit_copies);
2168 emit_insn (gen_lshrsi3 (new_src, source, shift));
2169 emit_insn (gen_ashlsi3 (target, new_src, shift));
2172 return insns + 2;
2175 break;
2177 default:
2178 break;
2181 for (i = 0; i < 32; i++)
2182 if (remainder & (1 << i))
2183 num_bits_set++;
2185 if (code == AND || (can_invert && num_bits_set > 16))
2186 remainder = (~remainder) & 0xffffffff;
2187 else if (code == PLUS && num_bits_set > 16)
2188 remainder = (-remainder) & 0xffffffff;
2189 else
2191 can_invert = 0;
2192 can_negate = 0;
2195 /* Now try to find a way of doing the job in either two or three
2196 instructions.
2197 We start by looking for the largest block of zeros that is aligned on
2198 a 2-bit boundary; we then fill up the temps, wrapping around to the
2199 top of the word when we drop off the bottom.
2200 In the worst case this code should produce no more than four insns. */
2202 int best_start = 0;
2203 int best_consecutive_zeros = 0;
2205 for (i = 0; i < 32; i += 2)
2207 int consecutive_zeros = 0;
2209 if (!(remainder & (3 << i)))
2211 while ((i < 32) && !(remainder & (3 << i)))
2213 consecutive_zeros += 2;
2214 i += 2;
2216 if (consecutive_zeros > best_consecutive_zeros)
2218 best_consecutive_zeros = consecutive_zeros;
2219 best_start = i - consecutive_zeros;
2221 i -= 2;
2225 /* So long as it won't require any more insns to do so, it's
2226 desirable to emit a small constant (in bits 0...9) in the last
2227 insn. This way there is more chance that it can be combined with
2228 a later addressing insn to form a pre-indexed load or store
2229 operation. Consider:
2231 *((volatile int *)0xe0000100) = 1;
2232 *((volatile int *)0xe0000110) = 2;
2234 We want this to wind up as:
2236 mov rA, #0xe0000000
2237 mov rB, #1
2238 str rB, [rA, #0x100]
2239 mov rB, #2
2240 str rB, [rA, #0x110]
2242 rather than having to synthesize both large constants from scratch.
2244 Therefore, we calculate how many insns would be required to emit
2245 the constant starting from `best_start', and also starting from
2246 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2247 yield a shorter sequence, we may as well use zero. */
2248 if (best_start != 0
2249 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2250 && (count_insns_for_constant (remainder, 0) <=
2251 count_insns_for_constant (remainder, best_start)))
2252 best_start = 0;
2254 /* Now start emitting the insns. */
2255 i = best_start;
2258 int end;
2260 if (i <= 0)
2261 i += 32;
2262 if (remainder & (3 << (i - 2)))
2264 end = i - 8;
2265 if (end < 0)
2266 end += 32;
2267 temp1 = remainder & ((0x0ff << end)
2268 | ((i < end) ? (0xff >> (32 - end)) : 0));
2269 remainder &= ~temp1;
2271 if (generate)
2273 rtx new_src, temp1_rtx;
2275 if (code == SET || code == MINUS)
2277 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2278 if (can_invert && code != MINUS)
2279 temp1 = ~temp1;
2281 else
2283 if (remainder && subtargets)
2284 new_src = gen_reg_rtx (mode);
2285 else
2286 new_src = target;
2287 if (can_invert)
2288 temp1 = ~temp1;
2289 else if (can_negate)
2290 temp1 = -temp1;
2293 temp1 = trunc_int_for_mode (temp1, mode);
2294 temp1_rtx = GEN_INT (temp1);
2296 if (code == SET)
2298 else if (code == MINUS)
2299 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2300 else
2301 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2303 emit_constant_insn (cond,
2304 gen_rtx_SET (VOIDmode, new_src,
2305 temp1_rtx));
2306 source = new_src;
2309 if (code == SET)
2311 can_invert = 0;
2312 code = PLUS;
2314 else if (code == MINUS)
2315 code = PLUS;
2317 insns++;
2318 i -= 6;
2320 i -= 2;
2322 while (remainder);
2325 return insns;
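/* Worst case example: a SET of 0x12345678 needs four chunks,
   e.g. mov target, #0x12000000; orr target, target, #0x340000;
   orr target, target, #0x5600; orr target, target, #0x78.  */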
2328 /* Canonicalize a comparison so that we are more likely to recognize it.
2329 This can be done for a few constant compares, where we can make the
2330 immediate value easier to load. */
2332 enum rtx_code
2333 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2334 rtx * op1)
2336 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2337 unsigned HOST_WIDE_INT maxval;
2338 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2340 switch (code)
2342 case EQ:
2343 case NE:
2344 return code;
2346 case GT:
2347 case LE:
2348 if (i != maxval
2349 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2351 *op1 = GEN_INT (i + 1);
2352 return code == GT ? GE : LT;
2354 break;
2356 case GE:
2357 case LT:
2358 if (i != ~maxval
2359 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2361 *op1 = GEN_INT (i - 1);
2362 return code == GE ? GT : LE;
2364 break;
2366 case GTU:
2367 case LEU:
2368 if (i != ~((unsigned HOST_WIDE_INT) 0)
2369 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2371 *op1 = GEN_INT (i + 1);
2372 return code == GTU ? GEU : LTU;
2374 break;
2376 case GEU:
2377 case LTU:
2378 if (i != 0
2379 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2381 *op1 = GEN_INT (i - 1);
2382 return code == GEU ? GTU : LEU;
2384 break;
2386 default:
2387 gcc_unreachable ();
2390 return code;
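/* For example, (x > 0xffffff) is rewritten as (x >= 0x1000000):
   0xffffff is not a valid ARM immediate, but 0x1000000 is.  */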
2394 /* Define how to find the value returned by a function. */
2397 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2399 enum machine_mode mode;
2400 int unsignedp ATTRIBUTE_UNUSED;
2401 rtx r ATTRIBUTE_UNUSED;
2403 mode = TYPE_MODE (type);
2404 /* Promote integer types. */
2405 if (INTEGRAL_TYPE_P (type))
2406 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2408 /* Promote small structs returned in a register to full-word size
2409 for big-endian AAPCS. */
2410 if (arm_return_in_msb (type))
2412 HOST_WIDE_INT size = int_size_in_bytes (type);
2413 if (size % UNITS_PER_WORD != 0)
2415 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2416 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2420 return LIBCALL_VALUE(mode);
2423 /* Determine the amount of memory needed to store the possible return
2424 registers of an untyped call. */
2426 arm_apply_result_size (void)
2428 int size = 16;
2430 if (TARGET_ARM)
2432 if (TARGET_HARD_FLOAT_ABI)
2434 if (TARGET_FPA)
2435 size += 12;
2436 if (TARGET_MAVERICK)
2437 size += 8;
2439 if (TARGET_IWMMXT_ABI)
2440 size += 8;
2443 return size;
2446 /* Decide whether a type should be returned in memory (true)
2447 or in a register (false). This is called by the macro
2448 RETURN_IN_MEMORY. */
2450 arm_return_in_memory (tree type)
2452 HOST_WIDE_INT size;
2454 if (!AGGREGATE_TYPE_P (type) &&
2455 (TREE_CODE (type) != VECTOR_TYPE) &&
2456 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2457 /* All simple types are returned in registers.
2458 For AAPCS, complex types are treated the same as aggregates. */
2459 return 0;
2461 size = int_size_in_bytes (type);
2463 if (arm_abi != ARM_ABI_APCS)
2465 /* ATPCS and later return aggregate types in memory only if they are
2466 larger than a word (or are variable size). */
2467 return (size < 0 || size > UNITS_PER_WORD);
2470 /* To maximize backwards compatibility with previous versions of gcc,
2471 return vectors up to 4 words in registers. */
2472 if (TREE_CODE (type) == VECTOR_TYPE)
2473 return (size < 0 || size > (4 * UNITS_PER_WORD));
2475 /* For the arm-wince targets we choose to be compatible with Microsoft's
2476 ARM and Thumb compilers, which always return aggregates in memory. */
2477 #ifndef ARM_WINCE
2478 /* All structures/unions bigger than one word are returned in memory.
2479 Also catch the case where int_size_in_bytes returns -1. In this case
2480 the aggregate is either huge or of variable size, and in either case
2481 we will want to return it via memory and not in a register. */
2482 if (size < 0 || size > UNITS_PER_WORD)
2483 return 1;
2485 if (TREE_CODE (type) == RECORD_TYPE)
2487 tree field;
2489 /* For a struct the APCS says that we only return in a register
2490 if the type is 'integer like' and every addressable element
2491 has an offset of zero. For practical purposes this means
2492 that the structure can have at most one non-bit-field element
2493 and that this element must be the first one in the structure. */
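/* So, for instance, struct { int i; } or struct { int i : 8; int j : 24; }
   can come back in a register, while struct { float f; } must be
   returned in memory.  */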
2495 /* Find the first field, ignoring non FIELD_DECL things which will
2496 have been created by C++. */
2497 for (field = TYPE_FIELDS (type);
2498 field && TREE_CODE (field) != FIELD_DECL;
2499 field = TREE_CHAIN (field))
2500 continue;
2502 if (field == NULL)
2503 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2505 /* Check that the first field is valid for returning in a register. */
2507 /* ... Floats are not allowed. */
2508 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2509 return 1;
2511 /* ... Aggregates that are not themselves valid for returning in
2512 a register are not allowed. */
2513 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2514 return 1;
2516 /* Now check the remaining fields, if any. Only bitfields are allowed,
2517 since they are not addressable. */
2518 for (field = TREE_CHAIN (field);
2519 field;
2520 field = TREE_CHAIN (field))
2522 if (TREE_CODE (field) != FIELD_DECL)
2523 continue;
2525 if (!DECL_BIT_FIELD_TYPE (field))
2526 return 1;
2529 return 0;
2532 if (TREE_CODE (type) == UNION_TYPE)
2534 tree field;
2536 /* Unions can be returned in registers if every element is
2537 integral, or can be returned in an integer register. */
2538 for (field = TYPE_FIELDS (type);
2539 field;
2540 field = TREE_CHAIN (field))
2542 if (TREE_CODE (field) != FIELD_DECL)
2543 continue;
2545 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2546 return 1;
2548 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2549 return 1;
2552 return 0;
2554 #endif /* not ARM_WINCE */
2556 /* Return all other types in memory. */
2557 return 1;
2560 /* Indicate whether or not words of a double are in big-endian order. */
2563 arm_float_words_big_endian (void)
2565 if (TARGET_MAVERICK)
2566 return 0;
2568 /* For FPA, float words are always big-endian. For VFP, float words
2569 follow the memory system mode. */
2571 if (TARGET_FPA)
2573 return 1;
2576 if (TARGET_VFP)
2577 return (TARGET_BIG_END ? 1 : 0);
2579 return 1;
2582 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2583 for a call to a function whose data type is FNTYPE.
2584 For a library call, FNTYPE is NULL. */
2585 void
2586 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2587 rtx libname ATTRIBUTE_UNUSED,
2588 tree fndecl ATTRIBUTE_UNUSED)
2590 /* On the ARM, the offset starts at 0. */
2591 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2592 pcum->iwmmxt_nregs = 0;
2593 pcum->can_split = true;
2595 pcum->call_cookie = CALL_NORMAL;
2597 if (TARGET_LONG_CALLS)
2598 pcum->call_cookie = CALL_LONG;
2600 /* Check for long call/short call attributes. The attributes
2601 override any command line option. */
2602 if (fntype)
2604 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2605 pcum->call_cookie = CALL_SHORT;
2606 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2607 pcum->call_cookie = CALL_LONG;
2610 /* Varargs vectors are treated the same as long long.
2611 named_count avoids having to change the way arm handles 'named'. */
2612 pcum->named_count = 0;
2613 pcum->nargs = 0;
2615 if (TARGET_REALLY_IWMMXT && fntype)
2617 tree fn_arg;
2619 for (fn_arg = TYPE_ARG_TYPES (fntype);
2620 fn_arg;
2621 fn_arg = TREE_CHAIN (fn_arg))
2622 pcum->named_count += 1;
2624 if (! pcum->named_count)
2625 pcum->named_count = INT_MAX;
2630 /* Return true if mode/type need doubleword alignment. */
2631 bool
2632 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2634 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2635 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2639 /* Determine where to put an argument to a function.
2640 Value is zero to push the argument on the stack,
2641 or a hard register in which to store the argument.
2643 MODE is the argument's machine mode.
2644 TYPE is the data type of the argument (as a tree).
2645 This is null for libcalls where that information may
2646 not be available.
2647 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2648 the preceding args and about the function being called.
2649 NAMED is nonzero if this argument is a named parameter
2650 (otherwise it is an extra parameter matching an ellipsis). */
2653 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2654 tree type, int named)
2656 int nregs;
2658 /* Varargs vectors are treated the same as long long.
2659 named_count avoids having to change the way arm handles 'named'. */
2660 if (TARGET_IWMMXT_ABI
2661 && arm_vector_mode_supported_p (mode)
2662 && pcum->named_count > pcum->nargs + 1)
2664 if (pcum->iwmmxt_nregs <= 9)
2665 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2666 else
2668 pcum->can_split = false;
2669 return NULL_RTX;
2673 /* Put doubleword aligned quantities in even register pairs. */
2674 if (pcum->nregs & 1
2675 && ARM_DOUBLEWORD_ALIGN
2676 && arm_needs_doubleword_align (mode, type))
2677 pcum->nregs++;
2679 if (mode == VOIDmode)
2680 /* Compute operand 2 of the call insn. */
2681 return GEN_INT (pcum->call_cookie);
2683 /* Only allow splitting an arg between regs and memory if all preceding
2684 args were allocated to regs. For args passed by reference we only count
2685 the reference pointer. */
2686 if (pcum->can_split)
2687 nregs = 1;
2688 else
2689 nregs = ARM_NUM_REGS2 (mode, type);
2691 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2692 return NULL_RTX;
2694 return gen_rtx_REG (mode, pcum->nregs);
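/* Under AAPCS, for example, f (int a, long long b) passes A in r0
   and B in the even pair r2/r3, leaving r1 unused because of the
   doubleword-alignment step above.  */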
2697 static int
2698 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2699 tree type, bool named ATTRIBUTE_UNUSED)
2701 int nregs = pcum->nregs;
2703 if (arm_vector_mode_supported_p (mode))
2704 return 0;
2706 if (NUM_ARG_REGS > nregs
2707 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2708 && pcum->can_split)
2709 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2711 return 0;
2714 /* Variable sized types are passed by reference. This is a GCC
2715 extension to the ARM ABI. */
2717 static bool
2718 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2719 enum machine_mode mode ATTRIBUTE_UNUSED,
2720 tree type, bool named ATTRIBUTE_UNUSED)
2722 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2725 /* Encode the current state of the #pragma [no_]long_calls. */
2726 typedef enum
2728 OFF, /* No #pragma [no_]long_calls is in effect. */
2729 LONG, /* #pragma long_calls is in effect. */
2730 SHORT /* #pragma no_long_calls is in effect. */
2731 } arm_pragma_enum;
2733 static arm_pragma_enum arm_pragma_long_calls = OFF;
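/* Illustrative use from C source (the attributes are attached by
   arm_set_default_type_attributes below):

     #pragma long_calls
     void far_away (void);	-- gets the long_call attribute
     #pragma no_long_calls
     void near_by (void);	-- gets the short_call attribute
     #pragma long_calls_off
     void ordinary (void);	-- back to the command-line default  */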
2735 void
2736 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2738 arm_pragma_long_calls = LONG;
2741 void
2742 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2744 arm_pragma_long_calls = SHORT;
2747 void
2748 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2750 arm_pragma_long_calls = OFF;
2753 /* Table of machine attributes. */
2754 const struct attribute_spec arm_attribute_table[] =
2756 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2757 /* Function calls made to this symbol must be done indirectly, because
2758 it may lie outside of the 26 bit addressing range of a normal function
2759 call. */
2760 { "long_call", 0, 0, false, true, true, NULL },
2761 /* Whereas these functions are always known to reside within the 26 bit
2762 addressing range. */
2763 { "short_call", 0, 0, false, true, true, NULL },
2764 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2765 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2766 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2767 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2768 #ifdef ARM_PE
2769 /* ARM/PE has three new attributes:
2770 interfacearm - ?
2771 dllexport - for exporting a function/variable that will live in a dll
2772 dllimport - for importing a function/variable from a dll
2774 Microsoft allows multiple declspecs in one __declspec, separating
2775 them with spaces. We do NOT support this. Instead, use __declspec
2776 multiple times.
2778 { "dllimport", 0, 0, true, false, false, NULL },
2779 { "dllexport", 0, 0, true, false, false, NULL },
2780 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2781 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2782 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2783 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2784 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2785 #endif
2786 { NULL, 0, 0, false, false, false, NULL }
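/* Typical uses from C source, for illustration:

     void far_func (void) __attribute__ ((long_call));
     void irq_handler (void) __attribute__ ((interrupt ("IRQ")));
     void bare_entry (void) __attribute__ ((naked));  */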
2789 /* Handle an attribute requiring a FUNCTION_DECL;
2790 arguments as in struct attribute_spec.handler. */
2791 static tree
2792 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2793 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2795 if (TREE_CODE (*node) != FUNCTION_DECL)
2797 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2798 IDENTIFIER_POINTER (name));
2799 *no_add_attrs = true;
2802 return NULL_TREE;
2805 /* Handle an "interrupt" or "isr" attribute;
2806 arguments as in struct attribute_spec.handler. */
2807 static tree
2808 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2809 bool *no_add_attrs)
2811 if (DECL_P (*node))
2813 if (TREE_CODE (*node) != FUNCTION_DECL)
2815 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2816 IDENTIFIER_POINTER (name));
2817 *no_add_attrs = true;
2819 /* FIXME: the argument, if any, is checked for type attributes;
2820 should it be checked for decl ones? */
2822 else
2824 if (TREE_CODE (*node) == FUNCTION_TYPE
2825 || TREE_CODE (*node) == METHOD_TYPE)
2827 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2829 warning (OPT_Wattributes, "%qs attribute ignored",
2830 IDENTIFIER_POINTER (name));
2831 *no_add_attrs = true;
2834 else if (TREE_CODE (*node) == POINTER_TYPE
2835 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2836 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2837 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2839 *node = build_variant_type_copy (*node);
2840 TREE_TYPE (*node) = build_type_attribute_variant
2841 (TREE_TYPE (*node),
2842 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2843 *no_add_attrs = true;
2845 else
2847 /* Possibly pass this attribute on from the type to a decl. */
2848 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2849 | (int) ATTR_FLAG_FUNCTION_NEXT
2850 | (int) ATTR_FLAG_ARRAY_NEXT))
2852 *no_add_attrs = true;
2853 return tree_cons (name, args, NULL_TREE);
2855 else
2857 warning (OPT_Wattributes, "%qs attribute ignored",
2858 IDENTIFIER_POINTER (name));
2863 return NULL_TREE;
2866 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2867 /* Handle the "notshared" attribute. This attribute is another way of
2868 requesting hidden visibility. ARM's compiler supports
2869 "__declspec(notshared)"; we support the same thing via an
2870 attribute. */
2872 static tree
2873 arm_handle_notshared_attribute (tree *node,
2874 tree name ATTRIBUTE_UNUSED,
2875 tree args ATTRIBUTE_UNUSED,
2876 int flags ATTRIBUTE_UNUSED,
2877 bool *no_add_attrs)
2879 tree decl = TYPE_NAME (*node);
2881 if (decl)
2883 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2884 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2885 *no_add_attrs = false;
2887 return NULL_TREE;
2889 #endif
2891 /* Return 0 if the attributes for two types are incompatible, 1 if they
2892 are compatible, and 2 if they are nearly compatible (which causes a
2893 warning to be generated). */
2894 static int
2895 arm_comp_type_attributes (tree type1, tree type2)
2897 int l1, l2, s1, s2;
2899 /* Check for mismatch of non-default calling convention. */
2900 if (TREE_CODE (type1) != FUNCTION_TYPE)
2901 return 1;
2903 /* Check for mismatched call attributes. */
2904 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2905 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2906 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2907 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2909 /* Only bother to check if an attribute is defined. */
2910 if (l1 | l2 | s1 | s2)
2912 /* If one type has an attribute, the other must have the same attribute. */
2913 if ((l1 != l2) || (s1 != s2))
2914 return 0;
2916 /* Disallow mixed attributes. */
2917 if ((l1 & s2) || (l2 & s1))
2918 return 0;
2921 /* Check for mismatched ISR attribute. */
2922 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2923 if (! l1)
2924 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2925 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2926 if (! l2)
2927 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2928 if (l1 != l2)
2929 return 0;
2931 return 1;
2934 /* Encode long_call or short_call attribute by prefixing
2935 symbol name in DECL with a special character FLAG. */
2936 void
2937 arm_encode_call_attribute (tree decl, int flag)
2939 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2940 int len = strlen (str);
2941 char * newstr;
2943 /* Do not allow weak functions to be treated as short call. */
2944 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2945 return;
2947 newstr = alloca (len + 2);
2948 newstr[0] = flag;
2949 strcpy (newstr + 1, str);
2951 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2952 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2955 /* Assigns default attributes to newly defined type. This is used to
2956 set short_call/long_call attributes for function types of
2957 functions defined inside corresponding #pragma scopes. */
2958 static void
2959 arm_set_default_type_attributes (tree type)
2961 /* Add __attribute__ ((long_call)) to all functions, when
2962 inside #pragma long_calls or __attribute__ ((short_call)),
2963 when inside #pragma no_long_calls. */
2964 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2966 tree type_attr_list, attr_name;
2967 type_attr_list = TYPE_ATTRIBUTES (type);
2969 if (arm_pragma_long_calls == LONG)
2970 attr_name = get_identifier ("long_call");
2971 else if (arm_pragma_long_calls == SHORT)
2972 attr_name = get_identifier ("short_call");
2973 else
2974 return;
2976 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2977 TYPE_ATTRIBUTES (type) = type_attr_list;
2981 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2982 defined within the current compilation unit. If this cannot be
2983 determined, then 0 is returned. */
2984 static int
2985 current_file_function_operand (rtx sym_ref)
2987 /* This is a bit of a fib. A function will have a short call flag
2988 applied to its name if it has the short call attribute, or it has
2989 already been defined within the current compilation unit. */
2990 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2991 return 1;
2993 /* The current function is always defined within the current compilation
2994 unit. If it is a weak definition, however, then this may not be the real
2995 definition of the function, and so we have to say no. */
2996 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2997 && !DECL_WEAK (current_function_decl))
2998 return 1;
3000 /* We cannot make the determination - default to returning 0. */
3001 return 0;
3004 /* Return nonzero if a 32 bit "long_call" should be generated for
3005 this call. We generate a long_call if the function:
3007 a. has an __attribute__ ((long_call))
3008 or b. is within the scope of a #pragma long_calls
3009 or c. the -mlong-calls command line switch has been specified,
3010 and either:
3011 1. -ffunction-sections is in effect
3012 or 2. the current function has __attribute__ ((section))
3013 or 3. the target function has __attribute__ ((section))
3015 However we do not generate a long call if the function:
3017 d. has an __attribute__ ((short_call))
3018 or e. is inside the scope of a #pragma no_long_calls
3019 or f. is defined within the current compilation unit.
3021 This function will be called by C fragments contained in the machine
3022 description file. SYM_REF and CALL_COOKIE correspond to the matched
3023 rtl operands. CALL_SYMBOL is used to distinguish between
3024 two different callers of the function. It is set to 1 in the
3025 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3026 and "call_value" patterns. This is because of the difference in the
3027 SYM_REFs passed by these patterns. */
3029 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3031 if (!call_symbol)
3033 if (GET_CODE (sym_ref) != MEM)
3034 return 0;
3036 sym_ref = XEXP (sym_ref, 0);
3039 if (GET_CODE (sym_ref) != SYMBOL_REF)
3040 return 0;
3042 if (call_cookie & CALL_SHORT)
3043 return 0;
3045 if (TARGET_LONG_CALLS)
3047 if (flag_function_sections
3048 || DECL_SECTION_NAME (current_function_decl))
3049 /* c.3 is handled by the definition of the
3050 ARM_DECLARE_FUNCTION_SIZE macro. */
3051 return 1;
3054 if (current_file_function_operand (sym_ref))
3055 return 0;
3057 return (call_cookie & CALL_LONG)
3058 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3059 || TARGET_LONG_CALLS;
3062 /* Return nonzero if it is ok to make a tail-call to DECL. */
3063 static bool
3064 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3066 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3068 if (cfun->machine->sibcall_blocked)
3069 return false;
3071 /* Never tailcall something for which we have no decl, or if we
3072 are in Thumb mode. */
3073 if (decl == NULL || TARGET_THUMB)
3074 return false;
3076 /* Get the calling method. */
3077 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3078 call_type = CALL_SHORT;
3079 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3080 call_type = CALL_LONG;
3082 /* Cannot tail-call to long calls, since these are out of range of
3083 a branch instruction. However, if not compiling PIC, we know
3084 we can reach the symbol if it is in this compilation unit. */
3085 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3086 return false;
3088 /* If we are interworking and the function is not declared static
3089 then we can't tail-call it unless we know that it exists in this
3090 compilation unit (since it might be a Thumb routine). */
3091 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3092 return false;
3094 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3095 if (IS_INTERRUPT (arm_current_func_type ()))
3096 return false;
3098 /* Everything else is ok. */
3099 return true;
3103 /* Addressing mode support functions. */
3105 /* Return nonzero if X is a legitimate immediate operand when compiling
3106 for PIC. */
3108 legitimate_pic_operand_p (rtx x)
3110 if (CONSTANT_P (x)
3111 && flag_pic
3112 && (GET_CODE (x) == SYMBOL_REF
3113 || (GET_CODE (x) == CONST
3114 && GET_CODE (XEXP (x, 0)) == PLUS
3115 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
3116 return 0;
3118 return 1;
3122 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3124 if (GET_CODE (orig) == SYMBOL_REF
3125 || GET_CODE (orig) == LABEL_REF)
3127 #ifndef AOF_ASSEMBLER
3128 rtx pic_ref, address;
3129 #endif
3130 rtx insn;
3131 int subregs = 0;
3133 if (reg == 0)
3135 gcc_assert (!no_new_pseudos);
3136 reg = gen_reg_rtx (Pmode);
3138 subregs = 1;
3141 #ifdef AOF_ASSEMBLER
3142 /* The AOF assembler can generate relocations for these directly, and
3143 understands that the PIC register has to be added into the offset. */
3144 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3145 #else
3146 if (subregs)
3147 address = gen_reg_rtx (Pmode);
3148 else
3149 address = reg;
3151 if (TARGET_ARM)
3152 emit_insn (gen_pic_load_addr_arm (address, orig));
3153 else
3154 emit_insn (gen_pic_load_addr_thumb (address, orig));
3156 if ((GET_CODE (orig) == LABEL_REF
3157 || (GET_CODE (orig) == SYMBOL_REF &&
3158 SYMBOL_REF_LOCAL_P (orig)))
3159 && NEED_GOT_RELOC)
3160 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3161 else
3163 pic_ref = gen_const_mem (Pmode,
3164 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3165 address));
3168 insn = emit_move_insn (reg, pic_ref);
3169 #endif
3170 current_function_uses_pic_offset_table = 1;
3171 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3172 by the loop optimizer. */
3173 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3174 REG_NOTES (insn));
3175 return reg;
3177 else if (GET_CODE (orig) == CONST)
3179 rtx base, offset;
3181 if (GET_CODE (XEXP (orig, 0)) == PLUS
3182 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3183 return orig;
3185 if (reg == 0)
3187 gcc_assert (!no_new_pseudos);
3188 reg = gen_reg_rtx (Pmode);
3191 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3193 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3194 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3195 base == reg ? 0 : reg);
3197 if (GET_CODE (offset) == CONST_INT)
3199 /* The base register doesn't really matter, we only want to
3200 test the index for the appropriate mode. */
3201 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3203 gcc_assert (!no_new_pseudos);
3204 offset = force_reg (Pmode, offset);
3207 if (GET_CODE (offset) == CONST_INT)
3208 return plus_constant (base, INTVAL (offset));
3211 if (GET_MODE_SIZE (mode) > 4
3212 && (GET_MODE_CLASS (mode) == MODE_INT
3213 || TARGET_SOFT_FLOAT))
3215 emit_insn (gen_addsi3 (reg, base, offset));
3216 return reg;
3219 return gen_rtx_PLUS (Pmode, base, offset);
3222 return orig;
3226 /* Find a spare low register to use during the prolog of a function. */
3228 static int
3229 thumb_find_work_register (unsigned long pushed_regs_mask)
3231 int reg;
3233 /* Check the argument registers first as these are call-used. The
3234 register allocation order means that sometimes r3 might be used
3235 but earlier argument registers might not, so check them all. */
3236 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3237 if (!regs_ever_live[reg])
3238 return reg;
3240 /* Before going on to check the call-saved registers we can try a couple
3241 more ways of deducing that r3 is available. The first is when we are
3242 pushing anonymous arguments onto the stack and we have fewer than 4
3243 registers' worth of fixed arguments (*). In this case r3 will be part of
3244 the variable argument list and so we can be sure that it will be
3245 pushed right at the start of the function. Hence it will be available
3246 for the rest of the prologue.
3247 (*): i.e. current_function_pretend_args_size is greater than 0. */
3248 if (cfun->machine->uses_anonymous_args
3249 && current_function_pretend_args_size > 0)
3250 return LAST_ARG_REGNUM;
3252 /* The other case is when we have fixed arguments but fewer than 4
3253 registers' worth. In this case r3 might be used in the body of the function, but
3254 it is not being used to convey an argument into the function. In theory
3255 we could just check current_function_args_size to see how many bytes are
3256 being passed in argument registers, but it seems that it is unreliable.
3257 Sometimes it will have the value 0 when in fact arguments are being
3258 passed. (See testcase execute/20021111-1.c for an example.) So we also
3259 check the args_info.nregs field as well. The problem with this field is
3260 that it makes no allowances for arguments that are passed to the
3261 function but which are not used. Hence we could miss an opportunity
3262 when a function has an unused argument in r3. But it is better to be
3263 safe than to be sorry. */
3264 if (! cfun->machine->uses_anonymous_args
3265 && current_function_args_size >= 0
3266 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3267 && cfun->args_info.nregs < 4)
3268 return LAST_ARG_REGNUM;
3270 /* Otherwise look for a call-saved register that is going to be pushed. */
3271 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3272 if (pushed_regs_mask & (1 << reg))
3273 return reg;
3275 /* Something went wrong - thumb_compute_save_reg_mask()
3276 should have arranged for a suitable register to be pushed. */
3277 gcc_unreachable ();
3281 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3282 low register. */
3284 void
3285 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3287 #ifndef AOF_ASSEMBLER
3288 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3289 rtx global_offset_table;
3291 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3292 return;
3294 gcc_assert (flag_pic);
3296 l1 = gen_label_rtx ();
3298 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3299 /* On the ARM the PC register contains 'dot + 8' at the time of the
3300 addition, on the Thumb it is 'dot + 4'. */
3301 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3302 if (GOT_PCREL)
3303 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3304 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3305 else
3306 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3308 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3310 if (TARGET_ARM)
3312 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3313 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3315 else
3317 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3319 /* We will have pushed the pic register, so we should always be
3320 able to find a work register. */
3321 pic_tmp = gen_rtx_REG (SImode,
3322 thumb_find_work_register (saved_regs));
3323 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3324 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3326 else
3327 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3328 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3331 /* Need to emit this whether or not we obey regdecls,
3332 since setjmp/longjmp can cause life info to screw up. */
3333 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3334 #endif /* AOF_ASSEMBLER */
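/* Schematically, the ARM sequence emitted above is:

	ldr	rPIC, Loff
    L1:	add	rPIC, pc, rPIC	@ pc reads as L1 + 8 here
	...
    Loff:	.word	_GLOBAL_OFFSET_TABLE_ - (L1 + 8)

   which is why the constants 8 (ARM) and 4 (Thumb) appear above.  */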
3338 /* Return nonzero if X is valid as an ARM state addressing register. */
3339 static int
3340 arm_address_register_rtx_p (rtx x, int strict_p)
3342 int regno;
3344 if (GET_CODE (x) != REG)
3345 return 0;
3347 regno = REGNO (x);
3349 if (strict_p)
3350 return ARM_REGNO_OK_FOR_BASE_P (regno);
3352 return (regno <= LAST_ARM_REGNUM
3353 || regno >= FIRST_PSEUDO_REGISTER
3354 || regno == FRAME_POINTER_REGNUM
3355 || regno == ARG_POINTER_REGNUM);
3358 /* Return nonzero if X is a valid ARM state address operand. */
3360 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3361 int strict_p)
3363 bool use_ldrd;
3364 enum rtx_code code = GET_CODE (x);
3366 if (arm_address_register_rtx_p (x, strict_p))
3367 return 1;
3369 use_ldrd = (TARGET_LDRD
3370 && (mode == DImode
3371 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3373 if (code == POST_INC || code == PRE_DEC
3374 || ((code == PRE_INC || code == POST_DEC)
3375 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3376 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3378 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3379 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3380 && GET_CODE (XEXP (x, 1)) == PLUS
3381 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3383 rtx addend = XEXP (XEXP (x, 1), 1);
3385 /* Don't allow ldrd post increment by register because it's hard
3386 to fix up invalid register choices. */
3387 if (use_ldrd
3388 && GET_CODE (x) == POST_MODIFY
3389 && GET_CODE (addend) == REG)
3390 return 0;
3392 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3393 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3396 /* After reload constants split into minipools will have addresses
3397 from a LABEL_REF. */
3398 else if (reload_completed
3399 && (code == LABEL_REF
3400 || (code == CONST
3401 && GET_CODE (XEXP (x, 0)) == PLUS
3402 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3403 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3404 return 1;
3406 else if (mode == TImode)
3407 return 0;
3409 else if (code == PLUS)
3411 rtx xop0 = XEXP (x, 0);
3412 rtx xop1 = XEXP (x, 1);
3414 return ((arm_address_register_rtx_p (xop0, strict_p)
3415 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3416 || (arm_address_register_rtx_p (xop1, strict_p)
3417 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3420 #if 0
3421 /* Reload currently can't handle MINUS, so disable this for now */
3422 else if (GET_CODE (x) == MINUS)
3424 rtx xop0 = XEXP (x, 0);
3425 rtx xop1 = XEXP (x, 1);
3427 return (arm_address_register_rtx_p (xop0, strict_p)
3428 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3430 #endif
3432 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3433 && code == SYMBOL_REF
3434 && CONSTANT_POOL_ADDRESS_P (x)
3435 && ! (flag_pic
3436 && symbol_mentioned_p (get_pool_constant (x))))
3437 return 1;
3439 return 0;
3442 /* Return nonzero if INDEX is valid for an address index operand in
3443 ARM state. */
3444 static int
3445 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3446 int strict_p)
3448 HOST_WIDE_INT range;
3449 enum rtx_code code = GET_CODE (index);
3451 /* Standard coprocessor addressing modes. */
3452 if (TARGET_HARD_FLOAT
3453 && (TARGET_FPA || TARGET_MAVERICK)
3454 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3455 || (TARGET_MAVERICK && mode == DImode)))
3456 return (code == CONST_INT && INTVAL (index) < 1024
3457 && INTVAL (index) > -1024
3458 && (INTVAL (index) & 3) == 0);
3460 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3461 return (code == CONST_INT
3462 && INTVAL (index) < 1024
3463 && INTVAL (index) > -1024
3464 && (INTVAL (index) & 3) == 0);
3466 if (arm_address_register_rtx_p (index, strict_p)
3467 && (GET_MODE_SIZE (mode) <= 4))
3468 return 1;
3470 if (mode == DImode || mode == DFmode)
3472 if (code == CONST_INT)
3474 HOST_WIDE_INT val = INTVAL (index);
3476 if (TARGET_LDRD)
3477 return val > -256 && val < 256;
3478 else
3479 return val > -4096 && val < 4092;
3482 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3485 if (GET_MODE_SIZE (mode) <= 4
3486 && ! (arm_arch4
3487 && (mode == HImode
3488 || (mode == QImode && outer == SIGN_EXTEND))))
3490 if (code == MULT)
3492 rtx xiop0 = XEXP (index, 0);
3493 rtx xiop1 = XEXP (index, 1);
3495 return ((arm_address_register_rtx_p (xiop0, strict_p)
3496 && power_of_two_operand (xiop1, SImode))
3497 || (arm_address_register_rtx_p (xiop1, strict_p)
3498 && power_of_two_operand (xiop0, SImode)));
3500 else if (code == LSHIFTRT || code == ASHIFTRT
3501 || code == ASHIFT || code == ROTATERT)
3503 rtx op = XEXP (index, 1);
3505 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3506 && GET_CODE (op) == CONST_INT
3507 && INTVAL (op) > 0
3508 && INTVAL (op) <= 31);
3512 /* For ARM v4 we may be doing a sign-extend operation during the
3513 load. */
3514 if (arm_arch4)
3516 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3517 range = 256;
3518 else
3519 range = 4096;
3521 else
3522 range = (mode == HImode) ? 4095 : 4096;
3524 return (code == CONST_INT
3525 && INTVAL (index) < range
3526 && INTVAL (index) > -range);
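/* In assembler terms, the index forms accepted above correspond to
   addresses such as [rN, #-4095] .. [rN, #4095], [rN, rM] and
   [rN, rM, lsl #2] (the MULT-by-power-of-two and shift cases).  */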
3529 /* Return nonzero if X is valid as a Thumb state base register. */
3530 static int
3531 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3533 int regno;
3535 if (GET_CODE (x) != REG)
3536 return 0;
3538 regno = REGNO (x);
3540 if (strict_p)
3541 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3543 return (regno <= LAST_LO_REGNUM
3544 || regno > LAST_VIRTUAL_REGISTER
3545 || regno == FRAME_POINTER_REGNUM
3546 || (GET_MODE_SIZE (mode) >= 4
3547 && (regno == STACK_POINTER_REGNUM
3548 || regno >= FIRST_PSEUDO_REGISTER
3549 || x == hard_frame_pointer_rtx
3550 || x == arg_pointer_rtx)));
3553 /* Return nonzero if x is a legitimate index register. This is the case
3554 for any base register that can access a QImode object. */
3555 inline static int
3556 thumb_index_register_rtx_p (rtx x, int strict_p)
3558 return thumb_base_register_rtx_p (x, QImode, strict_p);
3561 /* Return nonzero if x is a legitimate Thumb-state address.
3563 The AP may be eliminated to either the SP or the FP, so we use the
3564 least common denominator, e.g. SImode, and offsets from 0 to 64.
3566 ??? Verify whether the above is the right approach.
3568 ??? Also, the FP may be eliminated to the SP, so perhaps that
3569 needs special handling also.
3571 ??? Look at how the mips16 port solves this problem. It probably uses
3572 better ways to solve some of these problems.
3574 Although it is not incorrect, we don't accept QImode and HImode
3575 addresses based on the frame pointer or arg pointer until the
3576 reload pass starts. This is so that eliminating such addresses
3577 into stack based ones won't produce impossible code. */
3579 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3581 /* ??? Not clear if this is right. Experiment. */
3582 if (GET_MODE_SIZE (mode) < 4
3583 && !(reload_in_progress || reload_completed)
3584 && (reg_mentioned_p (frame_pointer_rtx, x)
3585 || reg_mentioned_p (arg_pointer_rtx, x)
3586 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3587 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3588 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3589 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3590 return 0;
3592 /* Accept any base register. SP only in SImode or larger. */
3593 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3594 return 1;
3596 /* This is PC relative data before arm_reorg runs. */
3597 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3598 && GET_CODE (x) == SYMBOL_REF
3599 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3600 return 1;
3602 /* This is PC relative data after arm_reorg runs. */
3603 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3604 && (GET_CODE (x) == LABEL_REF
3605 || (GET_CODE (x) == CONST
3606 && GET_CODE (XEXP (x, 0)) == PLUS
3607 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3608 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3609 return 1;
3611 /* Post-inc indexing only supported for SImode and larger. */
3612 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3613 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3614 return 1;
3616 else if (GET_CODE (x) == PLUS)
3618 /* REG+REG address can be any two index registers. */
3619 /* We disallow FRAME+REG addressing since we know that FRAME
3620 will be replaced with STACK, and SP relative addressing only
3621 permits SP+OFFSET. */
3622 if (GET_MODE_SIZE (mode) <= 4
3623 && XEXP (x, 0) != frame_pointer_rtx
3624 && XEXP (x, 1) != frame_pointer_rtx
3625 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3626 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3627 return 1;
3629 /* REG+const has 5-7 bit offset for non-SP registers. */
3630 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3631 || XEXP (x, 0) == arg_pointer_rtx)
3632 && GET_CODE (XEXP (x, 1)) == CONST_INT
3633 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3634 return 1;
3636 /* REG+const has 10 bit offset for SP, but only SImode and
3637 larger are supported. */
3638 /* ??? Should probably check for DI/DFmode overflow here
3639 just like GO_IF_LEGITIMATE_OFFSET does. */
3640 else if (GET_CODE (XEXP (x, 0)) == REG
3641 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3642 && GET_MODE_SIZE (mode) >= 4
3643 && GET_CODE (XEXP (x, 1)) == CONST_INT
3644 && INTVAL (XEXP (x, 1)) >= 0
3645 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3646 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3647 return 1;
3649 else if (GET_CODE (XEXP (x, 0)) == REG
3650 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3651 && GET_MODE_SIZE (mode) >= 4
3652 && GET_CODE (XEXP (x, 1)) == CONST_INT
3653 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3654 return 1;
3657 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3658 && GET_MODE_SIZE (mode) == 4
3659 && GET_CODE (x) == SYMBOL_REF
3660 && CONSTANT_POOL_ADDRESS_P (x)
3661 && !(flag_pic
3662 && symbol_mentioned_p (get_pool_constant (x))))
3663 return 1;
3665 return 0;
3668 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3669 instruction of mode MODE. */
3671 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3673 switch (GET_MODE_SIZE (mode))
3675 case 1:
3676 return val >= 0 && val < 32;
3678 case 2:
3679 return val >= 0 && val < 64 && (val & 1) == 0;
3681 default:
3682 return (val >= 0
3683 && (val + GET_MODE_SIZE (mode)) <= 128
3684 && (val & 3) == 0);
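/* These correspond to the unsigned 5-bit offset fields of the Thumb
   load/store instructions, scaled by the access size: 0-31 for byte,
   0-62 (even) for halfword, and 0-124 (word-aligned) for word
   accesses.  */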
3688 /* Try machine-dependent ways of modifying an illegitimate address
3689 to be legitimate. If we find one, return the new, valid address. */
3691 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3693 if (GET_CODE (x) == PLUS)
3695 rtx xop0 = XEXP (x, 0);
3696 rtx xop1 = XEXP (x, 1);
3698 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3699 xop0 = force_reg (SImode, xop0);
3701 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3702 xop1 = force_reg (SImode, xop1);
3704 if (ARM_BASE_REGISTER_RTX_P (xop0)
3705 && GET_CODE (xop1) == CONST_INT)
3707 HOST_WIDE_INT n, low_n;
3708 rtx base_reg, val;
3709 n = INTVAL (xop1);
3711 /* VFP addressing modes actually allow greater offsets, but for
3712 now we just stick with the lowest common denominator. */
3713 if (mode == DImode
3714 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3716 low_n = n & 0x0f;
3717 n &= ~0x0f;
3718 if (low_n > 4)
3720 n += 16;
3721 low_n -= 16;
3724 else
3726 low_n = ((mode) == TImode ? 0
3727 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3728 n -= low_n;
3731 base_reg = gen_reg_rtx (SImode);
3732 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3733 GEN_INT (n)), NULL_RTX);
3734 emit_move_insn (base_reg, val);
3735 x = (low_n == 0 ? base_reg
3736 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3738 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3739 x = gen_rtx_PLUS (SImode, xop0, xop1);
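/* Worked examples of the split above (editorial illustration, numbers
   only): for a SImode access at base + 0x12345, low_n = 0x345 and
   n = 0x12000, so one add materializes base + 0x12000 and the load's
   12-bit immediate absorbs 0x345.  For a DImode access at base + 0x1f,
   low_n starts as 0xf, which is greater than 4, so it is rebiased to
   -1 with n = 0x20, keeping the residual offset within reach of the
   doubleword form.  */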
3742 /* XXX We don't allow MINUS any more -- see comment in
3743 arm_legitimate_address_p (). */
3744 else if (GET_CODE (x) == MINUS)
3746 rtx xop0 = XEXP (x, 0);
3747 rtx xop1 = XEXP (x, 1);
3749 if (CONSTANT_P (xop0))
3750 xop0 = force_reg (SImode, xop0);
3752 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3753 xop1 = force_reg (SImode, xop1);
3755 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3756 x = gen_rtx_MINUS (SImode, xop0, xop1);
3759 /* Make sure to take full advantage of the pre-indexed addressing mode
3760 with absolute addresses, which often allows the base register to be
3761 factorized for multiple adjacent memory references, and it might
3762 even allow the minipool to be avoided entirely. */
3763 else if (GET_CODE (x) == CONST_INT && optimize > 0)
3765 unsigned int bits;
3766 HOST_WIDE_INT mask, base, index;
3767 rtx base_reg;
3769 /* ldr and ldrb can use a 12-bit index, ldrsb and the rest can only
3770 use an 8-bit index. So let's use a 12-bit index for SImode only and
3771 hope that arm_gen_constant will enable ldrb to use more bits. */
3772 bits = (mode == SImode) ? 12 : 8;
3773 mask = (1 << bits) - 1;
3774 base = INTVAL (x) & ~mask;
3775 index = INTVAL (x) & mask;
3776 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
3778 /* It'll most probably be more efficient to generate the base
3779 with more bits set and use a negative index instead. */
3780 base |= mask;
3781 index -= mask;
3783 base_reg = force_reg (SImode, GEN_INT (base));
3784 x = gen_rtx_PLUS (SImode, base_reg, GEN_INT (index));
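/* Worked example of the heuristic above (editorial illustration): for
   the absolute SImode address 0x00ffff80, bits = 12 gives
   base = 0x00fff000 and index = 0xf80.  That base has 12 bits set,
   more than (32 - 12)/2, so it becomes base = 0x00ffffff with
   index = -0x7f; the new base is ~0xff000000 and loads with a single
   MVN, where the old one needed two instructions.  */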
3787 if (flag_pic)
3789 /* We need to find and carefully transform any SYMBOL and LABEL
3790 references, so go back to the original address expression. */
3791 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3793 if (new_x != orig_x)
3794 x = new_x;
3797 return x;
3801 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3802 to be legitimate. If we find one, return the new, valid address. */
3804 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3806 if (GET_CODE (x) == PLUS
3807 && GET_CODE (XEXP (x, 1)) == CONST_INT
3808 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3809 || INTVAL (XEXP (x, 1)) < 0))
3811 rtx xop0 = XEXP (x, 0);
3812 rtx xop1 = XEXP (x, 1);
3813 HOST_WIDE_INT offset = INTVAL (xop1);
3815 /* Try and fold the offset into a biasing of the base register and
3816 then offsetting that; done only when optimizing for space, since
3817 forcing the constant into a register below can cause too many CSEs. */
3818 if (optimize_size && offset >= 0
3819 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3821 HOST_WIDE_INT delta;
3823 if (offset >= 256)
3824 delta = offset - (256 - GET_MODE_SIZE (mode));
3825 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3826 delta = 31 * GET_MODE_SIZE (mode);
3827 else
3828 delta = offset & (31 * GET_MODE_SIZE (mode));
3830 xop0 = force_operand (plus_constant (xop0, offset - delta),
3831 NULL_RTX);
3832 x = plus_constant (xop0, delta);
3834 else if (offset < 0 && offset > -256)
3835 /* Small negative offsets are best done with a subtract before the
3836 dereference; forcing these into a register normally takes two
3837 instructions. */
3838 x = force_operand (x, NULL_RTX);
3839 else
3841 /* For the remaining cases, force the constant into a register. */
3842 xop1 = force_reg (SImode, xop1);
3843 x = gen_rtx_PLUS (SImode, xop0, xop1);
3846 else if (GET_CODE (x) == PLUS
3847 && s_register_operand (XEXP (x, 1), SImode)
3848 && !s_register_operand (XEXP (x, 0), SImode))
3850 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3852 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3855 if (flag_pic)
3857 /* We need to find and carefully transform any SYMBOL and LABEL
3858 references, so go back to the original address expression. */
3859 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3861 if (new_x != orig_x)
3862 x = new_x;
3865 return x;
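/* Worked example (editorial illustration): a SImode load at base + 300
   is out of reach, since the ldr immediate stops at 124.  As
   300 >= 256, delta becomes 300 - 252 = 48, so we emit a single Thumb
   add of #252 into a scratch register and address the load as
   scratch + 48, which is within the 7-bit scaled range.  */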
3869 thumb_legitimize_reload_address(rtx *x_p,
3870 enum machine_mode mode,
3871 int opnum, int type,
3872 int ind_levels ATTRIBUTE_UNUSED)
3874 rtx x = *x_p;
3876 if (GET_CODE (x) == PLUS
3877 && GET_MODE_SIZE (mode) < 4
3878 && REG_P (XEXP (x, 0))
3879 && XEXP (x, 0) == stack_pointer_rtx
3880 && GET_CODE (XEXP (x, 1)) == CONST_INT
3881 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3883 rtx orig_x = x;
3885 x = copy_rtx (x);
3886 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
3887 Pmode, VOIDmode, 0, 0, opnum, type);
3888 return x;
3891 /* If both registers are hi-regs, then it's better to reload the
3892 entire expression rather than each register individually. That
3893 only requires one reload register rather than two. */
3894 if (GET_CODE (x) == PLUS
3895 && REG_P (XEXP (x, 0))
3896 && REG_P (XEXP (x, 1))
3897 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
3898 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
3900 rtx orig_x = x;
3902 x = copy_rtx (x);
3903 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
3904 Pmode, VOIDmode, 0, 0, opnum, type);
3905 return x;
3908 return NULL;
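/* Examples (editorial illustration): a QImode access at sp + 100 fails
   thumb_legitimate_offset_p, since the byte form only encodes 0..31,
   so the whole address is pushed as a single reload into a low
   register.  Likewise (plus r8 r9), where both operands are hi-regs,
   is reloaded as one unit and so needs a single scratch register
   instead of two.  */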
3913 #define REG_OR_SUBREG_REG(X) \
3914 (GET_CODE (X) == REG \
3915 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3917 #define REG_OR_SUBREG_RTX(X) \
3918 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3920 #ifndef COSTS_N_INSNS
3921 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3922 #endif
3923 static inline int
3924 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3926 enum machine_mode mode = GET_MODE (x);
3928 switch (code)
3930 case ASHIFT:
3931 case ASHIFTRT:
3932 case LSHIFTRT:
3933 case ROTATERT:
3934 case PLUS:
3935 case MINUS:
3936 case COMPARE:
3937 case NEG:
3938 case NOT:
3939 return COSTS_N_INSNS (1);
3941 case MULT:
3942 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3944 int cycles = 0;
3945 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3947 while (i)
3949 i >>= 2;
3950 cycles++;
3952 return COSTS_N_INSNS (2) + cycles;
3954 return COSTS_N_INSNS (1) + 16;
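/* Worked example (editorial): the loop above models a multiplier that
   retires two bits of the constant per cycle.  For 100 (0x64) the
   value shifts 0x64 -> 0x19 -> 0x6 -> 0x1 -> 0, so cycles = 4 and
   the total cost is COSTS_N_INSNS (2) + 4.  */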
3956 case SET:
3957 return (COSTS_N_INSNS (1)
3958 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3959 + (GET_CODE (SET_DEST (x)) == MEM)));
3961 case CONST_INT:
3962 if (outer == SET)
3964 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3965 return 0;
3966 if (thumb_shiftable_const (INTVAL (x)))
3967 return COSTS_N_INSNS (2);
3968 return COSTS_N_INSNS (3);
3970 else if ((outer == PLUS || outer == COMPARE)
3971 && INTVAL (x) < 256 && INTVAL (x) > -256)
3972 return 0;
3973 else if (outer == AND
3974 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3975 return COSTS_N_INSNS (1);
3976 else if (outer == ASHIFT || outer == ASHIFTRT
3977 || outer == LSHIFTRT)
3978 return 0;
3979 return COSTS_N_INSNS (2);
3981 case CONST:
3982 case CONST_DOUBLE:
3983 case LABEL_REF:
3984 case SYMBOL_REF:
3985 return COSTS_N_INSNS (3);
3987 case UDIV:
3988 case UMOD:
3989 case DIV:
3990 case MOD:
3991 return 100;
3993 case TRUNCATE:
3994 return 99;
3996 case AND:
3997 case XOR:
3998 case IOR:
3999 /* XXX guess. */
4000 return 8;
4002 case MEM:
4003 /* XXX another guess. */
4004 /* Memory costs quite a lot for the first word, but subsequent words
4005 load at the equivalent of a single insn each. */
4006 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4007 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4008 ? 4 : 0));
4010 case IF_THEN_ELSE:
4011 /* XXX a guess. */
4012 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4013 return 14;
4014 return 2;
4016 case ZERO_EXTEND:
4017 /* XXX still guessing. */
4018 switch (GET_MODE (XEXP (x, 0)))
4020 case QImode:
4021 return (1 + (mode == DImode ? 4 : 0)
4022 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4024 case HImode:
4025 return (4 + (mode == DImode ? 4 : 0)
4026 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4028 case SImode:
4029 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4031 default:
4032 return 99;
4035 default:
4036 return 99;
4041 /* Worker routine for arm_rtx_costs. */
4042 static inline int
4043 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4045 enum machine_mode mode = GET_MODE (x);
4046 enum rtx_code subcode;
4047 int extra_cost;
4049 switch (code)
4051 case MEM:
4052 /* Memory costs quite a lot for the first word, but subsequent words
4053 load at the equivalent of a single insn each. */
4054 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4055 + (GET_CODE (x) == SYMBOL_REF
4056 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4058 case DIV:
4059 case MOD:
4060 case UDIV:
4061 case UMOD:
4062 return optimize_size ? COSTS_N_INSNS (2) : 100;
4064 case ROTATE:
4065 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4066 return 4;
4067 /* Fall through */
4068 case ROTATERT:
4069 if (mode != SImode)
4070 return 8;
4071 /* Fall through */
4072 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4073 if (mode == DImode)
4074 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4075 + ((GET_CODE (XEXP (x, 0)) == REG
4076 || (GET_CODE (XEXP (x, 0)) == SUBREG
4077 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4078 ? 0 : 8));
4079 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4080 || (GET_CODE (XEXP (x, 0)) == SUBREG
4081 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4082 ? 0 : 4)
4083 + ((GET_CODE (XEXP (x, 1)) == REG
4084 || (GET_CODE (XEXP (x, 1)) == SUBREG
4085 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4086 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4087 ? 0 : 4));
4089 case MINUS:
4090 if (mode == DImode)
4091 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4092 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4093 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4094 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4095 ? 0 : 8));
4097 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4098 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4099 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4100 && arm_const_double_rtx (XEXP (x, 1))))
4101 ? 0 : 8)
4102 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4103 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4104 && arm_const_double_rtx (XEXP (x, 0))))
4105 ? 0 : 8));
4107 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4108 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4109 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4110 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4111 || subcode == ASHIFTRT || subcode == LSHIFTRT
4112 || subcode == ROTATE || subcode == ROTATERT
4113 || (subcode == MULT
4114 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4115 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4116 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4117 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4118 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4119 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4120 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4121 return 1;
4122 /* Fall through */
4124 case PLUS:
4125 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4126 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4127 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4128 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4129 && arm_const_double_rtx (XEXP (x, 1))))
4130 ? 0 : 8));
4132 /* Fall through */
4133 case AND: case XOR: case IOR:
4134 extra_cost = 0;
4136 /* Normally the frame registers will be split into reg+const during
4137 reload, so it is a bad idea to combine them with other instructions,
4138 since then they might not be moved outside of loops. As a compromise
4139 we allow integration with ops that have a constant as their second
4140 operand. */
4141 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4142 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4143 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4144 || (REG_OR_SUBREG_REG (XEXP (x, 1))
4145 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
4146 extra_cost = 4;
4148 if (mode == DImode)
4149 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4150 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4151 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4152 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4153 ? 0 : 8));
4155 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4156 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4157 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4158 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4159 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4160 ? 0 : 4));
4162 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4163 return (1 + extra_cost
4164 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4165 || subcode == LSHIFTRT || subcode == ASHIFTRT
4166 || subcode == ROTATE || subcode == ROTATERT
4167 || (subcode == MULT
4168 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4169 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4170 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4171 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4172 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4173 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4174 ? 0 : 4));
4176 return 8;
4178 case MULT:
4179 /* This should have been handled by the CPU specific routines. */
4180 gcc_unreachable ();
4182 case TRUNCATE:
4183 if (arm_arch3m && mode == SImode
4184 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4185 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4186 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4187 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4188 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4189 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4190 return 8;
4191 return 99;
4193 case NEG:
4194 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4195 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4196 /* Fall through */
4197 case NOT:
4198 if (mode == DImode)
4199 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4201 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4203 case IF_THEN_ELSE:
4204 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4205 return 14;
4206 return 2;
4208 case COMPARE:
4209 return 1;
4211 case ABS:
4212 return 4 + (mode == DImode ? 4 : 0);
4214 case SIGN_EXTEND:
4215 if (GET_MODE (XEXP (x, 0)) == QImode)
4216 return (4 + (mode == DImode ? 4 : 0)
4217 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4218 /* Fall through */
4219 case ZERO_EXTEND:
4220 switch (GET_MODE (XEXP (x, 0)))
4222 case QImode:
4223 return (1 + (mode == DImode ? 4 : 0)
4224 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4226 case HImode:
4227 return (4 + (mode == DImode ? 4 : 0)
4228 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4230 case SImode:
4231 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4233 case V8QImode:
4234 case V4HImode:
4235 case V2SImode:
4236 case V4QImode:
4237 case V2HImode:
4238 return 1;
4240 default:
4241 gcc_unreachable ();
4243 gcc_unreachable ();
4245 case CONST_INT:
4246 if (const_ok_for_arm (INTVAL (x)))
4247 return outer == SET ? 2 : -1;
4248 else if (outer == AND
4249 && const_ok_for_arm (~INTVAL (x)))
4250 return -1;
4251 else if ((outer == COMPARE
4252 || outer == PLUS || outer == MINUS)
4253 && const_ok_for_arm (-INTVAL (x)))
4254 return -1;
4255 else
4256 return 5;
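/* Worked examples (editorial): 0x000000ff is a valid ARM immediate, so
   as the source of a SET it scores 2 and is otherwise free (-1);
   0xffffff00 is not, but its complement is, so under an outer AND it
   can become a BIC and scores -1; likewise -1 under PLUS folds into a
   SUB of 1.  Anything needing a multi-insn arm_gen_constant sequence
   falls through to 5.  */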
4258 case CONST:
4259 case LABEL_REF:
4260 case SYMBOL_REF:
4261 return 6;
4263 case CONST_DOUBLE:
4264 if (arm_const_double_rtx (x))
4265 return outer == SET ? 2 : -1;
4266 else if ((outer == COMPARE || outer == PLUS)
4267 && neg_const_double_rtx_ok_for_fpa (x))
4268 return -1;
4269 return 7;
4271 default:
4272 return 99;
4276 /* RTX costs when optimizing for size. */
4277 static bool
4278 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4280 enum machine_mode mode = GET_MODE (x);
4282 if (TARGET_THUMB)
4284 /* XXX TBD. For now, use the standard costs. */
4285 *total = thumb_rtx_costs (x, code, outer_code);
4286 return true;
4289 switch (code)
4291 case MEM:
4292 /* A memory access costs 1 insn if the mode is small or the address is
4293 a single register; otherwise it costs one insn per word. */
4294 if (REG_P (XEXP (x, 0)))
4295 *total = COSTS_N_INSNS (1);
4296 else
4297 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4298 return true;
4300 case DIV:
4301 case MOD:
4302 case UDIV:
4303 case UMOD:
4304 /* Needs a libcall, so it costs about this. */
4305 *total = COSTS_N_INSNS (2);
4306 return false;
4308 case ROTATE:
4309 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4311 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4312 return true;
4314 /* Fall through */
4315 case ROTATERT:
4316 case ASHIFT:
4317 case LSHIFTRT:
4318 case ASHIFTRT:
4319 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4321 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4322 return true;
4324 else if (mode == SImode)
4326 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4327 /* Slightly disparage register shifts, but not by much. */
4328 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4329 *total += 1 + rtx_cost (XEXP (x, 1), code);
4330 return true;
4333 /* Needs a libcall. */
4334 *total = COSTS_N_INSNS (2);
4335 return false;
4337 case MINUS:
4338 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4340 *total = COSTS_N_INSNS (1);
4341 return false;
4344 if (mode == SImode)
4346 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4347 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4349 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4350 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4351 || subcode1 == ROTATE || subcode1 == ROTATERT
4352 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4353 || subcode1 == ASHIFTRT)
4355 /* It's just the cost of the two operands. */
4356 *total = 0;
4357 return false;
4360 *total = COSTS_N_INSNS (1);
4361 return false;
4364 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4365 return false;
4367 case PLUS:
4368 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4370 *total = COSTS_N_INSNS (1);
4371 return false;
4374 /* Fall through */
4375 case AND: case XOR: case IOR:
4376 if (mode == SImode)
4378 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4380 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4381 || subcode == LSHIFTRT || subcode == ASHIFTRT
4382 || (code == AND && subcode == NOT))
4384 /* It's just the cost of the two operands. */
4385 *total = 0;
4386 return false;
4390 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4391 return false;
4393 case MULT:
4394 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4395 return false;
4397 case NEG:
4398 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4399 *total = COSTS_N_INSNS (1);
4400 /* Fall through */
4401 case NOT:
4402 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4404 return false;
4406 case IF_THEN_ELSE:
4407 *total = 0;
4408 return false;
4410 case COMPARE:
4411 if (cc_register (XEXP (x, 0), VOIDmode))
4412 *total = 0;
4413 else
4414 *total = COSTS_N_INSNS (1);
4415 return false;
4417 case ABS:
4418 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4419 *total = COSTS_N_INSNS (1);
4420 else
4421 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4422 return false;
4424 case SIGN_EXTEND:
4425 *total = 0;
4426 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4428 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4429 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4431 if (mode == DImode)
4432 *total += COSTS_N_INSNS (1);
4433 return false;
4435 case ZERO_EXTEND:
4436 *total = 0;
4437 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4439 switch (GET_MODE (XEXP (x, 0)))
4441 case QImode:
4442 *total += COSTS_N_INSNS (1);
4443 break;
4445 case HImode:
4446 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4447 break;
4448 case SImode:
4449 break;
4451 default:
4452 *total += COSTS_N_INSNS (2);
4456 if (mode == DImode)
4457 *total += COSTS_N_INSNS (1);
4459 return false;
4461 case CONST_INT:
4462 if (const_ok_for_arm (INTVAL (x)))
4463 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4464 else if (const_ok_for_arm (~INTVAL (x)))
4465 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4466 else if (const_ok_for_arm (-INTVAL (x)))
4468 if (outer_code == COMPARE || outer_code == PLUS
4469 || outer_code == MINUS)
4470 *total = 0;
4471 else
4472 *total = COSTS_N_INSNS (1);
4474 else
4475 *total = COSTS_N_INSNS (2);
4476 return true;
4478 case CONST:
4479 case LABEL_REF:
4480 case SYMBOL_REF:
4481 *total = COSTS_N_INSNS (2);
4482 return true;
4484 case CONST_DOUBLE:
4485 *total = COSTS_N_INSNS (4);
4486 return true;
4488 default:
4489 if (mode != VOIDmode)
4490 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4491 else
4492 *total = COSTS_N_INSNS (4); /* Who knows? */
4493 return false;
4497 /* RTX costs for cores with a slow MUL implementation. */
4499 static bool
4500 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4502 enum machine_mode mode = GET_MODE (x);
4504 if (TARGET_THUMB)
4506 *total = thumb_rtx_costs (x, code, outer_code);
4507 return true;
4510 switch (code)
4512 case MULT:
4513 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4514 || mode == DImode)
4516 *total = 30;
4517 return true;
4520 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4522 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4523 & (unsigned HOST_WIDE_INT) 0xffffffff);
4524 int cost, const_ok = const_ok_for_arm (i);
4525 int j, booth_unit_size;
4527 /* Tune as appropriate. */
4528 cost = const_ok ? 4 : 8;
4529 booth_unit_size = 2;
4530 for (j = 0; i && j < 32; j += booth_unit_size)
4532 i >>= booth_unit_size;
4533 cost += 2;
4536 *total = cost;
4537 return true;
4540 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4541 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4542 return true;
4544 default:
4545 *total = arm_rtx_costs_1 (x, code, outer_code);
4546 return true;
4551 /* RTX cost for cores with a fast multiply unit (M variants). */
4553 static bool
4554 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4556 enum machine_mode mode = GET_MODE (x);
4558 if (TARGET_THUMB)
4560 *total = thumb_rtx_costs (x, code, outer_code);
4561 return true;
4564 switch (code)
4566 case MULT:
4567 /* There is no point basing this on the tuning, since it is always the
4568 fast variant if it exists at all. */
4569 if (mode == DImode
4570 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4571 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4572 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4574 *total = 8;
4575 return true;
4579 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4580 || mode == DImode)
4582 *total = 30;
4583 return true;
4586 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4588 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4589 & (unsigned HOST_WIDE_INT) 0xffffffff);
4590 int cost, const_ok = const_ok_for_arm (i);
4591 int j, booth_unit_size;
4593 /* Tune as appropriate. */
4594 cost = const_ok ? 4 : 8;
4595 booth_unit_size = 8;
4596 for (j = 0; i && j < 32; j += booth_unit_size)
4598 i >>= booth_unit_size;
4599 cost += 2;
4602 *total = cost;
4603 return true;
4606 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4607 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4608 return true;
4610 default:
4611 *total = arm_rtx_costs_1 (x, code, outer_code);
4612 return true;
4617 /* RTX cost for XScale CPUs. */
4619 static bool
4620 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4622 enum machine_mode mode = GET_MODE (x);
4624 if (TARGET_THUMB)
4626 *total = thumb_rtx_costs (x, code, outer_code);
4627 return true;
4630 switch (code)
4632 case MULT:
4633 /* There is no point basing this on the tuning, since it is always the
4634 fast variant if it exists at all. */
4635 if (mode == DImode
4636 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4637 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4638 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4640 *total = 8;
4641 return true;
4645 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4646 || mode == DImode)
4648 *total = 30;
4649 return true;
4652 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4654 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4655 & (unsigned HOST_WIDE_INT) 0xffffffff);
4656 int cost, const_ok = const_ok_for_arm (i);
4657 unsigned HOST_WIDE_INT masked_const;
4659 /* The cost will be related to two insns.
4660 First a load of the constant (MOV or LDR), then a multiply. */
4661 cost = 2;
4662 if (! const_ok)
4663 cost += 1; /* LDR is probably more expensive because
4664 of longer result latency. */
4665 masked_const = i & 0xffff8000;
4666 if (masked_const != 0 && masked_const != 0xffff8000)
4668 masked_const = i & 0xf8000000;
4669 if (masked_const == 0 || masked_const == 0xf8000000)
4670 cost += 1;
4671 else
4672 cost += 2;
4674 *total = cost;
4675 return true;
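/* Worked example (editorial): for i = 0x12345678, i & 0xffff8000 is
   0x12340000, neither 0 nor 0xffff8000, so the operand is too wide for
   the early-terminating case; i & 0xf8000000 = 0x10000000 is neither
   0 nor 0xf8000000 either, so the slow path adds 2.  The constant is
   not encodable, so the total is 2 + 1 + 2 = 5.  */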
4678 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4679 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4680 return true;
4682 case COMPARE:
4683 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4684 will stall until the multiplication is complete. */
4685 if (GET_CODE (XEXP (x, 0)) == MULT)
4686 *total = 4 + rtx_cost (XEXP (x, 0), code);
4687 else
4688 *total = arm_rtx_costs_1 (x, code, outer_code);
4689 return true;
4691 default:
4692 *total = arm_rtx_costs_1 (x, code, outer_code);
4693 return true;
4698 /* RTX costs for 9e (and later) cores. */
4700 static bool
4701 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4703 enum machine_mode mode = GET_MODE (x);
4704 int nonreg_cost;
4705 int cost;
4707 if (TARGET_THUMB)
4709 switch (code)
4711 case MULT:
4712 *total = COSTS_N_INSNS (3);
4713 return true;
4715 default:
4716 *total = thumb_rtx_costs (x, code, outer_code);
4717 return true;
4721 switch (code)
4723 case MULT:
4724 /* There is no point basing this on the tuning, since it is always the
4725 fast variant if it exists at all. */
4726 if (mode == DImode
4727 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4728 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4729 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4731 *total = 3;
4732 return true;
4736 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4738 *total = 30;
4739 return true;
4741 if (mode == DImode)
4743 cost = 7;
4744 nonreg_cost = 8;
4746 else
4748 cost = 2;
4749 nonreg_cost = 4;
4753 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4754 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4755 return true;
4757 default:
4758 *total = arm_rtx_costs_1 (x, code, outer_code);
4759 return true;
4762 /* All address computations that can be done are free, but rtx cost returns
4763 the same for practically all of them. So we weight the different types
4764 of address here in the order (most pref first):
4765 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4766 static inline int
4767 arm_arm_address_cost (rtx x)
4769 enum rtx_code c = GET_CODE (x);
4771 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4772 return 0;
4773 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4774 return 10;
4776 if (c == PLUS || c == MINUS)
4778 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4779 return 2;
4781 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4782 return 3;
4784 return 4;
4787 return 6;
4790 static inline int
4791 arm_thumb_address_cost (rtx x)
4793 enum rtx_code c = GET_CODE (x);
4795 if (c == REG)
4796 return 1;
4797 if (c == PLUS
4798 && GET_CODE (XEXP (x, 0)) == REG
4799 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4800 return 1;
4802 return 2;
4805 static int
4806 arm_address_cost (rtx x)
4808 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
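/* Examples of the resulting weights (editorial): a POST_INC address
   scores 0 (most preferred); (plus (mult r1 4) r0) scores 3; a plain
   (plus r0 (const_int 8)) scores 4; a bare register 6; a label or
   constant-pool reference 10 (least preferred).  On Thumb a register,
   or register plus constant, scores 1 and anything else 2.  */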
4811 static int
4812 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4814 rtx i_pat, d_pat;
4816 /* Some true dependencies can have a higher cost depending
4817 on precisely how certain input operands are used. */
4818 if (arm_tune_xscale
4819 && REG_NOTE_KIND (link) == 0
4820 && recog_memoized (insn) >= 0
4821 && recog_memoized (dep) >= 0)
4823 int shift_opnum = get_attr_shift (insn);
4824 enum attr_type attr_type = get_attr_type (dep);
4826 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4827 operand for INSN. If we have a shifted input operand and the
4828 instruction we depend on is another ALU instruction, then we may
4829 have to account for an additional stall. */
4830 if (shift_opnum != 0
4831 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4833 rtx shifted_operand;
4834 int opno;
4836 /* Get the shifted operand. */
4837 extract_insn (insn);
4838 shifted_operand = recog_data.operand[shift_opnum];
4840 /* Iterate over all the operands in DEP. If we write an operand
4841 that overlaps with SHIFTED_OPERAND, then we have to increase the
4842 cost of this dependency. */
4843 extract_insn (dep);
4844 preprocess_constraints ();
4845 for (opno = 0; opno < recog_data.n_operands; opno++)
4847 /* We can ignore strict inputs. */
4848 if (recog_data.operand_type[opno] == OP_IN)
4849 continue;
4851 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4852 shifted_operand))
4853 return 2;
4858 /* XXX This is not strictly true for the FPA. */
4859 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4860 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4861 return 0;
4863 /* Call insns don't incur a stall, even if they follow a load. */
4864 if (REG_NOTE_KIND (link) == 0
4865 && GET_CODE (insn) == CALL_INSN)
4866 return 1;
4868 if ((i_pat = single_set (insn)) != NULL
4869 && GET_CODE (SET_SRC (i_pat)) == MEM
4870 && (d_pat = single_set (dep)) != NULL
4871 && GET_CODE (SET_DEST (d_pat)) == MEM)
4873 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4874 /* This is a load after a store; there is no conflict if the load reads
4875 from a cached area. Assume that loads from the stack and from the
4876 constant pool are cached, and that others will miss. This is a
4877 hack. */
4879 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4880 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4881 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4882 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4883 return 1;
4886 return cost;
4889 static int fp_consts_inited = 0;
4891 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4892 static const char * const strings_fp[8] =
4894 "0", "1", "2", "3",
4895 "4", "5", "0.5", "10"
4898 static REAL_VALUE_TYPE values_fp[8];
4900 static void
4901 init_fp_table (void)
4903 int i;
4904 REAL_VALUE_TYPE r;
4906 if (TARGET_VFP)
4907 fp_consts_inited = 1;
4908 else
4909 fp_consts_inited = 8;
4911 for (i = 0; i < fp_consts_inited; i++)
4913 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4914 values_fp[i] = r;
4918 /* Return TRUE if rtx X is a valid immediate FP constant. */
4920 arm_const_double_rtx (rtx x)
4922 REAL_VALUE_TYPE r;
4923 int i;
4925 if (!fp_consts_inited)
4926 init_fp_table ();
4928 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4929 if (REAL_VALUE_MINUS_ZERO (r))
4930 return 0;
4932 for (i = 0; i < fp_consts_inited; i++)
4933 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4934 return 1;
4936 return 0;
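/* Example (editorial): with the FPA the eight table values above can
   be encoded as instruction immediates, so (const_double 10.0) is
   valid; with VFP only 0.0 survives, since fp_consts_inited is then 1
   and only the first table entry is initialized.  -0.0 is always
   rejected by the REAL_VALUE_MINUS_ZERO check before the lookup.  */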
4939 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4941 neg_const_double_rtx_ok_for_fpa (rtx x)
4943 REAL_VALUE_TYPE r;
4944 int i;
4946 if (!fp_consts_inited)
4947 init_fp_table ();
4949 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4950 r = REAL_VALUE_NEGATE (r);
4951 if (REAL_VALUE_MINUS_ZERO (r))
4952 return 0;
4954 for (i = 0; i < 8; i++)
4955 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4956 return 1;
4958 return 0;
4961 /* Predicates for `match_operand' and `match_operator'. */
4963 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4965 cirrus_memory_offset (rtx op)
4967 /* Reject eliminable registers. */
4968 if (! (reload_in_progress || reload_completed)
4969 && ( reg_mentioned_p (frame_pointer_rtx, op)
4970 || reg_mentioned_p (arg_pointer_rtx, op)
4971 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4972 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4973 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4974 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4975 return 0;
4977 if (GET_CODE (op) == MEM)
4979 rtx ind;
4981 ind = XEXP (op, 0);
4983 /* Match: (mem (reg)). */
4984 if (GET_CODE (ind) == REG)
4985 return 1;
4987 /* Match:
4988 (mem (plus (reg)
4989 (const))). */
4990 if (GET_CODE (ind) == PLUS
4991 && GET_CODE (XEXP (ind, 0)) == REG
4992 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4993 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4994 return 1;
4997 return 0;
5000 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5001 WB is true if writeback address modes are allowed. */
5004 arm_coproc_mem_operand (rtx op, bool wb)
5006 rtx ind;
5008 /* Reject eliminable registers. */
5009 if (! (reload_in_progress || reload_completed)
5010 && ( reg_mentioned_p (frame_pointer_rtx, op)
5011 || reg_mentioned_p (arg_pointer_rtx, op)
5012 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5013 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5014 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5015 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5016 return FALSE;
5018 /* Constants are converted into offsets from labels. */
5019 if (GET_CODE (op) != MEM)
5020 return FALSE;
5022 ind = XEXP (op, 0);
5024 if (reload_completed
5025 && (GET_CODE (ind) == LABEL_REF
5026 || (GET_CODE (ind) == CONST
5027 && GET_CODE (XEXP (ind, 0)) == PLUS
5028 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5029 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5030 return TRUE;
5032 /* Match: (mem (reg)). */
5033 if (GET_CODE (ind) == REG)
5034 return arm_address_register_rtx_p (ind, 0);
5036 /* Autoincrement addressing modes. */
5037 if (wb
5038 && (GET_CODE (ind) == PRE_INC
5039 || GET_CODE (ind) == POST_INC
5040 || GET_CODE (ind) == PRE_DEC
5041 || GET_CODE (ind) == POST_DEC))
5042 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5044 if (wb
5045 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5046 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5047 && GET_CODE (XEXP (ind, 1)) == PLUS
5048 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5049 ind = XEXP (ind, 1);
5051 /* Match:
5052 (plus (reg)
5053 (const)). */
5054 if (GET_CODE (ind) == PLUS
5055 && GET_CODE (XEXP (ind, 0)) == REG
5056 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5057 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5058 && INTVAL (XEXP (ind, 1)) > -1024
5059 && INTVAL (XEXP (ind, 1)) < 1024
5060 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5061 return TRUE;
5063 return FALSE;
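/* Examples (editorial): (mem (plus r4 (const_int 1020))) is accepted,
   being word-aligned and in range, while an offset of 1024 is not;
   (mem (pre_inc r4)) is accepted only when WB is true.  The +/-1020
   word-aligned window matches the 8-bit scaled offset field of the
   coprocessor load/store encodings.  */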
5066 /* Return true if X is a register that will be eliminated later on. */
5068 arm_eliminable_register (rtx x)
5070 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5071 || REGNO (x) == ARG_POINTER_REGNUM
5072 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5073 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5076 /* Return GENERAL_REGS if a scratch register is required to reload X
5077 to/from VFP registers; otherwise return NO_REGS. */
5079 enum reg_class
5080 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5082 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5083 return NO_REGS;
5085 return GENERAL_REGS;
5088 /* Values which must be returned in the most-significant end of the return
5089 register. */
5091 static bool
5092 arm_return_in_msb (tree valtype)
5094 return (TARGET_AAPCS_BASED
5095 && BYTES_BIG_ENDIAN
5096 && (AGGREGATE_TYPE_P (valtype)
5097 || TREE_CODE (valtype) == COMPLEX_TYPE));
5100 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5101 Used by the Cirrus Maverick code, which has to work around
5102 a hardware bug triggered by such instructions. */
5103 static bool
5104 arm_memory_load_p (rtx insn)
5106 rtx body, lhs, rhs;
5108 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5109 return false;
5111 body = PATTERN (insn);
5113 if (GET_CODE (body) != SET)
5114 return false;
5116 lhs = XEXP (body, 0);
5117 rhs = XEXP (body, 1);
5119 lhs = REG_OR_SUBREG_RTX (lhs);
5121 /* If the destination is not a general purpose
5122 register we do not have to worry. */
5123 if (GET_CODE (lhs) != REG
5124 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5125 return false;
5127 /* As well as loads from memory we also have to react
5128 to loads of invalid constants which will be turned
5129 into loads from the minipool. */
5130 return (GET_CODE (rhs) == MEM
5131 || GET_CODE (rhs) == SYMBOL_REF
5132 || note_invalid_constants (insn, -1, false));
5135 /* Return TRUE if INSN is a Cirrus instruction. */
5136 static bool
5137 arm_cirrus_insn_p (rtx insn)
5139 enum attr_cirrus attr;
5141 /* get_attr cannot accept USE or CLOBBER. */
5142 if (!insn
5143 || GET_CODE (insn) != INSN
5144 || GET_CODE (PATTERN (insn)) == USE
5145 || GET_CODE (PATTERN (insn)) == CLOBBER)
5146 return 0;
5148 attr = get_attr_cirrus (insn);
5150 return attr != CIRRUS_NOT;
5153 /* Cirrus reorg for invalid instruction combinations. */
5154 static void
5155 cirrus_reorg (rtx first)
5157 enum attr_cirrus attr;
5158 rtx body = PATTERN (first);
5159 rtx t;
5160 int nops;
5162 /* Any branch must be followed by 2 non-Cirrus instructions. */
5163 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5165 nops = 0;
5166 t = next_nonnote_insn (first);
5168 if (arm_cirrus_insn_p (t))
5169 ++ nops;
5171 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5172 ++ nops;
5174 while (nops --)
5175 emit_insn_after (gen_nop (), first);
5177 return;
5180 /* (float (blah)) is in parallel with a clobber. */
5181 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5182 body = XVECEXP (body, 0, 0);
5184 if (GET_CODE (body) == SET)
5186 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5188 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5189 be followed by a non-Cirrus insn. */
5190 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5192 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5193 emit_insn_after (gen_nop (), first);
5195 return;
5197 else if (arm_memory_load_p (first))
5199 unsigned int arm_regno;
5201 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5202 ldr/cfmv64hr combination where the Rd field is the same
5203 in both instructions must be split with a non-Cirrus
5204 insn. Example:
5206 ldr r0, blah
5208 cfmvsr mvf0, r0. */
5210 /* Get Arm register number for ldr insn. */
5211 if (GET_CODE (lhs) == REG)
5212 arm_regno = REGNO (lhs);
5213 else
5215 gcc_assert (GET_CODE (rhs) == REG);
5216 arm_regno = REGNO (rhs);
5219 /* Next insn. */
5220 first = next_nonnote_insn (first);
5222 if (! arm_cirrus_insn_p (first))
5223 return;
5225 body = PATTERN (first);
5227 /* (float (blah)) is in parallel with a clobber. */
5228 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5229 body = XVECEXP (body, 0, 0);
5231 if (GET_CODE (body) == FLOAT)
5232 body = XEXP (body, 0);
5234 if (get_attr_cirrus (first) == CIRRUS_MOVE
5235 && GET_CODE (XEXP (body, 1)) == REG
5236 && arm_regno == REGNO (XEXP (body, 1)))
5237 emit_insn_after (gen_nop (), first);
5239 return;
5243 /* get_attr cannot accept USE or CLOBBER. */
5244 if (!first
5245 || GET_CODE (first) != INSN
5246 || GET_CODE (PATTERN (first)) == USE
5247 || GET_CODE (PATTERN (first)) == CLOBBER)
5248 return;
5250 attr = get_attr_cirrus (first);
5252 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5253 must be followed by a non-coprocessor instruction. */
5254 if (attr == CIRRUS_COMPARE)
5256 nops = 0;
5258 t = next_nonnote_insn (first);
5260 if (arm_cirrus_insn_p (t))
5261 ++ nops;
5263 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5264 ++ nops;
5266 while (nops --)
5267 emit_insn_after (gen_nop (), first);
5269 return;
5273 /* Return TRUE if X references a SYMBOL_REF. */
5275 symbol_mentioned_p (rtx x)
5277 const char * fmt;
5278 int i;
5280 if (GET_CODE (x) == SYMBOL_REF)
5281 return 1;
5283 fmt = GET_RTX_FORMAT (GET_CODE (x));
5285 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5287 if (fmt[i] == 'E')
5289 int j;
5291 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5292 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5293 return 1;
5295 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5296 return 1;
5299 return 0;
5302 /* Return TRUE if X references a LABEL_REF. */
5304 label_mentioned_p (rtx x)
5306 const char * fmt;
5307 int i;
5309 if (GET_CODE (x) == LABEL_REF)
5310 return 1;
5312 fmt = GET_RTX_FORMAT (GET_CODE (x));
5313 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5315 if (fmt[i] == 'E')
5317 int j;
5319 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5320 if (label_mentioned_p (XVECEXP (x, i, j)))
5321 return 1;
5323 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5324 return 1;
5327 return 0;
5330 enum rtx_code
5331 minmax_code (rtx x)
5333 enum rtx_code code = GET_CODE (x);
5335 switch (code)
5337 case SMAX:
5338 return GE;
5339 case SMIN:
5340 return LE;
5341 case UMIN:
5342 return LEU;
5343 case UMAX:
5344 return GEU;
5345 default:
5346 gcc_unreachable ();
5350 /* Return 1 if memory locations are adjacent. */
5352 adjacent_mem_locations (rtx a, rtx b)
5354 /* We don't guarantee to preserve the order of these memory refs. */
5355 if (volatile_refs_p (a) || volatile_refs_p (b))
5356 return 0;
5358 if ((GET_CODE (XEXP (a, 0)) == REG
5359 || (GET_CODE (XEXP (a, 0)) == PLUS
5360 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5361 && (GET_CODE (XEXP (b, 0)) == REG
5362 || (GET_CODE (XEXP (b, 0)) == PLUS
5363 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5365 HOST_WIDE_INT val0 = 0, val1 = 0;
5366 rtx reg0, reg1;
5367 int val_diff;
5369 if (GET_CODE (XEXP (a, 0)) == PLUS)
5371 reg0 = XEXP (XEXP (a, 0), 0);
5372 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5374 else
5375 reg0 = XEXP (a, 0);
5377 if (GET_CODE (XEXP (b, 0)) == PLUS)
5379 reg1 = XEXP (XEXP (b, 0), 0);
5380 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5382 else
5383 reg1 = XEXP (b, 0);
5385 /* Don't accept any offset that will require multiple
5386 instructions to handle, since this would cause the
5387 arith_adjacentmem pattern to output an overlong sequence. */
5388 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5389 return 0;
5391 /* Don't allow an eliminable register: register elimination can make
5392 the offset too large. */
5393 if (arm_eliminable_register (reg0))
5394 return 0;
5396 val_diff = val1 - val0;
5398 if (arm_ld_sched)
5400 /* If the target has load delay slots, then there's no benefit
5401 to using an ldm instruction unless the offset is zero and
5402 we are optimizing for size. */
5403 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5404 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5405 && (val_diff == 4 || val_diff == -4));
5408 return ((REGNO (reg0) == REGNO (reg1))
5409 && (val_diff == 4 || val_diff == -4));
5412 return 0;
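/* Examples (editorial): [r4] and [r4, #4] are adjacent (val_diff of
   4), as are [r4, #8] and [r4, #4] in descending order (val_diff of
   -4); [r4] and [r5, #4] are not, having different bases.  On cores
   with load scheduling the pair is additionally accepted only when
   optimizing for size and one of the offsets is 0 or 4.  */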
5416 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5417 HOST_WIDE_INT *load_offset)
5419 int unsorted_regs[4];
5420 HOST_WIDE_INT unsorted_offsets[4];
5421 int order[4];
5422 int base_reg = -1;
5423 int i;
5425 /* Can only handle 2, 3, or 4 insns at present,
5426 though could be easily extended if required. */
5427 gcc_assert (nops >= 2 && nops <= 4);
5429 /* Loop over the operands and check that the memory references are
5430 suitable (i.e. immediate offsets from the same base register). At
5431 the same time, extract the target register, and the memory
5432 offsets. */
5433 for (i = 0; i < nops; i++)
5435 rtx reg;
5436 rtx offset;
5438 /* Convert a subreg of a mem into the mem itself. */
5439 if (GET_CODE (operands[nops + i]) == SUBREG)
5440 operands[nops + i] = alter_subreg (operands + (nops + i));
5442 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5444 /* Don't reorder volatile memory references; it doesn't seem worth
5445 looking for the case where the order is ok anyway. */
5446 if (MEM_VOLATILE_P (operands[nops + i]))
5447 return 0;
5449 offset = const0_rtx;
5451 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5452 || (GET_CODE (reg) == SUBREG
5453 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5454 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5455 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5456 == REG)
5457 || (GET_CODE (reg) == SUBREG
5458 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5459 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5460 == CONST_INT)))
5462 if (i == 0)
5464 base_reg = REGNO (reg);
5465 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5466 ? REGNO (operands[i])
5467 : REGNO (SUBREG_REG (operands[i])));
5468 order[0] = 0;
5470 else
5472 if (base_reg != (int) REGNO (reg))
5473 /* Not addressed from the same base register. */
5474 return 0;
5476 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5477 ? REGNO (operands[i])
5478 : REGNO (SUBREG_REG (operands[i])));
5479 if (unsorted_regs[i] < unsorted_regs[order[0]])
5480 order[0] = i;
5483 /* If it isn't an integer register, or if it overwrites the
5484 base register but isn't the last insn in the list, then
5485 we can't do this. */
5486 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5487 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5488 return 0;
5490 unsorted_offsets[i] = INTVAL (offset);
5492 else
5493 /* Not a suitable memory address. */
5494 return 0;
5497 /* All the useful information has now been extracted from the
5498 operands into unsorted_regs and unsorted_offsets; additionally,
5499 order[0] has been set to the lowest numbered register in the
5500 list. Sort the registers into order, and check that the memory
5501 offsets are ascending and adjacent. */
5503 for (i = 1; i < nops; i++)
5505 int j;
5507 order[i] = order[i - 1];
5508 for (j = 0; j < nops; j++)
5509 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5510 && (order[i] == order[i - 1]
5511 || unsorted_regs[j] < unsorted_regs[order[i]]))
5512 order[i] = j;
5514 /* Have we found a suitable register? If not, one must be used more
5515 than once. */
5516 if (order[i] == order[i - 1])
5517 return 0;
5519 /* Are the memory offsets adjacent and ascending? */
5520 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5521 return 0;
5524 if (base)
5526 *base = base_reg;
5528 for (i = 0; i < nops; i++)
5529 regs[i] = unsorted_regs[order[i]];
5531 *load_offset = unsorted_offsets[order[0]];
5534 if (unsorted_offsets[order[0]] == 0)
5535 return 1; /* ldmia */
5537 if (unsorted_offsets[order[0]] == 4)
5538 return 2; /* ldmib */
5540 if (unsorted_offsets[order[nops - 1]] == 0)
5541 return 3; /* ldmda */
5543 if (unsorted_offsets[order[nops - 1]] == -4)
5544 return 4; /* ldmdb */
5546 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5547 if the offset isn't small enough. The reason 2 ldrs are faster
5548 is because these ARMs are able to do more than one cache access
5549 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5550 whilst the ARM8 has a double bandwidth cache. This means that
5551 these cores can do both an instruction fetch and a data fetch in
5552 a single cycle, so the trick of calculating the address into a
5553 scratch register (one of the result regs) and then doing a load
5554 multiple actually becomes slower (and no smaller in code size).
5555 That is the transformation
5557 ldr rd1, [rbase + offset]
5558 ldr rd2, [rbase + offset + 4]
5560 to
5562 add rd1, rbase, offset
5563 ldmia rd1, {rd1, rd2}
5565 produces worse code -- '3 cycles + any stalls on rd2' instead of
5566 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5567 access per cycle, the first sequence could never complete in less
5568 than 6 cycles, whereas the ldm sequence would only take 5 and
5569 would make better use of sequential accesses if not hitting the
5570 cache.
5572 We cheat here and test 'arm_ld_sched' which we currently know to
5573 only be true for the ARM8, ARM9 and StrongARM. If this ever
5574 changes, then the test below needs to be reworked. */
5575 if (nops == 2 && arm_ld_sched)
5576 return 0;
5578 /* Can't do it without setting up the offset; only do this if it takes
5579 no more than one insn. */
5580 return (const_ok_for_arm (unsorted_offsets[order[0]])
5581 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
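/* A hypothetical usage sketch (editorial; not called anywhere in the
   compiler): classifying the two-word load r0, r1 <- [r4], [r4, #4]
   returns 1, i.e. a plain ldmia.  */

static int
classify_ldm_sketch (void)
{
  rtx ops[4];
  int regs[4], base;
  HOST_WIDE_INT offset;

  ops[0] = gen_rtx_REG (SImode, 0);	/* Destinations r0, r1.  */
  ops[1] = gen_rtx_REG (SImode, 1);
  ops[2] = gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 4));
  ops[3] = gen_rtx_MEM (SImode,
			plus_constant (gen_rtx_REG (SImode, 4), 4));

  return load_multiple_sequence (ops, 2, regs, &base, &offset);
}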
5584 const char *
5585 emit_ldm_seq (rtx *operands, int nops)
5587 int regs[4];
5588 int base_reg;
5589 HOST_WIDE_INT offset;
5590 char buf[100];
5591 int i;
5593 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5595 case 1:
5596 strcpy (buf, "ldm%?ia\t");
5597 break;
5599 case 2:
5600 strcpy (buf, "ldm%?ib\t");
5601 break;
5603 case 3:
5604 strcpy (buf, "ldm%?da\t");
5605 break;
5607 case 4:
5608 strcpy (buf, "ldm%?db\t");
5609 break;
5611 case 5:
5612 if (offset >= 0)
5613 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5614 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5615 (long) offset);
5616 else
5617 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5618 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5619 (long) -offset);
5620 output_asm_insn (buf, operands);
5621 base_reg = regs[0];
5622 strcpy (buf, "ldm%?ia\t");
5623 break;
5625 default:
5626 gcc_unreachable ();
5629 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5630 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5632 for (i = 1; i < nops; i++)
5633 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5634 reg_names[regs[i]]);
5636 strcat (buf, "}\t%@ phole ldm");
5638 output_asm_insn (buf, operands);
5639 return "";
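/* Example of the emitted text (editorial, assuming an empty
   REGISTER_PREFIX): for registers {r0, r1} at r4 + 64, case 5 above
   first prints "add r0, r4, #64" and then "ldmia r0, {r0, r1}"; the
   lowest destination register doubles as the scratch base, which the
   ldm may then safely overwrite with its own load.  */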
5643 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5644 HOST_WIDE_INT * load_offset)
5646 int unsorted_regs[4];
5647 HOST_WIDE_INT unsorted_offsets[4];
5648 int order[4];
5649 int base_reg = -1;
5650 int i;
5652 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5653 extended if required. */
5654 gcc_assert (nops >= 2 && nops <= 4);
5656 /* Loop over the operands and check that the memory references are
5657 suitable (i.e. immediate offsets from the same base register). At
5658 the same time, extract the target register, and the memory
5659 offsets. */
5660 for (i = 0; i < nops; i++)
5662 rtx reg;
5663 rtx offset;
5665 /* Convert a subreg of a mem into the mem itself. */
5666 if (GET_CODE (operands[nops + i]) == SUBREG)
5667 operands[nops + i] = alter_subreg (operands + (nops + i));
5669 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5671 /* Don't reorder volatile memory references; it doesn't seem worth
5672 looking for the case where the order is ok anyway. */
5673 if (MEM_VOLATILE_P (operands[nops + i]))
5674 return 0;
5676 offset = const0_rtx;
5678 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5679 || (GET_CODE (reg) == SUBREG
5680 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5681 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5682 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5683 == REG)
5684 || (GET_CODE (reg) == SUBREG
5685 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5686 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5687 == CONST_INT)))
5689 if (i == 0)
5691 base_reg = REGNO (reg);
5692 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5693 ? REGNO (operands[i])
5694 : REGNO (SUBREG_REG (operands[i])));
5695 order[0] = 0;
5697 else
5699 if (base_reg != (int) REGNO (reg))
5700 /* Not addressed from the same base register. */
5701 return 0;
5703 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5704 ? REGNO (operands[i])
5705 : REGNO (SUBREG_REG (operands[i])));
5706 if (unsorted_regs[i] < unsorted_regs[order[0]])
5707 order[0] = i;
5710 /* If it isn't an integer register, then we can't do this. */
5711 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5712 return 0;
5714 unsorted_offsets[i] = INTVAL (offset);
5716 else
5717 /* Not a suitable memory address. */
5718 return 0;
5721 /* All the useful information has now been extracted from the
5722 operands into unsorted_regs and unsorted_offsets; additionally,
5723 order[0] has been set to the lowest numbered register in the
5724 list. Sort the registers into order, and check that the memory
5725 offsets are ascending and adjacent. */
5727 for (i = 1; i < nops; i++)
5729 int j;
5731 order[i] = order[i - 1];
5732 for (j = 0; j < nops; j++)
5733 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5734 && (order[i] == order[i - 1]
5735 || unsorted_regs[j] < unsorted_regs[order[i]]))
5736 order[i] = j;
5738 /* Have we found a suitable register? If not, one must be used more
5739 than once. */
5740 if (order[i] == order[i - 1])
5741 return 0;
5743 /* Is the memory address adjacent and ascending? */
5744 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5745 return 0;
5748 if (base)
5750 *base = base_reg;
5752 for (i = 0; i < nops; i++)
5753 regs[i] = unsorted_regs[order[i]];
5755 *load_offset = unsorted_offsets[order[0]];
5758 if (unsorted_offsets[order[0]] == 0)
5759 return 1; /* stmia */
5761 if (unsorted_offsets[order[0]] == 4)
5762 return 2; /* stmib */
5764 if (unsorted_offsets[order[nops - 1]] == 0)
5765 return 3; /* stmda */
5767 if (unsorted_offsets[order[nops - 1]] == -4)
5768 return 4; /* stmdb */
5770 return 0;
5773 const char *
5774 emit_stm_seq (rtx *operands, int nops)
5776 int regs[4];
5777 int base_reg;
5778 HOST_WIDE_INT offset;
5779 char buf[100];
5780 int i;
5782 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5784 case 1:
5785 strcpy (buf, "stm%?ia\t");
5786 break;
5788 case 2:
5789 strcpy (buf, "stm%?ib\t");
5790 break;
5792 case 3:
5793 strcpy (buf, "stm%?da\t");
5794 break;
5796 case 4:
5797 strcpy (buf, "stm%?db\t");
5798 break;
5800 default:
5801 gcc_unreachable ();
5804 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5805 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5807 for (i = 1; i < nops; i++)
5808 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5809 reg_names[regs[i]]);
5811 strcat (buf, "}\t%@ phole stm");
5813 output_asm_insn (buf, operands);
5814 return "";
5818 /* Routines for use in generating RTL. */
5821 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5822 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5824 HOST_WIDE_INT offset = *offsetp;
5825 int i = 0, j;
5826 rtx result;
5827 int sign = up ? 1 : -1;
5828 rtx mem, addr;
5830 /* XScale has load-store double instructions, but they have stricter
5831 alignment requirements than load-store multiple, so we cannot
5832 use them.
5834 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5835 the pipeline until completion.
5837 NREGS CYCLES
5838 1 3
5839 2 4
5840 3 5
5841 4 6
5843 An ldr instruction takes 1-3 cycles, but does not block the
5844 pipeline.
5846 NREGS CYCLES
5847 1 1-3
5848 2 2-6
5849 3 3-9
5850 4 4-12
5852 Best case ldr will always win. However, the more ldr instructions
5853 we issue, the less likely we are to be able to schedule them well.
5854 Using ldr instructions also increases code size.
5856 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5857 for counts of 3 or 4 regs. */
5858 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5860 rtx seq;
5862 start_sequence ();
5864 for (i = 0; i < count; i++)
5866 addr = plus_constant (from, i * 4 * sign);
5867 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5868 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5869 offset += 4 * sign;
5872 if (write_back)
5874 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5875 *offsetp = offset;
5878 seq = get_insns ();
5879 end_sequence ();
5881 return seq;
5884 result = gen_rtx_PARALLEL (VOIDmode,
5885 rtvec_alloc (count + (write_back ? 1 : 0)));
5886 if (write_back)
5888 XVECEXP (result, 0, 0)
5889 = gen_rtx_SET (GET_MODE (from), from,
5890 plus_constant (from, count * 4 * sign));
5891 i = 1;
5892 count++;
5895 for (j = 0; i < count; i++, j++)
5897 addr = plus_constant (from, j * 4 * sign);
5898 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5899 XVECEXP (result, 0, i)
5900 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5901 offset += 4 * sign;
5904 if (write_back)
5905 *offsetp = offset;
5907 return result;
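/* Shape of the generated RTL (editorial illustration): for base_regno
   0, count 2, write_back set and FROM in r4, the non-XScale path
   yields

     (parallel [(set (reg r4) (plus (reg r4) (const_int 8)))
                (set (reg r0) (mem (reg r4)))
                (set (reg r1) (mem (plus (reg r4) (const_int 4))))])

   which is intended to match the load-multiple patterns in arm.md.  */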
5911 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5912 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5914 HOST_WIDE_INT offset = *offsetp;
5915 int i = 0, j;
5916 rtx result;
5917 int sign = up ? 1 : -1;
5918 rtx mem, addr;
5920 /* See arm_gen_load_multiple for discussion of
5921 the pros/cons of ldm/stm usage for XScale. */
5922 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5924 rtx seq;
5926 start_sequence ();
5928 for (i = 0; i < count; i++)
5930 addr = plus_constant (to, i * 4 * sign);
5931 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5932 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5933 offset += 4 * sign;
5936 if (write_back)
5938 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5939 *offsetp = offset;
5942 seq = get_insns ();
5943 end_sequence ();
5945 return seq;
5948 result = gen_rtx_PARALLEL (VOIDmode,
5949 rtvec_alloc (count + (write_back ? 1 : 0)));
5950 if (write_back)
5952 XVECEXP (result, 0, 0)
5953 = gen_rtx_SET (GET_MODE (to), to,
5954 plus_constant (to, count * 4 * sign));
5955 i = 1;
5956 count++;
5959 for (j = 0; i < count; i++, j++)
5961 addr = plus_constant (to, j * 4 * sign);
5962 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5963 XVECEXP (result, 0, i)
5964 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5965 offset += 4 * sign;
5968 if (write_back)
5969 *offsetp = offset;
5971 return result;
int
arm_gen_movmemqi (rtx *operands)
5977 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5978 HOST_WIDE_INT srcoffset, dstoffset;
5979 int i;
5980 rtx src, dst, srcbase, dstbase;
5981 rtx part_bytes_reg = NULL;
5982 rtx mem;
5984 if (GET_CODE (operands[2]) != CONST_INT
5985 || GET_CODE (operands[3]) != CONST_INT
5986 || INTVAL (operands[2]) > 64
5987 || INTVAL (operands[3]) & 3)
5988 return 0;
5990 dstbase = operands[0];
5991 srcbase = operands[1];
5993 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5994 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5996 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5997 out_words_to_go = INTVAL (operands[2]) / 4;
5998 last_bytes = INTVAL (operands[2]) & 3;
5999 dstoffset = srcoffset = 0;
6001 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6002 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6004 for (i = 0; in_words_to_go >= 2; i+=4)
6006 if (in_words_to_go > 4)
6007 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6008 srcbase, &srcoffset));
6009 else
6010 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6011 FALSE, srcbase, &srcoffset));
6013 if (out_words_to_go)
6015 if (out_words_to_go > 4)
6016 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6017 dstbase, &dstoffset));
6018 else if (out_words_to_go != 1)
6019 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6020 dst, TRUE,
6021 (last_bytes == 0
6022 ? FALSE : TRUE),
6023 dstbase, &dstoffset));
6024 else
6026 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6027 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6028 if (last_bytes != 0)
6030 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6031 dstoffset += 4;
6036 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6037 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6040 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6041 if (out_words_to_go)
6043 rtx sreg;
6045 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6046 sreg = copy_to_reg (mem);
6048 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6049 emit_move_insn (mem, sreg);
6050 in_words_to_go--;
6052 gcc_assert (!in_words_to_go); /* Sanity check */
6055 if (in_words_to_go)
6057 gcc_assert (in_words_to_go > 0);
6059 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6060 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6063 gcc_assert (!last_bytes || part_bytes_reg);
6065 if (BYTES_BIG_ENDIAN && last_bytes)
6067 rtx tmp = gen_reg_rtx (SImode);
6069 /* The bytes we want are in the top end of the word. */
6070 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6071 GEN_INT (8 * (4 - last_bytes))));
6072 part_bytes_reg = tmp;
6074 while (last_bytes)
6076 mem = adjust_automodify_address (dstbase, QImode,
6077 plus_constant (dst, last_bytes - 1),
6078 dstoffset + last_bytes - 1);
6079 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6081 if (--last_bytes)
6083 tmp = gen_reg_rtx (SImode);
6084 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6085 part_bytes_reg = tmp;
6090 else
6092 if (last_bytes > 1)
6094 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6095 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6096 last_bytes -= 2;
6097 if (last_bytes)
6099 rtx tmp = gen_reg_rtx (SImode);
6100 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6101 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6102 part_bytes_reg = tmp;
6103 dstoffset += 2;
6107 if (last_bytes)
6109 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6110 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6114 return 1;
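/* Worked example (illustrative): a 14-byte copy has in_words_to_go == 4,
   out_words_to_go == 3 and last_bytes == 2.  One load multiple of four
   words and one store multiple of three are emitted; the fourth loaded
   word (r3) becomes part_bytes_reg, and on a little-endian target the
   trailing two bytes go out through the HImode store path above.  */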
/* Select a dominance comparison mode if possible for a test of the general
   form (OP (COND_OR (X) (Y)) (const_int 0)).  We support three forms.
   COND_OR == DOM_CC_X_AND_Y => (X && Y)
   COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
   COND_OR == DOM_CC_X_OR_Y  => (X || Y)
   In all cases OP will be either EQ or NE, but we don't need to know which
   here.  If we are unable to support a dominance comparison we return
   CCmode.  This will then fail to match for the RTL expressions that
   generate this call.  */
6130 enum machine_mode
6131 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6133 enum rtx_code cond1, cond2;
6134 int swapped = 0;
6136 /* Currently we will probably get the wrong result if the individual
6137 comparisons are not simple. This also ensures that it is safe to
6138 reverse a comparison if necessary. */
6139 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6140 != CCmode)
6141 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6142 != CCmode))
6143 return CCmode;
6145 /* The if_then_else variant of this tests the second condition if the
6146 first passes, but is true if the first fails. Reverse the first
6147 condition to get a true "inclusive-or" expression. */
6148 if (cond_or == DOM_CC_NX_OR_Y)
6149 cond1 = reverse_condition (cond1);
6151 /* If the comparisons are not equal, and one doesn't dominate the other,
6152 then we can't do this. */
6153 if (cond1 != cond2
6154 && !comparison_dominates_p (cond1, cond2)
6155 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6156 return CCmode;
6158 if (swapped)
6160 enum rtx_code temp = cond1;
6161 cond1 = cond2;
6162 cond2 = temp;
6165 switch (cond1)
6167 case EQ:
6168 if (cond_or == DOM_CC_X_AND_Y)
6169 return CC_DEQmode;
6171 switch (cond2)
6173 case EQ: return CC_DEQmode;
6174 case LE: return CC_DLEmode;
6175 case LEU: return CC_DLEUmode;
6176 case GE: return CC_DGEmode;
6177 case GEU: return CC_DGEUmode;
6178 default: gcc_unreachable ();
6181 case LT:
6182 if (cond_or == DOM_CC_X_AND_Y)
6183 return CC_DLTmode;
6185 switch (cond2)
6187 case LT:
6188 return CC_DLTmode;
6189 case LE:
6190 return CC_DLEmode;
6191 case NE:
6192 return CC_DNEmode;
6193 default:
6194 gcc_unreachable ();
6197 case GT:
6198 if (cond_or == DOM_CC_X_AND_Y)
6199 return CC_DGTmode;
6201 switch (cond2)
6203 case GT:
6204 return CC_DGTmode;
6205 case GE:
6206 return CC_DGEmode;
6207 case NE:
6208 return CC_DNEmode;
6209 default:
6210 gcc_unreachable ();
6213 case LTU:
6214 if (cond_or == DOM_CC_X_AND_Y)
6215 return CC_DLTUmode;
6217 switch (cond2)
6219 case LTU:
6220 return CC_DLTUmode;
6221 case LEU:
6222 return CC_DLEUmode;
6223 case NE:
6224 return CC_DNEmode;
6225 default:
6226 gcc_unreachable ();
6229 case GTU:
6230 if (cond_or == DOM_CC_X_AND_Y)
6231 return CC_DGTUmode;
6233 switch (cond2)
6235 case GTU:
6236 return CC_DGTUmode;
6237 case GEU:
6238 return CC_DGEUmode;
6239 case NE:
6240 return CC_DNEmode;
6241 default:
6242 gcc_unreachable ();
6245 /* The remaining cases only occur when both comparisons are the
6246 same. */
6247 case NE:
6248 gcc_assert (cond1 == cond2);
6249 return CC_DNEmode;
6251 case LE:
6252 gcc_assert (cond1 == cond2);
6253 return CC_DLEmode;
6255 case GE:
6256 gcc_assert (cond1 == cond2);
6257 return CC_DGEmode;
6259 case LEU:
6260 gcc_assert (cond1 == cond2);
6261 return CC_DLEUmode;
6263 case GEU:
6264 gcc_assert (cond1 == cond2);
6265 return CC_DGEUmode;
6267 default:
6268 gcc_unreachable ();
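/* Worked example (illustrative): for a test such as (a == b || a >= b),
   COND1 is EQ and COND2 is GE.  EQ dominates GE (a == b implies a >= b),
   so no swap is needed and the function returns CC_DGEmode: both
   branches can then be tested from one comparison of a with b.  */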
6272 enum machine_mode
6273 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6275 /* All floating point compares return CCFP if it is an equality
6276 comparison, and CCFPE otherwise. */
6277 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6279 switch (op)
6281 case EQ:
6282 case NE:
6283 case UNORDERED:
6284 case ORDERED:
6285 case UNLT:
6286 case UNLE:
6287 case UNGT:
6288 case UNGE:
6289 case UNEQ:
6290 case LTGT:
6291 return CCFPmode;
6293 case LT:
6294 case LE:
6295 case GT:
6296 case GE:
6297 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6298 return CCFPmode;
6299 return CCFPEmode;
6301 default:
6302 gcc_unreachable ();
6306 /* A compare with a shifted operand. Because of canonicalization, the
6307 comparison will have to be swapped when we emit the assembler. */
6308 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6309 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6310 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6311 || GET_CODE (x) == ROTATERT))
6312 return CC_SWPmode;
6314 /* This operation is performed swapped, but since we only rely on the Z
6315 flag we don't need an additional mode. */
6316 if (GET_MODE (y) == SImode && REG_P (y)
6317 && GET_CODE (x) == NEG
6318 && (op == EQ || op == NE))
6319 return CC_Zmode;
6321 /* This is a special case that is used by combine to allow a
6322 comparison of a shifted byte load to be split into a zero-extend
6323 followed by a comparison of the shifted integer (only valid for
6324 equalities and unsigned inequalities). */
6325 if (GET_MODE (x) == SImode
6326 && GET_CODE (x) == ASHIFT
6327 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6328 && GET_CODE (XEXP (x, 0)) == SUBREG
6329 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6330 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6331 && (op == EQ || op == NE
6332 || op == GEU || op == GTU || op == LTU || op == LEU)
6333 && GET_CODE (y) == CONST_INT)
6334 return CC_Zmode;
6336 /* A construct for a conditional compare, if the false arm contains
6337 0, then both conditions must be true, otherwise either condition
6338 must be true. Not all conditions are possible, so CCmode is
6339 returned if it can't be done. */
6340 if (GET_CODE (x) == IF_THEN_ELSE
6341 && (XEXP (x, 2) == const0_rtx
6342 || XEXP (x, 2) == const1_rtx)
6343 && COMPARISON_P (XEXP (x, 0))
6344 && COMPARISON_P (XEXP (x, 1)))
6345 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6346 INTVAL (XEXP (x, 2)));
6348 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6349 if (GET_CODE (x) == AND
6350 && COMPARISON_P (XEXP (x, 0))
6351 && COMPARISON_P (XEXP (x, 1)))
6352 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6353 DOM_CC_X_AND_Y);
6355 if (GET_CODE (x) == IOR
6356 && COMPARISON_P (XEXP (x, 0))
6357 && COMPARISON_P (XEXP (x, 1)))
6358 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6359 DOM_CC_X_OR_Y);
6361 /* An operation (on Thumb) where we want to test for a single bit.
6362 This is done by shifting that bit up into the top bit of a
6363 scratch register; we can then branch on the sign bit. */
6364 if (TARGET_THUMB
6365 && GET_MODE (x) == SImode
6366 && (op == EQ || op == NE)
6367 && (GET_CODE (x) == ZERO_EXTRACT))
6368 return CC_Nmode;
6370 /* An operation that sets the condition codes as a side-effect, the
6371 V flag is not set correctly, so we can only use comparisons where
6372 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6373 instead.) */
6374 if (GET_MODE (x) == SImode
6375 && y == const0_rtx
6376 && (op == EQ || op == NE || op == LT || op == GE)
6377 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6378 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6379 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6380 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6381 || GET_CODE (x) == LSHIFTRT
6382 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6383 || GET_CODE (x) == ROTATERT
6384 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6385 return CC_NOOVmode;
6387 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6388 return CC_Zmode;
6390 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6391 && GET_CODE (x) == PLUS
6392 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6393 return CC_Cmode;
6395 return CCmode;
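/* For instance (illustrative): comparing (ashift:SI (reg) (const_int 2))
   with a register in SImode hits the shifted-operand case above and
   yields CC_SWPmode, because canonicalization puts the shift on the
   left and the emitted cmp must swap the operands back.  */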
6398 /* X and Y are two things to compare using CODE. Emit the compare insn and
6399 return the rtx for register 0 in the proper mode. FP means this is a
6400 floating point compare: I don't think that it is needed on the arm. */
rtx
arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6404 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6405 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6407 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6408 gen_rtx_COMPARE (mode, x, y)));
6410 return cc_reg;
6413 /* Generate a sequence of insns that will generate the correct return
6414 address mask depending on the physical architecture that the program
6415 is running on. */
rtx
arm_gen_return_addr_mask (void)
6419 rtx reg = gen_reg_rtx (Pmode);
6421 emit_insn (gen_return_addr_mask (reg));
6422 return reg;
6425 void
6426 arm_reload_in_hi (rtx *operands)
6428 rtx ref = operands[1];
6429 rtx base, scratch;
6430 HOST_WIDE_INT offset = 0;
6432 if (GET_CODE (ref) == SUBREG)
6434 offset = SUBREG_BYTE (ref);
6435 ref = SUBREG_REG (ref);
6438 if (GET_CODE (ref) == REG)
      /* We have a pseudo which has been spilled onto the stack; there
	 are two cases here: the first where there is a simple
	 stack-slot replacement and a second where the stack-slot is
	 out of range, or is used as a subreg.  */
6444 if (reg_equiv_mem[REGNO (ref)])
6446 ref = reg_equiv_mem[REGNO (ref)];
6447 base = find_replacement (&XEXP (ref, 0));
6449 else
6450 /* The slot is out of range, or was dressed up in a SUBREG. */
6451 base = reg_equiv_address[REGNO (ref)];
6453 else
6454 base = find_replacement (&XEXP (ref, 0));
6456 /* Handle the case where the address is too complex to be offset by 1. */
6457 if (GET_CODE (base) == MINUS
6458 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6460 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6462 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6463 base = base_plus;
6465 else if (GET_CODE (base) == PLUS)
6467 /* The addend must be CONST_INT, or we would have dealt with it above. */
6468 HOST_WIDE_INT hi, lo;
6470 offset += INTVAL (XEXP (base, 1));
6471 base = XEXP (base, 0);
6473 /* Rework the address into a legal sequence of insns. */
6474 /* Valid range for lo is -4095 -> 4095 */
6475 lo = (offset >= 0
6476 ? (offset & 0xfff)
6477 : -((-offset) & 0xfff));
      /* Corner case: if LO is the maximum offset, then we would be out of
	 range once we have added the additional 1 below, so bump the MSB
	 into the pre-loading insn(s).  */
6482 if (lo == 4095)
6483 lo &= 0x7ff;
6485 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6486 ^ (HOST_WIDE_INT) 0x80000000)
6487 - (HOST_WIDE_INT) 0x80000000);
6489 gcc_assert (hi + lo == offset);
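      /* Illustrative arithmetic: for OFFSET == 5000 (0x1388), LO becomes
	 0x388 (904) and HI 0x1000 (4096); HI is added to the base
	 register below, and the two byte loads then use the small
	 offsets LO and LO + 1, both within the 4095-byte ldrb range.  */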
6491 if (hi != 0)
6493 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6495 /* Get the base address; addsi3 knows how to handle constants
6496 that require more than one insn. */
6497 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6498 base = base_plus;
6499 offset = lo;
6503 /* Operands[2] may overlap operands[0] (though it won't overlap
6504 operands[1]), that's why we asked for a DImode reg -- so we can
6505 use the bit that does not overlap. */
6506 if (REGNO (operands[2]) == REGNO (operands[0]))
6507 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6508 else
6509 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6511 emit_insn (gen_zero_extendqisi2 (scratch,
6512 gen_rtx_MEM (QImode,
6513 plus_constant (base,
6514 offset))));
6515 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6516 gen_rtx_MEM (QImode,
6517 plus_constant (base,
6518 offset + 1))));
6519 if (!BYTES_BIG_ENDIAN)
6520 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6521 gen_rtx_IOR (SImode,
6522 gen_rtx_ASHIFT
6523 (SImode,
6524 gen_rtx_SUBREG (SImode, operands[0], 0),
6525 GEN_INT (8)),
6526 scratch)));
6527 else
6528 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6529 gen_rtx_IOR (SImode,
6530 gen_rtx_ASHIFT (SImode, scratch,
6531 GEN_INT (8)),
6532 gen_rtx_SUBREG (SImode, operands[0],
6533 0))));
/* Handle storing a half-word to memory during reload by synthesizing it as
   two byte stores.  Take care not to clobber the input values until after we
   have moved them somewhere safe.  This code assumes that if the DImode
   scratch in operands[2] overlaps either the input value or output address
   in some way, then that value must die in this insn (we absolutely need
   two scratch registers for some corner cases).  */
6542 void
6543 arm_reload_out_hi (rtx *operands)
6545 rtx ref = operands[0];
6546 rtx outval = operands[1];
6547 rtx base, scratch;
6548 HOST_WIDE_INT offset = 0;
6550 if (GET_CODE (ref) == SUBREG)
6552 offset = SUBREG_BYTE (ref);
6553 ref = SUBREG_REG (ref);
6556 if (GET_CODE (ref) == REG)
      /* We have a pseudo which has been spilled onto the stack; there
	 are two cases here: the first where there is a simple
	 stack-slot replacement and a second where the stack-slot is
	 out of range, or is used as a subreg.  */
6562 if (reg_equiv_mem[REGNO (ref)])
6564 ref = reg_equiv_mem[REGNO (ref)];
6565 base = find_replacement (&XEXP (ref, 0));
6567 else
6568 /* The slot is out of range, or was dressed up in a SUBREG. */
6569 base = reg_equiv_address[REGNO (ref)];
6571 else
6572 base = find_replacement (&XEXP (ref, 0));
6574 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6576 /* Handle the case where the address is too complex to be offset by 1. */
6577 if (GET_CODE (base) == MINUS
6578 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6580 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6582 /* Be careful not to destroy OUTVAL. */
6583 if (reg_overlap_mentioned_p (base_plus, outval))
6585 /* Updating base_plus might destroy outval, see if we can
6586 swap the scratch and base_plus. */
6587 if (!reg_overlap_mentioned_p (scratch, outval))
6589 rtx tmp = scratch;
6590 scratch = base_plus;
6591 base_plus = tmp;
6593 else
6595 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6597 /* Be conservative and copy OUTVAL into the scratch now,
6598 this should only be necessary if outval is a subreg
6599 of something larger than a word. */
6600 /* XXX Might this clobber base? I can't see how it can,
6601 since scratch is known to overlap with OUTVAL, and
6602 must be wider than a word. */
6603 emit_insn (gen_movhi (scratch_hi, outval));
6604 outval = scratch_hi;
6608 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6609 base = base_plus;
6611 else if (GET_CODE (base) == PLUS)
6613 /* The addend must be CONST_INT, or we would have dealt with it above. */
6614 HOST_WIDE_INT hi, lo;
6616 offset += INTVAL (XEXP (base, 1));
6617 base = XEXP (base, 0);
6619 /* Rework the address into a legal sequence of insns. */
6620 /* Valid range for lo is -4095 -> 4095 */
6621 lo = (offset >= 0
6622 ? (offset & 0xfff)
6623 : -((-offset) & 0xfff));
      /* Corner case: if LO is the maximum offset, then we would be out of
	 range once we have added the additional 1 below, so bump the MSB
	 into the pre-loading insn(s).  */
6628 if (lo == 4095)
6629 lo &= 0x7ff;
6631 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6632 ^ (HOST_WIDE_INT) 0x80000000)
6633 - (HOST_WIDE_INT) 0x80000000);
6635 gcc_assert (hi + lo == offset);
6637 if (hi != 0)
6639 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6641 /* Be careful not to destroy OUTVAL. */
6642 if (reg_overlap_mentioned_p (base_plus, outval))
6644 /* Updating base_plus might destroy outval, see if we
6645 can swap the scratch and base_plus. */
6646 if (!reg_overlap_mentioned_p (scratch, outval))
6648 rtx tmp = scratch;
6649 scratch = base_plus;
6650 base_plus = tmp;
6652 else
6654 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6656 /* Be conservative and copy outval into scratch now,
6657 this should only be necessary if outval is a
6658 subreg of something larger than a word. */
6659 /* XXX Might this clobber base? I can't see how it
6660 can, since scratch is known to overlap with
6661 outval. */
6662 emit_insn (gen_movhi (scratch_hi, outval));
6663 outval = scratch_hi;
6667 /* Get the base address; addsi3 knows how to handle constants
6668 that require more than one insn. */
6669 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6670 base = base_plus;
6671 offset = lo;
6675 if (BYTES_BIG_ENDIAN)
6677 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6678 plus_constant (base, offset + 1)),
6679 gen_lowpart (QImode, outval)));
6680 emit_insn (gen_lshrsi3 (scratch,
6681 gen_rtx_SUBREG (SImode, outval, 0),
6682 GEN_INT (8)));
6683 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6684 gen_lowpart (QImode, scratch)));
6686 else
6688 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6689 gen_lowpart (QImode, outval)));
6690 emit_insn (gen_lshrsi3 (scratch,
6691 gen_rtx_SUBREG (SImode, outval, 0),
6692 GEN_INT (8)));
6693 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6694 plus_constant (base, offset + 1)),
6695 gen_lowpart (QImode, scratch)));
6699 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
6700 (padded to the size of a word) should be passed in a register. */
6702 static bool
6703 arm_must_pass_in_stack (enum machine_mode mode, tree type)
6705 if (TARGET_AAPCS_BASED)
6706 return must_pass_in_stack_var_size (mode, type);
6707 else
6708 return must_pass_in_stack_var_size_or_pad (mode, type);
6712 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
6713 Return true if an argument passed on the stack should be padded upwards,
6714 i.e. if the least-significant byte has useful data.
6715 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
6716 aggregate types are placed in the lowest memory address. */
6718 bool
6719 arm_pad_arg_upward (enum machine_mode mode, tree type)
6721 if (!TARGET_AAPCS_BASED)
6722 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
6724 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
6725 return false;
6727 return true;
6731 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
6732 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
6733 byte of the register has useful data, and return the opposite if the
6734 most significant byte does.
6735 For AAPCS, small aggregates and small complex types are always padded
6736 upwards. */
6738 bool
6739 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
6740 tree type, int first ATTRIBUTE_UNUSED)
6742 if (TARGET_AAPCS_BASED
6743 && BYTES_BIG_ENDIAN
6744 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
6745 && int_size_in_bytes (type) <= 4)
6746 return true;
6748 /* Otherwise, use default padding. */
6749 return !BYTES_BIG_ENDIAN;
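/* Example (illustrative): on a big-endian AAPCS target a 3-byte
   aggregate passed in a register is padded upward (the function
   returns true); in every other configuration the result is simply
   !BYTES_BIG_ENDIAN, i.e. upward padding exactly when the target is
   little-endian.  */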
6754 /* Print a symbolic form of X to the debug file, F. */
6755 static void
6756 arm_print_value (FILE *f, rtx x)
6758 switch (GET_CODE (x))
6760 case CONST_INT:
6761 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6762 return;
6764 case CONST_DOUBLE:
6765 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6766 return;
6768 case CONST_VECTOR:
6770 int i;
6772 fprintf (f, "<");
6773 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6775 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6776 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6777 fputc (',', f);
6779 fprintf (f, ">");
6781 return;
6783 case CONST_STRING:
6784 fprintf (f, "\"%s\"", XSTR (x, 0));
6785 return;
6787 case SYMBOL_REF:
6788 fprintf (f, "`%s'", XSTR (x, 0));
6789 return;
6791 case LABEL_REF:
6792 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6793 return;
6795 case CONST:
6796 arm_print_value (f, XEXP (x, 0));
6797 return;
6799 case PLUS:
6800 arm_print_value (f, XEXP (x, 0));
6801 fprintf (f, "+");
6802 arm_print_value (f, XEXP (x, 1));
6803 return;
6805 case PC:
6806 fprintf (f, "pc");
6807 return;
6809 default:
6810 fprintf (f, "????");
6811 return;
6815 /* Routines for manipulation of the constant pool. */
6817 /* Arm instructions cannot load a large constant directly into a
6818 register; they have to come from a pc relative load. The constant
6819 must therefore be placed in the addressable range of the pc
6820 relative load. Depending on the precise pc relative load
6821 instruction the range is somewhere between 256 bytes and 4k. This
6822 means that we often have to dump a constant inside a function, and
6823 generate code to branch around it.
6825 It is important to minimize this, since the branches will slow
6826 things down and make the code larger.
6828 Normally we can hide the table after an existing unconditional
6829 branch so that there is no interruption of the flow, but in the
6830 worst case the code looks like this:
6832 ldr rn, L1
6834 b L2
6835 align
6836 L1: .long value
6840 ldr rn, L3
6842 b L4
6843 align
6844 L3: .long value
6848 We fix this by performing a scan after scheduling, which notices
6849 which instructions need to have their operands fetched from the
6850 constant table and builds the table.
6852 The algorithm starts by building a table of all the constants that
6853 need fixing up and all the natural barriers in the function (places
6854 where a constant table can be dropped without breaking the flow).
6855 For each fixup we note how far the pc-relative replacement will be
6856 able to reach and the offset of the instruction into the function.
6858 Having built the table we then group the fixes together to form
6859 tables that are as large as possible (subject to addressing
6860 constraints) and emit each table of constants after the last
6861 barrier that is within range of all the instructions in the group.
6862 If a group does not contain a barrier, then we forcibly create one
6863 by inserting a jump instruction into the flow. Once the table has
6864 been inserted, the insns are then modified to reference the
6865 relevant entry in the pool.
6867 Possible enhancements to the algorithm (not implemented) are:
6869 1) For some processors and object formats, there may be benefit in
6870 aligning the pools to the start of cache lines; this alignment
6871 would need to be taken into account when calculating addressability
6872 of a pool. */
6874 /* These typedefs are located at the start of this file, so that
6875 they can be used in the prototypes there. This comment is to
6876 remind readers of that fact so that the following structures
6877 can be understood more easily.
6879 typedef struct minipool_node Mnode;
6880 typedef struct minipool_fixup Mfix; */
6882 struct minipool_node
6884 /* Doubly linked chain of entries. */
6885 Mnode * next;
6886 Mnode * prev;
6887 /* The maximum offset into the code that this entry can be placed. While
6888 pushing fixes for forward references, all entries are sorted in order
6889 of increasing max_address. */
6890 HOST_WIDE_INT max_address;
6891 /* Similarly for an entry inserted for a backwards ref. */
6892 HOST_WIDE_INT min_address;
6893 /* The number of fixes referencing this entry. This can become zero
6894 if we "unpush" an entry. In this case we ignore the entry when we
6895 come to emit the code. */
6896 int refcount;
6897 /* The offset from the start of the minipool. */
6898 HOST_WIDE_INT offset;
6899 /* The value in table. */
6900 rtx value;
6901 /* The mode of value. */
6902 enum machine_mode mode;
  /* The size of the value.  With iWMMXt enabled
     sizes > 4 also imply an alignment of 8 bytes.  */
6905 int fix_size;
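/* A fix records one insn whose operand must be loaded from the pool:
   the insn and its ADDRESS in the function, the location LOC of the
   operand within the insn, the VALUE and MODE required, the pool node
   finally chosen, and the FORWARDS and BACKWARDS pc-relative reach of
   the load as given by the insn's pool_range attributes.  */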
6908 struct minipool_fixup
6910 Mfix * next;
6911 rtx insn;
6912 HOST_WIDE_INT address;
6913 rtx * loc;
6914 enum machine_mode mode;
6915 int fix_size;
6916 rtx value;
6917 Mnode * minipool;
6918 HOST_WIDE_INT forwards;
6919 HOST_WIDE_INT backwards;
6922 /* Fixes less than a word need padding out to a word boundary. */
6923 #define MINIPOOL_FIX_SIZE(mode) \
6924 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
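/* For example, a QImode or HImode fix still occupies 4 bytes in the
   pool, while DImode and DFmode entries occupy 8.  */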
6926 static Mnode * minipool_vector_head;
6927 static Mnode * minipool_vector_tail;
6928 static rtx minipool_vector_label;
6930 /* The linked list of all minipool fixes required for this function. */
6931 Mfix * minipool_fix_head;
6932 Mfix * minipool_fix_tail;
6933 /* The fix entry for the current minipool, once it has been placed. */
6934 Mfix * minipool_barrier;
6936 /* Determines if INSN is the start of a jump table. Returns the end
6937 of the TABLE or NULL_RTX. */
6938 static rtx
6939 is_jump_table (rtx insn)
6941 rtx table;
6943 if (GET_CODE (insn) == JUMP_INSN
6944 && JUMP_LABEL (insn) != NULL
6945 && ((table = next_real_insn (JUMP_LABEL (insn)))
6946 == next_real_insn (insn))
6947 && table != NULL
6948 && GET_CODE (table) == JUMP_INSN
6949 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6950 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6951 return table;
6953 return NULL_RTX;
6956 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6957 #define JUMP_TABLES_IN_TEXT_SECTION 0
6958 #endif
6960 static HOST_WIDE_INT
6961 get_jump_table_size (rtx insn)
  /* ADDR_VECs only take room if read-only data goes into the text
     section.  */
6965 if (JUMP_TABLES_IN_TEXT_SECTION
6966 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6967 || 1
6968 #endif
6971 rtx body = PATTERN (insn);
6972 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6974 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6977 return 0;
6980 /* Move a minipool fix MP from its current location to before MAX_MP.
6981 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6982 constraints may need updating. */
6983 static Mnode *
6984 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6985 HOST_WIDE_INT max_address)
6987 /* The code below assumes these are different. */
6988 gcc_assert (mp != max_mp);
6990 if (max_mp == NULL)
6992 if (max_address < mp->max_address)
6993 mp->max_address = max_address;
6995 else
6997 if (max_address > max_mp->max_address - mp->fix_size)
6998 mp->max_address = max_mp->max_address - mp->fix_size;
6999 else
7000 mp->max_address = max_address;
7002 /* Unlink MP from its current position. Since max_mp is non-null,
7003 mp->prev must be non-null. */
7004 mp->prev->next = mp->next;
7005 if (mp->next != NULL)
7006 mp->next->prev = mp->prev;
7007 else
7008 minipool_vector_tail = mp->prev;
7010 /* Re-insert it before MAX_MP. */
7011 mp->next = max_mp;
7012 mp->prev = max_mp->prev;
7013 max_mp->prev = mp;
7015 if (mp->prev != NULL)
7016 mp->prev->next = mp;
7017 else
7018 minipool_vector_head = mp;
7021 /* Save the new entry. */
7022 max_mp = mp;
7024 /* Scan over the preceding entries and adjust their addresses as
7025 required. */
7026 while (mp->prev != NULL
7027 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7029 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7030 mp = mp->prev;
7033 return max_mp;
7036 /* Add a constant to the minipool for a forward reference. Returns the
7037 node added or NULL if the constant will not fit in this pool. */
7038 static Mnode *
7039 add_minipool_forward_ref (Mfix *fix)
7041 /* If set, max_mp is the first pool_entry that has a lower
7042 constraint than the one we are trying to add. */
7043 Mnode * max_mp = NULL;
7044 HOST_WIDE_INT max_address = fix->address + fix->forwards;
7045 Mnode * mp;
7047 /* If this fix's address is greater than the address of the first
7048 entry, then we can't put the fix in this pool. We subtract the
7049 size of the current fix to ensure that if the table is fully
7050 packed we still have enough room to insert this value by shuffling
7051 the other fixes forwards. */
7052 if (minipool_vector_head &&
7053 fix->address >= minipool_vector_head->max_address - fix->fix_size)
7054 return NULL;
7056 /* Scan the pool to see if a constant with the same value has
7057 already been added. While we are doing this, also note the
7058 location where we must insert the constant if it doesn't already
7059 exist. */
7060 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7062 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7063 && fix->mode == mp->mode
7064 && (GET_CODE (fix->value) != CODE_LABEL
7065 || (CODE_LABEL_NUMBER (fix->value)
7066 == CODE_LABEL_NUMBER (mp->value)))
7067 && rtx_equal_p (fix->value, mp->value))
7069 /* More than one fix references this entry. */
7070 mp->refcount++;
7071 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7074 /* Note the insertion point if necessary. */
7075 if (max_mp == NULL
7076 && mp->max_address > max_address)
7077 max_mp = mp;
      /* If we are inserting an 8-byte aligned quantity and
	 we have not already found an insertion point, then
	 make sure that all such 8-byte aligned quantities are
	 placed at the start of the pool.  */
7083 if (ARM_DOUBLEWORD_ALIGN
7084 && max_mp == NULL
7085 && fix->fix_size == 8
7086 && mp->fix_size != 8)
7088 max_mp = mp;
7089 max_address = mp->max_address;
7093 /* The value is not currently in the minipool, so we need to create
7094 a new entry for it. If MAX_MP is NULL, the entry will be put on
7095 the end of the list since the placement is less constrained than
7096 any existing entry. Otherwise, we insert the new fix before
7097 MAX_MP and, if necessary, adjust the constraints on the other
7098 entries. */
7099 mp = xmalloc (sizeof (* mp));
7100 mp->fix_size = fix->fix_size;
7101 mp->mode = fix->mode;
7102 mp->value = fix->value;
7103 mp->refcount = 1;
7104 /* Not yet required for a backwards ref. */
7105 mp->min_address = -65536;
7107 if (max_mp == NULL)
7109 mp->max_address = max_address;
7110 mp->next = NULL;
7111 mp->prev = minipool_vector_tail;
7113 if (mp->prev == NULL)
7115 minipool_vector_head = mp;
7116 minipool_vector_label = gen_label_rtx ();
7118 else
7119 mp->prev->next = mp;
7121 minipool_vector_tail = mp;
7123 else
7125 if (max_address > max_mp->max_address - mp->fix_size)
7126 mp->max_address = max_mp->max_address - mp->fix_size;
7127 else
7128 mp->max_address = max_address;
7130 mp->next = max_mp;
7131 mp->prev = max_mp->prev;
7132 max_mp->prev = mp;
7133 if (mp->prev != NULL)
7134 mp->prev->next = mp;
7135 else
7136 minipool_vector_head = mp;
7139 /* Save the new entry. */
7140 max_mp = mp;
7142 /* Scan over the preceding entries and adjust their addresses as
7143 required. */
7144 while (mp->prev != NULL
7145 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7147 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7148 mp = mp->prev;
7151 return max_mp;
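/* Illustrative numbers: a fix at address 1000 whose insn has a 4k
   forward pool range gives max_address == 5096 here.  The new entry is
   placed before the first node whose own max_address exceeds 5096,
   keeping the list sorted by increasing max_address, and the final
   loop then tightens the constraints on any preceding entries.  */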
7154 static Mnode *
7155 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7156 HOST_WIDE_INT min_address)
7158 HOST_WIDE_INT offset;
7160 /* The code below assumes these are different. */
7161 gcc_assert (mp != min_mp);
7163 if (min_mp == NULL)
7165 if (min_address > mp->min_address)
7166 mp->min_address = min_address;
7168 else
7170 /* We will adjust this below if it is too loose. */
7171 mp->min_address = min_address;
7173 /* Unlink MP from its current position. Since min_mp is non-null,
7174 mp->next must be non-null. */
7175 mp->next->prev = mp->prev;
7176 if (mp->prev != NULL)
7177 mp->prev->next = mp->next;
7178 else
7179 minipool_vector_head = mp->next;
7181 /* Reinsert it after MIN_MP. */
7182 mp->prev = min_mp;
7183 mp->next = min_mp->next;
7184 min_mp->next = mp;
7185 if (mp->next != NULL)
7186 mp->next->prev = mp;
7187 else
7188 minipool_vector_tail = mp;
7191 min_mp = mp;
7193 offset = 0;
7194 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7196 mp->offset = offset;
7197 if (mp->refcount > 0)
7198 offset += mp->fix_size;
7200 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7201 mp->next->min_address = mp->min_address + mp->fix_size;
7204 return min_mp;
/* Add a constant to the minipool for a backward reference.  Returns the
   node added or NULL if the constant will not fit in this pool.

   Note that the code for insertion for a backwards reference can be
   somewhat confusing because the calculated offsets for each fix do
   not take into account the size of the pool (which is still under
   construction).  */
7214 static Mnode *
7215 add_minipool_backward_ref (Mfix *fix)
7217 /* If set, min_mp is the last pool_entry that has a lower constraint
7218 than the one we are trying to add. */
7219 Mnode *min_mp = NULL;
7220 /* This can be negative, since it is only a constraint. */
7221 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7222 Mnode *mp;
7224 /* If we can't reach the current pool from this insn, or if we can't
7225 insert this entry at the end of the pool without pushing other
7226 fixes out of range, then we don't try. This ensures that we
7227 can't fail later on. */
7228 if (min_address >= minipool_barrier->address
7229 || (minipool_vector_tail->min_address + fix->fix_size
7230 >= minipool_barrier->address))
7231 return NULL;
7233 /* Scan the pool to see if a constant with the same value has
7234 already been added. While we are doing this, also note the
7235 location where we must insert the constant if it doesn't already
7236 exist. */
7237 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7239 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7240 && fix->mode == mp->mode
7241 && (GET_CODE (fix->value) != CODE_LABEL
7242 || (CODE_LABEL_NUMBER (fix->value)
7243 == CODE_LABEL_NUMBER (mp->value)))
7244 && rtx_equal_p (fix->value, mp->value)
7245 /* Check that there is enough slack to move this entry to the
7246 end of the table (this is conservative). */
7247 && (mp->max_address
7248 > (minipool_barrier->address
7249 + minipool_vector_tail->offset
7250 + minipool_vector_tail->fix_size)))
7252 mp->refcount++;
7253 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7256 if (min_mp != NULL)
7257 mp->min_address += fix->fix_size;
7258 else
7260 /* Note the insertion point if necessary. */
7261 if (mp->min_address < min_address)
7263 /* For now, we do not allow the insertion of 8-byte alignment
7264 requiring nodes anywhere but at the start of the pool. */
7265 if (ARM_DOUBLEWORD_ALIGN
7266 && fix->fix_size == 8 && mp->fix_size != 8)
7267 return NULL;
7268 else
7269 min_mp = mp;
7271 else if (mp->max_address
7272 < minipool_barrier->address + mp->offset + fix->fix_size)
7274 /* Inserting before this entry would push the fix beyond
7275 its maximum address (which can happen if we have
7276 re-located a forwards fix); force the new fix to come
7277 after it. */
7278 min_mp = mp;
7279 min_address = mp->min_address + fix->fix_size;
	  /* If we are inserting an 8-byte aligned quantity and
	     we have not already found an insertion point, then
	     make sure that all such 8-byte aligned quantities are
	     placed at the start of the pool.  */
7285 else if (ARM_DOUBLEWORD_ALIGN
7286 && min_mp == NULL
7287 && fix->fix_size == 8
7288 && mp->fix_size < 8)
7290 min_mp = mp;
7291 min_address = mp->min_address + fix->fix_size;
7296 /* We need to create a new entry. */
7297 mp = xmalloc (sizeof (* mp));
7298 mp->fix_size = fix->fix_size;
7299 mp->mode = fix->mode;
7300 mp->value = fix->value;
7301 mp->refcount = 1;
7302 mp->max_address = minipool_barrier->address + 65536;
7304 mp->min_address = min_address;
7306 if (min_mp == NULL)
7308 mp->prev = NULL;
7309 mp->next = minipool_vector_head;
7311 if (mp->next == NULL)
7313 minipool_vector_tail = mp;
7314 minipool_vector_label = gen_label_rtx ();
7316 else
7317 mp->next->prev = mp;
7319 minipool_vector_head = mp;
7321 else
7323 mp->next = min_mp->next;
7324 mp->prev = min_mp;
7325 min_mp->next = mp;
7327 if (mp->next != NULL)
7328 mp->next->prev = mp;
7329 else
7330 minipool_vector_tail = mp;
7333 /* Save the new entry. */
7334 min_mp = mp;
7336 if (mp->prev)
7337 mp = mp->prev;
7338 else
7339 mp->offset = 0;
7341 /* Scan over the following entries and adjust their offsets. */
7342 while (mp->next != NULL)
7344 if (mp->next->min_address < mp->min_address + mp->fix_size)
7345 mp->next->min_address = mp->min_address + mp->fix_size;
7347 if (mp->refcount)
7348 mp->next->offset = mp->offset + mp->fix_size;
7349 else
7350 mp->next->offset = mp->offset;
7352 mp = mp->next;
7355 return min_mp;
7358 static void
7359 assign_minipool_offsets (Mfix *barrier)
7361 HOST_WIDE_INT offset = 0;
7362 Mnode *mp;
7364 minipool_barrier = barrier;
7366 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7368 mp->offset = offset;
7370 if (mp->refcount > 0)
7371 offset += mp->fix_size;
/* Output the literal table.  */
7376 static void
7377 dump_minipool (rtx scan)
7379 Mnode * mp;
7380 Mnode * nmp;
7381 int align64 = 0;
7383 if (ARM_DOUBLEWORD_ALIGN)
7384 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7385 if (mp->refcount > 0 && mp->fix_size == 8)
7387 align64 = 1;
7388 break;
7391 if (dump_file)
7392 fprintf (dump_file,
7393 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7394 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7396 scan = emit_label_after (gen_label_rtx (), scan);
7397 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7398 scan = emit_label_after (minipool_vector_label, scan);
7400 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7402 if (mp->refcount > 0)
7404 if (dump_file)
7406 fprintf (dump_file,
7407 ";; Offset %u, min %ld, max %ld ",
7408 (unsigned) mp->offset, (unsigned long) mp->min_address,
7409 (unsigned long) mp->max_address);
7410 arm_print_value (dump_file, mp->value);
7411 fputc ('\n', dump_file);
7414 switch (mp->fix_size)
7416 #ifdef HAVE_consttable_1
7417 case 1:
7418 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7419 break;
7421 #endif
7422 #ifdef HAVE_consttable_2
7423 case 2:
7424 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7425 break;
7427 #endif
7428 #ifdef HAVE_consttable_4
7429 case 4:
7430 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7431 break;
7433 #endif
7434 #ifdef HAVE_consttable_8
7435 case 8:
7436 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7437 break;
7439 #endif
7440 default:
7441 gcc_unreachable ();
7445 nmp = mp->next;
7446 free (mp);
7449 minipool_vector_head = minipool_vector_tail = NULL;
7450 scan = emit_insn_after (gen_consttable_end (), scan);
7451 scan = emit_barrier_after (scan);
7454 /* Return the cost of forcibly inserting a barrier after INSN. */
7455 static int
7456 arm_barrier_cost (rtx insn)
7458 /* Basing the location of the pool on the loop depth is preferable,
7459 but at the moment, the basic block information seems to be
7460 corrupt by this stage of the compilation. */
7461 int base_cost = 50;
7462 rtx next = next_nonnote_insn (insn);
7464 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7465 base_cost -= 20;
7467 switch (GET_CODE (insn))
7469 case CODE_LABEL:
7470 /* It will always be better to place the table before the label, rather
7471 than after it. */
7472 return 50;
7474 case INSN:
7475 case CALL_INSN:
7476 return base_cost;
7478 case JUMP_INSN:
7479 return base_cost - 10;
7481 default:
7482 return base_cost + 10;
7486 /* Find the best place in the insn stream in the range
7487 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7488 Create the barrier by inserting a jump and add a new fix entry for
7489 it. */
7490 static Mfix *
7491 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7493 HOST_WIDE_INT count = 0;
7494 rtx barrier;
7495 rtx from = fix->insn;
7496 rtx selected = from;
7497 int selected_cost;
7498 HOST_WIDE_INT selected_address;
7499 Mfix * new_fix;
7500 HOST_WIDE_INT max_count = max_address - fix->address;
7501 rtx label = gen_label_rtx ();
7503 selected_cost = arm_barrier_cost (from);
7504 selected_address = fix->address;
7506 while (from && count < max_count)
7508 rtx tmp;
7509 int new_cost;
7511 /* This code shouldn't have been called if there was a natural barrier
7512 within range. */
7513 gcc_assert (GET_CODE (from) != BARRIER);
7515 /* Count the length of this insn. */
7516 count += get_attr_length (from);
7518 /* If there is a jump table, add its length. */
7519 tmp = is_jump_table (from);
7520 if (tmp != NULL)
7522 count += get_jump_table_size (tmp);
7524 /* Jump tables aren't in a basic block, so base the cost on
7525 the dispatch insn. If we select this location, we will
7526 still put the pool after the table. */
7527 new_cost = arm_barrier_cost (from);
7529 if (count < max_count && new_cost <= selected_cost)
7531 selected = tmp;
7532 selected_cost = new_cost;
7533 selected_address = fix->address + count;
7536 /* Continue after the dispatch table. */
7537 from = NEXT_INSN (tmp);
7538 continue;
7541 new_cost = arm_barrier_cost (from);
7543 if (count < max_count && new_cost <= selected_cost)
7545 selected = from;
7546 selected_cost = new_cost;
7547 selected_address = fix->address + count;
7550 from = NEXT_INSN (from);
7553 /* Create a new JUMP_INSN that branches around a barrier. */
7554 from = emit_jump_insn_after (gen_jump (label), selected);
7555 JUMP_LABEL (from) = label;
7556 barrier = emit_barrier_after (from);
7557 emit_label_after (label, barrier);
7559 /* Create a minipool barrier entry for the new barrier. */
7560 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7561 new_fix->insn = barrier;
7562 new_fix->address = selected_address;
7563 new_fix->next = fix->next;
7564 fix->next = new_fix;
7566 return new_fix;
7569 /* Record that there is a natural barrier in the insn stream at
7570 ADDRESS. */
7571 static void
7572 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7574 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7576 fix->insn = insn;
7577 fix->address = address;
7579 fix->next = NULL;
7580 if (minipool_fix_head != NULL)
7581 minipool_fix_tail->next = fix;
7582 else
7583 minipool_fix_head = fix;
7585 minipool_fix_tail = fix;
7588 /* Record INSN, which will need fixing up to load a value from the
7589 minipool. ADDRESS is the offset of the insn since the start of the
7590 function; LOC is a pointer to the part of the insn which requires
7591 fixing; VALUE is the constant that must be loaded, which is of type
7592 MODE. */
7593 static void
7594 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7595 enum machine_mode mode, rtx value)
7597 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7599 #ifdef AOF_ASSEMBLER
7600 /* PIC symbol references need to be converted into offsets into the
7601 based area. */
7602 /* XXX This shouldn't be done here. */
7603 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7604 value = aof_pic_entry (value);
7605 #endif /* AOF_ASSEMBLER */
7607 fix->insn = insn;
7608 fix->address = address;
7609 fix->loc = loc;
7610 fix->mode = mode;
7611 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7612 fix->value = value;
7613 fix->forwards = get_attr_pool_range (insn);
7614 fix->backwards = get_attr_neg_pool_range (insn);
7615 fix->minipool = NULL;
7617 /* If an insn doesn't have a range defined for it, then it isn't
7618 expecting to be reworked by this code. Better to stop now than
7619 to generate duff assembly code. */
7620 gcc_assert (fix->forwards || fix->backwards);
7622 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7623 So there might be an empty word before the start of the pool.
7624 Hence we reduce the forward range by 4 to allow for this
7625 possibility. */
7626 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7627 fix->forwards -= 4;
7629 if (dump_file)
7631 fprintf (dump_file,
7632 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7633 GET_MODE_NAME (mode),
7634 INSN_UID (insn), (unsigned long) address,
7635 -1 * (long)fix->backwards, (long)fix->forwards);
7636 arm_print_value (dump_file, fix->value);
7637 fprintf (dump_file, "\n");
7640 /* Add it to the chain of fixes. */
7641 fix->next = NULL;
7643 if (minipool_fix_head != NULL)
7644 minipool_fix_tail->next = fix;
7645 else
7646 minipool_fix_head = fix;
7648 minipool_fix_tail = fix;
7651 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7652 Returns the number of insns needed, or 99 if we don't know how to
7653 do it. */
int
arm_const_double_inline_cost (rtx val)
7657 rtx lowpart, highpart;
7658 enum machine_mode mode;
7660 mode = GET_MODE (val);
7662 if (mode == VOIDmode)
7663 mode = DImode;
7665 gcc_assert (GET_MODE_SIZE (mode) == 8);
7667 lowpart = gen_lowpart (SImode, val);
7668 highpart = gen_highpart_mode (SImode, mode, val);
7670 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7671 gcc_assert (GET_CODE (highpart) == CONST_INT);
7673 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7674 NULL_RTX, NULL_RTX, 0, 0)
7675 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7676 NULL_RTX, NULL_RTX, 0, 0));
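/* Illustrative cost: for a DImode value whose low and high words are
   both 1, each part is a single mov, so the result is 2.  Two
   arbitrary 32-bit words could cost up to 4 insns each, at which
   point a minipool load is usually the better choice.  */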
7679 /* Return true if it is worthwhile to split a 64-bit constant into two
7680 32-bit operations. This is the case if optimizing for size, or
7681 if we have load delay slots, or if one 32-bit part can be done with
7682 a single data operation. */
7683 bool
7684 arm_const_double_by_parts (rtx val)
7686 enum machine_mode mode = GET_MODE (val);
7687 rtx part;
7689 if (optimize_size || arm_ld_sched)
7690 return true;
7692 if (mode == VOIDmode)
7693 mode = DImode;
7695 part = gen_highpart_mode (SImode, mode, val);
7697 gcc_assert (GET_CODE (part) == CONST_INT);
7699 if (const_ok_for_arm (INTVAL (part))
7700 || const_ok_for_arm (~INTVAL (part)))
7701 return true;
7703 part = gen_lowpart (SImode, val);
7705 gcc_assert (GET_CODE (part) == CONST_INT);
7707 if (const_ok_for_arm (INTVAL (part))
7708 || const_ok_for_arm (~INTVAL (part)))
7709 return true;
7711 return false;
7714 /* Scan INSN and note any of its operands that need fixing.
7715 If DO_PUSHES is false we do not actually push any of the fixups
7716 needed. The function returns TRUE if any fixups were needed/pushed.
7717 This is used by arm_memory_load_p() which needs to know about loads
7718 of constants that will be converted into minipool loads. */
7719 static bool
7720 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7722 bool result = false;
7723 int opno;
7725 extract_insn (insn);
7727 if (!constrain_operands (1))
7728 fatal_insn_not_found (insn);
7730 if (recog_data.n_alternatives == 0)
7731 return false;
7733 /* Fill in recog_op_alt with information about the constraints of
7734 this insn. */
7735 preprocess_constraints ();
7737 for (opno = 0; opno < recog_data.n_operands; opno++)
7739 /* Things we need to fix can only occur in inputs. */
7740 if (recog_data.operand_type[opno] != OP_IN)
7741 continue;
7743 /* If this alternative is a memory reference, then any mention
7744 of constants in this alternative is really to fool reload
7745 into allowing us to accept one there. We need to fix them up
7746 now so that we output the right code. */
7747 if (recog_op_alt[opno][which_alternative].memory_ok)
7749 rtx op = recog_data.operand[opno];
7751 if (CONSTANT_P (op))
7753 if (do_pushes)
7754 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7755 recog_data.operand_mode[opno], op);
7756 result = true;
7758 else if (GET_CODE (op) == MEM
7759 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7760 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7762 if (do_pushes)
7764 rtx cop = avoid_constant_pool_reference (op);
	      /* Casting the address of something to a mode narrower
		 than a word can cause avoid_constant_pool_reference()
		 to return the pool reference itself.  That's no good to
		 us here.  Let's just hope that we can use the
		 constant pool value directly.  */
7771 if (op == cop)
7772 cop = get_pool_constant (XEXP (op, 0));
7774 push_minipool_fix (insn, address,
7775 recog_data.operand_loc[opno],
7776 recog_data.operand_mode[opno], cop);
7779 result = true;
7784 return result;
/* GCC puts the pool in the wrong place for ARM, since we can only
   load addresses a limited distance around the pc.  We do some
   special munging to move the constant pool values to the correct
   point in the code.  */
7791 static void
7792 arm_reorg (void)
7794 rtx insn;
7795 HOST_WIDE_INT address = 0;
7796 Mfix * fix;
7798 minipool_fix_head = minipool_fix_tail = NULL;
7800 /* The first insn must always be a note, or the code below won't
7801 scan it properly. */
7802 insn = get_insns ();
7803 gcc_assert (GET_CODE (insn) == NOTE);
7805 /* Scan all the insns and record the operands that will need fixing. */
7806 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7808 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7809 && (arm_cirrus_insn_p (insn)
7810 || GET_CODE (insn) == JUMP_INSN
7811 || arm_memory_load_p (insn)))
7812 cirrus_reorg (insn);
7814 if (GET_CODE (insn) == BARRIER)
7815 push_minipool_barrier (insn, address);
7816 else if (INSN_P (insn))
7818 rtx table;
7820 note_invalid_constants (insn, address, true);
7821 address += get_attr_length (insn);
7823 /* If the insn is a vector jump, add the size of the table
7824 and skip the table. */
7825 if ((table = is_jump_table (insn)) != NULL)
7827 address += get_jump_table_size (table);
7828 insn = table;
7833 fix = minipool_fix_head;
7835 /* Now scan the fixups and perform the required changes. */
7836 while (fix)
7838 Mfix * ftmp;
7839 Mfix * fdel;
7840 Mfix * last_added_fix;
7841 Mfix * last_barrier = NULL;
7842 Mfix * this_fix;
7844 /* Skip any further barriers before the next fix. */
7845 while (fix && GET_CODE (fix->insn) == BARRIER)
7846 fix = fix->next;
7848 /* No more fixes. */
7849 if (fix == NULL)
7850 break;
7852 last_added_fix = NULL;
7854 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7856 if (GET_CODE (ftmp->insn) == BARRIER)
7858 if (ftmp->address >= minipool_vector_head->max_address)
7859 break;
7861 last_barrier = ftmp;
7863 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7864 break;
7866 last_added_fix = ftmp; /* Keep track of the last fix added. */
7869 /* If we found a barrier, drop back to that; any fixes that we
7870 could have reached but come after the barrier will now go in
7871 the next mini-pool. */
7872 if (last_barrier != NULL)
7874 /* Reduce the refcount for those fixes that won't go into this
7875 pool after all. */
7876 for (fdel = last_barrier->next;
7877 fdel && fdel != ftmp;
7878 fdel = fdel->next)
7880 fdel->minipool->refcount--;
7881 fdel->minipool = NULL;
7884 ftmp = last_barrier;
7886 else
	  /* FTMP is the first fix that we can't fit into this pool and
	     there are no natural barriers that we could use.  Insert a
	     new barrier in the code somewhere between the previous
	     fix and this one, and arrange to jump around it.  */
7892 HOST_WIDE_INT max_address;
7894 /* The last item on the list of fixes must be a barrier, so
7895 we can never run off the end of the list of fixes without
7896 last_barrier being set. */
7897 gcc_assert (ftmp);
7899 max_address = minipool_vector_head->max_address;
7900 /* Check that there isn't another fix that is in range that
7901 we couldn't fit into this pool because the pool was
7902 already too large: we need to put the pool before such an
7903 instruction. */
7904 if (ftmp->address < max_address)
7905 max_address = ftmp->address;
7907 last_barrier = create_fix_barrier (last_added_fix, max_address);
7910 assign_minipool_offsets (last_barrier);
7912 while (ftmp)
7914 if (GET_CODE (ftmp->insn) != BARRIER
7915 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7916 == NULL))
7917 break;
7919 ftmp = ftmp->next;
7922 /* Scan over the fixes we have identified for this pool, fixing them
7923 up and adding the constants to the pool itself. */
7924 for (this_fix = fix; this_fix && ftmp != this_fix;
7925 this_fix = this_fix->next)
7926 if (GET_CODE (this_fix->insn) != BARRIER)
7928 rtx addr
7929 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7930 minipool_vector_label),
7931 this_fix->minipool->offset);
7932 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7935 dump_minipool (last_barrier->insn);
7936 fix = ftmp;
7939 /* From now on we must synthesize any constants that we can't handle
7940 directly. This can happen if the RTL gets split during final
7941 instruction generation. */
7942 after_arm_reorg = 1;
7944 /* Free the minipool memory. */
7945 obstack_free (&minipool_obstack, minipool_startobj);
7948 /* Routines to output assembly language. */
7950 /* If the rtx is the correct value then return the string of the number.
7951 In this way we can ensure that valid double constants are generated even
7952 when cross compiling. */
7953 const char *
7954 fp_immediate_constant (rtx x)
7956 REAL_VALUE_TYPE r;
7957 int i;
7959 if (!fp_consts_inited)
7960 init_fp_table ();
7962 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7963 for (i = 0; i < 8; i++)
7964 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7965 return strings_fp[i];
7967 gcc_unreachable ();
7970 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7971 static const char *
7972 fp_const_from_val (REAL_VALUE_TYPE *r)
7974 int i;
7976 if (!fp_consts_inited)
7977 init_fp_table ();
7979 for (i = 0; i < 8; i++)
7980 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7981 return strings_fp[i];
7983 gcc_unreachable ();
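/* For reference: the FPA encodes only eight floating point immediates.
   Assuming the table built by init_fp_table reflects the hardware,
   those values (and the strings returned above) are 0, 1, 2, 3, 4, 5,
   0.5 and 10; any other constant has to be loaded from memory.  */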
7986 /* Output the operands of a LDM/STM instruction to STREAM.
7987 MASK is the ARM register set mask of which only bits 0-15 are important.
7988 REG is the base register, either the frame pointer or the stack pointer.
7989 INSTR is the possibly suffixed load or store instruction. */
7991 static void
7992 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7993 unsigned long mask)
7995 unsigned i;
7996 bool not_first = FALSE;
7998 fputc ('\t', stream);
7999 asm_fprintf (stream, instr, reg);
8000 fputs (", {", stream);
8002 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8003 if (mask & (1 << i))
8005 if (not_first)
8006 fprintf (stream, ", ");
8008 asm_fprintf (stream, "%r", i);
8009 not_first = TRUE;
8012 fprintf (stream, "}\n");
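/* Example (hypothetical arguments): with INSTR "ldmfd\t%r!", REG the
   stack pointer and MASK having bits 4, 5 and 14 set, the output is

	ldmfd	sp!, {r4, r5, lr}
*/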
8016 /* Output a FLDMX instruction to STREAM.
8017 BASE is the register containing the address.
8018 REG and COUNT specify the register range.
8019 Extra registers may be added to avoid hardware bugs. */
8021 static void
8022 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
8024 int i;
8026 /* Workaround ARM10 VFPr1 bug. */
8027 if (count == 2 && !arm_arch6)
8029 if (reg == 15)
8030 reg--;
8031 count++;
8034 fputc ('\t', stream);
8035 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
8037 for (i = reg; i < reg + count; i++)
8039 if (i > reg)
8040 fputs (", ", stream);
8041 asm_fprintf (stream, "d%d", i);
8043 fputs ("}\n", stream);
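/* Example (hypothetical arguments): arm_output_fldmx (f, IP_REGNUM, 4, 3)
   should emit

	fldmfdx	ip!, {d4, d5, d6}

   and, on pre-v6 cores, a request for exactly two registers is widened
   to three to dodge the ARM10 VFPr1 erratum described below for the
   store case.  */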
8048 /* Output the assembly for a store multiple. */
8050 const char *
8051 vfp_output_fstmx (rtx * operands)
8053 char pattern[100];
8054 int p;
8055 int base;
8056 int i;
8058 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8059 p = strlen (pattern);
8061 gcc_assert (GET_CODE (operands[1]) == REG);
8063 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8064 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8066 p += sprintf (&pattern[p], ", d%d", base + i);
8068 strcpy (&pattern[p], "}");
8070 output_asm_insn (pattern, operands);
8071 return "";
8075 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
8076 number of bytes pushed. */
8078 static int
8079 vfp_emit_fstmx (int base_reg, int count)
8081 rtx par;
8082 rtx dwarf;
8083 rtx tmp, reg;
8084 int i;
8086 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8087 register pairs are stored by a store multiple insn. We avoid this
8088 by pushing an extra pair. */
8089 if (count == 2 && !arm_arch6)
8091 if (base_reg == LAST_VFP_REGNUM - 3)
8092 base_reg -= 2;
8093 count++;
8096 /* ??? The frame layout is implementation defined. We describe
8097 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8098 We really need some way of representing the whole block so that the
8099 unwinder can figure it out at runtime. */
8100 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8101 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8103 reg = gen_rtx_REG (DFmode, base_reg);
8104 base_reg += 2;
8106 XVECEXP (par, 0, 0)
8107 = gen_rtx_SET (VOIDmode,
8108 gen_frame_mem (BLKmode,
8109 gen_rtx_PRE_DEC (BLKmode,
8110 stack_pointer_rtx)),
8111 gen_rtx_UNSPEC (BLKmode,
8112 gen_rtvec (1, reg),
8113 UNSPEC_PUSH_MULT));
8115 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8116 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8117 GEN_INT (-(count * 8 + 4))));
8118 RTX_FRAME_RELATED_P (tmp) = 1;
8119 XVECEXP (dwarf, 0, 0) = tmp;
8121 tmp = gen_rtx_SET (VOIDmode,
8122 gen_frame_mem (DFmode, stack_pointer_rtx),
8123 reg);
8124 RTX_FRAME_RELATED_P (tmp) = 1;
8125 XVECEXP (dwarf, 0, 1) = tmp;
8127 for (i = 1; i < count; i++)
8129 reg = gen_rtx_REG (DFmode, base_reg);
8130 base_reg += 2;
8131 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8133 tmp = gen_rtx_SET (VOIDmode,
8134 gen_frame_mem (DFmode,
8135 gen_rtx_PLUS (SImode,
8136 stack_pointer_rtx,
8137 GEN_INT (i * 8))),
8138 reg);
8139 RTX_FRAME_RELATED_P (tmp) = 1;
8140 XVECEXP (dwarf, 0, i + 1) = tmp;
8143 par = emit_insn (par);
8144 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8145 REG_NOTES (par));
8146 RTX_FRAME_RELATED_P (par) = 1;
8148 return count * 8 + 4;
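/* Worked example: pushing three register pairs stores 3 * 2 data words
   plus the single FSTMX pad word, so this returns 3 * 8 + 4 = 28,
   matching the (const_int -28) stack adjustment recorded in the dwarf
   note above.  */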
8152 /* Output a 'call' insn. */
8153 const char *
8154 output_call (rtx *operands)
8156 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8158 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8159 if (REGNO (operands[0]) == LR_REGNUM)
8161 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8162 output_asm_insn ("mov%?\t%0, %|lr", operands);
8165 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8167 if (TARGET_INTERWORK || arm_arch4t)
8168 output_asm_insn ("bx%?\t%0", operands);
8169 else
8170 output_asm_insn ("mov%?\t%|pc, %0", operands);
8172 return "";
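/* Example (hypothetical operand): a pre-v5 indirect call through r2 on
   an interworking or v4t target comes out as

	mov	lr, pc
	bx	r2

   which works because reading the PC yields the address of the current
   instruction plus 8, which is the instruction after the bx and hence
   the required return address.  */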
8175 /* Output a 'call' insn that is a reference in memory. */
8176 const char *
8177 output_call_mem (rtx *operands)
8179 if (TARGET_INTERWORK && !arm_arch5)
8181 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8182 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8183 output_asm_insn ("bx%?\t%|ip", operands);
8185 else if (regno_use_in (LR_REGNUM, operands[0]))
8187 /* LR is used in the memory address. We load the address in the
8188 first instruction. It's safe to use IP as the target of the
8189 load since the call will kill it anyway. */
8190 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8191 if (arm_arch5)
8192 output_asm_insn ("blx%?\t%|ip", operands);
8193 else
8195 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8196 if (arm_arch4t)
8197 output_asm_insn ("bx%?\t%|ip", operands);
8198 else
8199 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8202 else
8204 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8205 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8208 return "";
8212 /* Output a move from arm registers to an fpa register.
8213 OPERANDS[0] is an fpa register.
8214 OPERANDS[1] is the first register of an arm register pair. */
8215 const char *
8216 output_mov_long_double_fpa_from_arm (rtx *operands)
8218 int arm_reg0 = REGNO (operands[1]);
8219 rtx ops[3];
8221 gcc_assert (arm_reg0 != IP_REGNUM);
8223 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8224 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8225 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8227 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8228 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8230 return "";
8233 /* Output a move from an fpa register to arm registers.
8234 OPERANDS[0] is the first register of an arm register pair.
8235 OPERANDS[1] is an fpa register. */
8236 const char *
8237 output_mov_long_double_arm_from_fpa (rtx *operands)
8239 int arm_reg0 = REGNO (operands[0]);
8240 rtx ops[3];
8242 gcc_assert (arm_reg0 != IP_REGNUM);
8244 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8245 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8246 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8248 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8249 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8250 return "";
8253 /* Output a move from arm registers to arm registers of a long double.
8254 OPERANDS[0] is the destination.
8255 OPERANDS[1] is the source. */
8256 const char *
8257 output_mov_long_double_arm_from_arm (rtx *operands)
8259 /* We have to be careful here because the two might overlap. */
8260 int dest_start = REGNO (operands[0]);
8261 int src_start = REGNO (operands[1]);
8262 rtx ops[2];
8263 int i;
8265 if (dest_start < src_start)
8267 for (i = 0; i < 3; i++)
8269 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8270 ops[1] = gen_rtx_REG (SImode, src_start + i);
8271 output_asm_insn ("mov%?\t%0, %1", ops);
8274 else
8276 for (i = 2; i >= 0; i--)
8278 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8279 ops[1] = gen_rtx_REG (SImode, src_start + i);
8280 output_asm_insn ("mov%?\t%0, %1", ops);
8284 return "";
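/* Example of why the direction matters: moving {r1, r2, r3} into
   {r2, r3, r4} forwards would clobber r2 before it is read, so the
   copy is done highest register first:

	mov	r4, r3
	mov	r3, r2
	mov	r2, r1
*/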
8288 /* Output a move from arm registers to an fpa register.
8289 OPERANDS[0] is an fpa register.
8290 OPERANDS[1] is the first register of an arm register pair. */
8291 const char *
8292 output_mov_double_fpa_from_arm (rtx *operands)
8294 int arm_reg0 = REGNO (operands[1]);
8295 rtx ops[2];
8297 gcc_assert (arm_reg0 != IP_REGNUM);
8299 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8300 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8301 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8302 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8303 return "";
8306 /* Output a move from an fpa register to arm registers.
8307 OPERANDS[0] is the first register of an arm register pair.
8308 OPERANDS[1] is an fpa register. */
8309 const char *
8310 output_mov_double_arm_from_fpa (rtx *operands)
8312 int arm_reg0 = REGNO (operands[0]);
8313 rtx ops[2];
8315 gcc_assert (arm_reg0 != IP_REGNUM);
8317 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8318 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8319 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8320 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8321 return "";
8324 /* Output a move between double words.
8325 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8326 or MEM<-REG and all MEMs must be offsettable addresses. */
8327 const char *
8328 output_move_double (rtx *operands)
8330 enum rtx_code code0 = GET_CODE (operands[0]);
8331 enum rtx_code code1 = GET_CODE (operands[1]);
8332 rtx otherops[3];
8334 if (code0 == REG)
8336 int reg0 = REGNO (operands[0]);
8338 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8340 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8342 switch (GET_CODE (XEXP (operands[1], 0)))
8344 case REG:
8345 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8346 break;
8348 case PRE_INC:
8349 gcc_assert (TARGET_LDRD);
8350 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8351 break;
8353 case PRE_DEC:
8354 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8355 break;
8357 case POST_INC:
8358 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8359 break;
8361 case POST_DEC:
8362 gcc_assert (TARGET_LDRD);
8363 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8364 break;
8366 case PRE_MODIFY:
8367 case POST_MODIFY:
8368 otherops[0] = operands[0];
8369 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8370 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8372 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8374 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8376 /* Registers overlap so split out the increment. */
8377 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8378 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8380 else
8381 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8383 else
8385 /* We only allow constant increments, so this is safe. */
8386 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8388 break;
8390 case LABEL_REF:
8391 case CONST:
8392 output_asm_insn ("adr%?\t%0, %1", operands);
8393 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8394 break;
8396 default:
8397 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8398 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8400 otherops[0] = operands[0];
8401 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8402 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8404 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8406 if (GET_CODE (otherops[2]) == CONST_INT)
8408 switch ((int) INTVAL (otherops[2]))
8410 case -8:
8411 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8412 return "";
8413 case -4:
8414 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8415 return "";
8416 case 4:
8417 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8418 return "";
8421 if (TARGET_LDRD
8422 && (GET_CODE (otherops[2]) == REG
8423 || (GET_CODE (otherops[2]) == CONST_INT
8424 && INTVAL (otherops[2]) > -256
8425 && INTVAL (otherops[2]) < 256)))
8427 if (reg_overlap_mentioned_p (otherops[0],
8428 otherops[2]))
8430 /* Swap base and index registers over to
8431 avoid a conflict. */
8432 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8433 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8436 /* If both registers conflict, it will usually
8437 have been fixed by a splitter. */
8438 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8440 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8441 output_asm_insn ("ldr%?d\t%0, [%1]",
8442 otherops);
8444 else
8445 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8446 return "";
8449 if (GET_CODE (otherops[2]) == CONST_INT)
8451 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8452 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8453 else
8454 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8456 else
8457 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8459 else
8460 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8462 return "ldm%?ia\t%0, %M0";
8464 else
8466 otherops[1] = adjust_address (operands[1], SImode, 4);
8467 /* Take care of overlapping base/data reg. */
8468 if (reg_mentioned_p (operands[0], operands[1]))
8470 output_asm_insn ("ldr%?\t%0, %1", otherops);
8471 output_asm_insn ("ldr%?\t%0, %1", operands);
8473 else
8475 output_asm_insn ("ldr%?\t%0, %1", operands);
8476 output_asm_insn ("ldr%?\t%0, %1", otherops);
8481 else
8483 /* Constraints should ensure this. */
8484 gcc_assert (code0 == MEM && code1 == REG);
8485 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8487 switch (GET_CODE (XEXP (operands[0], 0)))
8489 case REG:
8490 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8491 break;
8493 case PRE_INC:
8494 gcc_assert (TARGET_LDRD);
8495 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8496 break;
8498 case PRE_DEC:
8499 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8500 break;
8502 case POST_INC:
8503 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8504 break;
8506 case POST_DEC:
8507 gcc_assert (TARGET_LDRD);
8508 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8509 break;
8511 case PRE_MODIFY:
8512 case POST_MODIFY:
8513 otherops[0] = operands[1];
8514 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8515 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8517 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8518 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8519 else
8520 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8521 break;
8523 case PLUS:
8524 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8525 if (GET_CODE (otherops[2]) == CONST_INT)
8527 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8529 case -8:
8530 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8531 return "";
8533 case -4:
8534 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8535 return "";
8537 case 4:
8538 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8539 return "";
8542 if (TARGET_LDRD
8543 && (GET_CODE (otherops[2]) == REG
8544 || (GET_CODE (otherops[2]) == CONST_INT
8545 && INTVAL (otherops[2]) > -256
8546 && INTVAL (otherops[2]) < 256)))
8548 otherops[0] = operands[1];
8549 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8550 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8551 return "";
8553 /* Fall through */
8555 default:
8556 otherops[0] = adjust_address (operands[0], SImode, 4);
8557 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8558 output_asm_insn ("str%?\t%1, %0", operands);
8559 output_asm_insn ("str%?\t%1, %0", otherops);
8563 return "";
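/* Illustrative cases (register numbers hypothetical): a DImode load
   from [r2, #4] uses the load-multiple trick "ldmib r2, {r0, r1}",
   while on a TARGET_LDRD core a load from [r2, #16] can be emitted
   directly as "ldrd r0, [r2, #16]".  */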
8566 /* Output an ADD r, s, #n where n may be too big for one instruction.
8567 If n is zero and the source and destination registers are the same,
8568 output nothing. */
8568 const char *
8569 output_add_immediate (rtx *operands)
8571 HOST_WIDE_INT n = INTVAL (operands[2]);
8573 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8575 if (n < 0)
8576 output_multi_immediate (operands,
8577 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8578 -n);
8579 else
8580 output_multi_immediate (operands,
8581 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8585 return "";
8588 /* Output a multiple immediate operation.
8589 OPERANDS is the vector of operands referred to in the output patterns.
8590 INSTR1 is the output pattern to use for the first constant.
8591 INSTR2 is the output pattern to use for subsequent constants.
8592 IMMED_OP is the index of the constant slot in OPERANDS.
8593 N is the constant value. */
8594 static const char *
8595 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8596 int immed_op, HOST_WIDE_INT n)
8598 #if HOST_BITS_PER_WIDE_INT > 32
8599 n &= 0xffffffff;
8600 #endif
8602 if (n == 0)
8604 /* Quick and easy output. */
8605 operands[immed_op] = const0_rtx;
8606 output_asm_insn (instr1, operands);
8608 else
8610 int i;
8611 const char * instr = instr1;
8613 /* Note that n is never zero here (which would give no output). */
8614 for (i = 0; i < 32; i += 2)
8616 if (n & (3 << i))
8618 operands[immed_op] = GEN_INT (n & (255 << i));
8619 output_asm_insn (instr, operands);
8620 instr = instr2;
8621 i += 6;
8626 return "";
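/* Worked example: the constant 0x10004 has no single shifted 8-bit
   encoding, so an add of it is split into two even-bit-aligned chunks:

	add	r0, r1, #4
	add	r0, r0, #65536
*/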
8629 /* Return the appropriate ARM instruction for the operation code.
8630 The returned result should not be overwritten. OP is the rtx of the
8631 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8632 was shifted. */
8633 const char *
8634 arithmetic_instr (rtx op, int shift_first_arg)
8636 switch (GET_CODE (op))
8638 case PLUS:
8639 return "add";
8641 case MINUS:
8642 return shift_first_arg ? "rsb" : "sub";
8644 case IOR:
8645 return "orr";
8647 case XOR:
8648 return "eor";
8650 case AND:
8651 return "and";
8653 default:
8654 gcc_unreachable ();
8658 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8659 for the operation code. The returned result should not be overwritten.
8660 OP is the rtx code of the shift.
8661 On exit, *AMOUNTP will be -1 if the shift is by a register, otherwise it
8662 will hold the constant shift amount. */
8663 static const char *
8664 shift_op (rtx op, HOST_WIDE_INT *amountp)
8666 const char * mnem;
8667 enum rtx_code code = GET_CODE (op);
8669 switch (GET_CODE (XEXP (op, 1)))
8671 case REG:
8672 case SUBREG:
8673 *amountp = -1;
8674 break;
8676 case CONST_INT:
8677 *amountp = INTVAL (XEXP (op, 1));
8678 break;
8680 default:
8681 gcc_unreachable ();
8684 switch (code)
8686 case ASHIFT:
8687 mnem = "asl";
8688 break;
8690 case ASHIFTRT:
8691 mnem = "asr";
8692 break;
8694 case LSHIFTRT:
8695 mnem = "lsr";
8696 break;
8698 case ROTATE:
8699 gcc_assert (*amountp != -1);
8700 *amountp = 32 - *amountp;
8702 /* Fall through. */
8704 case ROTATERT:
8705 mnem = "ror";
8706 break;
8708 case MULT:
8709 /* We never have to worry about the amount being other than a
8710 power of 2, since this case can never be reloaded from a reg. */
8711 gcc_assert (*amountp != -1);
8712 *amountp = int_log2 (*amountp);
8713 return "asl";
8715 default:
8716 gcc_unreachable ();
8719 if (*amountp != -1)
8721 /* This is not 100% correct, but follows from the desire to merge
8722 multiplication by a power of 2 with the recognizer for a
8723 shift. >=32 is not a valid shift for "asl", so we must try and
8724 output a shift that produces the correct arithmetical result.
8725 Using lsr #32 is identical except for the fact that the carry bit
8726 is not set correctly if we set the flags; but we never use the
8727 carry bit from such an operation, so we can ignore that. */
8728 if (code == ROTATERT)
8729 /* Rotate is just modulo 32. */
8730 *amountp &= 31;
8731 else if (*amountp != (*amountp & 31))
8733 if (code == ASHIFT)
8734 mnem = "lsr";
8735 *amountp = 32;
8738 /* Shifts of 0 are no-ops. */
8739 if (*amountp == 0)
8740 return NULL;
8743 return mnem;
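/* Examples of the adjustments above: a MULT by 8 comes out as
   "asl #3"; an ASHIFT by 33 is rewritten as "lsr #32", which likewise
   produces zero; and a ROTATE by 10 becomes "ror #22".  */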
8746 /* Obtain the shift count from POWER, which must be a power of two. */
8748 static HOST_WIDE_INT
8749 int_log2 (HOST_WIDE_INT power)
8751 HOST_WIDE_INT shift = 0;
8753 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8755 gcc_assert (shift <= 31);
8756 shift++;
8759 return shift;
8762 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8763 because /bin/as is horribly restrictive. The judgement about
8764 whether or not each character is 'printable' (and can be output as
8765 is) or not (and must be printed with an octal escape) must be made
8766 with reference to the *host* character set -- the situation is
8767 similar to that discussed in the comments above pp_c_char in
8768 c-pretty-print.c. */
8770 #define MAX_ASCII_LEN 51
8772 void
8773 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8775 int i;
8776 int len_so_far = 0;
8778 fputs ("\t.ascii\t\"", stream);
8780 for (i = 0; i < len; i++)
8782 int c = p[i];
8784 if (len_so_far >= MAX_ASCII_LEN)
8786 fputs ("\"\n\t.ascii\t\"", stream);
8787 len_so_far = 0;
8790 if (ISPRINT (c))
8792 if (c == '\\' || c == '\"')
8794 putc ('\\', stream);
8795 len_so_far++;
8797 putc (c, stream);
8798 len_so_far++;
8800 else
8802 fprintf (stream, "\\%03o", c);
8803 len_so_far += 4;
8807 fputs ("\"\n", stream);
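/* Example: the bytes 'a', '"', '\n' are emitted as

	.ascii	"a\"\012"

   with the quote escaped and the unprintable newline written as a
   three-digit octal escape.  Longer strings are split so that no
   single .ascii directive exceeds MAX_ASCII_LEN characters.  */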
8810 /* Compute the register save mask for registers 0 through 12
8811 inclusive. This code is used by arm_compute_save_reg_mask. */
8813 static unsigned long
8814 arm_compute_save_reg0_reg12_mask (void)
8816 unsigned long func_type = arm_current_func_type ();
8817 unsigned long save_reg_mask = 0;
8818 unsigned int reg;
8820 if (IS_INTERRUPT (func_type))
8822 unsigned int max_reg;
8823 /* Interrupt functions must not corrupt any registers,
8824 even call clobbered ones. If this is a leaf function
8825 we can just examine the registers used by the RTL, but
8826 otherwise we have to assume that whatever function is
8827 called might clobber anything, and so we have to save
8828 all the call-clobbered registers as well. */
8829 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8830 /* FIQ handlers have registers r8 - r12 banked, so
8831 we only need to check r0 - r7. Normal ISRs only
8832 bank r14 and r15, so we must check up to r12.
8833 r13 is the stack pointer which is always preserved,
8834 so we do not need to consider it here. */
8835 max_reg = 7;
8836 else
8837 max_reg = 12;
8839 for (reg = 0; reg <= max_reg; reg++)
8840 if (regs_ever_live[reg]
8841 || (! current_function_is_leaf && call_used_regs [reg]))
8842 save_reg_mask |= (1 << reg);
8844 /* Also save the pic base register if necessary. */
8845 if (flag_pic
8846 && !TARGET_SINGLE_PIC_BASE
8847 && current_function_uses_pic_offset_table)
8848 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8850 else
8852 /* In the normal case we only need to save those registers
8853 which are call saved and which are used by this function. */
8854 for (reg = 0; reg <= 10; reg++)
8855 if (regs_ever_live[reg] && ! call_used_regs [reg])
8856 save_reg_mask |= (1 << reg);
8858 /* Handle the frame pointer as a special case. */
8859 if (! TARGET_APCS_FRAME
8860 && ! frame_pointer_needed
8861 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8862 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8863 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8865 /* If we aren't loading the PIC register,
8866 don't stack it even though it may be live. */
8867 if (flag_pic
8868 && !TARGET_SINGLE_PIC_BASE
8869 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8870 || current_function_uses_pic_offset_table))
8871 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8874 /* Save registers so the exception handler can modify them. */
8875 if (current_function_calls_eh_return)
8877 unsigned int i;
8879 for (i = 0; ; i++)
8881 reg = EH_RETURN_DATA_REGNO (i);
8882 if (reg == INVALID_REGNUM)
8883 break;
8884 save_reg_mask |= 1 << reg;
8888 return save_reg_mask;
8891 /* Compute a bit mask of which registers need to be
8892 saved on the stack for the current function. */
8894 static unsigned long
8895 arm_compute_save_reg_mask (void)
8897 unsigned int save_reg_mask = 0;
8898 unsigned long func_type = arm_current_func_type ();
8900 if (IS_NAKED (func_type))
8901 /* This should never really happen. */
8902 return 0;
8904 /* If we are creating a stack frame, then we must save the frame pointer,
8905 IP (which will hold the old stack pointer), LR and the PC. */
8906 if (frame_pointer_needed)
8907 save_reg_mask |=
8908 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8909 | (1 << IP_REGNUM)
8910 | (1 << LR_REGNUM)
8911 | (1 << PC_REGNUM);
8913 /* Volatile functions do not return, so there
8914 is no need to save any other registers. */
8915 if (IS_VOLATILE (func_type))
8916 return save_reg_mask;
8918 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8920 /* Decide if we need to save the link register.
8921 Interrupt routines have their own banked link register,
8922 so they never need to save it.
8923 Otherwise if we do not use the link register we do not need to save
8924 it. If we are pushing other registers onto the stack however, we
8925 can save an instruction in the epilogue by pushing the link register
8926 now and then popping it back into the PC. This incurs extra memory
8927 accesses though, so we only do it when optimizing for size, and only
8928 if we know that we will not need a fancy return sequence. */
8929 if (regs_ever_live [LR_REGNUM]
8930 || (save_reg_mask
8931 && optimize_size
8932 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8933 && !current_function_calls_eh_return))
8934 save_reg_mask |= 1 << LR_REGNUM;
8936 if (cfun->machine->lr_save_eliminated)
8937 save_reg_mask &= ~ (1 << LR_REGNUM);
8939 if (TARGET_REALLY_IWMMXT
8940 && ((bit_count (save_reg_mask)
8941 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8943 unsigned int reg;
8945 /* The total number of registers that are going to be pushed
8946 onto the stack is odd. We need to ensure that the stack
8947 is 64-bit aligned before we start to save iWMMXt registers,
8948 and also before we start to create locals. (A local variable
8949 might be a double or long long which we will load/store using
8950 an iWMMXt instruction). Therefore we need to push another
8951 ARM register, so that the stack will be 64-bit aligned. We
8952 try to avoid using the arg registers (r0 - r3) as they might be
8953 used to pass values in a tail call. */
8954 for (reg = 4; reg <= 12; reg++)
8955 if ((save_reg_mask & (1 << reg)) == 0)
8956 break;
8958 if (reg <= 12)
8959 save_reg_mask |= (1 << reg);
8960 else
8962 cfun->machine->sibcall_blocked = 1;
8963 save_reg_mask |= (1 << 3);
8967 return save_reg_mask;
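/* Example of the iWMMXt padding rule (hypothetical register usage):
   pushing only r4, r5 and r6 would leave the stack 4 mod 8, so r7, the
   first free non-argument register, is added to the mask purely as
   alignment padding; if r4-r12 were all taken, sibcalls would be
   blocked and r3 pushed instead.  */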
8971 /* Compute a bit mask of which registers need to be
8972 saved on the stack for the current function. */
8973 static unsigned long
8974 thumb_compute_save_reg_mask (void)
8976 unsigned long mask;
8977 unsigned reg;
8979 mask = 0;
8980 for (reg = 0; reg < 12; reg ++)
8981 if (regs_ever_live[reg] && !call_used_regs[reg])
8982 mask |= 1 << reg;
8984 if (flag_pic
8985 && !TARGET_SINGLE_PIC_BASE
8986 && current_function_uses_pic_offset_table)
8987 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8989 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8990 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8991 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8993 /* LR will also be pushed if any lo regs are pushed. */
8994 if (mask & 0xff || thumb_force_lr_save ())
8995 mask |= (1 << LR_REGNUM);
8997 /* Make sure we have a low work register if we need one.
8998 We will need one if we are going to push a high register,
8999 but we are not currently intending to push a low register. */
9000 if ((mask & 0xff) == 0
9001 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9003 /* Use thumb_find_work_register to choose which register
9004 we will use. If the register is live then we will
9005 have to push it. Use LAST_LO_REGNUM as our fallback
9006 choice for the register to select. */
9007 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9009 if (! call_used_regs[reg])
9010 mask |= 1 << reg;
9013 return mask;
9017 /* Return the number of bytes required to save VFP registers. */
9018 static int
9019 arm_get_vfp_saved_size (void)
9021 unsigned int regno;
9022 int count;
9023 int saved;
9025 saved = 0;
9026 /* Space for saved VFP registers. */
9027 if (TARGET_HARD_FLOAT && TARGET_VFP)
9029 count = 0;
9030 for (regno = FIRST_VFP_REGNUM;
9031 regno < LAST_VFP_REGNUM;
9032 regno += 2)
9034 if ((!regs_ever_live[regno] || call_used_regs[regno])
9035 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9037 if (count > 0)
9039 /* Workaround ARM10 VFPr1 bug. */
9040 if (count == 2 && !arm_arch6)
9041 count++;
9042 saved += count * 8 + 4;
9044 count = 0;
9046 else
9047 count++;
9049 if (count > 0)
9051 if (count == 2 && !arm_arch6)
9052 count++;
9053 saved += count * 8 + 4;
9056 return saved;
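/* Worked example (assuming !arm_arch6 and that d8, d9 and d11 are the
   only live call-saved VFP registers): there are two runs, so two
   FSTMX blocks.  The first run of two registers is widened to three
   for the VFPr1 erratum (3 * 8 + 4 = 28 bytes) and the second
   contributes 1 * 8 + 4 = 12, giving 40 bytes in total.  */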
9060 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9061 everything bar the final return instruction. */
9062 const char *
9063 output_return_instruction (rtx operand, int really_return, int reverse)
9065 char conditional[10];
9066 char instr[100];
9067 unsigned reg;
9068 unsigned long live_regs_mask;
9069 unsigned long func_type;
9070 arm_stack_offsets *offsets;
9072 func_type = arm_current_func_type ();
9074 if (IS_NAKED (func_type))
9075 return "";
9077 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9079 /* If this function was declared non-returning, and we have
9080 found a tail call, then we have to trust that the called
9081 function won't return. */
9082 if (really_return)
9084 rtx ops[2];
9086 /* Otherwise, trap an attempted return by aborting. */
9087 ops[0] = operand;
9088 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9089 : "abort");
9090 assemble_external_libcall (ops[1]);
9091 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9094 return "";
9097 gcc_assert (!current_function_calls_alloca || really_return);
9099 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9101 return_used_this_function = 1;
9103 live_regs_mask = arm_compute_save_reg_mask ();
9105 if (live_regs_mask)
9107 const char * return_reg;
9109 /* If we do not have any special requirements for function exit
9110 (e.g. interworking, or ISR) then we can load the return address
9111 directly into the PC. Otherwise we must load it into LR. */
9112 if (really_return
9113 && ! TARGET_INTERWORK)
9114 return_reg = reg_names[PC_REGNUM];
9115 else
9116 return_reg = reg_names[LR_REGNUM];
9118 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9120 /* There are three possible reasons for the IP register
9121 being saved. 1) a stack frame was created, in which case
9122 IP contains the old stack pointer, or 2) an ISR routine
9123 corrupted it, or 3) it was saved to align the stack on
9124 iWMMXt. In case 1, restore IP into SP, otherwise just
9125 restore IP. */
9126 if (frame_pointer_needed)
9128 live_regs_mask &= ~ (1 << IP_REGNUM);
9129 live_regs_mask |= (1 << SP_REGNUM);
9131 else
9132 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9135 /* On some ARM architectures it is faster to use LDR rather than
9136 LDM to load a single register. On other architectures, the
9137 cost is the same. In 26 bit mode, or for exception handlers,
9138 we have to use LDM to load the PC so that the CPSR is also
9139 restored. */
9140 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9141 if (live_regs_mask == (1U << reg))
9142 break;
9144 if (reg <= LAST_ARM_REGNUM
9145 && (reg != LR_REGNUM
9146 || ! really_return
9147 || ! IS_INTERRUPT (func_type)))
9149 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9150 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9152 else
9154 char *p;
9155 int first = 1;
9157 /* Generate the load multiple instruction to restore the
9158 registers. Note we can get here, even if
9159 frame_pointer_needed is true, but only if sp already
9160 points to the base of the saved core registers. */
9161 if (live_regs_mask & (1 << SP_REGNUM))
9163 unsigned HOST_WIDE_INT stack_adjust;
9165 offsets = arm_get_frame_offsets ();
9166 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9167 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9169 if (stack_adjust && arm_arch5)
9170 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9171 else
9173 /* If we can't use ldmib (SA110 bug),
9174 then try to pop r3 instead. */
9175 if (stack_adjust)
9176 live_regs_mask |= 1 << 3;
9177 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9180 else
9181 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9183 p = instr + strlen (instr);
9185 for (reg = 0; reg <= SP_REGNUM; reg++)
9186 if (live_regs_mask & (1 << reg))
9188 int l = strlen (reg_names[reg]);
9190 if (first)
9191 first = 0;
9192 else
9194 memcpy (p, ", ", 2);
9195 p += 2;
9198 memcpy (p, "%|", 2);
9199 memcpy (p + 2, reg_names[reg], l);
9200 p += l + 2;
9203 if (live_regs_mask & (1 << LR_REGNUM))
9205 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9206 /* If returning from an interrupt, restore the CPSR. */
9207 if (IS_INTERRUPT (func_type))
9208 strcat (p, "^");
9210 else
9211 strcpy (p, "}");
9214 output_asm_insn (instr, & operand);
9216 /* See if we need to generate an extra instruction to
9217 perform the actual function return. */
9218 if (really_return
9219 && func_type != ARM_FT_INTERWORKED
9220 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9222 /* The return has already been handled
9223 by loading the LR into the PC. */
9224 really_return = 0;
9228 if (really_return)
9230 switch ((int) ARM_FUNC_TYPE (func_type))
9232 case ARM_FT_ISR:
9233 case ARM_FT_FIQ:
9234 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9235 break;
9237 case ARM_FT_INTERWORKED:
9238 sprintf (instr, "bx%s\t%%|lr", conditional);
9239 break;
9241 case ARM_FT_EXCEPTION:
9242 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9243 break;
9245 default:
9246 /* Use bx if it's available. */
9247 if (arm_arch5 || arm_arch4t)
9248 sprintf (instr, "bx%s\t%%|lr", conditional);
9249 else
9250 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9251 break;
9254 output_asm_insn (instr, & operand);
9257 return "";
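/* Example (hypothetical register usage): for a normal function that
   pushed {r4, fp, lr} and needs no interworking return, this emits

	ldmfd	sp!, {r4, fp, pc}

   popping the saved LR straight into the PC so that no separate
   return instruction is required.  */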
9260 /* Write the function name into the code section, directly preceding
9261 the function prologue.
9263 Code will be output similar to this:
9264 t0
9265 .ascii "arm_poke_function_name", 0
9266 .align
9267 t1
9268 .word 0xff000000 + (t1 - t0)
9269 arm_poke_function_name
9270 mov ip, sp
9271 stmfd sp!, {fp, ip, lr, pc}
9272 sub fp, ip, #4
9274 When performing a stack backtrace, code can inspect the value
9275 of 'pc' stored at 'fp' + 0. If the trace function then looks
9276 at location pc - 12 and the top 8 bits are set, then we know
9277 that there is a function name embedded immediately preceding this
9278 location, whose length is (pc[-3] & 0x00ffffff).
9280 We assume that pc is declared as a pointer to an unsigned long.
9282 It is of no benefit to output the function name if we are assembling
9283 a leaf function. These function types will not contain a stack
9284 backtrace structure, therefore it is not possible to determine the
9285 function name. */
9286 void
9287 arm_poke_function_name (FILE *stream, const char *name)
9289 unsigned long alignlength;
9290 unsigned long length;
9291 rtx x;
9293 length = strlen (name) + 1;
9294 alignlength = ROUND_UP_WORD (length);
9296 ASM_OUTPUT_ASCII (stream, name, length);
9297 ASM_OUTPUT_ALIGN (stream, 2);
9298 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9299 assemble_aligned_integer (UNITS_PER_WORD, x);
9302 /* Place some comments into the assembler stream
9303 describing the current function. */
9304 static void
9305 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9307 unsigned long func_type;
9309 if (!TARGET_ARM)
9311 thumb_output_function_prologue (f, frame_size);
9312 return;
9315 /* Sanity check. */
9316 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9318 func_type = arm_current_func_type ();
9320 switch ((int) ARM_FUNC_TYPE (func_type))
9322 default:
9323 case ARM_FT_NORMAL:
9324 break;
9325 case ARM_FT_INTERWORKED:
9326 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9327 break;
9328 case ARM_FT_ISR:
9329 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9330 break;
9331 case ARM_FT_FIQ:
9332 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9333 break;
9334 case ARM_FT_EXCEPTION:
9335 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9336 break;
9339 if (IS_NAKED (func_type))
9340 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9342 if (IS_VOLATILE (func_type))
9343 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9345 if (IS_NESTED (func_type))
9346 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9348 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9349 current_function_args_size,
9350 current_function_pretend_args_size, frame_size);
9352 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9353 frame_pointer_needed,
9354 cfun->machine->uses_anonymous_args);
9356 if (cfun->machine->lr_save_eliminated)
9357 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9359 if (current_function_calls_eh_return)
9360 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9362 #ifdef AOF_ASSEMBLER
9363 if (flag_pic)
9364 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9365 #endif
9367 return_used_this_function = 0;
9370 const char *
9371 arm_output_epilogue (rtx sibling)
9373 int reg;
9374 unsigned long saved_regs_mask;
9375 unsigned long func_type;
9376 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9377 frame that is $fp + 4 for a non-variadic function. */
9378 int floats_offset = 0;
9379 rtx operands[3];
9380 FILE * f = asm_out_file;
9381 unsigned int lrm_count = 0;
9382 int really_return = (sibling == NULL);
9383 int start_reg;
9384 arm_stack_offsets *offsets;
9386 /* If we have already generated the return instruction
9387 then it is futile to generate anything else. */
9388 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9389 return "";
9391 func_type = arm_current_func_type ();
9393 if (IS_NAKED (func_type))
9394 /* Naked functions don't have epilogues. */
9395 return "";
9397 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9399 rtx op;
9401 /* A volatile function should never return. Call abort. */
9402 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9403 assemble_external_libcall (op);
9404 output_asm_insn ("bl\t%a0", &op);
9406 return "";
9409 /* If we are throwing an exception, then we really must be doing a
9410 return, so we can't tail-call. */
9411 gcc_assert (!current_function_calls_eh_return || really_return);
9413 offsets = arm_get_frame_offsets ();
9414 saved_regs_mask = arm_compute_save_reg_mask ();
9416 if (TARGET_IWMMXT)
9417 lrm_count = bit_count (saved_regs_mask);
9419 floats_offset = offsets->saved_args;
9420 /* Compute how far away the floats will be. */
9421 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9422 if (saved_regs_mask & (1 << reg))
9423 floats_offset += 4;
9425 if (frame_pointer_needed)
9427 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9428 int vfp_offset = offsets->frame;
9430 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9432 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9433 if (regs_ever_live[reg] && !call_used_regs[reg])
9435 floats_offset += 12;
9436 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9437 reg, FP_REGNUM, floats_offset - vfp_offset);
9440 else
9442 start_reg = LAST_FPA_REGNUM;
9444 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9446 if (regs_ever_live[reg] && !call_used_regs[reg])
9448 floats_offset += 12;
9450 /* We can't unstack more than four registers at once. */
9451 if (start_reg - reg == 3)
9453 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9454 reg, FP_REGNUM, floats_offset - vfp_offset);
9455 start_reg = reg - 1;
9458 else
9460 if (reg != start_reg)
9461 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9462 reg + 1, start_reg - reg,
9463 FP_REGNUM, floats_offset - vfp_offset);
9464 start_reg = reg - 1;
9468 /* Just in case the last register checked also needs unstacking. */
9469 if (reg != start_reg)
9470 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9471 reg + 1, start_reg - reg,
9472 FP_REGNUM, floats_offset - vfp_offset);
9475 if (TARGET_HARD_FLOAT && TARGET_VFP)
9477 int saved_size;
9479 /* The fldmx insn does not have base+offset addressing modes,
9480 so we use IP to hold the address. */
9481 saved_size = arm_get_vfp_saved_size ();
9483 if (saved_size > 0)
9485 floats_offset += saved_size;
9486 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9487 FP_REGNUM, floats_offset - vfp_offset);
9489 start_reg = FIRST_VFP_REGNUM;
9490 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9492 if ((!regs_ever_live[reg] || call_used_regs[reg])
9493 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9495 if (start_reg != reg)
9496 arm_output_fldmx (f, IP_REGNUM,
9497 (start_reg - FIRST_VFP_REGNUM) / 2,
9498 (reg - start_reg) / 2);
9499 start_reg = reg + 2;
9502 if (start_reg != reg)
9503 arm_output_fldmx (f, IP_REGNUM,
9504 (start_reg - FIRST_VFP_REGNUM) / 2,
9505 (reg - start_reg) / 2);
9508 if (TARGET_IWMMXT)
9510 /* The frame pointer is guaranteed to be non-double-word aligned.
9511 This is because it is set to (old_stack_pointer - 4) and the
9512 old_stack_pointer was double word aligned. Thus the offset to
9513 the iWMMXt registers to be loaded must also be non-double-word
9514 sized, so that the resultant address *is* double-word aligned.
9515 We can ignore floats_offset since that was already included in
9516 the live_regs_mask. */
9517 lrm_count += (lrm_count % 2 ? 2 : 1);
9519 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9520 if (regs_ever_live[reg] && !call_used_regs[reg])
9522 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9523 reg, FP_REGNUM, lrm_count * 4);
9524 lrm_count += 2;
9528 /* saved_regs_mask should contain the IP, which at the time of stack
9529 frame generation actually contains the old stack pointer. So a
9530 quick way to unwind the stack is just pop the IP register directly
9531 into the stack pointer. */
9532 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9533 saved_regs_mask &= ~ (1 << IP_REGNUM);
9534 saved_regs_mask |= (1 << SP_REGNUM);
9536 /* There are two registers left in saved_regs_mask - LR and PC. We
9537 only need to restore the LR register (the return address), but to
9538 save time we can load it directly into the PC, unless we need a
9539 special function exit sequence, or we are not really returning. */
9540 if (really_return
9541 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9542 && !current_function_calls_eh_return)
9543 /* Delete the LR from the register mask, so that the LR on
9544 the stack is loaded into the PC in the register mask. */
9545 saved_regs_mask &= ~ (1 << LR_REGNUM);
9546 else
9547 saved_regs_mask &= ~ (1 << PC_REGNUM);
9549 /* We must use SP as the base register, because SP is one of the
9550 registers being restored. If an interrupt or page fault
9551 happens in the ldm instruction, the SP might or might not
9552 have been restored. That would be bad, as then SP will no
9553 longer indicate the safe area of stack, and we can get stack
9554 corruption. Using SP as the base register means that it will
9555 be reset correctly to the original value, should an interrupt
9556 occur. If the stack pointer already points at the right
9557 place, then omit the subtraction. */
9558 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9559 || current_function_calls_alloca)
9560 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9561 4 * bit_count (saved_regs_mask));
9562 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9564 if (IS_INTERRUPT (func_type))
9565 /* Interrupt handlers will have pushed the
9566 IP onto the stack, so restore it now. */
9567 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9569 else
9571 /* Restore stack pointer if necessary. */
9572 if (offsets->outgoing_args != offsets->saved_regs)
9574 operands[0] = operands[1] = stack_pointer_rtx;
9575 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9576 output_add_immediate (operands);
9579 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9581 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9582 if (regs_ever_live[reg] && !call_used_regs[reg])
9583 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9584 reg, SP_REGNUM);
9586 else
9588 start_reg = FIRST_FPA_REGNUM;
9590 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9592 if (regs_ever_live[reg] && !call_used_regs[reg])
9594 if (reg - start_reg == 3)
9596 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9597 start_reg, SP_REGNUM);
9598 start_reg = reg + 1;
9601 else
9603 if (reg != start_reg)
9604 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9605 start_reg, reg - start_reg,
9606 SP_REGNUM);
9608 start_reg = reg + 1;
9612 /* Just in case the last register checked also needs unstacking. */
9613 if (reg != start_reg)
9614 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9615 start_reg, reg - start_reg, SP_REGNUM);
9618 if (TARGET_HARD_FLOAT && TARGET_VFP)
9620 start_reg = FIRST_VFP_REGNUM;
9621 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9623 if ((!regs_ever_live[reg] || call_used_regs[reg])
9624 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9626 if (start_reg != reg)
9627 arm_output_fldmx (f, SP_REGNUM,
9628 (start_reg - FIRST_VFP_REGNUM) / 2,
9629 (reg - start_reg) / 2);
9630 start_reg = reg + 2;
9633 if (start_reg != reg)
9634 arm_output_fldmx (f, SP_REGNUM,
9635 (start_reg - FIRST_VFP_REGNUM) / 2,
9636 (reg - start_reg) / 2);
9638 if (TARGET_IWMMXT)
9639 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9640 if (regs_ever_live[reg] && !call_used_regs[reg])
9641 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9643 /* If we can, restore the LR into the PC. */
9644 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9645 && really_return
9646 && current_function_pretend_args_size == 0
9647 && saved_regs_mask & (1 << LR_REGNUM)
9648 && !current_function_calls_eh_return)
9650 saved_regs_mask &= ~ (1 << LR_REGNUM);
9651 saved_regs_mask |= (1 << PC_REGNUM);
9654 /* Load the registers off the stack. If we only have one register
9655 to load use the LDR instruction - it is faster. */
9656 if (saved_regs_mask == (1 << LR_REGNUM))
9658 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9660 else if (saved_regs_mask)
9662 if (saved_regs_mask & (1 << SP_REGNUM))
9663 /* Note - write back to the stack register is not enabled
9664 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9665 in the list of registers and if we add writeback the
9666 instruction becomes UNPREDICTABLE. */
9667 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9668 else
9669 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9672 if (current_function_pretend_args_size)
9674 /* Unwind the pre-pushed regs. */
9675 operands[0] = operands[1] = stack_pointer_rtx;
9676 operands[2] = GEN_INT (current_function_pretend_args_size);
9677 output_add_immediate (operands);
9681 /* We may have already restored PC directly from the stack. */
9682 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9683 return "";
9685 /* Stack adjustment for exception handler. */
9686 if (current_function_calls_eh_return)
9687 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9688 ARM_EH_STACKADJ_REGNUM);
9690 /* Generate the return instruction. */
9691 switch ((int) ARM_FUNC_TYPE (func_type))
9693 case ARM_FT_ISR:
9694 case ARM_FT_FIQ:
9695 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9696 break;
9698 case ARM_FT_EXCEPTION:
9699 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9700 break;
9702 case ARM_FT_INTERWORKED:
9703 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9704 break;
9706 default:
9707 if (arm_arch5 || arm_arch4t)
9708 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9709 else
9710 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9711 break;
9714 return "";
9717 static void
9718 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9719 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9721 arm_stack_offsets *offsets;
9723 if (TARGET_THUMB)
9725 int regno;
9727 /* Emit any call-via-reg trampolines that are needed for v4t support
9728 of call_reg and call_value_reg type insns. */
9729 for (regno = 0; regno < LR_REGNUM; regno++)
9731 rtx label = cfun->machine->call_via[regno];
9733 if (label != NULL)
9735 function_section (current_function_decl);
9736 targetm.asm_out.internal_label (asm_out_file, "L",
9737 CODE_LABEL_NUMBER (label));
9738 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9742 /* ??? Probably not safe to set this here, since it assumes that a
9743 function will be emitted as assembly immediately after we generate
9744 RTL for it. This does not happen for inline functions. */
9745 return_used_this_function = 0;
9747 else
9749 /* We need to take into account any stack-frame rounding. */
9750 offsets = arm_get_frame_offsets ();
9752 gcc_assert (!use_return_insn (FALSE, NULL)
9753 || !return_used_this_function
9754 || offsets->saved_regs == offsets->outgoing_args
9755 || frame_pointer_needed);
9757 /* Reset the ARM-specific per-function variables. */
9758 after_arm_reorg = 0;
9762 /* Generate and emit an insn that we will recognize as a push_multi.
9763 Unfortunately, since this insn does not reflect very well the actual
9764 semantics of the operation, we need to annotate the insn for the benefit
9765 of DWARF2 frame unwind information. */
9766 static rtx
9767 emit_multi_reg_push (unsigned long mask)
9769 int num_regs = 0;
9770 int num_dwarf_regs;
9771 int i, j;
9772 rtx par;
9773 rtx dwarf;
9774 int dwarf_par_index;
9775 rtx tmp, reg;
9777 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9778 if (mask & (1 << i))
9779 num_regs++;
9781 gcc_assert (num_regs && num_regs <= 16);
9783 /* We don't record the PC in the dwarf frame information. */
9784 num_dwarf_regs = num_regs;
9785 if (mask & (1 << PC_REGNUM))
9786 num_dwarf_regs--;
9788 /* For the body of the insn we are going to generate an UNSPEC in
9789 parallel with several USEs. This allows the insn to be recognized
9790 by the push_multi pattern in the arm.md file. The insn looks
9791 something like this:
9793 (parallel [
9794 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9795 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9796 (use (reg:SI 11 fp))
9797 (use (reg:SI 12 ip))
9798 (use (reg:SI 14 lr))
9799 (use (reg:SI 15 pc))
9802 For the frame note however, we try to be more explicit and actually
9803 show each register being stored into the stack frame, plus a (single)
9804 decrement of the stack pointer. We do it this way in order to be
9805 friendly to the stack unwinding code, which only wants to see a single
9806 stack decrement per instruction. The RTL we generate for the note looks
9807 something like this:
9809 (sequence [
9810 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9811 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9812 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9813 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9814 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9817 This sequence is used both by the code to support stack unwinding for
9818 exceptions handlers and the code to generate dwarf2 frame debugging. */
9820 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9821 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9822 dwarf_par_index = 1;
9824 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9826 if (mask & (1 << i))
9828 reg = gen_rtx_REG (SImode, i);
9830 XVECEXP (par, 0, 0)
9831 = gen_rtx_SET (VOIDmode,
9832 gen_frame_mem (BLKmode,
9833 gen_rtx_PRE_DEC (BLKmode,
9834 stack_pointer_rtx)),
9835 gen_rtx_UNSPEC (BLKmode,
9836 gen_rtvec (1, reg),
9837 UNSPEC_PUSH_MULT));
9839 if (i != PC_REGNUM)
9841 tmp = gen_rtx_SET (VOIDmode,
9842 gen_frame_mem (SImode, stack_pointer_rtx),
9843 reg);
9844 RTX_FRAME_RELATED_P (tmp) = 1;
9845 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9846 dwarf_par_index++;
9849 break;
9853 for (j = 1, i++; j < num_regs; i++)
9855 if (mask & (1 << i))
9857 reg = gen_rtx_REG (SImode, i);
9859 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9861 if (i != PC_REGNUM)
9864 tmp = gen_rtx_SET (VOIDmode,
9865 gen_frame_mem (SImode,
9866 plus_constant (stack_pointer_rtx,
9867 4 * j)),
9868 reg);
9869 RTX_FRAME_RELATED_P (tmp) = 1;
9870 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9873 j++;
9877 par = emit_insn (par);
9879 tmp = gen_rtx_SET (SImode,
9880 stack_pointer_rtx,
9881 gen_rtx_PLUS (SImode,
9882 stack_pointer_rtx,
9883 GEN_INT (-4 * num_regs)));
9884 RTX_FRAME_RELATED_P (tmp) = 1;
9885 XVECEXP (dwarf, 0, 0) = tmp;
9887 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9888 REG_NOTES (par));
9889 return par;
9892 /* Calculate the size of the return value that is passed in registers. */
9893 static int
9894 arm_size_return_regs (void)
9896 enum machine_mode mode;
9898 if (current_function_return_rtx != 0)
9899 mode = GET_MODE (current_function_return_rtx);
9900 else
9901 mode = DECL_MODE (DECL_RESULT (current_function_decl));
9903 return GET_MODE_SIZE (mode);
9906 static rtx
9907 emit_sfm (int base_reg, int count)
9909 rtx par;
9910 rtx dwarf;
9911 rtx tmp, reg;
9912 int i;
9914 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9915 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9917 reg = gen_rtx_REG (XFmode, base_reg++);
9919 XVECEXP (par, 0, 0)
9920 = gen_rtx_SET (VOIDmode,
9921 gen_frame_mem (BLKmode,
9922 gen_rtx_PRE_DEC (BLKmode,
9923 stack_pointer_rtx)),
9924 gen_rtx_UNSPEC (BLKmode,
9925 gen_rtvec (1, reg),
9926 UNSPEC_PUSH_MULT));
9927 tmp = gen_rtx_SET (VOIDmode,
9928 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
9929 RTX_FRAME_RELATED_P (tmp) = 1;
9930 XVECEXP (dwarf, 0, 1) = tmp;
9932 for (i = 1; i < count; i++)
9934 reg = gen_rtx_REG (XFmode, base_reg++);
9935 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9937 tmp = gen_rtx_SET (VOIDmode,
9938 gen_frame_mem (XFmode,
9939 plus_constant (stack_pointer_rtx,
9940 i * 12)),
9941 reg);
9942 RTX_FRAME_RELATED_P (tmp) = 1;
9943 XVECEXP (dwarf, 0, i + 1) = tmp;
9946 tmp = gen_rtx_SET (VOIDmode,
9947 stack_pointer_rtx,
9948 gen_rtx_PLUS (SImode,
9949 stack_pointer_rtx,
9950 GEN_INT (-12 * count)));
9951 RTX_FRAME_RELATED_P (tmp) = 1;
9952 XVECEXP (dwarf, 0, 0) = tmp;
9954 par = emit_insn (par);
9955 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9956 REG_NOTES (par));
9957 return par;
9961 /* Return true if the current function needs to save/restore LR. */
9963 static bool
9964 thumb_force_lr_save (void)
9966 return !cfun->machine->lr_save_eliminated
9967 && (!leaf_function_p ()
9968 || thumb_far_jump_used_p ()
9969 || regs_ever_live [LR_REGNUM]);
9973 /* Compute the distance from register FROM to register TO.
9974 These can be the arg pointer (26), the soft frame pointer (25),
9975 the stack pointer (13) or the hard frame pointer (11).
9976 In thumb mode r7 is used as the soft frame pointer, if needed.
9977 Typical stack layout looks like this:
9979 old stack pointer -> | |
9980 ----
9981 | | \
9982 | | saved arguments for
9983 | | vararg functions
9984 | | /
9985 ----
9986 hard FP & arg pointer -> | | \
9987 | | stack
9988 | | frame
9989 | | /
9990 ----
9991 | | \
9992 | | call saved
9993 | | registers
9994 soft frame pointer -> | | /
9995 ----
9996 | | \
9997 | | local
9998 | | variables
9999 locals base pointer -> | | /
10000 ----
10001 | | \
10002 | | outgoing
10003 | | arguments
10004 current stack pointer -> | | /
10005 ----
10007 For a given function some or all of these stack components
10008 may not be needed, giving rise to the possibility of
10009 eliminating some of the registers.
10011 The values returned by this function must reflect the behavior
10012 of arm_expand_prologue() and arm_compute_save_reg_mask().
10014 The sign of the number returned reflects the direction of stack
10015 growth, so the values are positive for all eliminations except
10016 from the soft frame pointer to the hard frame pointer.
10018 SFP may point just inside the local variables block to ensure correct
10019 alignment. */
10022 /* Calculate stack offsets. These are used to calculate register elimination
10023 offsets and in prologue/epilogue code. */
10025 static arm_stack_offsets *
10026 arm_get_frame_offsets (void)
10028 struct arm_stack_offsets *offsets;
10029 unsigned long func_type;
10030 int leaf;
10031 int saved;
10032 HOST_WIDE_INT frame_size;
10034 offsets = &cfun->machine->stack_offsets;
10036 /* We need to know if we are a leaf function. Unfortunately, it
10037 is possible to be called after start_sequence has been called,
10038 which causes get_insns to return the insns for the sequence,
10039 not the function, which will cause leaf_function_p to return
10040 the incorrect result.
10041 To work around this the offsets are cached, since we only need
10042 to know about leaf functions once reload has completed, and the
10043 frame size cannot be changed after that time, so we can safely
10044 use the cached value. */
10046 if (reload_completed)
10047 return offsets;
10049 /* Initially this is the size of the local variables. It will be translated
10050 into an offset once we have determined the size of preceding data. */
10051 frame_size = ROUND_UP_WORD (get_frame_size ());
10053 leaf = leaf_function_p ();
10055 /* Space for variadic functions. */
10056 offsets->saved_args = current_function_pretend_args_size;
10058 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10060 if (TARGET_ARM)
10062 unsigned int regno;
10064 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10066 /* We know that SP will be doubleword aligned on entry, and we must
10067 preserve that condition at any subroutine call. We also require the
10068 soft frame pointer to be doubleword aligned. */
10070 if (TARGET_REALLY_IWMMXT)
10072 /* Check for the call-saved iWMMXt registers. */
10073 for (regno = FIRST_IWMMXT_REGNUM;
10074 regno <= LAST_IWMMXT_REGNUM;
10075 regno++)
10076 if (regs_ever_live [regno] && ! call_used_regs [regno])
10077 saved += 8;
10080 func_type = arm_current_func_type ();
10081 if (! IS_VOLATILE (func_type))
10083 /* Space for saved FPA registers. */
10084 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10085 if (regs_ever_live[regno] && ! call_used_regs[regno])
10086 saved += 12;
10088 /* Space for saved VFP registers. */
10089 if (TARGET_HARD_FLOAT && TARGET_VFP)
10090 saved += arm_get_vfp_saved_size ();
10093 else /* TARGET_THUMB */
10095 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10096 if (TARGET_BACKTRACE)
10097 saved += 16;
10100 /* Saved registers include the stack frame. */
10101 offsets->saved_regs = offsets->saved_args + saved;
10102 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10103 /* A leaf function does not need any stack alignment if it has nothing
10104 on the stack. */
10105 if (leaf && frame_size == 0)
10107 offsets->outgoing_args = offsets->soft_frame;
10108 return offsets;
10111 /* Ensure SFP has the correct alignment. */
10112 if (ARM_DOUBLEWORD_ALIGN
10113 && (offsets->soft_frame & 7))
10114 offsets->soft_frame += 4;
10116 offsets->locals_base = offsets->soft_frame + frame_size;
10117 offsets->outgoing_args = (offsets->locals_base
10118 + current_function_outgoing_args_size);
10120 if (ARM_DOUBLEWORD_ALIGN)
10122 /* Ensure SP remains doubleword aligned. */
10123 if (offsets->outgoing_args & 7)
10124 offsets->outgoing_args += 4;
10125 gcc_assert (!(offsets->outgoing_args & 7));
10128 return offsets;
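/* A standalone sketch of the arithmetic above, using hypothetical
   sizes (8 bytes of pretend args, r4/r5/lr saved, 20 bytes of locals,
   8 bytes of outgoing arguments, ARM_DOUBLEWORD_ALIGN in effect, no
   interworking slot):

   #include <stdio.h>

   int main (void)
   {
     int saved_args = 8;                   // pretend args
     int saved_regs = saved_args + 3 * 4;  // r4, r5, lr
     int soft_frame = saved_regs;
     int locals_base, outgoing_args;

     if (soft_frame & 7)                   // keep SFP doubleword aligned
       soft_frame += 4;
     locals_base = soft_frame + 20;        // frame_size already word-rounded
     outgoing_args = locals_base + 8;      // outgoing argument area
     if (outgoing_args & 7)                // keep SP doubleword aligned
       outgoing_args += 4;
     // prints: 20 24 44 56
     printf ("%d %d %d %d\n", saved_regs, soft_frame,
             locals_base, outgoing_args);
     return 0;
   }
*/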
10132 /* Calculate the relative offsets for the different stack pointers. Positive
10133 offsets are in the direction of stack growth. */
10135 HOST_WIDE_INT
10136 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10138 arm_stack_offsets *offsets;
10140 offsets = arm_get_frame_offsets ();
10142 /* OK, now we have enough information to compute the distances.
10143 There must be an entry in these switch tables for each pair
10144 of registers in ELIMINABLE_REGS, even if some of the entries
10145 seem to be redundant or useless. */
10146 switch (from)
10148 case ARG_POINTER_REGNUM:
10149 switch (to)
10151 case THUMB_HARD_FRAME_POINTER_REGNUM:
10152 return 0;
10154 case FRAME_POINTER_REGNUM:
10155 /* This is the reverse of the soft frame pointer
10156 to hard frame pointer elimination below. */
10157 return offsets->soft_frame - offsets->saved_args;
10159 case ARM_HARD_FRAME_POINTER_REGNUM:
10160 /* If there is no stack frame then the hard
10161 frame pointer and the arg pointer coincide. */
10162 if (offsets->frame == offsets->saved_regs)
10163 return 0;
10164 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10165 return (frame_pointer_needed
10166 && cfun->static_chain_decl != NULL
10167 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10169 case STACK_POINTER_REGNUM:
10170 /* If nothing has been pushed on the stack at all
10171 then this will return -4. This *is* correct! */
10172 return offsets->outgoing_args - (offsets->saved_args + 4);
10174 default:
10175 gcc_unreachable ();
10177 gcc_unreachable ();
10179 case FRAME_POINTER_REGNUM:
10180 switch (to)
10182 case THUMB_HARD_FRAME_POINTER_REGNUM:
10183 return 0;
10185 case ARM_HARD_FRAME_POINTER_REGNUM:
10186 /* The hard frame pointer points to the top entry in the
10187 stack frame. The soft frame pointer to the bottom entry
10188 in the stack frame. If there is no stack frame at all,
10189 then they are identical. */
10191 return offsets->frame - offsets->soft_frame;
10193 case STACK_POINTER_REGNUM:
10194 return offsets->outgoing_args - offsets->soft_frame;
10196 default:
10197 gcc_unreachable ();
10199 gcc_unreachable ();
10201 default:
10202 /* You cannot eliminate from the stack pointer.
10203 In theory you could eliminate from the hard frame
10204 pointer to the stack pointer, but this will never
10205 happen, since if a stack frame is not needed the
10206 hard frame pointer will never be used. */
10207 gcc_unreachable ();
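/* Continuing the hypothetical sizes from the sketch above:
   ARG_POINTER -> STACK_POINTER gives
   outgoing_args - (saved_args + 4) = 56 - (8 + 4) = 44, while
   FRAME_POINTER -> STACK_POINTER gives
   outgoing_args - soft_frame = 56 - 24 = 32.  */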
10212 /* Generate the prologue instructions for entry into an ARM function. */
10213 void
10214 arm_expand_prologue (void)
10216 int reg;
10217 rtx amount;
10218 rtx insn;
10219 rtx ip_rtx;
10220 unsigned long live_regs_mask;
10221 unsigned long func_type;
10222 int fp_offset = 0;
10223 int saved_pretend_args = 0;
10224 int saved_regs = 0;
10225 unsigned HOST_WIDE_INT args_to_push;
10226 arm_stack_offsets *offsets;
10228 func_type = arm_current_func_type ();
10230 /* Naked functions don't have prologues. */
10231 if (IS_NAKED (func_type))
10232 return;
10234 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10235 args_to_push = current_function_pretend_args_size;
10237 /* Compute which registers we will have to save onto the stack. */
10238 live_regs_mask = arm_compute_save_reg_mask ();
10240 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10242 if (frame_pointer_needed)
10244 if (IS_INTERRUPT (func_type))
10246 /* Interrupt functions must not corrupt any registers.
10247 Creating a frame pointer however, corrupts the IP
10248 register, so we must push it first. */
10249 insn = emit_multi_reg_push (1 << IP_REGNUM);
10251 /* Do not set RTX_FRAME_RELATED_P on this insn.
10252 The dwarf stack unwinding code only wants to see one
10253 stack decrement per function, and this is not it. If
10254 this instruction is labeled as being part of the frame
10255 creation sequence then dwarf2out_frame_debug_expr will
10256 die when it encounters the assignment of IP to FP
10257 later on, since the use of SP here establishes SP as
10258 the CFA register and not IP.
10260 Anyway this instruction is not really part of the stack
10261 frame creation although it is part of the prologue. */
10263 else if (IS_NESTED (func_type))
10265 /* The static chain register is the same as the IP register, which is
10266 used as a scratch register during stack frame creation.
10267 To get around this we need to find somewhere to store IP
10268 whilst the frame is being created. We try the following
10269 places in order:
10271 1. The last argument register.
10272 2. A slot on the stack above the frame. (This only
10273 works if the function is not a varargs function).
10274 3. Register r3, after pushing the argument registers
10275 onto the stack.
10277 Note - we only need to tell the dwarf2 backend about the SP
10278 adjustment in the second variant; the static chain register
10279 doesn't need to be unwound, as it doesn't contain a value
10280 inherited from the caller. */
10282 if (regs_ever_live[3] == 0)
10284 insn = gen_rtx_REG (SImode, 3);
10285 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10286 insn = emit_insn (insn);
10288 else if (args_to_push == 0)
10290 rtx dwarf;
10291 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10292 insn = gen_frame_mem (SImode, insn);
10293 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10294 insn = emit_insn (insn);
10296 fp_offset = 4;
10298 /* Just tell the dwarf backend that we adjusted SP. */
10299 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10300 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10301 GEN_INT (-fp_offset)));
10302 RTX_FRAME_RELATED_P (insn) = 1;
10303 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10304 dwarf, REG_NOTES (insn));
10306 else
10308 /* Store the args on the stack. */
10309 if (cfun->machine->uses_anonymous_args)
10310 insn = emit_multi_reg_push
10311 ((0xf0 >> (args_to_push / 4)) & 0xf);
10312 else
10313 insn = emit_insn
10314 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10315 GEN_INT (- args_to_push)));
10317 RTX_FRAME_RELATED_P (insn) = 1;
10319 saved_pretend_args = 1;
10320 fp_offset = args_to_push;
10321 args_to_push = 0;
10323 /* Now reuse r3 to preserve IP. */
10324 insn = gen_rtx_REG (SImode, 3);
10325 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10326 (void) emit_insn (insn);
10330 if (fp_offset)
10332 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10333 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10335 else
10336 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10338 insn = emit_insn (insn);
10339 RTX_FRAME_RELATED_P (insn) = 1;
10342 if (args_to_push)
10344 /* Push the argument registers, or reserve space for them. */
10345 if (cfun->machine->uses_anonymous_args)
10346 insn = emit_multi_reg_push
10347 ((0xf0 >> (args_to_push / 4)) & 0xf);
10348 else
10349 insn = emit_insn
10350 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10351 GEN_INT (- args_to_push)));
10352 RTX_FRAME_RELATED_P (insn) = 1;
10355 /* If this is an interrupt service routine, and the link register
10356 is going to be pushed, and we are not creating a stack frame,
10357 (which would involve an extra push of IP and a pop in the epilogue)
10358 subtracting four from LR now will mean that the function return
10359 can be done with a single instruction. */
10360 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10361 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10362 && ! frame_pointer_needed)
10363 emit_insn (gen_rtx_SET (SImode,
10364 gen_rtx_REG (SImode, LR_REGNUM),
10365 gen_rtx_PLUS (SImode,
10366 gen_rtx_REG (SImode, LR_REGNUM),
10367 GEN_INT (-4))));
10369 if (live_regs_mask)
10371 insn = emit_multi_reg_push (live_regs_mask);
10372 saved_regs += bit_count (live_regs_mask) * 4;
10373 RTX_FRAME_RELATED_P (insn) = 1;
10376 if (TARGET_IWMMXT)
10377 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10378 if (regs_ever_live[reg] && ! call_used_regs [reg])
10380 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10381 insn = gen_frame_mem (V2SImode, insn);
10382 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10383 gen_rtx_REG (V2SImode, reg)));
10384 RTX_FRAME_RELATED_P (insn) = 1;
10385 saved_regs += 8;
10388 if (! IS_VOLATILE (func_type))
10390 int start_reg;
10392 /* Save any floating point call-saved registers used by this
10393 function. */
10394 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10396 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10397 if (regs_ever_live[reg] && !call_used_regs[reg])
10399 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10400 insn = gen_frame_mem (XFmode, insn);
10401 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10402 gen_rtx_REG (XFmode, reg)));
10403 RTX_FRAME_RELATED_P (insn) = 1;
10404 saved_regs += 12;
10407 else
10409 start_reg = LAST_FPA_REGNUM;
10411 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10413 if (regs_ever_live[reg] && !call_used_regs[reg])
10415 if (start_reg - reg == 3)
10417 insn = emit_sfm (reg, 4);
10418 RTX_FRAME_RELATED_P (insn) = 1;
10419 saved_regs += 48;
10420 start_reg = reg - 1;
10423 else
10425 if (start_reg != reg)
10427 insn = emit_sfm (reg + 1, start_reg - reg);
10428 RTX_FRAME_RELATED_P (insn) = 1;
10429 saved_regs += (start_reg - reg) * 12;
10431 start_reg = reg - 1;
10435 if (start_reg != reg)
10437 insn = emit_sfm (reg + 1, start_reg - reg);
10438 saved_regs += (start_reg - reg) * 12;
10439 RTX_FRAME_RELATED_P (insn) = 1;
10442 if (TARGET_HARD_FLOAT && TARGET_VFP)
10444 start_reg = FIRST_VFP_REGNUM;
10446 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10448 if ((!regs_ever_live[reg] || call_used_regs[reg])
10449 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10451 if (start_reg != reg)
10452 saved_regs += vfp_emit_fstmx (start_reg,
10453 (reg - start_reg) / 2);
10454 start_reg = reg + 2;
10457 if (start_reg != reg)
10458 saved_regs += vfp_emit_fstmx (start_reg,
10459 (reg - start_reg) / 2);
10463 if (frame_pointer_needed)
10465 /* Create the new frame pointer. */
10466 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10467 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10468 RTX_FRAME_RELATED_P (insn) = 1;
10470 if (IS_NESTED (func_type))
10472 /* Recover the static chain register. */
10473 if (regs_ever_live [3] == 0
10474 || saved_pretend_args)
10475 insn = gen_rtx_REG (SImode, 3);
10476 else /* if (current_function_pretend_args_size == 0) */
10478 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10479 GEN_INT (4));
10480 insn = gen_frame_mem (SImode, insn);
10483 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10484 /* Add a USE to stop propagate_one_insn() from barfing. */
10485 emit_insn (gen_prologue_use (ip_rtx));
10489 offsets = arm_get_frame_offsets ();
10490 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10492 /* This add can produce multiple insns for a large constant, so we
10493 need to get tricky. */
10494 rtx last = get_last_insn ();
10496 amount = GEN_INT (offsets->saved_args + saved_regs
10497 - offsets->outgoing_args);
10499 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10500 amount));
10501 do
10503 last = last ? NEXT_INSN (last) : get_insns ();
10504 RTX_FRAME_RELATED_P (last) = 1;
10506 while (last != insn);
10508 /* If the frame pointer is needed, emit a special barrier that
10509 will prevent the scheduler from moving stores to the frame
10510 before the stack adjustment. */
10511 if (frame_pointer_needed)
10512 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10513 hard_frame_pointer_rtx));
10517 if (flag_pic)
10518 arm_load_pic_register (0UL);
10520 /* If we are profiling, make sure no instructions are scheduled before
10521 the call to mcount. Similarly if the user has requested no
10522 scheduling in the prolog. */
10523 if (current_function_profile || !TARGET_SCHED_PROLOG)
10524 emit_insn (gen_blockage ());
10526 /* If the link register is being kept alive, with the return address in it,
10527 then make sure that it does not get reused by the ce2 pass. */
10528 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10530 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10531 cfun->machine->lr_save_eliminated = 1;
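/* A standalone check (illustrative only) of the mask expression used
   twice in the prologue above: (0xf0 >> (args_to_push / 4)) & 0xf
   selects the highest N of the argument registers r0-r3, where
   N = args_to_push / 4, which is exactly the set a varargs function
   must push to make its anonymous arguments contiguous on the stack.

   #include <stdio.h>

   int main (void)
   {
     unsigned args;
     for (args = 4; args <= 16; args += 4)
       // 4 -> 0x8 (r3), 8 -> 0xc (r2-r3), 12 -> 0xe, 16 -> 0xf
       printf ("%u bytes -> mask 0x%x\n", args,
               (0xf0 >> (args / 4)) & 0xf);
     return 0;
   }
*/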
10535 /* If CODE is 'd', then X is a condition operand and the instruction
10536 should only be executed if the condition is true.
10537 If CODE is 'D', then X is a condition operand and the instruction
10538 should only be executed if the condition is false: however, if the mode
10539 of the comparison is CCFPEmode, then always execute the instruction -- we
10540 do this because in these circumstances !GE does not necessarily imply LT;
10541 in these cases the instruction pattern will take care to make sure that
10542 an instruction containing %d will follow, thereby undoing the effects of
10543 doing this instruction unconditionally.
10544 If CODE is 'N' then X is a floating point operand that must be negated
10545 before output.
10546 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10547 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10548 void
10549 arm_print_operand (FILE *stream, rtx x, int code)
10551 switch (code)
10553 case '@':
10554 fputs (ASM_COMMENT_START, stream);
10555 return;
10557 case '_':
10558 fputs (user_label_prefix, stream);
10559 return;
10561 case '|':
10562 fputs (REGISTER_PREFIX, stream);
10563 return;
10565 case '?':
10566 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10568 if (TARGET_THUMB)
10570 output_operand_lossage ("predicated Thumb instruction");
10571 break;
10573 if (current_insn_predicate != NULL)
10575 output_operand_lossage
10576 ("predicated instruction in conditional sequence");
10577 break;
10580 fputs (arm_condition_codes[arm_current_cc], stream);
10582 else if (current_insn_predicate)
10584 enum arm_cond_code code;
10586 if (TARGET_THUMB)
10588 output_operand_lossage ("predicated Thumb instruction");
10589 break;
10592 code = get_arm_condition_code (current_insn_predicate);
10593 fputs (arm_condition_codes[code], stream);
10595 return;
10597 case 'N':
10599 REAL_VALUE_TYPE r;
10600 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10601 r = REAL_VALUE_NEGATE (r);
10602 fprintf (stream, "%s", fp_const_from_val (&r));
10604 return;
10606 case 'B':
10607 if (GET_CODE (x) == CONST_INT)
10609 HOST_WIDE_INT val;
10610 val = ARM_SIGN_EXTEND (~INTVAL (x));
10611 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10613 else
10615 putc ('~', stream);
10616 output_addr_const (stream, x);
10618 return;
10620 case 'i':
10621 fprintf (stream, "%s", arithmetic_instr (x, 1));
10622 return;
10624 /* Truncate Cirrus shift counts. */
10625 case 's':
10626 if (GET_CODE (x) == CONST_INT)
10628 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10629 return;
10631 arm_print_operand (stream, x, 0);
10632 return;
10634 case 'I':
10635 fprintf (stream, "%s", arithmetic_instr (x, 0));
10636 return;
10638 case 'S':
10640 HOST_WIDE_INT val;
10641 const char * shift = shift_op (x, &val);
10643 if (shift)
10645 fprintf (stream, ", %s ", shift_op (x, &val));
10646 if (val == -1)
10647 arm_print_operand (stream, XEXP (x, 1), 0);
10648 else
10649 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10652 return;
10654 /* An explanation of the 'Q', 'R' and 'H' register operands:
10656 In a pair of registers containing a DI or DF value the 'Q'
10657 operand returns the register number of the register containing
10658 the least significant part of the value. The 'R' operand returns
10659 the register number of the register containing the most
10660 significant part of the value.
10662 The 'H' operand returns the higher of the two register numbers.
10663 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10664 same as the 'Q' operand, since the most significant part of the
10665 value is held in the lower number register. The reverse is true
10666 on systems where WORDS_BIG_ENDIAN is false.
10668 The purpose of these operands is to distinguish between cases
10669 where the endian-ness of the values is important (for example
10670 when they are added together), and cases where the endian-ness
10671 is irrelevant, but the order of register operations is important.
10672 For example when loading a value from memory into a register
10673 pair, the endian-ness does not matter. Provided that the value
10674 from the lower memory address is put into the lower numbered
10675 register, and the value from the higher address is put into the
10676 higher numbered register, the load will work regardless of whether
10677 the value being loaded is big-wordian or little-wordian. The
10678 order of the two register loads can matter however, if the address
10679 of the memory location is actually held in one of the registers
10680 being overwritten by the load. */
10681 case 'Q':
10682 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10684 output_operand_lossage ("invalid operand for code '%c'", code);
10685 return;
10688 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10689 return;
10691 case 'R':
10692 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10694 output_operand_lossage ("invalid operand for code '%c'", code);
10695 return;
10698 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10699 return;
10701 case 'H':
10702 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10704 output_operand_lossage ("invalid operand for code '%c'", code);
10705 return;
10708 asm_fprintf (stream, "%r", REGNO (x) + 1);
10709 return;
10711 case 'm':
10712 asm_fprintf (stream, "%r",
10713 GET_CODE (XEXP (x, 0)) == REG
10714 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10715 return;
10717 case 'M':
10718 asm_fprintf (stream, "{%r-%r}",
10719 REGNO (x),
10720 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10721 return;
10723 case 'd':
10724 /* CONST_TRUE_RTX means always -- that's the default. */
10725 if (x == const_true_rtx)
10726 return;
10728 if (!COMPARISON_P (x))
10730 output_operand_lossage ("invalid operand for code '%c'", code);
10731 return;
10734 fputs (arm_condition_codes[get_arm_condition_code (x)],
10735 stream);
10736 return;
10738 case 'D':
10739 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10740 want to do that. */
10741 if (x == const_true_rtx)
10743 output_operand_lossage ("instruction never executed");
10744 return;
10746 if (!COMPARISON_P (x))
10748 output_operand_lossage ("invalid operand for code '%c'", code);
10749 return;
10752 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10753 (get_arm_condition_code (x))],
10754 stream);
10755 return;
10757 /* Cirrus registers can be accessed in a variety of ways:
10758 single floating point (f)
10759 double floating point (d)
10760 32bit integer (fx)
10761 64bit integer (dx). */
10762 case 'W': /* Cirrus register in F mode. */
10763 case 'X': /* Cirrus register in D mode. */
10764 case 'Y': /* Cirrus register in FX mode. */
10765 case 'Z': /* Cirrus register in DX mode. */
10766 gcc_assert (GET_CODE (x) == REG
10767 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
10769 fprintf (stream, "mv%s%s",
10770 code == 'W' ? "f"
10771 : code == 'X' ? "d"
10772 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10774 return;
10776 /* Print cirrus register in the mode specified by the register's mode. */
10777 case 'V':
10779 int mode = GET_MODE (x);
10781 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10783 output_operand_lossage ("invalid operand for code '%c'", code);
10784 return;
10787 fprintf (stream, "mv%s%s",
10788 mode == DFmode ? "d"
10789 : mode == SImode ? "fx"
10790 : mode == DImode ? "dx"
10791 : "f", reg_names[REGNO (x)] + 2);
10793 return;
10796 case 'U':
10797 if (GET_CODE (x) != REG
10798 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10799 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10800 /* Bad value for wCG register number. */
10802 output_operand_lossage ("invalid operand for code '%c'", code);
10803 return;
10806 else
10807 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10808 return;
10810 /* Print an iWMMXt control register name. */
10811 case 'w':
10812 if (GET_CODE (x) != CONST_INT
10813 || INTVAL (x) < 0
10814 || INTVAL (x) >= 16)
10815 /* Bad value for wC register number. */
10817 output_operand_lossage ("invalid operand for code '%c'", code);
10818 return;
10821 else
10823 static const char * wc_reg_names [16] =
10825 "wCID", "wCon", "wCSSF", "wCASF",
10826 "wC4", "wC5", "wC6", "wC7",
10827 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10828 "wC12", "wC13", "wC14", "wC15"
10831 fprintf (stream, wc_reg_names [INTVAL (x)]);
10833 return;
10835 /* Print a VFP double precision register name. */
10836 case 'P':
10838 int mode = GET_MODE (x);
10839 int num;
10841 if (mode != DImode && mode != DFmode)
10843 output_operand_lossage ("invalid operand for code '%c'", code);
10844 return;
10847 if (GET_CODE (x) != REG
10848 || !IS_VFP_REGNUM (REGNO (x)))
10850 output_operand_lossage ("invalid operand for code '%c'", code);
10851 return;
10854 num = REGNO(x) - FIRST_VFP_REGNUM;
10855 if (num & 1)
10857 output_operand_lossage ("invalid operand for code '%c'", code);
10858 return;
10861 fprintf (stream, "d%d", num >> 1);
10863 return;
10865 default:
10866 if (x == 0)
10868 output_operand_lossage ("missing operand");
10869 return;
10872 switch (GET_CODE (x))
10874 case REG:
10875 asm_fprintf (stream, "%r", REGNO (x));
10876 break;
10878 case MEM:
10879 output_memory_reference_mode = GET_MODE (x);
10880 output_address (XEXP (x, 0));
10881 break;
10883 case CONST_DOUBLE:
10884 fprintf (stream, "#%s", fp_immediate_constant (x));
10885 break;
10887 default:
10888 gcc_assert (GET_CODE (x) != NEG);
10889 fputc ('#', stream);
10890 output_addr_const (stream, x);
10891 break;
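/* A standalone sketch of what the 'B' case above prints, assuming
   ARM_SIGN_EXTEND keeps the low 32 bits and sign-extends from bit 31
   (its behavior on a 64-bit host):

   #include <stdio.h>

   int main (void)
   {
     long long x = 5;
     long long inv = ~x & 0xffffffffLL;  // low 32 bits of the complement
     if (inv & 0x80000000LL)             // sign-extend from bit 31
       inv |= ~0xffffffffLL;
     printf ("%lld\n", inv);             // prints -6
     return 0;
   }
*/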
10896 #ifndef AOF_ASSEMBLER
10897 /* Target hook for assembling integer objects. The ARM version needs to
10898 handle word-sized values specially. */
10899 static bool
10900 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10902 if (size == UNITS_PER_WORD && aligned_p)
10904 fputs ("\t.word\t", asm_out_file);
10905 output_addr_const (asm_out_file, x);
10907 /* Mark symbols as position independent. We only do this in the
10908 .text segment, not in the .data segment. */
10909 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10910 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10912 if (GET_CODE (x) == SYMBOL_REF
10913 && (CONSTANT_POOL_ADDRESS_P (x)
10914 || SYMBOL_REF_LOCAL_P (x)))
10915 fputs ("(GOTOFF)", asm_out_file);
10916 else if (GET_CODE (x) == LABEL_REF)
10917 fputs ("(GOTOFF)", asm_out_file);
10918 else
10919 fputs ("(GOT)", asm_out_file);
10921 fputc ('\n', asm_out_file);
10922 return true;
10925 if (arm_vector_mode_supported_p (GET_MODE (x)))
10927 int i, units;
10929 gcc_assert (GET_CODE (x) == CONST_VECTOR);
10931 units = CONST_VECTOR_NUNITS (x);
10933 switch (GET_MODE (x))
10935 case V2SImode: size = 4; break;
10936 case V4HImode: size = 2; break;
10937 case V8QImode: size = 1; break;
10938 default:
10939 gcc_unreachable ();
10942 for (i = 0; i < units; i++)
10944 rtx elt;
10946 elt = CONST_VECTOR_ELT (x, i);
10947 assemble_integer
10948 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10951 return true;
10954 return default_assemble_integer (x, size, aligned_p);
10958 /* Add a function to the list of static constructors. */
10960 static void
10961 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10963 if (!TARGET_AAPCS_BASED)
10965 default_named_section_asm_out_constructor (symbol, priority);
10966 return;
10969 /* Put these in the .init_array section, using a special relocation. */
10970 ctors_section ();
10971 assemble_align (POINTER_SIZE);
10972 fputs ("\t.word\t", asm_out_file);
10973 output_addr_const (asm_out_file, symbol);
10974 fputs ("(target1)\n", asm_out_file);
10976 #endif
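/* For an AAPCS target the directives emitted above for a constructor
   symbol FOO would look roughly like this (illustrative only; FOO is
   a placeholder name):

        .section  .init_array
        .align    2
        .word     FOO(target1)
*/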
10978 /* A finite state machine takes care of noticing whether or not instructions
10979 can be conditionally executed, and thus decrease execution time and code
10980 size by deleting branch instructions. The fsm is controlled by
10981 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10983 /* The state of the fsm controlling condition codes are:
10984 0: normal, do nothing special
10985 1: make ASM_OUTPUT_OPCODE not output this instruction
10986 2: make ASM_OUTPUT_OPCODE not output this instruction
10987 3: make instructions conditional
10988 4: make instructions conditional
10990 State transitions (state->state by whom under condition):
10991 0 -> 1 final_prescan_insn if the `target' is a label
10992 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10993 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10994 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10995 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10996 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10997 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10998 (the target insn is arm_target_insn).
11000 If the jump clobbers the conditions then we use states 2 and 4.
11002 A similar thing can be done with conditional return insns.
11004 XXX In case the `target' is an unconditional branch, this conditionalising
11005 of the instructions always reduces code size, but not always execution
11006 time. But then, I want to reduce the code size to somewhere near what
11007 /bin/cc produces. */
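/* Illustrative only: the payoff of this machine is classic
   if-conversion at output time.  A short skipped block such as

        cmp     r0, #0
        beq     .L1
        add     r1, r1, #1
     .L1:

   is emitted instead as

        cmp     r0, #0
        addne   r1, r1, #1

   provided no more than max_insns_skipped insns are skipped and none
   of them touch the condition codes.  */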
11009 /* Returns the index of the ARM condition code string in
11010 `arm_condition_codes'. COMPARISON should be an rtx like
11011 `(eq (...) (...))'. */
11012 static enum arm_cond_code
11013 get_arm_condition_code (rtx comparison)
11015 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11016 int code;
11017 enum rtx_code comp_code = GET_CODE (comparison);
11019 if (GET_MODE_CLASS (mode) != MODE_CC)
11020 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11021 XEXP (comparison, 1));
11023 switch (mode)
11025 case CC_DNEmode: code = ARM_NE; goto dominance;
11026 case CC_DEQmode: code = ARM_EQ; goto dominance;
11027 case CC_DGEmode: code = ARM_GE; goto dominance;
11028 case CC_DGTmode: code = ARM_GT; goto dominance;
11029 case CC_DLEmode: code = ARM_LE; goto dominance;
11030 case CC_DLTmode: code = ARM_LT; goto dominance;
11031 case CC_DGEUmode: code = ARM_CS; goto dominance;
11032 case CC_DGTUmode: code = ARM_HI; goto dominance;
11033 case CC_DLEUmode: code = ARM_LS; goto dominance;
11034 case CC_DLTUmode: code = ARM_CC;
11036 dominance:
11037 gcc_assert (comp_code == EQ || comp_code == NE);
11039 if (comp_code == EQ)
11040 return ARM_INVERSE_CONDITION_CODE (code);
11041 return code;
11043 case CC_NOOVmode:
11044 switch (comp_code)
11046 case NE: return ARM_NE;
11047 case EQ: return ARM_EQ;
11048 case GE: return ARM_PL;
11049 case LT: return ARM_MI;
11050 default: gcc_unreachable ();
11053 case CC_Zmode:
11054 switch (comp_code)
11056 case NE: return ARM_NE;
11057 case EQ: return ARM_EQ;
11058 default: gcc_unreachable ();
11061 case CC_Nmode:
11062 switch (comp_code)
11064 case NE: return ARM_MI;
11065 case EQ: return ARM_PL;
11066 default: gcc_unreachable ();
11069 case CCFPEmode:
11070 case CCFPmode:
11071 /* These encodings assume that AC=1 in the FPA system control
11072 byte. This allows us to handle all cases except UNEQ and
11073 LTGT. */
11074 switch (comp_code)
11076 case GE: return ARM_GE;
11077 case GT: return ARM_GT;
11078 case LE: return ARM_LS;
11079 case LT: return ARM_MI;
11080 case NE: return ARM_NE;
11081 case EQ: return ARM_EQ;
11082 case ORDERED: return ARM_VC;
11083 case UNORDERED: return ARM_VS;
11084 case UNLT: return ARM_LT;
11085 case UNLE: return ARM_LE;
11086 case UNGT: return ARM_HI;
11087 case UNGE: return ARM_PL;
11088 /* UNEQ and LTGT do not have a representation. */
11089 case UNEQ: /* Fall through. */
11090 case LTGT: /* Fall through. */
11091 default: gcc_unreachable ();
11094 case CC_SWPmode:
11095 switch (comp_code)
11097 case NE: return ARM_NE;
11098 case EQ: return ARM_EQ;
11099 case GE: return ARM_LE;
11100 case GT: return ARM_LT;
11101 case LE: return ARM_GE;
11102 case LT: return ARM_GT;
11103 case GEU: return ARM_LS;
11104 case GTU: return ARM_CC;
11105 case LEU: return ARM_CS;
11106 case LTU: return ARM_HI;
11107 default: gcc_unreachable ();
11110 case CC_Cmode:
11111 switch (comp_code)
11113 case LTU: return ARM_CS;
11114 case GEU: return ARM_CC;
11115 default: gcc_unreachable ();
11118 case CCmode:
11119 switch (comp_code)
11121 case NE: return ARM_NE;
11122 case EQ: return ARM_EQ;
11123 case GE: return ARM_GE;
11124 case GT: return ARM_GT;
11125 case LE: return ARM_LE;
11126 case LT: return ARM_LT;
11127 case GEU: return ARM_CS;
11128 case GTU: return ARM_HI;
11129 case LEU: return ARM_LS;
11130 case LTU: return ARM_CC;
11131 default: gcc_unreachable ();
11134 default: gcc_unreachable ();
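/* A standalone sketch of why inverting an ARM condition can be a
   simple XOR with 1 (assuming the usual encoding order, in which each
   condition is adjacent to its inverse: eq/ne, cs/cc, mi/pl, vs/vc,
   hi/ls, ge/lt, gt/le):

   #include <stdio.h>

   int main (void)
   {
     static const char *const cond[] =
       { "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
         "hi", "ls", "ge", "lt", "gt", "le" };
     int i;
     for (i = 0; i < 14; i++)
       printf ("%s <-> %s\n", cond[i], cond[i ^ 1]);
     return 0;
   }
*/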
11138 void
11139 arm_final_prescan_insn (rtx insn)
11141 /* BODY will hold the body of INSN. */
11142 rtx body = PATTERN (insn);
11144 /* This will be 1 if trying to repeat the trick, and things need to be
11145 reversed if it appears to fail. */
11146 int reverse = 0;
11148 /* A nonzero JUMP_CLOBBERS implies that the condition codes are
11149 clobbered if the branch is taken, even if the rtl suggests otherwise. It also
11150 means that we have to grub around within the jump expression to find
11151 out what the conditions are when the jump isn't taken. */
11152 int jump_clobbers = 0;
11154 /* If we start with a return insn, we only succeed if we find another one. */
11155 int seeking_return = 0;
11157 /* START_INSN will hold the insn from where we start looking. This is the
11158 first insn after the following code_label if REVERSE is true. */
11159 rtx start_insn = insn;
11161 /* If in state 4, check if the target branch is reached, in order to
11162 change back to state 0. */
11163 if (arm_ccfsm_state == 4)
11165 if (insn == arm_target_insn)
11167 arm_target_insn = NULL;
11168 arm_ccfsm_state = 0;
11170 return;
11173 /* If in state 3, it is possible to repeat the trick, if this insn is an
11174 unconditional branch to a label, and immediately following this branch
11175 is the previous target label which is only used once, and the label this
11176 branch jumps to is not too far off. */
11177 if (arm_ccfsm_state == 3)
11179 if (simplejump_p (insn))
11181 start_insn = next_nonnote_insn (start_insn);
11182 if (GET_CODE (start_insn) == BARRIER)
11184 /* XXX Isn't this always a barrier? */
11185 start_insn = next_nonnote_insn (start_insn);
11187 if (GET_CODE (start_insn) == CODE_LABEL
11188 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11189 && LABEL_NUSES (start_insn) == 1)
11190 reverse = TRUE;
11191 else
11192 return;
11194 else if (GET_CODE (body) == RETURN)
11196 start_insn = next_nonnote_insn (start_insn);
11197 if (GET_CODE (start_insn) == BARRIER)
11198 start_insn = next_nonnote_insn (start_insn);
11199 if (GET_CODE (start_insn) == CODE_LABEL
11200 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11201 && LABEL_NUSES (start_insn) == 1)
11203 reverse = TRUE;
11204 seeking_return = 1;
11206 else
11207 return;
11209 else
11210 return;
11213 gcc_assert (!arm_ccfsm_state || reverse);
11214 if (GET_CODE (insn) != JUMP_INSN)
11215 return;
11217 /* This jump might be paralleled with a clobber of the condition codes;
11218 the jump should always come first. */
11219 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11220 body = XVECEXP (body, 0, 0);
11222 if (reverse
11223 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11224 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11226 int insns_skipped;
11227 int fail = FALSE, succeed = FALSE;
11228 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11229 int then_not_else = TRUE;
11230 rtx this_insn = start_insn, label = 0;
11232 /* If the jump cannot be done with one instruction, we cannot
11233 conditionally execute the instruction in the inverse case. */
11234 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11236 jump_clobbers = 1;
11237 return;
11240 /* Register the insn jumped to. */
11241 if (reverse)
11243 if (!seeking_return)
11244 label = XEXP (SET_SRC (body), 0);
11246 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11247 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11248 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11250 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11251 then_not_else = FALSE;
11253 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11254 seeking_return = 1;
11255 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11257 seeking_return = 1;
11258 then_not_else = FALSE;
11260 else
11261 gcc_unreachable ();
11263 /* See how many insns this branch skips, and what kind of insns. If all
11264 insns are okay, and the label or unconditional branch to the same
11265 label is not too far away, succeed. */
11266 for (insns_skipped = 0;
11267 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11269 rtx scanbody;
11271 this_insn = next_nonnote_insn (this_insn);
11272 if (!this_insn)
11273 break;
11275 switch (GET_CODE (this_insn))
11277 case CODE_LABEL:
11278 /* Succeed if it is the target label, otherwise fail since
11279 control falls in from somewhere else. */
11280 if (this_insn == label)
11282 if (jump_clobbers)
11284 arm_ccfsm_state = 2;
11285 this_insn = next_nonnote_insn (this_insn);
11287 else
11288 arm_ccfsm_state = 1;
11289 succeed = TRUE;
11291 else
11292 fail = TRUE;
11293 break;
11295 case BARRIER:
11296 /* Succeed if the following insn is the target label.
11297 Otherwise fail.
11298 If return insns are used then the last insn in a function
11299 will be a barrier. */
11300 this_insn = next_nonnote_insn (this_insn);
11301 if (this_insn && this_insn == label)
11303 if (jump_clobbers)
11305 arm_ccfsm_state = 2;
11306 this_insn = next_nonnote_insn (this_insn);
11308 else
11309 arm_ccfsm_state = 1;
11310 succeed = TRUE;
11312 else
11313 fail = TRUE;
11314 break;
11316 case CALL_INSN:
11317 /* The AAPCS says that conditional calls should not be
11318 used since they make interworking inefficient (the
11319 linker can't transform BL<cond> into BLX). That's
11320 only a problem if the machine has BLX. */
11321 if (arm_arch5)
11323 fail = TRUE;
11324 break;
11327 /* Succeed if the following insn is the target label, or
11328 if the following two insns are a barrier and the
11329 target label. */
11330 this_insn = next_nonnote_insn (this_insn);
11331 if (this_insn && GET_CODE (this_insn) == BARRIER)
11332 this_insn = next_nonnote_insn (this_insn);
11334 if (this_insn && this_insn == label
11335 && insns_skipped < max_insns_skipped)
11337 if (jump_clobbers)
11339 arm_ccfsm_state = 2;
11340 this_insn = next_nonnote_insn (this_insn);
11342 else
11343 arm_ccfsm_state = 1;
11344 succeed = TRUE;
11346 else
11347 fail = TRUE;
11348 break;
11350 case JUMP_INSN:
11351 /* If this is an unconditional branch to the same label, succeed.
11352 If it is to another label, do nothing. If it is conditional,
11353 fail. */
11354 /* XXX Probably, the tests for SET and the PC are
11355 unnecessary. */
11357 scanbody = PATTERN (this_insn);
11358 if (GET_CODE (scanbody) == SET
11359 && GET_CODE (SET_DEST (scanbody)) == PC)
11361 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11362 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11364 arm_ccfsm_state = 2;
11365 succeed = TRUE;
11367 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11368 fail = TRUE;
11370 /* Fail if a conditional return is undesirable (e.g. on a
11371 StrongARM), but still allow this if optimizing for size. */
11372 else if (GET_CODE (scanbody) == RETURN
11373 && !use_return_insn (TRUE, NULL)
11374 && !optimize_size)
11375 fail = TRUE;
11376 else if (GET_CODE (scanbody) == RETURN
11377 && seeking_return)
11379 arm_ccfsm_state = 2;
11380 succeed = TRUE;
11382 else if (GET_CODE (scanbody) == PARALLEL)
11384 switch (get_attr_conds (this_insn))
11386 case CONDS_NOCOND:
11387 break;
11388 default:
11389 fail = TRUE;
11390 break;
11393 else
11394 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11396 break;
11398 case INSN:
11399 /* Instructions using or affecting the condition codes make it
11400 fail. */
11401 scanbody = PATTERN (this_insn);
11402 if (!(GET_CODE (scanbody) == SET
11403 || GET_CODE (scanbody) == PARALLEL)
11404 || get_attr_conds (this_insn) != CONDS_NOCOND)
11405 fail = TRUE;
11407 /* A conditional Cirrus instruction must be followed by
11408 a non-Cirrus instruction. However, since we
11409 conditionalize instructions in this function and by
11410 the time we get here we can't add instructions
11411 (nops), because shorten_branches() has already been
11412 called, we will disable conditionalizing Cirrus
11413 instructions to be safe. */
11414 if (GET_CODE (scanbody) != USE
11415 && GET_CODE (scanbody) != CLOBBER
11416 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11417 fail = TRUE;
11418 break;
11420 default:
11421 break;
11424 if (succeed)
11426 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11427 arm_target_label = CODE_LABEL_NUMBER (label);
11428 else
11430 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11432 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11434 this_insn = next_nonnote_insn (this_insn);
11435 gcc_assert (!this_insn
11436 || (GET_CODE (this_insn) != BARRIER
11437 && GET_CODE (this_insn) != CODE_LABEL));
11439 if (!this_insn)
11441 /* Oh, dear! We ran off the end... give up. */
11442 recog (PATTERN (insn), insn, NULL);
11443 arm_ccfsm_state = 0;
11444 arm_target_insn = NULL;
11445 return;
11447 arm_target_insn = this_insn;
11449 if (jump_clobbers)
11451 gcc_assert (!reverse);
11452 arm_current_cc =
11453 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11454 0), 0), 1));
11455 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11456 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11457 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11458 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11460 else
11462 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11463 what it was. */
11464 if (!reverse)
11465 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11466 0));
11469 if (reverse || then_not_else)
11470 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11473 /* Restore recog_data (getting the attributes of other insns can
11474 destroy this array, but final.c assumes that it remains intact
11475 across this call; since the insn has been recognized already we
11476 call recog direct). */
11477 recog (PATTERN (insn), insn, NULL);
11481 /* Returns true if REGNO is a valid register
11482 for holding a quantity of type MODE. */
11483 int
11484 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11486 if (GET_MODE_CLASS (mode) == MODE_CC)
11487 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11489 if (TARGET_THUMB)
11490 /* For the Thumb we only allow values bigger than SImode in
11491 registers 0 - 6, so that there is always a second low
11492 register available to hold the upper part of the value.
11493 We probably ought to ensure that the register is the
11494 start of an even numbered register pair. */
11495 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11497 if (IS_CIRRUS_REGNUM (regno))
11498 /* We have outlawed SI values in Cirrus registers because they
11499 reside in the lower 32 bits, but SF values reside in the
11500 upper 32 bits. This causes gcc all sorts of grief. We can't
11501 even split the registers into pairs because Cirrus SI values
11502 get sign-extended to 64 bits -- aldyh. */
11503 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11505 if (IS_VFP_REGNUM (regno))
11507 if (mode == SFmode || mode == SImode)
11508 return TRUE;
11510 /* DFmode values are only valid in even register pairs. */
11511 if (mode == DFmode)
11512 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11513 return FALSE;
11516 if (IS_IWMMXT_GR_REGNUM (regno))
11517 return mode == SImode;
11519 if (IS_IWMMXT_REGNUM (regno))
11520 return VALID_IWMMXT_REG_MODE (mode);
11522 /* We allow any value to be stored in the general registers.
11523 Restrict doubleword quantities to even register pairs so that we can
11524 use ldrd. */
11525 if (regno <= LAST_ARM_REGNUM)
11526 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11528 if ( regno == FRAME_POINTER_REGNUM
11529 || regno == ARG_POINTER_REGNUM)
11530 /* We only allow integers in the fake hard registers. */
11531 return GET_MODE_CLASS (mode) == MODE_INT;
11533 /* The only registers left are the FPA registers
11534 which we only allow to hold FP values. */
11535 return GET_MODE_CLASS (mode) == MODE_FLOAT
11536 && regno >= FIRST_FPA_REGNUM
11537 && regno <= LAST_FPA_REGNUM;
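/* A quick standalone check (illustrative) of the LDRD restriction
   above: when TARGET_LDRD is set, doubleword values are only allowed
   in even-numbered core registers, so that an ldrd/strd pair can
   address them.

   #include <stdio.h>

   static int ok_for_core_reg (unsigned regno, unsigned mode_size)
   {
     return !(mode_size > 4 && (regno & 1) != 0);  // mirrors the test above
   }

   int main (void)
   {
     // r0 may hold a DImode value, r1 may not (prints: 1 0)
     printf ("%d %d\n", ok_for_core_reg (0, 8), ok_for_core_reg (1, 8));
     return 0;
   }
*/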
11540 enum reg_class
11541 arm_regno_class (int regno)
11543 if (TARGET_THUMB)
11545 if (regno == STACK_POINTER_REGNUM)
11546 return STACK_REG;
11547 if (regno == CC_REGNUM)
11548 return CC_REG;
11549 if (regno < 8)
11550 return LO_REGS;
11551 return HI_REGS;
11554 if ( regno <= LAST_ARM_REGNUM
11555 || regno == FRAME_POINTER_REGNUM
11556 || regno == ARG_POINTER_REGNUM)
11557 return GENERAL_REGS;
11559 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11560 return NO_REGS;
11562 if (IS_CIRRUS_REGNUM (regno))
11563 return CIRRUS_REGS;
11565 if (IS_VFP_REGNUM (regno))
11566 return VFP_REGS;
11568 if (IS_IWMMXT_REGNUM (regno))
11569 return IWMMXT_REGS;
11571 if (IS_IWMMXT_GR_REGNUM (regno))
11572 return IWMMXT_GR_REGS;
11574 return FPA_REGS;
11577 /* Handle a special case when computing the offset
11578 of an argument from the frame pointer. */
11579 int
11580 arm_debugger_arg_offset (int value, rtx addr)
11582 rtx insn;
11584 /* We are only interested if dbxout_parms() failed to compute the offset. */
11585 if (value != 0)
11586 return 0;
11588 /* We can only cope with the case where the address is held in a register. */
11589 if (GET_CODE (addr) != REG)
11590 return 0;
11592 /* If we are using the frame pointer to point at the argument, then
11593 an offset of 0 is correct. */
11594 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11595 return 0;
11597 /* If we are using the stack pointer to point at the
11598 argument, then an offset of 0 is correct. */
11599 if ((TARGET_THUMB || !frame_pointer_needed)
11600 && REGNO (addr) == SP_REGNUM)
11601 return 0;
11603 /* Oh dear. The argument is pointed to by a register rather
11604 than being held in a register, or being stored at a known
11605 offset from the frame pointer. Since GDB only understands
11606 those two kinds of argument we must translate the address
11607 held in the register into an offset from the frame pointer.
11608 We do this by searching through the insns for the function
11609 looking to see where this register gets its value. If the
11610 register is initialized from the frame pointer plus an offset
11611 then we are in luck and we can continue, otherwise we give up.
11613 This code is exercised by producing debugging information
11614 for a function with arguments like this:
11616 double func (double a, double b, int c, double d) {return d;}
11618 Without this code the stab for parameter 'd' will be set to
11619 an offset of 0 from the frame pointer, rather than 8. */
11621 /* The if() statement says:
11623 If the insn is a normal instruction
11624 and if the insn is setting the value in a register
11625 and if the register being set is the register holding the address of the argument
11626 and if the address is computed by an addition
11627 that involves adding to a register
11628 which is the frame pointer
11629 a constant integer
11631 then... */
11633 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11635 if ( GET_CODE (insn) == INSN
11636 && GET_CODE (PATTERN (insn)) == SET
11637 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11638 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11639 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11640 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11641 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11644 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11646 break;
11650 if (value == 0)
11652 debug_rtx (addr);
11653 warning (0, "unable to compute real location of stacked parameter");
11654 value = 8; /* XXX magic hack */
11657 return value;
11660 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11661 do \
11663 if ((MASK) & insn_flags) \
11664 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11665 BUILT_IN_MD, NULL, NULL_TREE); \
11667 while (0)
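/* A hypothetical usage sketch of the macro above (calls of this shape
   appear further down in this file): registering one iWMMXt builtin,
   gated on the FL_IWMMXT bit of insn_flags, would look like

     def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx",
                   void_ftype_int_int, ARM_BUILTIN_SETWCX);
*/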
11669 struct builtin_description
11671 const unsigned int mask;
11672 const enum insn_code icode;
11673 const char * const name;
11674 const enum arm_builtins code;
11675 const enum rtx_code comparison;
11676 const unsigned int flag;
11679 static const struct builtin_description bdesc_2arg[] =
11681 #define IWMMXT_BUILTIN(code, string, builtin) \
11682 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11683 ARM_BUILTIN_##builtin, 0, 0 },
11685 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11686 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11687 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11688 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11689 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11690 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11691 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11692 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11693 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11694 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11695 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11696 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11697 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11698 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11699 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11700 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11701 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11702 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11703 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11704 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11705 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11706 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11707 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11708 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11709 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11710 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11711 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11712 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11713 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11714 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11715 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11716 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11717 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11718 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11719 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11720 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11721 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11722 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11723 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11724 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11725 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11726 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11727 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11728 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11729 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11730 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11731 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11732 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11733 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11734 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11735 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11736 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11737 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11738 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11739 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11740 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11741 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11742 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11744 #define IWMMXT_BUILTIN2(code, builtin) \
11745 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11747 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11748 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11749 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11750 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11751 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11752 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11753 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11754 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11755 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11756 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11757 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11758 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11759 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11760 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11761 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11762 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11763 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11764 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11765 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11766 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11767 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11768 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11769 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11770 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11771 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11772 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11773 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11774 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11775 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11776 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11777 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11778 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11781 static const struct builtin_description bdesc_1arg[] =
11783 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11784 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11785 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11786 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11787 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11788 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11789 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11790 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11791 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11792 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11793 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11794 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11795 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11796 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11797 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11798 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11799 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11800 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
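/* From the user's side (illustrative; requires an iWMMXt-enabled
   target), the two-operand builtins in the tables above are used
   roughly like so:

   typedef char v8qi __attribute__ ((vector_size (8)));

   v8qi add_bytes (v8qi a, v8qi b)
   {
     return __builtin_arm_waddb (a, b);  // expands to a single WADDB insn
   }
*/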
11803 /* Set up all the iWMMXt builtins. This is
11804 not called if TARGET_IWMMXT is zero. */
11806 static void
11807 arm_init_iwmmxt_builtins (void)
11809 const struct builtin_description * d;
11810 size_t i;
11811 tree endlink = void_list_node;
11813 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11814 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11815 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11817 tree int_ftype_int
11818 = build_function_type (integer_type_node,
11819 tree_cons (NULL_TREE, integer_type_node, endlink));
11820 tree v8qi_ftype_v8qi_v8qi_int
11821 = build_function_type (V8QI_type_node,
11822 tree_cons (NULL_TREE, V8QI_type_node,
11823 tree_cons (NULL_TREE, V8QI_type_node,
11824 tree_cons (NULL_TREE,
11825 integer_type_node,
11826 endlink))));
11827 tree v4hi_ftype_v4hi_int
11828 = build_function_type (V4HI_type_node,
11829 tree_cons (NULL_TREE, V4HI_type_node,
11830 tree_cons (NULL_TREE, integer_type_node,
11831 endlink)));
11832 tree v2si_ftype_v2si_int
11833 = build_function_type (V2SI_type_node,
11834 tree_cons (NULL_TREE, V2SI_type_node,
11835 tree_cons (NULL_TREE, integer_type_node,
11836 endlink)));
11837 tree v2si_ftype_di_di
11838 = build_function_type (V2SI_type_node,
11839 tree_cons (NULL_TREE, long_long_integer_type_node,
11840 tree_cons (NULL_TREE, long_long_integer_type_node,
11841 endlink)));
11842 tree di_ftype_di_int
11843 = build_function_type (long_long_integer_type_node,
11844 tree_cons (NULL_TREE, long_long_integer_type_node,
11845 tree_cons (NULL_TREE, integer_type_node,
11846 endlink)));
11847 tree di_ftype_di_int_int
11848 = build_function_type (long_long_integer_type_node,
11849 tree_cons (NULL_TREE, long_long_integer_type_node,
11850 tree_cons (NULL_TREE, integer_type_node,
11851 tree_cons (NULL_TREE,
11852 integer_type_node,
11853 endlink))));
11854 tree int_ftype_v8qi
11855 = build_function_type (integer_type_node,
11856 tree_cons (NULL_TREE, V8QI_type_node,
11857 endlink));
11858 tree int_ftype_v4hi
11859 = build_function_type (integer_type_node,
11860 tree_cons (NULL_TREE, V4HI_type_node,
11861 endlink));
11862 tree int_ftype_v2si
11863 = build_function_type (integer_type_node,
11864 tree_cons (NULL_TREE, V2SI_type_node,
11865 endlink));
11866 tree int_ftype_v8qi_int
11867 = build_function_type (integer_type_node,
11868 tree_cons (NULL_TREE, V8QI_type_node,
11869 tree_cons (NULL_TREE, integer_type_node,
11870 endlink)));
11871 tree int_ftype_v4hi_int
11872 = build_function_type (integer_type_node,
11873 tree_cons (NULL_TREE, V4HI_type_node,
11874 tree_cons (NULL_TREE, integer_type_node,
11875 endlink)));
11876 tree int_ftype_v2si_int
11877 = build_function_type (integer_type_node,
11878 tree_cons (NULL_TREE, V2SI_type_node,
11879 tree_cons (NULL_TREE, integer_type_node,
11880 endlink)));
11881 tree v8qi_ftype_v8qi_int_int
11882 = build_function_type (V8QI_type_node,
11883 tree_cons (NULL_TREE, V8QI_type_node,
11884 tree_cons (NULL_TREE, integer_type_node,
11885 tree_cons (NULL_TREE,
11886 integer_type_node,
11887 endlink))));
11888 tree v4hi_ftype_v4hi_int_int
11889 = build_function_type (V4HI_type_node,
11890 tree_cons (NULL_TREE, V4HI_type_node,
11891 tree_cons (NULL_TREE, integer_type_node,
11892 tree_cons (NULL_TREE,
11893 integer_type_node,
11894 endlink))));
11895 tree v2si_ftype_v2si_int_int
11896 = build_function_type (V2SI_type_node,
11897 tree_cons (NULL_TREE, V2SI_type_node,
11898 tree_cons (NULL_TREE, integer_type_node,
11899 tree_cons (NULL_TREE,
11900 integer_type_node,
11901 endlink))));
11902 /* Miscellaneous. */
11903 tree v8qi_ftype_v4hi_v4hi
11904 = build_function_type (V8QI_type_node,
11905 tree_cons (NULL_TREE, V4HI_type_node,
11906 tree_cons (NULL_TREE, V4HI_type_node,
11907 endlink)));
11908 tree v4hi_ftype_v2si_v2si
11909 = build_function_type (V4HI_type_node,
11910 tree_cons (NULL_TREE, V2SI_type_node,
11911 tree_cons (NULL_TREE, V2SI_type_node,
11912 endlink)));
11913 tree v2si_ftype_v4hi_v4hi
11914 = build_function_type (V2SI_type_node,
11915 tree_cons (NULL_TREE, V4HI_type_node,
11916 tree_cons (NULL_TREE, V4HI_type_node,
11917 endlink)));
11918 tree v2si_ftype_v8qi_v8qi
11919 = build_function_type (V2SI_type_node,
11920 tree_cons (NULL_TREE, V8QI_type_node,
11921 tree_cons (NULL_TREE, V8QI_type_node,
11922 endlink)));
11923 tree v4hi_ftype_v4hi_di
11924 = build_function_type (V4HI_type_node,
11925 tree_cons (NULL_TREE, V4HI_type_node,
11926 tree_cons (NULL_TREE,
11927 long_long_integer_type_node,
11928 endlink)));
11929 tree v2si_ftype_v2si_di
11930 = build_function_type (V2SI_type_node,
11931 tree_cons (NULL_TREE, V2SI_type_node,
11932 tree_cons (NULL_TREE,
11933 long_long_integer_type_node,
11934 endlink)));
11935 tree void_ftype_int_int
11936 = build_function_type (void_type_node,
11937 tree_cons (NULL_TREE, integer_type_node,
11938 tree_cons (NULL_TREE, integer_type_node,
11939 endlink)));
11940 tree di_ftype_void
11941 = build_function_type (long_long_unsigned_type_node, endlink);
11942 tree di_ftype_v8qi
11943 = build_function_type (long_long_integer_type_node,
11944 tree_cons (NULL_TREE, V8QI_type_node,
11945 endlink));
11946 tree di_ftype_v4hi
11947 = build_function_type (long_long_integer_type_node,
11948 tree_cons (NULL_TREE, V4HI_type_node,
11949 endlink));
11950 tree di_ftype_v2si
11951 = build_function_type (long_long_integer_type_node,
11952 tree_cons (NULL_TREE, V2SI_type_node,
11953 endlink));
11954 tree v2si_ftype_v4hi
11955 = build_function_type (V2SI_type_node,
11956 tree_cons (NULL_TREE, V4HI_type_node,
11957 endlink));
11958 tree v4hi_ftype_v8qi
11959 = build_function_type (V4HI_type_node,
11960 tree_cons (NULL_TREE, V8QI_type_node,
11961 endlink));
11963 tree di_ftype_di_v4hi_v4hi
11964 = build_function_type (long_long_unsigned_type_node,
11965 tree_cons (NULL_TREE,
11966 long_long_unsigned_type_node,
11967 tree_cons (NULL_TREE, V4HI_type_node,
11968 tree_cons (NULL_TREE,
11969 V4HI_type_node,
11970 endlink))));
11972 tree di_ftype_v4hi_v4hi
11973 = build_function_type (long_long_unsigned_type_node,
11974 tree_cons (NULL_TREE, V4HI_type_node,
11975 tree_cons (NULL_TREE, V4HI_type_node,
11976 endlink)));
11978 /* Normal vector binops. */
11979 tree v8qi_ftype_v8qi_v8qi
11980 = build_function_type (V8QI_type_node,
11981 tree_cons (NULL_TREE, V8QI_type_node,
11982 tree_cons (NULL_TREE, V8QI_type_node,
11983 endlink)));
11984 tree v4hi_ftype_v4hi_v4hi
11985 = build_function_type (V4HI_type_node,
11986 tree_cons (NULL_TREE, V4HI_type_node,
11987 tree_cons (NULL_TREE, V4HI_type_node,
11988 endlink)));
11989 tree v2si_ftype_v2si_v2si
11990 = build_function_type (V2SI_type_node,
11991 tree_cons (NULL_TREE, V2SI_type_node,
11992 tree_cons (NULL_TREE, V2SI_type_node,
11993 endlink)));
11994 tree di_ftype_di_di
11995 = build_function_type (long_long_unsigned_type_node,
11996 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11997 tree_cons (NULL_TREE,
11998 long_long_unsigned_type_node,
11999 endlink)));
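/* A rough reading of the naming convention used above: a node such
   as v4hi_ftype_v4hi_v4hi describes the C-level signature

       v4hi f (v4hi, v4hi);

   where v4hi is a vector of four 16-bit (HImode) elements, v8qi a
   vector of eight bytes, v2si a vector of two words, and di a
   64-bit integer. */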
12001 /* Add all builtins that are more or less simple operations on two
12002 operands. */
12003 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12005 /* Use one of the operands; the target can have a different mode for
12006 mask-generating compares. */
12007 enum machine_mode mode;
12008 tree type;
12010 if (d->name == 0)
12011 continue;
12013 mode = insn_data[d->icode].operand[1].mode;
12015 switch (mode)
12017 case V8QImode:
12018 type = v8qi_ftype_v8qi_v8qi;
12019 break;
12020 case V4HImode:
12021 type = v4hi_ftype_v4hi_v4hi;
12022 break;
12023 case V2SImode:
12024 type = v2si_ftype_v2si_v2si;
12025 break;
12026 case DImode:
12027 type = di_ftype_di_di;
12028 break;
12030 default:
12031 gcc_unreachable ();
12034 def_mbuiltin (d->mask, d->name, type, d->code);
12037 /* Add the remaining iWMMXt insns with somewhat more complicated types. */
12038 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12039 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12040 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12042 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12043 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12044 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12045 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12046 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12047 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12049 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12050 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12051 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12052 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12053 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12054 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12056 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12057 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12058 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12059 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12060 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12061 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12063 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12064 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12065 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12066 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12067 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12068 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12070 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12072 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12073 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12074 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12075 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12077 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12078 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12079 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12080 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12081 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12082 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12083 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12084 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12085 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12087 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12088 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12089 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12091 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12092 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12093 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12095 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12096 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12097 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12098 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12099 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12100 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12102 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12103 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12104 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12105 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12106 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12107 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12108 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12109 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12110 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12111 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12112 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12113 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12115 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12116 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12117 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12118 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12120 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12121 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12122 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12123 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12124 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12125 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12126 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
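/* Once registered, these builtins are callable directly from C when
   iWMMXt is enabled.  Roughly (a sketch, using one of the two-operand
   builtins taken from bdesc_2arg above):

       typedef short v4hi __attribute__ ((vector_size (8)));

       v4hi
       add_halfwords (v4hi a, v4hi b)
       {
         return __builtin_arm_waddh (a, b);
       }

   mmintrin.h wraps these builtins in the usual _mm_* intrinsic
   names. */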
12129 static void
12130 arm_init_builtins (void)
12132 if (TARGET_REALLY_IWMMXT)
12133 arm_init_iwmmxt_builtins ();
12136 /* Errors in the source file can cause expand_expr to return const0_rtx
12137 where we expect a vector. To avoid crashing, use one of the vector
12138 clear instructions. */
12140 static rtx
12141 safe_vector_operand (rtx x, enum machine_mode mode)
12143 if (x != const0_rtx)
12144 return x;
12145 x = gen_reg_rtx (mode);
12147 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12148 : gen_rtx_SUBREG (DImode, x, 0)));
12149 return x;
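/* For example, if an error in the source file left an operand
   expanded as const0_rtx where a V4HImode value was expected, the
   code above substitutes a fresh register cleared through the
   iwmmxt_clrdi pattern (applied to the register's DImode subreg), so
   that later expansion does not crash. */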
12152 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12154 static rtx
12155 arm_expand_binop_builtin (enum insn_code icode,
12156 tree arglist, rtx target)
12158 rtx pat;
12159 tree arg0 = TREE_VALUE (arglist);
12160 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12161 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12162 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12163 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12164 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12165 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12167 if (VECTOR_MODE_P (mode0))
12168 op0 = safe_vector_operand (op0, mode0);
12169 if (VECTOR_MODE_P (mode1))
12170 op1 = safe_vector_operand (op1, mode1);
12172 if (! target
12173 || GET_MODE (target) != tmode
12174 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12175 target = gen_reg_rtx (tmode);
12177 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12179 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12180 op0 = copy_to_mode_reg (mode0, op0);
12181 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12182 op1 = copy_to_mode_reg (mode1, op1);
12184 pat = GEN_FCN (icode) (target, op0, op1);
12185 if (! pat)
12186 return 0;
12187 emit_insn (pat);
12188 return target;
12191 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12193 static rtx
12194 arm_expand_unop_builtin (enum insn_code icode,
12195 tree arglist, rtx target, int do_load)
12197 rtx pat;
12198 tree arg0 = TREE_VALUE (arglist);
12199 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12200 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12201 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12203 if (! target
12204 || GET_MODE (target) != tmode
12205 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12206 target = gen_reg_rtx (tmode);
12207 if (do_load)
12208 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12209 else
12211 if (VECTOR_MODE_P (mode0))
12212 op0 = safe_vector_operand (op0, mode0);
12214 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12215 op0 = copy_to_mode_reg (mode0, op0);
12218 pat = GEN_FCN (icode) (target, op0);
12219 if (! pat)
12220 return 0;
12221 emit_insn (pat);
12222 return target;
12225 /* Expand an expression EXP that calls a built-in function,
12226 with result going to TARGET if that's convenient
12227 (and in mode MODE if that's convenient).
12228 SUBTARGET may be used as the target for computing one of EXP's operands.
12229 IGNORE is nonzero if the value is to be ignored. */
12231 static rtx
12232 arm_expand_builtin (tree exp,
12233 rtx target,
12234 rtx subtarget ATTRIBUTE_UNUSED,
12235 enum machine_mode mode ATTRIBUTE_UNUSED,
12236 int ignore ATTRIBUTE_UNUSED)
12238 const struct builtin_description * d;
12239 enum insn_code icode;
12240 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12241 tree arglist = TREE_OPERAND (exp, 1);
12242 tree arg0;
12243 tree arg1;
12244 tree arg2;
12245 rtx op0;
12246 rtx op1;
12247 rtx op2;
12248 rtx pat;
12249 int fcode = DECL_FUNCTION_CODE (fndecl);
12250 size_t i;
12251 enum machine_mode tmode;
12252 enum machine_mode mode0;
12253 enum machine_mode mode1;
12254 enum machine_mode mode2;
12256 switch (fcode)
12258 case ARM_BUILTIN_TEXTRMSB:
12259 case ARM_BUILTIN_TEXTRMUB:
12260 case ARM_BUILTIN_TEXTRMSH:
12261 case ARM_BUILTIN_TEXTRMUH:
12262 case ARM_BUILTIN_TEXTRMSW:
12263 case ARM_BUILTIN_TEXTRMUW:
12264 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12265 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12266 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12267 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12268 : CODE_FOR_iwmmxt_textrmw);
12270 arg0 = TREE_VALUE (arglist);
12271 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12272 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12273 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12274 tmode = insn_data[icode].operand[0].mode;
12275 mode0 = insn_data[icode].operand[1].mode;
12276 mode1 = insn_data[icode].operand[2].mode;
12278 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12279 op0 = copy_to_mode_reg (mode0, op0);
12280 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12282 /* @@@ better error message */
12283 error ("selector must be an immediate");
12284 return gen_reg_rtx (tmode);
12286 if (target == 0
12287 || GET_MODE (target) != tmode
12288 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12289 target = gen_reg_rtx (tmode);
12290 pat = GEN_FCN (icode) (target, op0, op1);
12291 if (! pat)
12292 return 0;
12293 emit_insn (pat);
12294 return target;
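/* A sketch of the user-level contract: the selector must fold to a
   compile-time constant, so

       int b = __builtin_arm_textrmsb (v, 2);   extract byte 2, signed

   is accepted, whereas passing a run-time variable as the second
   argument reaches the error path above. */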
12296 case ARM_BUILTIN_TINSRB:
12297 case ARM_BUILTIN_TINSRH:
12298 case ARM_BUILTIN_TINSRW:
12299 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12300 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12301 : CODE_FOR_iwmmxt_tinsrw);
12302 arg0 = TREE_VALUE (arglist);
12303 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12304 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12305 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12306 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12307 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12308 tmode = insn_data[icode].operand[0].mode;
12309 mode0 = insn_data[icode].operand[1].mode;
12310 mode1 = insn_data[icode].operand[2].mode;
12311 mode2 = insn_data[icode].operand[3].mode;
12313 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12314 op0 = copy_to_mode_reg (mode0, op0);
12315 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12316 op1 = copy_to_mode_reg (mode1, op1);
12317 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12319 /* @@@ better error message */
12320 error ("selector must be an immediate");
12321 return const0_rtx;
12323 if (target == 0
12324 || GET_MODE (target) != tmode
12325 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12326 target = gen_reg_rtx (tmode);
12327 pat = GEN_FCN (icode) (target, op0, op1, op2);
12328 if (! pat)
12329 return 0;
12330 emit_insn (pat);
12331 return target;
12333 case ARM_BUILTIN_SETWCX:
12334 arg0 = TREE_VALUE (arglist);
12335 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12336 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12337 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12338 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12339 return 0;
12341 case ARM_BUILTIN_GETWCX:
12342 arg0 = TREE_VALUE (arglist);
12343 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12344 target = gen_reg_rtx (SImode);
12345 emit_insn (gen_iwmmxt_tmrc (target, op0));
12346 return target;
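/* Roughly, at the source level (the control-register number below is
   only an example):

       __builtin_arm_setwcx (value, 3);        write control reg wC3
       int flags = __builtin_arm_getwcx (3);   read it back

   The second SETWCX argument selects the register and is routed to
   the tmcr pattern above; GETWCX reads through tmrc. */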
12348 case ARM_BUILTIN_WSHUFH:
12349 icode = CODE_FOR_iwmmxt_wshufh;
12350 arg0 = TREE_VALUE (arglist);
12351 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12352 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12353 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12354 tmode = insn_data[icode].operand[0].mode;
12355 mode1 = insn_data[icode].operand[1].mode;
12356 mode2 = insn_data[icode].operand[2].mode;
12358 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12359 op0 = copy_to_mode_reg (mode1, op0);
12360 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12362 /* @@@ better error message */
12363 error ("mask must be an immediate");
12364 return const0_rtx;
12366 if (target == 0
12367 || GET_MODE (target) != tmode
12368 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12369 target = gen_reg_rtx (tmode);
12370 pat = GEN_FCN (icode) (target, op0, op1);
12371 if (! pat)
12372 return 0;
12373 emit_insn (pat);
12374 return target;
12376 case ARM_BUILTIN_WSADB:
12377 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12378 case ARM_BUILTIN_WSADH:
12379 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12380 case ARM_BUILTIN_WSADBZ:
12381 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12382 case ARM_BUILTIN_WSADHZ:
12383 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12385 /* Several three-argument builtins. */
12386 case ARM_BUILTIN_WMACS:
12387 case ARM_BUILTIN_WMACU:
12388 case ARM_BUILTIN_WALIGN:
12389 case ARM_BUILTIN_TMIA:
12390 case ARM_BUILTIN_TMIAPH:
12391 case ARM_BUILTIN_TMIATT:
12392 case ARM_BUILTIN_TMIATB:
12393 case ARM_BUILTIN_TMIABT:
12394 case ARM_BUILTIN_TMIABB:
12395 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12396 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12397 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12398 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12399 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12400 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12401 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12402 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12403 : CODE_FOR_iwmmxt_walign);
12404 arg0 = TREE_VALUE (arglist);
12405 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12406 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12407 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12408 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12409 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12410 tmode = insn_data[icode].operand[0].mode;
12411 mode0 = insn_data[icode].operand[1].mode;
12412 mode1 = insn_data[icode].operand[2].mode;
12413 mode2 = insn_data[icode].operand[3].mode;
12415 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12416 op0 = copy_to_mode_reg (mode0, op0);
12417 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12418 op1 = copy_to_mode_reg (mode1, op1);
12419 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12420 op2 = copy_to_mode_reg (mode2, op2);
12421 if (target == 0
12422 || GET_MODE (target) != tmode
12423 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12424 target = gen_reg_rtx (tmode);
12425 pat = GEN_FCN (icode) (target, op0, op1, op2);
12426 if (! pat)
12427 return 0;
12428 emit_insn (pat);
12429 return target;
12431 case ARM_BUILTIN_WZERO:
12432 target = gen_reg_rtx (DImode);
12433 emit_insn (gen_iwmmxt_clrdi (target));
12434 return target;
12436 default:
12437 break;
12440 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12441 if (d->code == (const enum arm_builtins) fcode)
12442 return arm_expand_binop_builtin (d->icode, arglist, target);
12444 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12445 if (d->code == (const enum arm_builtins) fcode)
12446 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12448 /* @@@ Should really do something sensible here. */
12449 return NULL_RTX;
12452 /* Return the number (counting from 0) of
12453 the least significant set bit in MASK. */
12455 inline static int
12456 number_of_first_bit_set (unsigned mask)
12458 int bit;
12460 for (bit = 0;
12461 (mask & (1 << bit)) == 0;
12462 ++bit)
12463 continue;
12465 return bit;
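/* A worked example: for MASK == 0x18 (binary 11000) the loop tests
   bits 0, 1 and 2, stops at bit 3, and returns 3.  Note that the
   loop would never terminate for MASK == 0, so callers must pass a
   nonzero mask (cf. the gcc_assert in thumb_pushpop below). */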
12468 /* Emit code to push or pop registers to or from the stack. F is the
12469 assembly file. MASK is the registers to push or pop. PUSH is
12470 nonzero if we should push, and zero if we should pop. For debugging
12471 output, if pushing, adjust CFA_OFFSET by the amount of space added
12472 to the stack. REAL_REGS should have the same number of bits set as
12473 MASK, and will be used instead (in the same order) to describe which
12474 registers were saved - this is used to mark the save slots when we
12475 push high registers after moving them to low registers. */
12476 static void
12477 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12478 unsigned long real_regs)
12480 int regno;
12481 int lo_mask = mask & 0xFF;
12482 int pushed_words = 0;
12484 gcc_assert (mask);
12486 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12488 /* Special case.  Do not generate a POP PC statement here; do it in
12489 thumb_exit ().  */
12490 thumb_exit (f, -1);
12491 return;
12494 if (ARM_EABI_UNWIND_TABLES && push)
12496 fprintf (f, "\t.save\t{");
12497 for (regno = 0; regno < 15; regno++)
12499 if (real_regs & (1 << regno))
12501 if (real_regs & ((1 << regno) -1))
12502 fprintf (f, ", ");
12503 asm_fprintf (f, "%r", regno);
12506 fprintf (f, "}\n");
12509 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12511 /* Look at the low registers first. */
12512 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12514 if (lo_mask & 1)
12516 asm_fprintf (f, "%r", regno);
12518 if ((lo_mask & ~1) != 0)
12519 fprintf (f, ", ");
12521 pushed_words++;
12525 if (push && (mask & (1 << LR_REGNUM)))
12527 /* Catch pushing the LR. */
12528 if (mask & 0xFF)
12529 fprintf (f, ", ");
12531 asm_fprintf (f, "%r", LR_REGNUM);
12533 pushed_words++;
12535 else if (!push && (mask & (1 << PC_REGNUM)))
12537 /* Catch popping the PC. */
12538 if (TARGET_INTERWORK || TARGET_BACKTRACE
12539 || current_function_calls_eh_return)
12541 /* The PC is never popped directly; instead
12542 it is popped into r3 and then BX is used. */
12543 fprintf (f, "}\n");
12545 thumb_exit (f, -1);
12547 return;
12549 else
12551 if (mask & 0xFF)
12552 fprintf (f, ", ");
12554 asm_fprintf (f, "%r", PC_REGNUM);
12558 fprintf (f, "}\n");
12560 if (push && pushed_words && dwarf2out_do_frame ())
12562 char *l = dwarf2out_cfi_label ();
12563 int pushed_mask = real_regs;
12565 *cfa_offset += pushed_words * 4;
12566 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12568 pushed_words = 0;
12569 pushed_mask = real_regs;
12570 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12572 if (pushed_mask & 1)
12573 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
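/* As an illustration (a sketch only; the exact output also depends
   on the unwind-table handling above), a call such as

       thumb_pushpop (f, 0x4003, 1, &offset, 0x4003);

   with the mask naming r0, r1 and lr would emit

       push {r0, r1, lr}

   and, when frame debug info is wanted, advance *CFA_OFFSET by 12
   while recording the three save slots. */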
12578 /* Generate code to return from a thumb function.
12579 If 'reg_containing_return_addr' is -1, then the return address is
12580 actually on the stack, at the stack pointer. */
12581 static void
12582 thumb_exit (FILE *f, int reg_containing_return_addr)
12584 unsigned regs_available_for_popping;
12585 unsigned regs_to_pop;
12586 int pops_needed;
12587 unsigned available;
12588 unsigned required;
12589 int mode;
12590 int size;
12591 int restore_a4 = FALSE;
12593 /* Compute the registers we need to pop. */
12594 regs_to_pop = 0;
12595 pops_needed = 0;
12597 if (reg_containing_return_addr == -1)
12599 regs_to_pop |= 1 << LR_REGNUM;
12600 ++pops_needed;
12603 if (TARGET_BACKTRACE)
12605 /* Restore the (ARM) frame pointer and stack pointer. */
12606 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12607 pops_needed += 2;
12610 /* If there is nothing to pop then just emit the BX instruction and
12611 return. */
12612 if (pops_needed == 0)
12614 if (current_function_calls_eh_return)
12615 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12617 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12618 return;
12620 /* Otherwise if we are not supporting interworking and we have not created
12621 a backtrace structure and the function was not entered in ARM mode then
12622 just pop the return address straight into the PC. */
12623 else if (!TARGET_INTERWORK
12624 && !TARGET_BACKTRACE
12625 && !is_called_in_ARM_mode (current_function_decl)
12626 && !current_function_calls_eh_return)
12628 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12629 return;
12632 /* Find out how many of the (return) argument registers we can corrupt. */
12633 regs_available_for_popping = 0;
12635 /* If returning via __builtin_eh_return, the bottom three registers
12636 all contain information needed for the return. */
12637 if (current_function_calls_eh_return)
12638 size = 12;
12639 else
12641 /* We can deduce the registers used from the function's
12642 return value. This is more reliable than examining
12643 regs_ever_live[] because that will be set if the register is
12644 ever used in the function, not just if the register is used
12645 to hold a return value. */
12647 if (current_function_return_rtx != 0)
12648 mode = GET_MODE (current_function_return_rtx);
12649 else
12650 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12652 size = GET_MODE_SIZE (mode);
12654 if (size == 0)
12656 /* In a void function we can use any argument register.
12657 In a function that returns a structure on the stack
12658 we can use the second and third argument registers. */
12659 if (mode == VOIDmode)
12660 regs_available_for_popping =
12661 (1 << ARG_REGISTER (1))
12662 | (1 << ARG_REGISTER (2))
12663 | (1 << ARG_REGISTER (3));
12664 else
12665 regs_available_for_popping =
12666 (1 << ARG_REGISTER (2))
12667 | (1 << ARG_REGISTER (3));
12669 else if (size <= 4)
12670 regs_available_for_popping =
12671 (1 << ARG_REGISTER (2))
12672 | (1 << ARG_REGISTER (3));
12673 else if (size <= 8)
12674 regs_available_for_popping =
12675 (1 << ARG_REGISTER (3));
12678 /* Match registers to be popped with registers into which we pop them. */
12679 for (available = regs_available_for_popping,
12680 required = regs_to_pop;
12681 required != 0 && available != 0;
12682 available &= ~(available & - available),
12683 required &= ~(required & - required))
12684 -- pops_needed;
12686 /* If we have any popping registers left over, remove them. */
12687 if (available > 0)
12688 regs_available_for_popping &= ~available;
12690 /* Otherwise if we need another popping register we can use
12691 the fourth argument register. */
12692 else if (pops_needed)
12694 /* If we have not found any free argument registers and
12695 reg a4 contains the return address, we must move it. */
12696 if (regs_available_for_popping == 0
12697 && reg_containing_return_addr == LAST_ARG_REGNUM)
12699 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12700 reg_containing_return_addr = LR_REGNUM;
12702 else if (size > 12)
12704 /* Register a4 is being used to hold part of the return value,
12705 but we have dire need of a free, low register. */
12706 restore_a4 = TRUE;
12708 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12711 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12713 /* The fourth argument register is available. */
12714 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12716 --pops_needed;
12720 /* Pop as many registers as we can. */
12721 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12722 regs_available_for_popping);
12724 /* Process the registers we popped. */
12725 if (reg_containing_return_addr == -1)
12727 /* The return address was popped into the lowest numbered register. */
12728 regs_to_pop &= ~(1 << LR_REGNUM);
12730 reg_containing_return_addr =
12731 number_of_first_bit_set (regs_available_for_popping);
12733 /* Remove this register from the mask of available registers, so that
12734 the return address will not be corrupted by further pops. */
12735 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12738 /* If we popped other registers then handle them here. */
12739 if (regs_available_for_popping)
12741 int frame_pointer;
12743 /* Work out which register currently contains the frame pointer. */
12744 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12746 /* Move it into the correct place. */
12747 asm_fprintf (f, "\tmov\t%r, %r\n",
12748 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12750 /* (Temporarily) remove it from the mask of popped registers. */
12751 regs_available_for_popping &= ~(1 << frame_pointer);
12752 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12754 if (regs_available_for_popping)
12756 int stack_pointer;
12758 /* We popped the stack pointer as well;
12759 find the register that contains it. */
12760 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12762 /* Move it into the stack register. */
12763 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12765 /* At this point we have popped all necessary registers, so
12766 do not worry about restoring regs_available_for_popping
12767 to its correct value:
12769 assert (pops_needed == 0)
12770 assert (regs_available_for_popping == (1 << frame_pointer))
12771 assert (regs_to_pop == (1 << STACK_POINTER)) */
12773 else
12775 /* Since we have just moved the popped value into the frame
12776 pointer, the popping register is available for reuse, and
12777 we know that we still have the stack pointer left to pop. */
12778 regs_available_for_popping |= (1 << frame_pointer);
12782 /* If we still have registers left on the stack, but we no longer have
12783 any registers into which we can pop them, then we must move the return
12784 address into the link register and make available the register that
12785 contained it. */
12786 if (regs_available_for_popping == 0 && pops_needed > 0)
12788 regs_available_for_popping |= 1 << reg_containing_return_addr;
12790 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12791 reg_containing_return_addr);
12793 reg_containing_return_addr = LR_REGNUM;
12796 /* If we have registers left on the stack then pop some more.
12797 We know that at most we will want to pop FP and SP. */
12798 if (pops_needed > 0)
12800 int popped_into;
12801 int move_to;
12803 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12804 regs_available_for_popping);
12806 /* We have popped either FP or SP.
12807 Move whichever one it is into the correct register. */
12808 popped_into = number_of_first_bit_set (regs_available_for_popping);
12809 move_to = number_of_first_bit_set (regs_to_pop);
12811 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12813 regs_to_pop &= ~(1 << move_to);
12815 --pops_needed;
12818 /* If we still have not popped everything then we must have only
12819 had one register available to us and we are now popping the SP. */
12820 if (pops_needed > 0)
12822 int popped_into;
12824 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12825 regs_available_for_popping);
12827 popped_into = number_of_first_bit_set (regs_available_for_popping);
12829 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12830 /*
12831 assert (regs_to_pop == (1 << STACK_POINTER))
12832 assert (pops_needed == 1)
12833 */
12836 /* If necessary restore the a4 register. */
12837 if (restore_a4)
12839 if (reg_containing_return_addr != LR_REGNUM)
12841 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12842 reg_containing_return_addr = LR_REGNUM;
12845 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12848 if (current_function_calls_eh_return)
12849 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12851 /* Return to caller. */
12852 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
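/* Two representative paths through this routine, for reference:
   with the return address still in LR and nothing left to pop, it
   emits simply

       bx lr

   while a non-interworking function whose return address is on the
   stack takes the short form

       pop {pc}

   The register-shuffling code in between is only needed when
   TARGET_BACKTRACE or an on-stack return address forces values
   through the argument registers. */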
12856 void
12857 thumb_final_prescan_insn (rtx insn)
12859 if (flag_print_asm_name)
12860 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12861 INSN_ADDRESSES (INSN_UID (insn)));
12864 int
12865 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12867 unsigned HOST_WIDE_INT mask = 0xff;
12868 int i;
12870 if (val == 0) /* XXX */
12871 return 0;
12873 for (i = 0; i < 25; i++)
12874 if ((val & (mask << i)) == val)
12875 return 1;
12877 return 0;
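/* Concretely: this accepts any constant that is an 8-bit value
   shifted left by 0 to 24 bits.  So 0x12000 (0x12 << 12) returns 1,
   while 0x101 returns 0 because its set bits span more than eight
   contiguous positions. */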
12880 /* Returns nonzero if the current function contains,
12881 or might contain a far jump. */
12882 static int
12883 thumb_far_jump_used_p (void)
12885 rtx insn;
12887 /* This test is only important for leaf functions. */
12888 /* assert (!leaf_function_p ()); */
12890 /* If we have already decided that far jumps may be used,
12891 do not bother checking again, and always return true even if
12892 it turns out that they are not being used. Once we have made
12893 the decision that far jumps are present (and that hence the link
12894 register will be pushed onto the stack) we cannot go back on it. */
12895 if (cfun->machine->far_jump_used)
12896 return 1;
12898 /* If this function is not being called from the prologue/epilogue
12899 generation code then it must be being called from the
12900 INITIAL_ELIMINATION_OFFSET macro. */
12901 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12903 /* In this case we know that we are being asked about the elimination
12904 of the arg pointer register. If that register is not being used,
12905 then there are no arguments on the stack, and we do not have to
12906 worry that a far jump might force the prologue to push the link
12907 register, changing the stack offsets. In this case we can just
12908 return false, since the presence of far jumps in the function will
12909 not affect stack offsets.
12911 If the arg pointer is live (or if it was live, but has now been
12912 eliminated and so set to dead) then we do have to test to see if
12913 the function might contain a far jump. This test can lead to some
12914 false negatives, since before reload is completed, the length of
12915 branch instructions is not known, so gcc defaults to returning their
12916 longest length, which in turn sets the far jump attribute to true.
12918 A false negative will not result in bad code being generated, but it
12919 will result in a needless push and pop of the link register. We
12920 hope that this does not occur too often.
12922 If we need doubleword stack alignment this could affect the other
12923 elimination offsets so we can't risk getting it wrong. */
12924 if (regs_ever_live [ARG_POINTER_REGNUM])
12925 cfun->machine->arg_pointer_live = 1;
12926 else if (!cfun->machine->arg_pointer_live)
12927 return 0;
12930 /* Check to see if the function contains a branch
12931 insn with the far jump attribute set. */
12932 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12934 if (GET_CODE (insn) == JUMP_INSN
12935 /* Ignore tablejump patterns. */
12936 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12937 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12938 && get_attr_far_jump (insn) == FAR_JUMP_YES
12941 /* Record the fact that we have decided that
12942 the function does use far jumps. */
12943 cfun->machine->far_jump_used = 1;
12944 return 1;
12948 return 0;
12951 /* Return nonzero if FUNC must be entered in ARM mode. */
12952 int
12953 is_called_in_ARM_mode (tree func)
12955 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
12957 /* Ignore the problem of functions whose address is taken. */
12958 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12959 return TRUE;
12961 #ifdef ARM_PE
12962 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12963 #else
12964 return FALSE;
12965 #endif
12968 /* The bits which aren't usefully expanded as rtl. */
12969 const char *
12970 thumb_unexpanded_epilogue (void)
12972 int regno;
12973 unsigned long live_regs_mask = 0;
12974 int high_regs_pushed = 0;
12975 int had_to_push_lr;
12976 int size;
12978 if (return_used_this_function)
12979 return "";
12981 if (IS_NAKED (arm_current_func_type ()))
12982 return "";
12984 live_regs_mask = thumb_compute_save_reg_mask ();
12985 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12987 /* We can deduce the registers used from the function's return value.
12988 This is more reliable than examining regs_ever_live[] because that
12989 will be set if the register is ever used in the function, not just if
12990 the register is used to hold a return value. */
12991 size = arm_size_return_regs ();
12993 /* The prologue may have pushed some high registers to use as
12994 work registers; e.g. the testsuite file:
12995 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12996 compiles to produce:
12997 push {r4, r5, r6, r7, lr}
12998 mov r7, r9
12999 mov r6, r8
13000 push {r6, r7}
13001 as part of the prologue. We have to undo that pushing here. */
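/* For the example above, the undo sequence emitted below would look
   roughly like:

       pop {r2, r3}       reload the saved copies into free low regs
       mov r8, r2
       mov r9, r3
       pop {r4, r5, r6, r7, pc}

   This is a sketch; which low registers are actually free depends on
   the return value size computed just below. */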
13003 if (high_regs_pushed)
13005 unsigned long mask = live_regs_mask & 0xff;
13006 int next_hi_reg;
13008 /* The available low registers depend on the size of the value we are
13009 returning. */
13010 if (size <= 12)
13011 mask |= 1 << 3;
13012 if (size <= 8)
13013 mask |= 1 << 2;
13015 if (mask == 0)
13016 /* Oh dear! We have no low registers into which we can pop
13017 high registers! */
13018 internal_error
13019 ("no low registers available for popping high registers");
13021 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13022 if (live_regs_mask & (1 << next_hi_reg))
13023 break;
13025 while (high_regs_pushed)
13027 /* Find lo register(s) into which the high register(s) can
13028 be popped. */
13029 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13031 if (mask & (1 << regno))
13032 high_regs_pushed--;
13033 if (high_regs_pushed == 0)
13034 break;
13037 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13039 /* Pop the values into the low register(s). */
13040 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13042 /* Move the value(s) into the high registers. */
13043 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13045 if (mask & (1 << regno))
13047 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13048 regno);
13050 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13051 if (live_regs_mask & (1 << next_hi_reg))
13052 break;
13056 live_regs_mask &= ~0x0f00;
13059 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13060 live_regs_mask &= 0xff;
13062 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13064 /* Pop the return address into the PC. */
13065 if (had_to_push_lr)
13066 live_regs_mask |= 1 << PC_REGNUM;
13068 /* Either no argument registers were pushed or a backtrace
13069 structure was created which includes an adjusted stack
13070 pointer, so just pop everything. */
13071 if (live_regs_mask)
13072 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13073 live_regs_mask);
13075 /* We have either just popped the return address into the
13076 PC or it was kept in LR for the entire function. */
13077 if (!had_to_push_lr)
13078 thumb_exit (asm_out_file, LR_REGNUM);
13080 else
13082 /* Pop everything but the return address. */
13083 if (live_regs_mask)
13084 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13085 live_regs_mask);
13087 if (had_to_push_lr)
13089 if (size > 12)
13091 /* We have no free low regs, so save one. */
13092 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13093 LAST_ARG_REGNUM);
13096 /* Get the return address into a temporary register. */
13097 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13098 1 << LAST_ARG_REGNUM);
13100 if (size > 12)
13102 /* Move the return address to lr. */
13103 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13104 LAST_ARG_REGNUM);
13105 /* Restore the low register. */
13106 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13107 IP_REGNUM);
13108 regno = LR_REGNUM;
13110 else
13111 regno = LAST_ARG_REGNUM;
13113 else
13114 regno = LR_REGNUM;
13116 /* Remove the argument registers that were pushed onto the stack. */
13117 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13118 SP_REGNUM, SP_REGNUM,
13119 current_function_pretend_args_size);
13121 thumb_exit (asm_out_file, regno);
13124 return "";
13127 /* Functions to save and restore machine-specific function data. */
13128 static struct machine_function *
13129 arm_init_machine_status (void)
13131 struct machine_function *machine;
13132 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13134 #if ARM_FT_UNKNOWN != 0
13135 machine->func_type = ARM_FT_UNKNOWN;
13136 #endif
13137 return machine;
13140 /* Return an RTX indicating where the return address to the
13141 calling function can be found. */
13142 rtx
13143 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13145 if (count != 0)
13146 return NULL_RTX;
13148 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
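/* This is what implements, for example,

       void *ra = __builtin_return_address (0);

   The saved incoming LR is materialized through
   get_hard_reg_initial_val, and any deeper frame (COUNT != 0)
   simply yields NULL. */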
13151 /* Do anything needed before RTL is emitted for each function. */
13152 void
13153 arm_init_expanders (void)
13155 /* Arrange to initialize and mark the machine per-function status. */
13156 init_machine_status = arm_init_machine_status;
13158 /* This is to stop the combine pass optimizing away the alignment
13159 adjustment of va_arg. */
13160 /* ??? It is claimed that this should not be necessary. */
13161 if (cfun)
13162 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13166 /* Like arm_compute_initial_elimination_offset. Simpler because there
13167 isn't an ABI specified frame pointer for Thumb. Instead, we set it
13168 to point at the base of the local variables after static stack
13169 space for a function has been allocated. */
13171 HOST_WIDE_INT
13172 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13174 arm_stack_offsets *offsets;
13176 offsets = arm_get_frame_offsets ();
13178 switch (from)
13180 case ARG_POINTER_REGNUM:
13181 switch (to)
13183 case STACK_POINTER_REGNUM:
13184 return offsets->outgoing_args - offsets->saved_args;
13186 case FRAME_POINTER_REGNUM:
13187 return offsets->soft_frame - offsets->saved_args;
13189 case ARM_HARD_FRAME_POINTER_REGNUM:
13190 return offsets->saved_regs - offsets->saved_args;
13192 case THUMB_HARD_FRAME_POINTER_REGNUM:
13193 return offsets->locals_base - offsets->saved_args;
13195 default:
13196 gcc_unreachable ();
13198 break;
13200 case FRAME_POINTER_REGNUM:
13201 switch (to)
13203 case STACK_POINTER_REGNUM:
13204 return offsets->outgoing_args - offsets->soft_frame;
13206 case ARM_HARD_FRAME_POINTER_REGNUM:
13207 return offsets->saved_regs - offsets->soft_frame;
13209 case THUMB_HARD_FRAME_POINTER_REGNUM:
13210 return offsets->locals_base - offsets->soft_frame;
13212 default:
13213 gcc_unreachable ();
13215 break;
13217 default:
13218 gcc_unreachable ();
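/* A worked example with made-up frame-layout numbers: if
   offsets->saved_args == 0, saved_regs == 24, soft_frame == 24,
   locals_base == 40 and outgoing_args == 56, then eliminating
   ARG_POINTER_REGNUM into STACK_POINTER_REGNUM yields 56 - 0 = 56,
   and FRAME_POINTER_REGNUM into THUMB_HARD_FRAME_POINTER_REGNUM
   yields 40 - 24 = 16. */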
13223 /* Generate the rest of a function's prologue. */
13224 void
13225 thumb_expand_prologue (void)
13227 rtx insn, dwarf;
13229 HOST_WIDE_INT amount;
13230 arm_stack_offsets *offsets;
13231 unsigned long func_type;
13232 int regno;
13233 unsigned long live_regs_mask;
13235 func_type = arm_current_func_type ();
13237 /* Naked functions don't have prologues. */
13238 if (IS_NAKED (func_type))
13239 return;
13241 if (IS_INTERRUPT (func_type))
13243 error ("interrupt service routines cannot be coded in Thumb mode");
13244 return;
13247 live_regs_mask = thumb_compute_save_reg_mask ();
13248 /* Load the pic register before setting the frame pointer,
13249 so we can use r7 as a temporary work register. */
13250 if (flag_pic)
13251 arm_load_pic_register (live_regs_mask);
13253 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
13254 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13255 stack_pointer_rtx);
13257 offsets = arm_get_frame_offsets ();
13258 amount = offsets->outgoing_args - offsets->saved_regs;
13259 if (amount)
13261 if (amount < 512)
13263 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13264 GEN_INT (- amount)));
13265 RTX_FRAME_RELATED_P (insn) = 1;
13267 else
13269 rtx reg;
13271 /* The stack decrement is too big for an immediate value in a single
13272 insn. In theory we could issue multiple subtracts, but after
13273 three of them it becomes more space efficient to place the full
13274 value in the constant pool and load into a register. (Also the
13275 ARM debugger really likes to see only one stack decrement per
13276 function). So instead we look for a scratch register into which
13277 we can load the decrement, and then we subtract this from the
13278 stack pointer. Unfortunately on the Thumb the only available
13279 scratch registers are the argument registers, and we cannot use
13280 these as they may hold arguments to the function. Instead we
13281 attempt to locate a call preserved register which is used by this
13282 function. If we can find one, then we know that it will have
13283 been pushed at the start of the prologue and so we can corrupt
13284 it now. */
13285 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13286 if (live_regs_mask & (1 << regno)
13287 && !(frame_pointer_needed
13288 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13289 break;
13291 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13293 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13295 /* Choose an arbitrary, non-argument low register. */
13296 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13298 /* Save it by copying it into a high, scratch register. */
13299 emit_insn (gen_movsi (spare, reg));
13300 /* Add a USE to stop propagate_one_insn() from barfing. */
13301 emit_insn (gen_prologue_use (spare));
13303 /* Decrement the stack. */
13304 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13305 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13306 stack_pointer_rtx, reg));
13307 RTX_FRAME_RELATED_P (insn) = 1;
13308 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13309 plus_constant (stack_pointer_rtx,
13310 -amount));
13311 RTX_FRAME_RELATED_P (dwarf) = 1;
13312 REG_NOTES (insn)
13313 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13314 REG_NOTES (insn));
13316 /* Restore the low register's original value. */
13317 emit_insn (gen_movsi (reg, spare));
13319 /* Emit a USE of the restored scratch register, so that flow
13320 analysis will not consider the restore redundant. The
13321 register won't be used again in this function and isn't
13322 restored by the epilogue. */
13323 emit_insn (gen_prologue_use (reg));
13325 else
13327 reg = gen_rtx_REG (SImode, regno);
13329 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13331 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13332 stack_pointer_rtx, reg));
13333 RTX_FRAME_RELATED_P (insn) = 1;
13334 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13335 plus_constant (stack_pointer_rtx,
13336 -amount));
13337 RTX_FRAME_RELATED_P (dwarf) = 1;
13338 REG_NOTES (insn)
13339 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13340 REG_NOTES (insn));
13345 if (frame_pointer_needed)
13347 amount = offsets->outgoing_args - offsets->locals_base;
13349 if (amount < 1024)
13350 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13351 stack_pointer_rtx, GEN_INT (amount)));
13352 else
13354 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
13355 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13356 hard_frame_pointer_rtx,
13357 stack_pointer_rtx));
13358 dwarf = gen_rtx_SET (SImode, hard_frame_pointer_rtx,
13359 plus_constant (stack_pointer_rtx, amount));
13360 RTX_FRAME_RELATED_P (dwarf) = 1;
13361 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13362 REG_NOTES (insn));
13365 RTX_FRAME_RELATED_P (insn) = 1;
13368 if (current_function_profile || !TARGET_SCHED_PROLOG)
13369 emit_insn (gen_blockage ());
13371 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13372 if (live_regs_mask & 0xff)
13373 cfun->machine->lr_save_eliminated = 0;
13375 /* If the link register is being kept alive, with the return address in it,
13376 then make sure that it does not get reused by the ce2 pass. */
13377 if (cfun->machine->lr_save_eliminated)
13378 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13382 void
13383 thumb_expand_epilogue (void)
13385 HOST_WIDE_INT amount;
13386 arm_stack_offsets *offsets;
13387 int regno;
13389 /* Naked functions don't have epilogues. */
13390 if (IS_NAKED (arm_current_func_type ()))
13391 return;
13393 offsets = arm_get_frame_offsets ();
13394 amount = offsets->outgoing_args - offsets->saved_regs;
13396 if (frame_pointer_needed)
13398 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13399 amount = offsets->locals_base - offsets->saved_regs;
13402 if (amount)
13404 if (amount < 512)
13405 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13406 GEN_INT (amount)));
13407 else
13409 /* r3 is always free in the epilogue. */
13410 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13412 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13413 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13417 /* Emit a USE (stack_pointer_rtx), so that
13418 the stack adjustment will not be deleted. */
13419 emit_insn (gen_prologue_use (stack_pointer_rtx));
13421 if (current_function_profile || !TARGET_SCHED_PROLOG)
13422 emit_insn (gen_blockage ());
13424 /* Emit a clobber for each insn that will be restored in the epilogue,
13425 so that flow2 will get register lifetimes correct. */
13426 for (regno = 0; regno < 13; regno++)
13427 if (regs_ever_live[regno] && !call_used_regs[regno])
13428 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13430 if (! regs_ever_live[LR_REGNUM])
13431 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13434 static void
13435 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13437 unsigned long live_regs_mask = 0;
13438 unsigned long l_mask;
13439 unsigned high_regs_pushed = 0;
13440 int cfa_offset = 0;
13441 int regno;
13443 if (IS_NAKED (arm_current_func_type ()))
13444 return;
13446 if (is_called_in_ARM_mode (current_function_decl))
13448 const char * name;
13450 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13451 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13452 == SYMBOL_REF);
13453 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13455 /* Generate code sequence to switch us into Thumb mode. */
13456 /* The .code 32 directive has already been emitted by
13457 ASM_DECLARE_FUNCTION_NAME. */
13458 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13459 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13461 /* Generate a label, so that the debugger will notice the
13462 change in instruction sets. This label is also used by
13463 the assembler to bypass the ARM code when this function
13464 is called from a Thumb encoded function elsewhere in the
13465 same file. Hence the definition of STUB_NAME here must
13466 agree with the definition in gas/config/tc-arm.c. */
13468 #define STUB_NAME ".real_start_of"
13470 fprintf (f, "\t.code\t16\n");
13471 #ifdef ARM_PE
13472 if (arm_dllexport_name_p (name))
13473 name = arm_strip_name_encoding (name);
13474 #endif
13475 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13476 fprintf (f, "\t.thumb_func\n");
13477 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13480 if (current_function_pretend_args_size)
13482 /* Output unwind directive for the stack adjustment. */
13483 if (ARM_EABI_UNWIND_TABLES)
13484 fprintf (f, "\t.pad #%d\n",
13485 current_function_pretend_args_size);
13487 if (cfun->machine->uses_anonymous_args)
13489 int num_pushes;
13491 fprintf (f, "\tpush\t{");
13493 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13495 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13496 regno <= LAST_ARG_REGNUM;
13497 regno++)
13498 asm_fprintf (f, "%r%s", regno,
13499 regno == LAST_ARG_REGNUM ? "" : ", ");
13501 fprintf (f, "}\n");
13503 else
13504 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13505 SP_REGNUM, SP_REGNUM,
13506 current_function_pretend_args_size);
13508 /* We don't need to record the stores for unwinding (would it
13509 help the debugger any if we did?), but record the change in
13510 the stack pointer. */
13511 if (dwarf2out_do_frame ())
13513 char *l = dwarf2out_cfi_label ();
13515 cfa_offset = cfa_offset + current_function_pretend_args_size;
13516 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13520 /* Get the registers we are going to push. */
13521 live_regs_mask = thumb_compute_save_reg_mask ();
13522 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13523 l_mask = live_regs_mask & 0x40ff;
13524 /* Then count how many other high registers will need to be pushed. */
13525 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13527 if (TARGET_BACKTRACE)
13529 unsigned offset;
13530 unsigned work_register;
13532 /* We have been asked to create a stack backtrace structure.
13533 The code looks like this:
13535 0 .align 2
13536 0 func:
13537 0 sub SP, #16 Reserve space for 4 registers.
13538 2 push {R7} Push low registers.
13539 4 add R7, SP, #20 Get the stack pointer before the push.
13540 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13541 8 mov R7, PC Get hold of the start of this code plus 12.
13542 10 str R7, [SP, #16] Store it.
13543 12 mov R7, FP Get hold of the current frame pointer.
13544 14 str R7, [SP, #4] Store it.
13545 16 mov R7, LR Get hold of the current return address.
13546 18 str R7, [SP, #12] Store it.
13547 20 add R7, SP, #16 Point at the start of the backtrace structure.
13548 22 mov FP, R7 Put this value into the frame pointer. */
13550 work_register = thumb_find_work_register (live_regs_mask);
13552 if (ARM_EABI_UNWIND_TABLES)
13553 asm_fprintf (f, "\t.pad #16\n");
13555 asm_fprintf
13556 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13557 SP_REGNUM, SP_REGNUM);
13559 if (dwarf2out_do_frame ())
13561 char *l = dwarf2out_cfi_label ();
13563 cfa_offset = cfa_offset + 16;
13564 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13567 if (l_mask)
13569 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13570 offset = bit_count (l_mask) * UNITS_PER_WORD;
13572 else
13573 offset = 0;
13575 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13576 offset + 16 + current_function_pretend_args_size);
13578 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13579 offset + 4);
13581 /* Make sure that the instruction fetching the PC is in the right place
13582 to calculate "start of backtrace creation code + 12". */
13583 if (l_mask)
13585 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13586 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13587 offset + 12);
13588 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13589 ARM_HARD_FRAME_POINTER_REGNUM);
13590 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13591 offset);
13593 else
13595 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13596 ARM_HARD_FRAME_POINTER_REGNUM);
13597 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13598 offset);
13599 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13600 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13601 offset + 12);
13604 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13605 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13606 offset + 8);
13607 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13608 offset + 12);
13609 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13610 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13612 /* Optimization: If we are not pushing any low registers but we are going
13613 to push some high registers, then delay our first push. This will just
13614 be a push of LR and we can combine it with the push of the first high
13615 register. */
13616 else if ((l_mask & 0xff) != 0
13617 || (high_regs_pushed == 0 && l_mask))
13618 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13620 if (high_regs_pushed)
13622 unsigned pushable_regs;
13623 unsigned next_hi_reg;
13625 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13626 if (live_regs_mask & (1 << next_hi_reg))
13627 break;
13629 pushable_regs = l_mask & 0xff;
13631 if (pushable_regs == 0)
13632 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13634 while (high_regs_pushed > 0)
13636 unsigned long real_regs_mask = 0;
13638 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13640 if (pushable_regs & (1 << regno))
13642 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13644 high_regs_pushed --;
13645 real_regs_mask |= (1 << next_hi_reg);
13647 if (high_regs_pushed)
13649 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13650 next_hi_reg --)
13651 if (live_regs_mask & (1 << next_hi_reg))
13652 break;
13654 else
13656 pushable_regs &= ~((1 << regno) - 1);
13657 break;
13662 /* If we had to find a work register and we have not yet
13663 saved the LR then add it to the list of regs to push. */
13664 if (l_mask == (1 << LR_REGNUM))
13666 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13667 1, &cfa_offset,
13668 real_regs_mask | (1 << LR_REGNUM));
13669 l_mask = 0;
13671 else
13672 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
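/* A worked example of the loop above (register choices assumed for
   illustration): with r8 and r10 live and r2/r3 available as pushable
   low registers, the emitted code is
	mov	r3, r10
	mov	r2, r8
	push	{r2, r3}
   with real_regs_mask recording r8 and r10, so the push annotations
   describe the high registers actually being saved.  */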
13677 /* Handle the case of a double word load into a low register from
13678 a computed memory address. The computed address may involve a
13679 register which is overwritten by the load. */
13680 const char *
13681 thumb_load_double_from_address (rtx *operands)
13683 rtx addr;
13684 rtx base;
13685 rtx offset;
13686 rtx arg1;
13687 rtx arg2;
13689 gcc_assert (GET_CODE (operands[0]) == REG);
13690 gcc_assert (GET_CODE (operands[1]) == MEM);
13692 /* Get the memory address. */
13693 addr = XEXP (operands[1], 0);
13695 /* Work out how the memory address is computed. */
13696 switch (GET_CODE (addr))
13698 case REG:
13699 operands[2] = adjust_address (operands[1], SImode, 4);
13701 if (REGNO (operands[0]) == REGNO (addr))
13703 output_asm_insn ("ldr\t%H0, %2", operands);
13704 output_asm_insn ("ldr\t%0, %1", operands);
13706 else
13708 output_asm_insn ("ldr\t%0, %1", operands);
13709 output_asm_insn ("ldr\t%H0, %2", operands);
13711 break;
13713 case CONST:
13714 /* Compute <address> + 4 for the high order load. */
13715 operands[2] = adjust_address (operands[1], SImode, 4);
13717 output_asm_insn ("ldr\t%0, %1", operands);
13718 output_asm_insn ("ldr\t%H0, %2", operands);
13719 break;
13721 case PLUS:
13722 arg1 = XEXP (addr, 0);
13723 arg2 = XEXP (addr, 1);
13725 if (CONSTANT_P (arg1))
13726 base = arg2, offset = arg1;
13727 else
13728 base = arg1, offset = arg2;
13730 gcc_assert (GET_CODE (base) == REG);
13732 /* Catch the case of <address> = <reg> + <reg> */
13733 if (GET_CODE (offset) == REG)
13735 int reg_offset = REGNO (offset);
13736 int reg_base = REGNO (base);
13737 int reg_dest = REGNO (operands[0]);
13739 /* Add the base and offset registers together into the
13740 higher destination register. */
13741 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13742 reg_dest + 1, reg_base, reg_offset);
13744 /* Load the lower destination register from the address in
13745 the higher destination register. */
13746 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13747 reg_dest, reg_dest + 1);
13749 /* Load the higher destination register from its own address
13750 plus 4. */
13751 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13752 reg_dest + 1, reg_dest + 1);
13754 else
13756 /* Compute <address> + 4 for the high order load. */
13757 operands[2] = adjust_address (operands[1], SImode, 4);
13760 /* If the computed address is held in the low order register
13761 then load the high order register first, otherwise always
13762 load the low order register first. */
13763 if (REGNO (operands[0]) == REGNO (base))
13765 output_asm_insn ("ldr\t%H0, %2", operands);
13766 output_asm_insn ("ldr\t%0, %1", operands);
13768 else
13770 output_asm_insn ("ldr\t%0, %1", operands);
13771 output_asm_insn ("ldr\t%H0, %2", operands);
13774 break;
13776 case LABEL_REF:
13777 /* With no registers to worry about we can just load the value
13778 directly. */
13779 operands[2] = adjust_address (operands[1], SImode, 4);
13781 output_asm_insn ("ldr\t%H0, %2", operands);
13782 output_asm_insn ("ldr\t%0, %1", operands);
13783 break;
13785 default:
13786 gcc_unreachable ();
13789 return "";
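/* For illustration (register numbers assumed): loading a DImode value
   into r0/r1 from the address held in r0 takes the REG case above down
   its first arm, emitting
	ldr	r1, [r0, #4]	@ high word first, base still valid
	ldr	r0, [r0]	@ low word last clobbers the base
   whereas a destination that does not overlap the base register loads
   the low word first.  */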
13792 const char *
13793 thumb_output_move_mem_multiple (int n, rtx *operands)
13795 rtx tmp;
13797 switch (n)
13799 case 2:
13800 if (REGNO (operands[4]) > REGNO (operands[5]))
13802 tmp = operands[4];
13803 operands[4] = operands[5];
13804 operands[5] = tmp;
13806 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13807 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13808 break;
13810 case 3:
13811 if (REGNO (operands[4]) > REGNO (operands[5]))
13813 tmp = operands[4];
13814 operands[4] = operands[5];
13815 operands[5] = tmp;
13817 if (REGNO (operands[5]) > REGNO (operands[6]))
13819 tmp = operands[5];
13820 operands[5] = operands[6];
13821 operands[6] = tmp;
13823 if (REGNO (operands[4]) > REGNO (operands[5]))
13825 tmp = operands[4];
13826 operands[4] = operands[5];
13827 operands[5] = tmp;
13830 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13831 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13832 break;
13834 default:
13835 gcc_unreachable ();
13838 return "";
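/* The swaps above exist because ldmia/stmia register lists must be in
   ascending numerical order.  For example (registers assumed), a
   three-word move whose operands 4..6 arrive as r5, r3, r4 is sorted
   into r3, r4, r5 before emitting
	ldmia	r1!, {r3, r4, r5}
	stmia	r0!, {r3, r4, r5}  */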
13841 /* Output a call-via instruction for thumb state. */
13842 const char *
13843 thumb_call_via_reg (rtx reg)
13845 int regno = REGNO (reg);
13846 rtx *labelp;
13848 gcc_assert (regno < LR_REGNUM);
13850 /* If we are in the normal text section we can use a single instance
13851 per compilation unit. If we are doing function sections, then we need
13852 an entry per section, since we can't rely on reachability. */
13853 if (in_text_section ())
13855 thumb_call_reg_needed = 1;
13857 if (thumb_call_via_label[regno] == NULL)
13858 thumb_call_via_label[regno] = gen_label_rtx ();
13859 labelp = thumb_call_via_label + regno;
13861 else
13863 if (cfun->machine->call_via[regno] == NULL)
13864 cfun->machine->call_via[regno] = gen_label_rtx ();
13865 labelp = cfun->machine->call_via + regno;
13868 output_asm_insn ("bl\t%a0", labelp);
13869 return "";
13872 /* Routines for generating rtl. */
13873 void
13874 thumb_expand_movmemqi (rtx *operands)
13876 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13877 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13878 HOST_WIDE_INT len = INTVAL (operands[2]);
13879 HOST_WIDE_INT offset = 0;
13881 while (len >= 12)
13883 emit_insn (gen_movmem12b (out, in, out, in));
13884 len -= 12;
13887 if (len >= 8)
13889 emit_insn (gen_movmem8b (out, in, out, in));
13890 len -= 8;
13893 if (len >= 4)
13895 rtx reg = gen_reg_rtx (SImode);
13896 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13897 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13898 len -= 4;
13899 offset += 4;
13902 if (len >= 2)
13904 rtx reg = gen_reg_rtx (HImode);
13905 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13906 plus_constant (in, offset))));
13907 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13908 reg));
13909 len -= 2;
13910 offset += 2;
13913 if (len)
13915 rtx reg = gen_reg_rtx (QImode);
13916 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13917 plus_constant (in, offset))));
13918 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13919 reg));
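/* A worked example (length assumed for illustration): a 27-byte copy
   expands to two 12-byte ldmia/stmia pairs, which post-increment the
   IN and OUT pointers, followed by a halfword move at offset 0 and a
   final byte move at offset 2 from the updated pointers.  */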
13923 void
13924 thumb_reload_out_hi (rtx *operands)
13926 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13929 /* Handle reading a half-word from memory during reload. */
13930 void
13931 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13933 gcc_unreachable ();
13936 /* Return the length of a function name prefix
13937 that starts with the character 'c'. */
13938 static int
13939 arm_get_strip_length (int c)
13941 switch (c)
13943 ARM_NAME_ENCODING_LENGTHS
13944 default: return 0;
13948 /* Return a pointer to a function's name with any
13949 and all prefix encodings stripped from it. */
13950 const char *
13951 arm_strip_name_encoding (const char *name)
13953 int skip;
13955 while ((skip = arm_get_strip_length (* name)))
13956 name += skip;
13958 return name;
13961 /* If there is a '*' anywhere in the name's prefix, then
13962 emit the stripped name verbatim, otherwise prepend an
13963 underscore if leading underscores are being used. */
13964 void
13965 arm_asm_output_labelref (FILE *stream, const char *name)
13967 int skip;
13968 int verbatim = 0;
13970 while ((skip = arm_get_strip_length (* name)))
13972 verbatim |= (*name == '*');
13973 name += skip;
13976 if (verbatim)
13977 fputs (name, stream);
13978 else
13979 asm_fprintf (stream, "%U%s", name);
13982 static void
13983 arm_file_end (void)
13985 int regno;
13987 if (! thumb_call_reg_needed)
13988 return;
13990 text_section ();
13991 asm_fprintf (asm_out_file, "\t.code 16\n");
13992 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13994 for (regno = 0; regno < LR_REGNUM; regno++)
13996 rtx label = thumb_call_via_label[regno];
13998 if (label != 0)
14000 targetm.asm_out.internal_label (asm_out_file, "L",
14001 CODE_LABEL_NUMBER (label));
14002 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
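/* For illustration, a call through r3 (register chosen arbitrarily)
   will have been emitted by thumb_call_via_reg as "bl" to the label
   allocated for that register; the code above then emits, after the
   single ".code 16" marker, one stub per live label:
   .L<n>:
	bx	r3  */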
14007 rtx aof_pic_label;
14009 #ifdef AOF_ASSEMBLER
14010 /* Special functions only needed when producing AOF syntax assembler. */
14012 struct pic_chain
14014 struct pic_chain * next;
14015 const char * symname;
14018 static struct pic_chain * aof_pic_chain = NULL;
14020 rtx
14021 aof_pic_entry (rtx x)
14023 struct pic_chain ** chainp;
14024 int offset;
14026 if (aof_pic_label == NULL_RTX)
14028 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14031 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14032 offset += 4, chainp = &(*chainp)->next)
14033 if ((*chainp)->symname == XSTR (x, 0))
14034 return plus_constant (aof_pic_label, offset);
14036 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14037 (*chainp)->next = NULL;
14038 (*chainp)->symname = XSTR (x, 0);
14039 return plus_constant (aof_pic_label, offset);
14042 void
14043 aof_dump_pic_table (FILE *f)
14045 struct pic_chain * chain;
14047 if (aof_pic_chain == NULL)
14048 return;
14050 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14051 PIC_OFFSET_TABLE_REGNUM,
14052 PIC_OFFSET_TABLE_REGNUM);
14053 fputs ("|x$adcons|\n", f);
14055 for (chain = aof_pic_chain; chain; chain = chain->next)
14057 fputs ("\tDCD\t", f);
14058 assemble_name (f, chain->symname);
14059 fputs ("\n", f);
14063 int arm_text_section_count = 1;
14065 char *
14066 aof_text_section (void)
14068 static char buf[100];
14069 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
14070 arm_text_section_count++);
14071 if (flag_pic)
14072 strcat (buf, ", PIC, REENTRANT");
14073 return buf;
14076 static int arm_data_section_count = 1;
14078 char *
14079 aof_data_section (void)
14081 static char buf[100];
14082 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
14083 return buf;
14086 /* The AOF assembler is religiously strict about declarations of
14087 imported and exported symbols, so that it is impossible to declare
14088 a function as imported near the beginning of the file, and then to
14089 export it later on. It is, however, possible to delay the decision
14090 until all the functions in the file have been compiled. To get
14091 around this, we maintain a list of the imports and exports, and
14092 delete from it any that are subsequently defined. At the end of
14093 compilation we spit the remainder of the list out before the END
14094 directive. */
14096 struct import
14098 struct import * next;
14099 const char * name;
14102 static struct import * imports_list = NULL;
14104 void
14105 aof_add_import (const char *name)
14107 struct import * new;
14109 for (new = imports_list; new; new = new->next)
14110 if (new->name == name)
14111 return;
14113 new = (struct import *) xmalloc (sizeof (struct import));
14114 new->next = imports_list;
14115 imports_list = new;
14116 new->name = name;
14119 void
14120 aof_delete_import (const char *name)
14122 struct import ** old;
14124 for (old = &imports_list; *old; old = & (*old)->next)
14126 if ((*old)->name == name)
14128 *old = (*old)->next;
14129 return;
14134 int arm_main_function = 0;
14136 static void
14137 aof_dump_imports (FILE *f)
14139 /* The AOF assembler needs this to cause the startup code to be extracted
14140 from the library. Bringing in __main causes the whole thing to work
14141 automagically. */
14142 if (arm_main_function)
14144 text_section ();
14145 fputs ("\tIMPORT __main\n", f);
14146 fputs ("\tDCD __main\n", f);
14149 /* Now dump the remaining imports. */
14150 while (imports_list)
14152 fprintf (f, "\tIMPORT\t");
14153 assemble_name (f, imports_list->name);
14154 fputc ('\n', f);
14155 imports_list = imports_list->next;
14159 static void
14160 aof_globalize_label (FILE *stream, const char *name)
14162 default_globalize_label (stream, name);
14163 if (! strcmp (name, "main"))
14164 arm_main_function = 1;
14167 static void
14168 aof_file_start (void)
14170 fputs ("__r0\tRN\t0\n", asm_out_file);
14171 fputs ("__a1\tRN\t0\n", asm_out_file);
14172 fputs ("__a2\tRN\t1\n", asm_out_file);
14173 fputs ("__a3\tRN\t2\n", asm_out_file);
14174 fputs ("__a4\tRN\t3\n", asm_out_file);
14175 fputs ("__v1\tRN\t4\n", asm_out_file);
14176 fputs ("__v2\tRN\t5\n", asm_out_file);
14177 fputs ("__v3\tRN\t6\n", asm_out_file);
14178 fputs ("__v4\tRN\t7\n", asm_out_file);
14179 fputs ("__v5\tRN\t8\n", asm_out_file);
14180 fputs ("__v6\tRN\t9\n", asm_out_file);
14181 fputs ("__sl\tRN\t10\n", asm_out_file);
14182 fputs ("__fp\tRN\t11\n", asm_out_file);
14183 fputs ("__ip\tRN\t12\n", asm_out_file);
14184 fputs ("__sp\tRN\t13\n", asm_out_file);
14185 fputs ("__lr\tRN\t14\n", asm_out_file);
14186 fputs ("__pc\tRN\t15\n", asm_out_file);
14187 fputs ("__f0\tFN\t0\n", asm_out_file);
14188 fputs ("__f1\tFN\t1\n", asm_out_file);
14189 fputs ("__f2\tFN\t2\n", asm_out_file);
14190 fputs ("__f3\tFN\t3\n", asm_out_file);
14191 fputs ("__f4\tFN\t4\n", asm_out_file);
14192 fputs ("__f5\tFN\t5\n", asm_out_file);
14193 fputs ("__f6\tFN\t6\n", asm_out_file);
14194 fputs ("__f7\tFN\t7\n", asm_out_file);
14195 text_section ();
14198 static void
14199 aof_file_end (void)
14201 if (flag_pic)
14202 aof_dump_pic_table (asm_out_file);
14203 arm_file_end ();
14204 aof_dump_imports (asm_out_file);
14205 fputs ("\tEND\n", asm_out_file);
14207 #endif /* AOF_ASSEMBLER */
14209 #ifndef ARM_PE
14210 /* Symbols in the text segment can be accessed without indirecting via the
14211 constant pool; it may take an extra binary operation, but this is still
14212 faster than indirecting via memory. Don't do this when not optimizing,
14213 since we won't be calculating all of the offsets necessary to do this
14214 simplification. */
14216 static void
14217 arm_encode_section_info (tree decl, rtx rtl, int first)
14219 /* This doesn't work with AOF syntax, since the string table may be in
14220 a different AREA. */
14221 #ifndef AOF_ASSEMBLER
14222 if (optimize > 0 && TREE_CONSTANT (decl))
14223 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14224 #endif
14226 /* If we are referencing a function that is weak then encode a long call
14227 flag in the function name; otherwise, if the function is static or
14228 known to be defined in this file, encode a short call flag. */
14229 if (first && DECL_P (decl))
14231 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14232 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14233 else if (! TREE_PUBLIC (decl))
14234 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14237 #endif /* !ARM_PE */
14239 static void
14240 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14242 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14243 && !strcmp (prefix, "L"))
14245 arm_ccfsm_state = 0;
14246 arm_target_insn = NULL;
14248 default_internal_label (stream, prefix, labelno);
14251 /* Output code to add DELTA to the first argument, and then jump
14252 to FUNCTION. Used for C++ multiple inheritance. */
14253 static void
14254 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14255 HOST_WIDE_INT delta,
14256 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14257 tree function)
14259 static int thunk_label = 0;
14260 char label[256];
14261 int mi_delta = delta;
14262 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14263 int shift = 0;
14264 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14265 ? 1 : 0);
14266 if (mi_delta < 0)
14267 mi_delta = - mi_delta;
14268 if (TARGET_THUMB)
14270 int labelno = thunk_label++;
14271 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14272 fputs ("\tldr\tr12, ", file);
14273 assemble_name (file, label);
14274 fputc ('\n', file);
14276 while (mi_delta != 0)
14278 if ((mi_delta & (3 << shift)) == 0)
14279 shift += 2;
14280 else
14282 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14283 mi_op, this_regno, this_regno,
14284 mi_delta & (0xff << shift));
14285 mi_delta &= ~(0xff << shift);
14286 shift += 8;
14289 if (TARGET_THUMB)
14291 fprintf (file, "\tbx\tr12\n");
14292 ASM_OUTPUT_ALIGN (file, 2);
14293 assemble_name (file, label);
14294 fputs (":\n", file);
14295 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14297 else
14299 fputs ("\tb\t", file);
14300 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14301 if (NEED_PLT_RELOC)
14302 fputs ("(PLT)", file);
14303 fputc ('\n', file);
14307 int
14308 arm_emit_vector_const (FILE *file, rtx x)
14310 int i;
14311 const char * pattern;
14313 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14315 switch (GET_MODE (x))
14317 case V2SImode: pattern = "%08x"; break;
14318 case V4HImode: pattern = "%04x"; break;
14319 case V8QImode: pattern = "%02x"; break;
14320 default: gcc_unreachable ();
14323 fprintf (file, "0x");
14324 for (i = CONST_VECTOR_NUNITS (x); i--;)
14326 rtx element;
14328 element = CONST_VECTOR_ELT (x, i);
14329 fprintf (file, pattern, INTVAL (element));
14332 return 1;
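/* A worked example (element values assumed): a V4HImode vector with
   elements {1, 2, 3, 4} is printed from the highest element down using
   the "%04x" pattern, giving "0x0004000300020001".  */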
14335 const char *
14336 arm_output_load_gr (rtx *operands)
14338 rtx reg;
14339 rtx offset;
14340 rtx wcgr;
14341 rtx sum;
14343 if (GET_CODE (operands [1]) != MEM
14344 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14345 || GET_CODE (reg = XEXP (sum, 0)) != REG
14346 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14347 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14348 return "wldrw%?\t%0, %1";
14350 /* Fix up an out-of-range load of a GR register. */
14351 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14352 wcgr = operands[0];
14353 operands[0] = reg;
14354 output_asm_insn ("ldr%?\t%0, %1", operands);
14356 operands[0] = wcgr;
14357 operands[1] = reg;
14358 output_asm_insn ("tmcr%?\t%0, %1", operands);
14359 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14361 return "";
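/* For illustration (registers and offset assumed): wldrw only reaches
   offsets in the range (-1024, 1024), so loading wcgr0 from
   [r1, #1024] goes through the expansion above, which reuses the base
   register as the scratch:
	str	r1, [sp, #-4]!	@ Start of GR load expansion
	ldr	r1, [r1, #1024]
	tmcr	wcgr0, r1
	ldr	r1, [sp], #4	@ End of GR load expansion  */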
14364 static rtx
14365 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14366 int incoming ATTRIBUTE_UNUSED)
14368 #if 0
14369 /* FIXME: The ARM backend has special code to handle structure
14370 returns, and will reserve its own hidden first argument. So
14371 if this macro is enabled a *second* hidden argument will be
14372 reserved, which will break binary compatibility with old
14373 toolchains and also thunk handling. One day this should be
14374 fixed. */
14375 return 0;
14376 #else
14377 /* Register in which address to store a structure value
14378 is passed to a function. */
14379 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14380 #endif
14383 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14385 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14386 named arg and all anonymous args onto the stack.
14387 XXX I know the prologue shouldn't be pushing registers, but it is faster
14388 that way. */
14390 static void
14391 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14392 enum machine_mode mode ATTRIBUTE_UNUSED,
14393 tree type ATTRIBUTE_UNUSED,
14394 int *pretend_size,
14395 int second_time ATTRIBUTE_UNUSED)
14397 cfun->machine->uses_anonymous_args = 1;
14398 if (cum->nregs < NUM_ARG_REGS)
14399 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
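/* For example (signature assumed): for "int f (int a, ...)" only one
   argument register is named, so cum->nregs is 1 and *pretend_size
   becomes 3 * UNITS_PER_WORD, causing the prologue to push r1-r3
   immediately below any stack-passed arguments.  */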
14402 /* Return nonzero if the CONSUMER instruction (a store) does not need
14403 PRODUCER's value to calculate the address. */
14405 int
14406 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14408 rtx value = PATTERN (producer);
14409 rtx addr = PATTERN (consumer);
14411 if (GET_CODE (value) == COND_EXEC)
14412 value = COND_EXEC_CODE (value);
14413 if (GET_CODE (value) == PARALLEL)
14414 value = XVECEXP (value, 0, 0);
14415 value = XEXP (value, 0);
14416 if (GET_CODE (addr) == COND_EXEC)
14417 addr = COND_EXEC_CODE (addr);
14418 if (GET_CODE (addr) == PARALLEL)
14419 addr = XVECEXP (addr, 0, 0);
14420 addr = XEXP (addr, 0);
14422 return !reg_overlap_mentioned_p (value, addr);
14425 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14426 have an early register shift value or amount dependency on the
14427 result of PRODUCER. */
14429 int
14430 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14432 rtx value = PATTERN (producer);
14433 rtx op = PATTERN (consumer);
14434 rtx early_op;
14436 if (GET_CODE (value) == COND_EXEC)
14437 value = COND_EXEC_CODE (value);
14438 if (GET_CODE (value) == PARALLEL)
14439 value = XVECEXP (value, 0, 0);
14440 value = XEXP (value, 0);
14441 if (GET_CODE (op) == COND_EXEC)
14442 op = COND_EXEC_CODE (op);
14443 if (GET_CODE (op) == PARALLEL)
14444 op = XVECEXP (op, 0, 0);
14445 op = XEXP (op, 1);
14447 early_op = XEXP (op, 0);
14448 /* This is either an actual independent shift, or a shift applied to
14449 the first operand of another operation. We want the whole shift
14450 operation. */
14451 if (GET_CODE (early_op) == REG)
14452 early_op = op;
14454 return !reg_overlap_mentioned_p (value, early_op);
14457 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14458 have an early register shift value dependency on the result of
14459 PRODUCER. */
14461 int
14462 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14464 rtx value = PATTERN (producer);
14465 rtx op = PATTERN (consumer);
14466 rtx early_op;
14468 if (GET_CODE (value) == COND_EXEC)
14469 value = COND_EXEC_CODE (value);
14470 if (GET_CODE (value) == PARALLEL)
14471 value = XVECEXP (value, 0, 0);
14472 value = XEXP (value, 0);
14473 if (GET_CODE (op) == COND_EXEC)
14474 op = COND_EXEC_CODE (op);
14475 if (GET_CODE (op) == PARALLEL)
14476 op = XVECEXP (op, 0, 0);
14477 op = XEXP (op, 1);
14479 early_op = XEXP (op, 0);
14481 /* This is either an actual independent shift, or a shift applied to
14482 the first operand of another operation. We want the value being
14483 shifted, in either case. */
14484 if (GET_CODE (early_op) != REG)
14485 early_op = XEXP (early_op, 0);
14487 return !reg_overlap_mentioned_p (value, early_op);
14490 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14491 have an early register mult dependency on the result of
14492 PRODUCER. */
14494 int
14495 arm_no_early_mul_dep (rtx producer, rtx consumer)
14497 rtx value = PATTERN (producer);
14498 rtx op = PATTERN (consumer);
14500 if (GET_CODE (value) == COND_EXEC)
14501 value = COND_EXEC_CODE (value);
14502 if (GET_CODE (value) == PARALLEL)
14503 value = XVECEXP (value, 0, 0);
14504 value = XEXP (value, 0);
14505 if (GET_CODE (op) == COND_EXEC)
14506 op = COND_EXEC_CODE (op);
14507 if (GET_CODE (op) == PARALLEL)
14508 op = XVECEXP (op, 0, 0);
14509 op = XEXP (op, 1);
14511 return (GET_CODE (op) == PLUS
14512 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
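/* Illustrative RTL for the test above (shape assumed from the checks):
   a multiply-accumulate consumer looks like
	(set (reg Rd) (plus (mult (reg Rm) (reg Rs)) (reg Ra)))
   and the dependency is "not early" when PRODUCER's result feeds only
   the accumulator Ra, since the multiplicands in XEXP (op, 0) are
   needed at the start of the pipeline but the addend is not.  */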
14516 /* We can't rely on the caller doing the proper promotion when
14517 using APCS or ATPCS. */
14519 static bool
14520 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14522 return !TARGET_AAPCS_BASED;
14526 /* AAPCS based ABIs use short enums by default. */
14528 static bool
14529 arm_default_short_enums (void)
14531 return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
14535 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14537 static bool
14538 arm_align_anon_bitfield (void)
14540 return TARGET_AAPCS_BASED;
14544 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14546 static tree
14547 arm_cxx_guard_type (void)
14549 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14553 /* The EABI says test the least significant bit of a guard variable. */
14555 static bool
14556 arm_cxx_guard_mask_bit (void)
14558 return TARGET_AAPCS_BASED;
14562 /* The EABI specifies that all array cookies are 8 bytes long. */
14564 static tree
14565 arm_get_cookie_size (tree type)
14567 tree size;
14569 if (!TARGET_AAPCS_BASED)
14570 return default_cxx_get_cookie_size (type);
14572 size = build_int_cst (sizetype, 8);
14573 return size;
14577 /* The EABI says that array cookies should also contain the element size. */
14579 static bool
14580 arm_cookie_has_size (void)
14582 return TARGET_AAPCS_BASED;
14586 /* The EABI says constructors and destructors should return a pointer to
14587 the object constructed/destroyed. */
14589 static bool
14590 arm_cxx_cdtor_returns_this (void)
14592 return TARGET_AAPCS_BASED;
14595 /* The EABI says that an inline function may never be the key
14596 method. */
14598 static bool
14599 arm_cxx_key_method_may_be_inline (void)
14601 return !TARGET_AAPCS_BASED;
14604 static void
14605 arm_cxx_determine_class_data_visibility (tree decl)
14607 if (!TARGET_AAPCS_BASED)
14608 return;
14610 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
14611 is exported. However, on systems without dynamic vague linkage,
14612 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
14613 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
14614 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
14615 else
14616 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
14617 DECL_VISIBILITY_SPECIFIED (decl) = 1;
14620 static bool
14621 arm_cxx_class_data_always_comdat (void)
14623 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
14624 vague linkage if the class has no key function. */
14625 return !TARGET_AAPCS_BASED;
14629 /* The EABI says __aeabi_atexit should be used to register static
14630 destructors. */
14632 static bool
14633 arm_cxx_use_aeabi_atexit (void)
14635 return TARGET_AAPCS_BASED;
14639 void
14640 arm_set_return_address (rtx source, rtx scratch)
14642 arm_stack_offsets *offsets;
14643 HOST_WIDE_INT delta;
14644 rtx addr;
14645 unsigned long saved_regs;
14647 saved_regs = arm_compute_save_reg_mask ();
14649 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14650 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14651 else
14653 if (frame_pointer_needed)
14654 addr = plus_constant (hard_frame_pointer_rtx, -4);
14655 else
14657 /* LR will be the first saved register. */
14658 offsets = arm_get_frame_offsets ();
14659 delta = offsets->outgoing_args - (offsets->frame + 4);
14662 if (delta >= 4096)
14664 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14665 GEN_INT (delta & ~4095)));
14666 addr = scratch;
14667 delta &= 4095;
14669 else
14670 addr = stack_pointer_rtx;
14672 addr = plus_constant (addr, delta);
14674 emit_move_insn (gen_frame_mem (Pmode, addr), source);
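/* A worked example of the split above (delta assumed): with a delta of
   5000, ARM load/store offsets only reach +/-4095, so the code emits
   "add scratch, sp, #4096" (5000 & ~4095) and then stores the new
   return address at [scratch, #904] (5000 & 4095).  */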
14679 void
14680 thumb_set_return_address (rtx source, rtx scratch)
14682 arm_stack_offsets *offsets;
14683 HOST_WIDE_INT delta;
14684 int reg;
14685 rtx addr;
14686 unsigned long mask;
14688 emit_insn (gen_rtx_USE (VOIDmode, source));
14690 mask = thumb_compute_save_reg_mask ();
14691 if (mask & (1 << LR_REGNUM))
14693 offsets = arm_get_frame_offsets ();
14695 /* Find the saved regs. */
14696 if (frame_pointer_needed)
14698 delta = offsets->soft_frame - offsets->saved_args;
14699 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14701 else
14703 delta = offsets->outgoing_args - offsets->saved_args;
14704 reg = SP_REGNUM;
14706 /* Allow for the stack frame. */
14707 if (TARGET_BACKTRACE)
14708 delta -= 16;
14709 /* The link register is always the first saved register. */
14710 delta -= 4;
14712 /* Construct the address. */
14713 addr = gen_rtx_REG (SImode, reg);
14714 if ((reg != SP_REGNUM && delta >= 128)
14715 || delta >= 1024)
14717 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14718 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14719 addr = scratch;
14721 else
14722 addr = plus_constant (addr, delta);
14724 emit_move_insn (gen_frame_mem (Pmode, addr), source);
14726 else
14727 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14730 /* Implements target hook vector_mode_supported_p. */
14731 bool
14732 arm_vector_mode_supported_p (enum machine_mode mode)
14734 if ((mode == V2SImode)
14735 || (mode == V4HImode)
14736 || (mode == V8QImode))
14737 return true;
14739 return false;
14742 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14743 ARM insns and therefore guarantee that the shift count is modulo 256.
14744 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14745 guarantee no particular behavior for out-of-range counts. */
14747 static unsigned HOST_WIDE_INT
14748 arm_shift_truncation_mask (enum machine_mode mode)
14750 return mode == SImode ? 255 : 0;
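/* A worked example: an SImode shift by a count of 260 behaves like a
   shift by 260 % 256 == 4, so returning 255 here lets the middle end
   remove an explicit "count & 255" masking operation ahead of the
   shift.  */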
14754 /* Map internal gcc register numbers to DWARF2 register numbers. */
14756 unsigned int
14757 arm_dbx_register_number (unsigned int regno)
14759 if (regno < 16)
14760 return regno;
14762 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14763 compatibility. The EABI defines them as registers 96-103. */
14764 if (IS_FPA_REGNUM (regno))
14765 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14767 if (IS_VFP_REGNUM (regno))
14768 return 64 + regno - FIRST_VFP_REGNUM;
14770 if (IS_IWMMXT_GR_REGNUM (regno))
14771 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14773 if (IS_IWMMXT_REGNUM (regno))
14774 return 112 + regno - FIRST_IWMMXT_REGNUM;
14776 gcc_unreachable ();
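/* For example, the mapping above sends the first VFP register to
   DWARF number 64, FPA register f0 to 96 under an AAPCS-based ABI
   (16 otherwise), the first iWMMXt GR register to 104, and the first
   iWMMXt register to 112.  */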
14780 #ifdef TARGET_UNWIND_INFO
14781 /* Emit unwind directives for a store-multiple instruction. This should
14782 only ever be generated by the function prologue code, so we expect it
14783 to have a particular form. */
14785 static void
14786 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
14788 int i;
14789 HOST_WIDE_INT offset;
14790 HOST_WIDE_INT nregs;
14791 int reg_size;
14792 unsigned reg;
14793 unsigned lastreg;
14794 rtx e;
14796 /* First insn will adjust the stack pointer. */
14797 e = XVECEXP (p, 0, 0);
14798 if (GET_CODE (e) != SET
14799 || GET_CODE (XEXP (e, 0)) != REG
14800 || REGNO (XEXP (e, 0)) != SP_REGNUM
14801 || GET_CODE (XEXP (e, 1)) != PLUS)
14802 abort ();
14804 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
14805 nregs = XVECLEN (p, 0) - 1;
14807 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
14808 if (reg < 16)
14810 /* The function prologue may also push pc, but does not annotate it,
14811 as it is never restored. We turn this into a stack pointer adjustment. */
14812 if (nregs * 4 == offset - 4)
14814 fprintf (asm_out_file, "\t.pad #4\n");
14815 offset -= 4;
14817 reg_size = 4;
14819 else if (IS_VFP_REGNUM (reg))
14821 /* VFP register saves (FSTMX format) use an additional word. */
14822 offset -= 4;
14823 reg_size = 8;
14825 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
14827 /* FPA registers are done differently. */
14828 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
14829 return;
14831 else
14832 /* Unknown register type. */
14833 abort ();
14835 /* If the stack increment doesn't match the size of the saved registers,
14836 something has gone horribly wrong. */
14837 if (offset != nregs * reg_size)
14838 abort ();
14840 fprintf (asm_out_file, "\t.save {");
14842 offset = 0;
14843 lastreg = 0;
14844 /* The remaining insns will describe the stores. */
14845 for (i = 1; i <= nregs; i++)
14847 /* Expect (set (mem <addr>) (reg)).
14848 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
14849 e = XVECEXP (p, 0, i);
14850 if (GET_CODE (e) != SET
14851 || GET_CODE (XEXP (e, 0)) != MEM
14852 || GET_CODE (XEXP (e, 1)) != REG)
14853 abort ();
14855 reg = REGNO (XEXP (e, 1));
14856 if (reg < lastreg)
14857 abort ();
14859 if (i != 1)
14860 fprintf (asm_out_file, ", ");
14861 /* We can't use %r for VFP, because we need to use the
14862 double-precision register names. */
14863 if (IS_VFP_REGNUM (reg))
14864 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
14865 else
14866 asm_fprintf (asm_out_file, "%r", reg);
14868 #ifdef ENABLE_CHECKING
14869 /* Check that the addresses are consecutive. */
14870 e = XEXP (XEXP (e, 0), 0);
14871 if (GET_CODE (e) == PLUS)
14873 offset += reg_size;
14874 if (GET_CODE (XEXP (e, 0)) != REG
14875 || REGNO (XEXP (e, 0)) != SP_REGNUM
14876 || GET_CODE (XEXP (e, 1)) != CONST_INT
14877 || offset != INTVAL (XEXP (e, 1)))
14878 abort ();
14880 else if (i != 1
14881 || GET_CODE (e) != REG
14882 || REGNO (e) != SP_REGNUM)
14883 abort ();
14884 #endif
14886 fprintf (asm_out_file, "}\n");
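/* For illustration (register list assumed): a prologue
   "stmfd sp!, {r4, r7, lr}" matched here produces
	.save {r4, r7, lr}
   while a push that also stores pc purely for stack alignment first
   emits ".pad #4" for the extra word, as handled above.  */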
14889 /* Emit unwind directives for a SET. */
14891 static void
14892 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
14894 rtx e0;
14895 rtx e1;
14897 e0 = XEXP (p, 0);
14898 e1 = XEXP (p, 1);
14899 switch (GET_CODE (e0))
14901 case MEM:
14902 /* Pushing a single register. */
14903 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
14904 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
14905 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
14906 abort ();
14908 asm_fprintf (asm_out_file, "\t.save ");
14909 if (IS_VFP_REGNUM (REGNO (e1)))
14910 asm_fprintf (asm_out_file, "{d%d}\n",
14911 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
14912 else
14913 asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
14914 break;
14916 case REG:
14917 if (REGNO (e0) == SP_REGNUM)
14919 /* A stack increment. */
14920 if (GET_CODE (e1) != PLUS
14921 || GET_CODE (XEXP (e1, 0)) != REG
14922 || REGNO (XEXP (e1, 0)) != SP_REGNUM
14923 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
14924 abort ();
14926 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
14927 -INTVAL (XEXP (e1, 1)));
14929 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
14931 HOST_WIDE_INT offset;
14932 unsigned reg;
14934 if (GET_CODE (e1) == PLUS)
14936 if (GET_CODE (XEXP (e1, 0)) != REG
14937 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
14938 abort ();
14939 reg = REGNO (XEXP (e1, 0));
14940 offset = INTVAL (XEXP (e1, 1));
14941 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
14942 HARD_FRAME_POINTER_REGNUM, reg,
14943 INTVAL (XEXP (e1, 1)));
14945 else if (GET_CODE (e1) == REG)
14947 reg = REGNO (e1);
14948 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
14949 HARD_FRAME_POINTER_REGNUM, reg);
14951 else
14952 abort ();
14954 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
14956 /* Move from sp to reg. */
14957 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
14959 else
14960 abort ();
14961 break;
14963 default:
14964 abort ();
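/* Illustrative inputs for the cases above (operands assumed):
	(set (mem (pre_dec sp)) (reg r3))    -> .save {r3}
	(set sp (plus sp (const_int -16)))   -> .pad #16
	(set fp (plus sp (const_int 8)))     -> .setfp fp, sp, #8
	(set (reg r7) (reg sp))              -> .movsp r7  */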
14969 /* Emit unwind directives for the given insn. */
14971 static void
14972 arm_unwind_emit (FILE * asm_out_file, rtx insn)
14974 rtx pat;
14976 if (!ARM_EABI_UNWIND_TABLES)
14977 return;
14979 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
14980 return;
14982 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
14983 if (pat)
14984 pat = XEXP (pat, 0);
14985 else
14986 pat = PATTERN (insn);
14988 switch (GET_CODE (pat))
14990 case SET:
14991 arm_unwind_emit_set (asm_out_file, pat);
14992 break;
14994 case SEQUENCE:
14995 /* Store multiple. */
14996 arm_unwind_emit_stm (asm_out_file, pat);
14997 break;
14999 default:
15000 abort ();
15005 /* Output a reference from a function exception table to the type_info
15006 object X. The EABI specifies that the symbol should be relocated by
15007 an R_ARM_TARGET2 relocation. */
15009 static bool
15010 arm_output_ttype (rtx x)
15012 fputs ("\t.word\t", asm_out_file);
15013 output_addr_const (asm_out_file, x);
15014 /* Use special relocations for symbol references. */
15015 if (GET_CODE (x) != CONST_INT)
15016 fputs ("(TARGET2)", asm_out_file);
15017 fputc ('\n', asm_out_file);
15019 return TRUE;
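/* For example (symbol assumed): a catch clause for "int" references
   its type_info object and emits
	.word	_ZTIi(TARGET2)
   while a null entry (a CONST_INT) is emitted without the TARGET2
   relocation.  */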
15021 #endif /* TARGET_UNWIND_INFO */
15024 /* Output unwind directives for the start/end of a function. */
15026 void
15027 arm_output_fn_unwind (FILE * f, bool prologue)
15029 if (!ARM_EABI_UNWIND_TABLES)
15030 return;
15032 if (prologue)
15033 fputs ("\t.fnstart\n", f);
15034 else
15035 fputs ("\t.fnend\n", f);