/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);

/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef  TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef  TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef  TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef  TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef  TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef  TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef  TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;
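
/* For example, with if-conversion a guarded increment such as

     if (x != 0)
       y++;

   can be emitted as a compare followed by a conditionally executed
   add (addne) instead of a branch around the add, provided the guarded
   sequence is no longer than max_insns_skipped insns.  (Illustrative
   only; the real logic lives in the ccfsm code in final_prescan_insn.)  */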

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   FL_FOR_ARCH6
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6
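
/* Each FL_FOR_ARCHn value therefore accumulates the capabilities of the
   architectures below it; for example, FL_FOR_ARCH5TE expands to
   (FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB).  */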

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum arm_fp_model fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",  ARM_ABI_APCS},
  {"atpcs",     ARM_ABI_ATPCS},
  {"aapcs",     ARM_ABI_AAPCS},
  {"iwmmxt",    ARM_ABI_IWMMXT}
};

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
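
/* For example, bit_count (0x2c) iterates 0x2c -> 0x28 -> 0x20 -> 0 and
   returns 3: each "value &= value - 1" step removes exactly one set bit.  */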

/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
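
/* The effect of the above is that, on a BPABI target, a plain 64-bit
   division such as

     long long quot (long long a, long long b) { return a / b; }

   compiles to a call to __aeabi_ldivmod, with the quotient taken from
   {r0, r1} and the remainder (returned in {r2, r3} per the Run-Time ABI)
   simply ignored.  */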

/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              gcc_assert (best_fit);
              sel = best_fit;
            }

          insn_flags = sel->flags;
        }

      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking");
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
         if ((insn_flags & FL_VFP) != 0)
           arm_fpu_arch = FPUTYPE_VFP;
         else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
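
/* These strings are matched against the argument of the "interrupt"
   (or "isr") function attribute, so, for example,

     void fiq_handler (void) __attribute__ ((interrupt ("FIQ")));

   gives the function the type ARM_FT_FIQ.  */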

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ...  */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
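
/* For example, a function whose only saved registers are {r4, lr} and
   which needs no other stack adjustment passes all of the tests above;
   its epilogue can be the single instruction

     ldmfd sp!, {r4, pc}

   (Illustrative only; the actual epilogue is emitted elsewhere.)  */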

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
           && ((i & ~0xc000003f) == 0
               || (i & ~0xf000000f) == 0
               || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}
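
/* Some examples, derived from the tests above:

     const_ok_for_arm (0x000000ff) => TRUE   (8 bits, no rotation)
     const_ok_for_arm (0x00000ff0) => TRUE   (0xff shifted left by 4)
     const_ok_for_arm (0xf000000f) => TRUE   (8-bit pattern wrapping
                                              around the top of the word)
     const_ok_for_arm (0x00000101) => FALSE  (needs two 8-bit pieces)  */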

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
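
/* For example, const_ok_for_op (-256, PLUS) is true even though -256 is
   not itself a valid immediate: const_ok_for_arm (256) holds, so the
   addition can be emitted as a subtraction of 256 instead.  */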

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c  */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are dyadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source,
                                                        temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
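
/* For example, a SET of the constant 0x0000ffff is not encodable in a
   single instruction, so when it must be synthesized in-line it becomes
   a two-insn sequence along the lines of

     mov rD, #0xff00
     orr rD, rD, #0x00ff

   which arm_gen_constant counts when compared against the
   arm_constant_limit threshold above.  */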

static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}

/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}

/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }

      /* We don't know how to handle other cases yet.  */
      gcc_assert (remainder == 0xffffffff);

      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         gen_rtx_NOT (mode, source)));
      return 1;

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_MINUS (mode, GEN_INT (val),
                                                            source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      gcc_unreachable ();
    }
1805 /* If we can do it in one insn, get out quickly. */
1806 if (const_ok_for_arm (val)
1807 || (can_negate_initial && const_ok_for_arm (-val))
1808 || (can_invert && const_ok_for_arm (~val)))
1810 if (generate)
1811 emit_constant_insn (cond,
1812 gen_rtx_SET (VOIDmode, target,
1813 (source
1814 ? gen_rtx_fmt_ee (code, mode, source,
1815 GEN_INT (val))
1816 : GEN_INT (val))));
1817 return 1;
1820 /* Calculate a few attributes that may be useful for specific
1821 optimizations. */
1822 for (i = 31; i >= 0; i--)
1824 if ((remainder & (1 << i)) == 0)
1825 clear_sign_bit_copies++;
1826 else
1827 break;
1830 for (i = 31; i >= 0; i--)
1832 if ((remainder & (1 << i)) != 0)
1833 set_sign_bit_copies++;
1834 else
1835 break;
1838 for (i = 0; i <= 31; i++)
1840 if ((remainder & (1 << i)) == 0)
1841 clear_zero_bit_copies++;
1842 else
1843 break;
1846 for (i = 0; i <= 31; i++)
1848 if ((remainder & (1 << i)) != 0)
1849 set_zero_bit_copies++;
1850 else
1851 break;
1854 switch (code)
1856 case SET:
1857 /* See if we can do this by sign-extending a constant that is known
1858 to be negative. This is a good way of doing it, since the shift
1859 may well merge into a subsequent insn. */
1860 if (set_sign_bit_copies > 1)
1862 if (const_ok_for_arm
1863 (temp1 = ARM_SIGN_EXTEND (remainder
1864 << (set_sign_bit_copies - 1))))
1866 if (generate)
1868 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1869 emit_constant_insn (cond,
1870 gen_rtx_SET (VOIDmode, new_src,
1871 GEN_INT (temp1)));
1872 emit_constant_insn (cond,
1873 gen_ashrsi3 (target, new_src,
1874 GEN_INT (set_sign_bit_copies - 1)));
1876 return 2;
1878 /* For an inverted constant, we will need to set the low bits;
1879 these will be shifted out of harm's way. */
1880 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1881 if (const_ok_for_arm (~temp1))
1883 if (generate)
1885 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1886 emit_constant_insn (cond,
1887 gen_rtx_SET (VOIDmode, new_src,
1888 GEN_INT (temp1)));
1889 emit_constant_insn (cond,
1890 gen_ashrsi3 (target, new_src,
1891 GEN_INT (set_sign_bit_copies - 1)));
1893 return 2;
1897 /* See if we can calculate the value as the difference between two
1898 valid immediates. */
1899 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1901 int topshift = clear_sign_bit_copies & ~1;
1903 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1904 & (0xff000000 >> topshift));
1906 /* If temp1 is zero, then that means the 9 most significant
1907 bits of remainder were 1 and we've caused it to overflow.
1908 When topshift is 0 we don't need to do anything since we
1909 can borrow from 'bit 32'. */
1910 if (temp1 == 0 && topshift != 0)
1911 temp1 = 0x80000000 >> (topshift - 1);
1913 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
1915 if (const_ok_for_arm (temp2))
1917 if (generate)
1919 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1920 emit_constant_insn (cond,
1921 gen_rtx_SET (VOIDmode, new_src,
1922 GEN_INT (temp1)));
1923 emit_constant_insn (cond,
1924 gen_addsi3 (target, new_src,
1925 GEN_INT (-temp2)));
1928 return 2;
1932 /* See if we can generate this by setting the bottom (or the top)
1933 16 bits, and then shifting these into the other half of the
1934 word. We only look for the simplest cases; to do more would cost
1935 too much. Be careful, however, not to generate this when the
1936 alternative would take fewer insns. */
1937 if (val & 0xffff0000)
1939 temp1 = remainder & 0xffff0000;
1940 temp2 = remainder & 0x0000ffff;
1942 /* Overlaps outside this range are best done using other methods. */
1943 for (i = 9; i < 24; i++)
1945 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1946 && !const_ok_for_arm (temp2))
1948 rtx new_src = (subtargets
1949 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1950 : target);
1951 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1952 source, subtargets, generate);
1953 source = new_src;
1954 if (generate)
1955 emit_constant_insn
1956 (cond,
1957 gen_rtx_SET
1958 (VOIDmode, target,
1959 gen_rtx_IOR (mode,
1960 gen_rtx_ASHIFT (mode, source,
1961 GEN_INT (i)),
1962 source)));
1963 return insns + 1;
1967 /* Don't duplicate cases already considered. */
1968 for (i = 17; i < 24; i++)
1970 if (((temp1 | (temp1 >> i)) == remainder)
1971 && !const_ok_for_arm (temp1))
1973 rtx new_src = (subtargets
1974 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1975 : target);
1976 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1977 source, subtargets, generate);
1978 source = new_src;
1979 if (generate)
1980 emit_constant_insn
1981 (cond,
1982 gen_rtx_SET (VOIDmode, target,
1983 gen_rtx_IOR
1984 (mode,
1985 gen_rtx_LSHIFTRT (mode, source,
1986 GEN_INT (i)),
1987 source)));
1988 return insns + 1;
1992 break;
1994 case IOR:
1995 case XOR:
1996 /* If we have IOR or XOR, and the constant can be loaded in a
1997 single instruction, and we can find a temporary to put it in,
1998 then this can be done in two instructions instead of 3-4. */
1999 if (subtargets
2000 /* TARGET can't be NULL if SUBTARGETS is 0. */
2001 || (reload_completed && !reg_mentioned_p (target, source)))
2003 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2005 if (generate)
2007 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2009 emit_constant_insn (cond,
2010 gen_rtx_SET (VOIDmode, sub,
2011 GEN_INT (val)));
2012 emit_constant_insn (cond,
2013 gen_rtx_SET (VOIDmode, target,
2014 gen_rtx_fmt_ee (code, mode,
2015 source, sub)));
2017 return 2;
2021 if (code == XOR)
2022 break;
2024 if (set_sign_bit_copies > 8
2025 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2027 if (generate)
2029 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2030 rtx shift = GEN_INT (set_sign_bit_copies);
2032 emit_constant_insn
2033 (cond,
2034 gen_rtx_SET (VOIDmode, sub,
2035 gen_rtx_NOT (mode,
2036 gen_rtx_ASHIFT (mode,
2037 source,
2038 shift))));
2039 emit_constant_insn
2040 (cond,
2041 gen_rtx_SET (VOIDmode, target,
2042 gen_rtx_NOT (mode,
2043 gen_rtx_LSHIFTRT (mode, sub,
2044 shift))));
2046 return 2;
2049 if (set_zero_bit_copies > 8
2050 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2052 if (generate)
2054 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2055 rtx shift = GEN_INT (set_zero_bit_copies);
2057 emit_constant_insn
2058 (cond,
2059 gen_rtx_SET (VOIDmode, sub,
2060 gen_rtx_NOT (mode,
2061 gen_rtx_LSHIFTRT (mode,
2062 source,
2063 shift))));
2064 emit_constant_insn
2065 (cond,
2066 gen_rtx_SET (VOIDmode, target,
2067 gen_rtx_NOT (mode,
2068 gen_rtx_ASHIFT (mode, sub,
2069 shift))));
2071 return 2;
2074 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2076 if (generate)
2078 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2079 emit_constant_insn (cond,
2080 gen_rtx_SET (VOIDmode, sub,
2081 gen_rtx_NOT (mode, source)));
2082 source = sub;
2083 if (subtargets)
2084 sub = gen_reg_rtx (mode);
2085 emit_constant_insn (cond,
2086 gen_rtx_SET (VOIDmode, sub,
2087 gen_rtx_AND (mode, source,
2088 GEN_INT (temp1))));
2089 emit_constant_insn (cond,
2090 gen_rtx_SET (VOIDmode, target,
2091 gen_rtx_NOT (mode, sub)));
2093 return 3;
2095 break;
2097 case AND:
2098 /* See if two shifts will do 2 or more insns' worth of work. */
2099 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2101 HOST_WIDE_INT shift_mask = ((0xffffffff
2102 << (32 - clear_sign_bit_copies))
2103 & 0xffffffff);
2105 if ((remainder | shift_mask) != 0xffffffff)
2107 if (generate)
2109 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2110 insns = arm_gen_constant (AND, mode, cond,
2111 remainder | shift_mask,
2112 new_src, source, subtargets, 1);
2113 source = new_src;
2115 else
2117 rtx targ = subtargets ? NULL_RTX : target;
2118 insns = arm_gen_constant (AND, mode, cond,
2119 remainder | shift_mask,
2120 targ, source, subtargets, 0);
2124 if (generate)
2126 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2127 rtx shift = GEN_INT (clear_sign_bit_copies);
2129 emit_insn (gen_ashlsi3 (new_src, source, shift));
2130 emit_insn (gen_lshrsi3 (target, new_src, shift));
2133 return insns + 2;
2136 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2138 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2140 if ((remainder | shift_mask) != 0xffffffff)
2142 if (generate)
2144 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2146 insns = arm_gen_constant (AND, mode, cond,
2147 remainder | shift_mask,
2148 new_src, source, subtargets, 1);
2149 source = new_src;
2151 else
2153 rtx targ = subtargets ? NULL_RTX : target;
2155 insns = arm_gen_constant (AND, mode, cond,
2156 remainder | shift_mask,
2157 targ, source, subtargets, 0);
2161 if (generate)
2163 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2164 rtx shift = GEN_INT (clear_zero_bit_copies);
2166 emit_insn (gen_lshrsi3 (new_src, source, shift));
2167 emit_insn (gen_ashlsi3 (target, new_src, shift));
2170 return insns + 2;
2173 break;
2175 default:
2176 break;
2179 for (i = 0; i < 32; i++)
2180 if (remainder & (1 << i))
2181 num_bits_set++;
2183 if (code == AND || (can_invert && num_bits_set > 16))
2184 remainder = (~remainder) & 0xffffffff;
2185 else if (code == PLUS && num_bits_set > 16)
2186 remainder = (-remainder) & 0xffffffff;
2187 else
2189 can_invert = 0;
2190 can_negate = 0;
2193 /* Now try and find a way of doing the job in either two or three
2194 instructions.
2195 We start by looking for the largest block of zeros that is aligned on
2196 a 2-bit boundary; we then fill up the temps, wrapping around to the
2197 top of the word when we drop off the bottom.
2198 In the worst case this code should produce no more than four insns. */
2200 int best_start = 0;
2201 int best_consecutive_zeros = 0;
2203 for (i = 0; i < 32; i += 2)
2205 int consecutive_zeros = 0;
2207 if (!(remainder & (3 << i)))
2209 while ((i < 32) && !(remainder & (3 << i)))
2211 consecutive_zeros += 2;
2212 i += 2;
2214 if (consecutive_zeros > best_consecutive_zeros)
2216 best_consecutive_zeros = consecutive_zeros;
2217 best_start = i - consecutive_zeros;
2219 i -= 2;
2223 /* So long as it won't require any more insns to do so, it's
2224 desirable to emit a small constant (in bits 0...9) in the last
2225 insn. This way there is more chance that it can be combined with
2226 a later addressing insn to form a pre-indexed load or store
2227 operation. Consider:
2229 *((volatile int *)0xe0000100) = 1;
2230 *((volatile int *)0xe0000110) = 2;
2232 We want this to wind up as:
2234 mov rA, #0xe0000000
2235 mov rB, #1
2236 str rB, [rA, #0x100]
2237 mov rB, #2
2238 str rB, [rA, #0x110]
2240 rather than having to synthesize both large constants from scratch.
2242 Therefore, we calculate how many insns would be required to emit
2243 the constant starting from `best_start', and also starting from
2244 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2245 yield a shorter sequence, we may as well use zero. */
2246 if (best_start != 0
2247 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2248 && (count_insns_for_constant (remainder, 0) <=
2249 count_insns_for_constant (remainder, best_start)))
2250 best_start = 0;
2252 /* Now start emitting the insns. */
2253 i = best_start;
2254 do
2255 {
2256 int end;
2258 if (i <= 0)
2259 i += 32;
2260 if (remainder & (3 << (i - 2)))
2262 end = i - 8;
2263 if (end < 0)
2264 end += 32;
2265 temp1 = remainder & ((0x0ff << end)
2266 | ((i < end) ? (0xff >> (32 - end)) : 0));
2267 remainder &= ~temp1;
2269 if (generate)
2271 rtx new_src, temp1_rtx;
2273 if (code == SET || code == MINUS)
2275 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2276 if (can_invert && code != MINUS)
2277 temp1 = ~temp1;
2279 else
2281 if (remainder && subtargets)
2282 new_src = gen_reg_rtx (mode);
2283 else
2284 new_src = target;
2285 if (can_invert)
2286 temp1 = ~temp1;
2287 else if (can_negate)
2288 temp1 = -temp1;
2291 temp1 = trunc_int_for_mode (temp1, mode);
2292 temp1_rtx = GEN_INT (temp1);
2294 if (code == SET)
2296 else if (code == MINUS)
2297 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2298 else
2299 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2301 emit_constant_insn (cond,
2302 gen_rtx_SET (VOIDmode, new_src,
2303 temp1_rtx));
2304 source = new_src;
2307 if (code == SET)
2309 can_invert = 0;
2310 code = PLUS;
2312 else if (code == MINUS)
2313 code = PLUS;
2315 insns++;
2316 i -= 6;
2318 i -= 2;
2319 }
2320 while (remainder);
2323 return insns;
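/* A worked example of the two-shift AND path above (illustrative;
   register names are invented): for "x &= 0xffff" the mask has sixteen
   clear sign-bit copies and is not itself a valid immediate, so we emit

   mov rT, rS, asl #16
   mov rD, rT, lsr #16

   which costs two insns, rather than synthesizing the mask into a
   register first.  */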
2326 /* Canonicalize a comparison so that we are more likely to recognize it.
2327 This can be done for a few constant compares, where we can make the
2328 immediate value easier to load. */
2330 enum rtx_code
2331 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2333 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2335 switch (code)
2337 case EQ:
2338 case NE:
2339 return code;
2341 case GT:
2342 case LE:
2343 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2344 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2346 *op1 = GEN_INT (i + 1);
2347 return code == GT ? GE : LT;
2349 break;
2351 case GE:
2352 case LT:
2353 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2354 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2356 *op1 = GEN_INT (i - 1);
2357 return code == GE ? GT : LE;
2359 break;
2361 case GTU:
2362 case LEU:
2363 if (i != ~((unsigned HOST_WIDE_INT) 0)
2364 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2366 *op1 = GEN_INT (i + 1);
2367 return code == GTU ? GEU : LTU;
2369 break;
2371 case GEU:
2372 case LTU:
2373 if (i != 0
2374 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2376 *op1 = GEN_INT (i - 1);
2377 return code == GEU ? GTU : LEU;
2379 break;
2381 default:
2382 gcc_unreachable ();
2385 return code;
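/* A worked example (illustrative): in "x <= 0x3ff" the constant is ten
   consecutive set bits and not a valid ARM immediate, but 0x400 is, so
   the LE case above rewrites the comparison as "x < 0x400" and a single
   CMP with an encodable immediate suffices.  */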
2389 /* Define how to find the value returned by a function. */
2391 rtx
2392 arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2394 enum machine_mode mode;
2395 int unsignedp ATTRIBUTE_UNUSED;
2396 rtx r ATTRIBUTE_UNUSED;
2398 mode = TYPE_MODE (type);
2399 /* Promote integer types. */
2400 if (INTEGRAL_TYPE_P (type))
2401 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2403 /* Promote small structs returned in a register to full-word size
2404 for big-endian AAPCS. */
2405 if (arm_return_in_msb (type))
2407 HOST_WIDE_INT size = int_size_in_bytes (type);
2408 if (size % UNITS_PER_WORD != 0)
2410 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2411 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2415 return LIBCALL_VALUE (mode);
2418 /* Determine the amount of memory needed to store the possible return
2419 registers of an untyped call. */
2420 int
2421 arm_apply_result_size (void)
2423 int size = 16;
2425 if (TARGET_ARM)
2427 if (TARGET_HARD_FLOAT_ABI)
2429 if (TARGET_FPA)
2430 size += 12;
2431 if (TARGET_MAVERICK)
2432 size += 8;
2434 if (TARGET_IWMMXT_ABI)
2435 size += 8;
2438 return size;
2441 /* Decide whether a type should be returned in memory (true)
2442 or in a register (false). This is called by the macro
2443 RETURN_IN_MEMORY. */
2444 int
2445 arm_return_in_memory (tree type)
2447 HOST_WIDE_INT size;
2449 if (!AGGREGATE_TYPE_P (type)
2450 && TREE_CODE (type) != VECTOR_TYPE
2451 && !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2452 /* All simple types are returned in registers.
2453 For AAPCS, complex types are treated the same as aggregates. */
2454 return 0;
2456 size = int_size_in_bytes (type);
2458 if (arm_abi != ARM_ABI_APCS)
2460 /* ATPCS and later return aggregate types in memory only if they are
2461 larger than a word (or are variable size). */
2462 return (size < 0 || size > UNITS_PER_WORD);
2465 /* To maximize backwards compatibility with previous versions of gcc,
2466 return vectors up to 4 words in registers. */
2467 if (TREE_CODE (type) == VECTOR_TYPE)
2468 return (size < 0 || size > (4 * UNITS_PER_WORD));
2470 /* For the arm-wince targets we choose to be compatible with Microsoft's
2471 ARM and Thumb compilers, which always return aggregates in memory. */
2472 #ifndef ARM_WINCE
2473 /* All structures/unions bigger than one word are returned in memory.
2474 Also catch the case where int_size_in_bytes returns -1. In this case
2475 the aggregate is either huge or of variable size, and in either case
2476 we will want to return it via memory and not in a register. */
2477 if (size < 0 || size > UNITS_PER_WORD)
2478 return 1;
2480 if (TREE_CODE (type) == RECORD_TYPE)
2482 tree field;
2484 /* For a struct the APCS says that we only return in a register
2485 if the type is 'integer like' and every addressable element
2486 has an offset of zero. For practical purposes this means
2487 that the structure can have at most one non bit-field element
2488 and that this element must be the first one in the structure. */
2490 /* Find the first field, ignoring non FIELD_DECL things which will
2491 have been created by C++. */
2492 for (field = TYPE_FIELDS (type);
2493 field && TREE_CODE (field) != FIELD_DECL;
2494 field = TREE_CHAIN (field))
2495 continue;
2497 if (field == NULL)
2498 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2500 /* Check that the first field is valid for returning in a register. */
2502 /* ... Floats are not allowed */
2503 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2504 return 1;
2506 /* ... Aggregates that are not themselves valid for returning in
2507 a register are not allowed. */
2508 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2509 return 1;
2511 /* Now check the remaining fields, if any. Only bitfields are allowed,
2512 since they are not addressable. */
2513 for (field = TREE_CHAIN (field);
2514 field;
2515 field = TREE_CHAIN (field))
2517 if (TREE_CODE (field) != FIELD_DECL)
2518 continue;
2520 if (!DECL_BIT_FIELD_TYPE (field))
2521 return 1;
2524 return 0;
2527 if (TREE_CODE (type) == UNION_TYPE)
2529 tree field;
2531 /* Unions can be returned in registers if every element is
2532 integral, or can be returned in an integer register. */
2533 for (field = TYPE_FIELDS (type);
2534 field;
2535 field = TREE_CHAIN (field))
2537 if (TREE_CODE (field) != FIELD_DECL)
2538 continue;
2540 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2541 return 1;
2543 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2544 return 1;
2547 return 0;
2549 #endif /* not ARM_WINCE */
2551 /* Return all other types in memory. */
2552 return 1;
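/* Some illustrative APCS cases for the rules above (not a normative
   list):

   struct a { int i; };             one addressable integer field;
                                    returned in a register.
   struct b { short x; short y; };  a second addressable field;
                                    returned in memory.
   struct c { float f; };           float first field; returned in
                                    memory.  */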
2555 /* Indicate whether or not words of a double are in big-endian order. */
2557 int
2558 arm_float_words_big_endian (void)
2560 if (TARGET_MAVERICK)
2561 return 0;
2563 /* For FPA, float words are always big-endian. For VFP, float words
2564 follow the memory system mode. */
2566 if (TARGET_FPA)
2568 return 1;
2571 if (TARGET_VFP)
2572 return (TARGET_BIG_END ? 1 : 0);
2574 return 1;
2577 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2578 for a call to a function whose data type is FNTYPE.
2579 For a library call, FNTYPE is NULL. */
2580 void
2581 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2582 rtx libname ATTRIBUTE_UNUSED,
2583 tree fndecl ATTRIBUTE_UNUSED)
2585 /* On the ARM, the offset starts at 0. */
2586 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2587 pcum->iwmmxt_nregs = 0;
2588 pcum->can_split = true;
2590 pcum->call_cookie = CALL_NORMAL;
2592 if (TARGET_LONG_CALLS)
2593 pcum->call_cookie = CALL_LONG;
2595 /* Check for long call/short call attributes. The attributes
2596 override any command line option. */
2597 if (fntype)
2599 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2600 pcum->call_cookie = CALL_SHORT;
2601 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2602 pcum->call_cookie = CALL_LONG;
2605 /* Varargs vectors are treated the same as long long.
2606 named_count avoids having to change the way arm handles 'named'. */
2607 pcum->named_count = 0;
2608 pcum->nargs = 0;
2610 if (TARGET_REALLY_IWMMXT && fntype)
2612 tree fn_arg;
2614 for (fn_arg = TYPE_ARG_TYPES (fntype);
2615 fn_arg;
2616 fn_arg = TREE_CHAIN (fn_arg))
2617 pcum->named_count += 1;
2619 if (! pcum->named_count)
2620 pcum->named_count = INT_MAX;
2625 /* Return true if mode/type need doubleword alignment. */
2626 bool
2627 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2629 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2630 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2634 /* Determine where to put an argument to a function.
2635 Value is zero to push the argument on the stack,
2636 or a hard register in which to store the argument.
2638 MODE is the argument's machine mode.
2639 TYPE is the data type of the argument (as a tree).
2640 This is null for libcalls where that information may
2641 not be available.
2642 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2643 the preceding args and about the function being called.
2644 NAMED is nonzero if this argument is a named parameter
2645 (otherwise it is an extra parameter matching an ellipsis). */
2647 rtx
2648 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2649 tree type, int named)
2651 int nregs;
2653 /* Varargs vectors are treated the same as long long.
2654 named_count avoids having to change the way arm handles 'named'. */
2655 if (TARGET_IWMMXT_ABI
2656 && arm_vector_mode_supported_p (mode)
2657 && pcum->named_count > pcum->nargs + 1)
2659 if (pcum->iwmmxt_nregs <= 9)
2660 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2661 else
2663 pcum->can_split = false;
2664 return NULL_RTX;
2668 /* Put doubleword aligned quantities in even register pairs. */
2669 if (pcum->nregs & 1
2670 && ARM_DOUBLEWORD_ALIGN
2671 && arm_needs_doubleword_align (mode, type))
2672 pcum->nregs++;
2674 if (mode == VOIDmode)
2675 /* Compute operand 2 of the call insn. */
2676 return GEN_INT (pcum->call_cookie);
2678 /* Only allow splitting an arg between regs and memory if all preceding
2679 args were allocated to regs. For args passed by reference we only count
2680 the reference pointer. */
2681 if (pcum->can_split)
2682 nregs = 1;
2683 else
2684 nregs = ARM_NUM_REGS2 (mode, type);
2686 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2687 return NULL_RTX;
2689 return gen_rtx_REG (mode, pcum->nregs);
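/* A worked example of the even-pair rule above (illustrative): for
   "void f (int a, long long b)" under AAPCS, A lands in r0 and leaves
   NREGS at 1; B needs doubleword alignment, so NREGS is bumped to 2
   and B is passed in the even pair r2/r3, with r1 left unused.  */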
2692 static int
2693 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2694 tree type, bool named ATTRIBUTE_UNUSED)
2696 int nregs = pcum->nregs;
2698 if (arm_vector_mode_supported_p (mode))
2699 return 0;
2701 if (NUM_ARG_REGS > nregs
2702 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2703 && pcum->can_split)
2704 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2706 return 0;
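/* For example (illustrative, assuming an ABI without doubleword
   alignment such as APCS): in "void g (int a, int b, int c,
   long long d)", D is reached with NREGS == 3 but needs two registers,
   so the function above reports (4 - 3) * UNITS_PER_WORD == 4 bytes
   passed in r3, with the remaining word of D going on the stack.  */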
2709 /* Variable-sized types are passed by reference. This is a GCC
2710 extension to the ARM ABI. */
2712 static bool
2713 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2714 enum machine_mode mode ATTRIBUTE_UNUSED,
2715 tree type, bool named ATTRIBUTE_UNUSED)
2717 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2720 /* Encode the current state of the #pragma [no_]long_calls. */
2721 typedef enum
2723 OFF, /* No #pragma [no_]long_calls is in effect. */
2724 LONG, /* #pragma long_calls is in effect. */
2725 SHORT /* #pragma no_long_calls is in effect. */
2726 } arm_pragma_enum;
2728 static arm_pragma_enum arm_pragma_long_calls = OFF;
2730 void
2731 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2733 arm_pragma_long_calls = LONG;
2736 void
2737 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2739 arm_pragma_long_calls = SHORT;
2742 void
2743 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2745 arm_pragma_long_calls = OFF;
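/* Example usage (source level): "#pragma long_calls" marks subsequent
   function types long_call, "#pragma no_long_calls" marks them
   short_call, and "#pragma long_calls_off" restores the command-line
   default; the three handlers above implement these respectively.  */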
2748 /* Table of machine attributes. */
2749 const struct attribute_spec arm_attribute_table[] =
2751 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2752 /* Function calls made to this symbol must be done indirectly, because
2753 it may lie outside of the 26 bit addressing range of a normal function
2754 call. */
2755 { "long_call", 0, 0, false, true, true, NULL },
2756 /* Whereas these functions are always known to reside within the 26 bit
2757 addressing range. */
2758 { "short_call", 0, 0, false, true, true, NULL },
2759 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2760 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2761 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2762 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2763 #ifdef ARM_PE
2764 /* ARM/PE has three new attributes:
2765 interfacearm - ?
2766 dllexport - for exporting a function/variable that will live in a dll
2767 dllimport - for importing a function/variable from a dll
2769 Microsoft allows multiple declspecs in one __declspec, separating
2770 them with spaces. We do NOT support this. Instead, use __declspec
2771 multiple times.
2773 { "dllimport", 0, 0, true, false, false, NULL },
2774 { "dllexport", 0, 0, true, false, false, NULL },
2775 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2776 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2777 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2778 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2779 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2780 #endif
2781 { NULL, 0, 0, false, false, false, NULL }
2784 /* Handle an attribute requiring a FUNCTION_DECL;
2785 arguments as in struct attribute_spec.handler. */
2786 static tree
2787 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2788 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2790 if (TREE_CODE (*node) != FUNCTION_DECL)
2792 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2793 IDENTIFIER_POINTER (name));
2794 *no_add_attrs = true;
2797 return NULL_TREE;
2800 /* Handle an "interrupt" or "isr" attribute;
2801 arguments as in struct attribute_spec.handler. */
2802 static tree
2803 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2804 bool *no_add_attrs)
2806 if (DECL_P (*node))
2808 if (TREE_CODE (*node) != FUNCTION_DECL)
2810 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2811 IDENTIFIER_POINTER (name));
2812 *no_add_attrs = true;
2814 /* FIXME: the argument, if any, is checked for type attributes;
2815 should it be checked for decl ones? */
2817 else
2819 if (TREE_CODE (*node) == FUNCTION_TYPE
2820 || TREE_CODE (*node) == METHOD_TYPE)
2822 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2824 warning (OPT_Wattributes, "%qs attribute ignored",
2825 IDENTIFIER_POINTER (name));
2826 *no_add_attrs = true;
2829 else if (TREE_CODE (*node) == POINTER_TYPE
2830 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2831 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2832 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2834 *node = build_variant_type_copy (*node);
2835 TREE_TYPE (*node) = build_type_attribute_variant
2836 (TREE_TYPE (*node),
2837 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2838 *no_add_attrs = true;
2840 else
2842 /* Possibly pass this attribute on from the type to a decl. */
2843 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2844 | (int) ATTR_FLAG_FUNCTION_NEXT
2845 | (int) ATTR_FLAG_ARRAY_NEXT))
2847 *no_add_attrs = true;
2848 return tree_cons (name, args, NULL_TREE);
2850 else
2852 warning (OPT_Wattributes, "%qs attribute ignored",
2853 IDENTIFIER_POINTER (name));
2858 return NULL_TREE;
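/* Example usage of the attribute handled above (illustrative):

   void handler (void) __attribute__ ((interrupt ("IRQ")));

   The optional string argument selects the interrupt type; values that
   arm_isr_value does not recognize produce the "attribute ignored"
   warning above.  */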
2861 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2862 /* Handle the "notshared" attribute. This attribute is another way of
2863 requesting hidden visibility. ARM's compiler supports
2864 "__declspec(notshared)"; we support the same thing via an
2865 attribute. */
2867 static tree
2868 arm_handle_notshared_attribute (tree *node,
2869 tree name ATTRIBUTE_UNUSED,
2870 tree args ATTRIBUTE_UNUSED,
2871 int flags ATTRIBUTE_UNUSED,
2872 bool *no_add_attrs)
2874 tree decl = TYPE_NAME (*node);
2876 if (decl)
2878 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2879 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2880 *no_add_attrs = false;
2882 return NULL_TREE;
2884 #endif
2886 /* Return 0 if the attributes for two types are incompatible, 1 if they
2887 are compatible, and 2 if they are nearly compatible (which causes a
2888 warning to be generated). */
2889 static int
2890 arm_comp_type_attributes (tree type1, tree type2)
2892 int l1, l2, s1, s2;
2894 /* Check for mismatch of non-default calling convention. */
2895 if (TREE_CODE (type1) != FUNCTION_TYPE)
2896 return 1;
2898 /* Check for mismatched call attributes. */
2899 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2900 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2901 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2902 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2904 /* Only bother to check if an attribute is defined. */
2905 if (l1 | l2 | s1 | s2)
2907 /* If one type has an attribute, the other must have the same attribute. */
2908 if ((l1 != l2) || (s1 != s2))
2909 return 0;
2911 /* Disallow mixed attributes. */
2912 if ((l1 & s2) || (l2 & s1))
2913 return 0;
2916 /* Check for mismatched ISR attribute. */
2917 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2918 if (! l1)
2919 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2920 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2921 if (! l2)
2922 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2923 if (l1 != l2)
2924 return 0;
2926 return 1;
2929 /* Encode long_call or short_call attribute by prefixing
2930 symbol name in DECL with a special character FLAG. */
2931 void
2932 arm_encode_call_attribute (tree decl, int flag)
2934 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2935 int len = strlen (str);
2936 char * newstr;
2938 /* Do not allow weak functions to be treated as short call. */
2939 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2940 return;
2942 newstr = alloca (len + 2);
2943 newstr[0] = flag;
2944 strcpy (newstr + 1, str);
2946 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2947 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
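/* So, for example, a symbol "foo" becomes "<FLAG>foo", where FLAG is
   SHORT_CALL_FLAG_CHAR or LONG_CALL_FLAG_CHAR; the
   ENCODED_*_CALL_ATTR_P macros used elsewhere in this file test for
   that prefix.  */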
2950 /* Assigns default attributes to a newly defined type. This is used to
2951 set short_call/long_call attributes for function types of
2952 functions defined inside corresponding #pragma scopes. */
2953 static void
2954 arm_set_default_type_attributes (tree type)
2956 /* Add __attribute__ ((long_call)) to all functions when inside
2957 #pragma long_calls, or __attribute__ ((short_call)) when inside
2958 #pragma no_long_calls. */
2959 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2961 tree type_attr_list, attr_name;
2962 type_attr_list = TYPE_ATTRIBUTES (type);
2964 if (arm_pragma_long_calls == LONG)
2965 attr_name = get_identifier ("long_call");
2966 else if (arm_pragma_long_calls == SHORT)
2967 attr_name = get_identifier ("short_call");
2968 else
2969 return;
2971 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2972 TYPE_ATTRIBUTES (type) = type_attr_list;
2976 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2977 defined within the current compilation unit. If this cannot be
2978 determined, then 0 is returned. */
2979 static int
2980 current_file_function_operand (rtx sym_ref)
2982 /* This is a bit of a fib. A function will have a short call flag
2983 applied to its name if it has the short call attribute, or it has
2984 already been defined within the current compilation unit. */
2985 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2986 return 1;
2988 /* The current function is always defined within the current compilation
2989 unit. If it is a weak definition, however, then this may not be the real
2990 definition of the function, and so we have to say no. */
2991 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2992 && !DECL_WEAK (current_function_decl))
2993 return 1;
2995 /* We cannot make the determination - default to returning 0. */
2996 return 0;
2999 /* Return nonzero if a 32 bit "long_call" should be generated for
3000 this call. We generate a long_call if the function:
3002 a. has an __attribute__ ((long_call))
3003 or b. is within the scope of a #pragma long_calls
3004 or c. the -mlong-calls command line switch has been specified,
3005 and either:
3006 1. -ffunction-sections is in effect
3007 or 2. the current function has __attribute__ ((section))
3008 or 3. the target function has __attribute__ ((section))
3010 However we do not generate a long call if the function:
3012 d. has an __attribute__ ((short_call))
3013 or e. is inside the scope of a #pragma no_long_calls
3014 or f. is defined within the current compilation unit.
3016 This function will be called by C fragments contained in the machine
3017 description file. SYM_REF and CALL_COOKIE correspond to the matched
3018 rtl operands. CALL_SYMBOL is used to distinguish between
3019 two different callers of the function. It is set to 1 in the
3020 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3021 and "call_value" patterns. This is because of the difference in the
3022 SYM_REFs passed by these patterns. */
3023 int
3024 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3026 if (!call_symbol)
3028 if (GET_CODE (sym_ref) != MEM)
3029 return 0;
3031 sym_ref = XEXP (sym_ref, 0);
3034 if (GET_CODE (sym_ref) != SYMBOL_REF)
3035 return 0;
3037 if (call_cookie & CALL_SHORT)
3038 return 0;
3040 if (TARGET_LONG_CALLS)
3042 if (flag_function_sections
3043 || DECL_SECTION_NAME (current_function_decl))
3044 /* c.3 is handled by the definition of the
3045 ARM_DECLARE_FUNCTION_SIZE macro. */
3046 return 1;
3049 if (current_file_function_operand (sym_ref))
3050 return 0;
3052 return (call_cookie & CALL_LONG)
3053 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3054 || TARGET_LONG_CALLS;
3057 /* Return nonzero if it is ok to make a tail-call to DECL. */
3058 static bool
3059 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3061 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3063 if (cfun->machine->sibcall_blocked)
3064 return false;
3066 /* Never tailcall something for which we have no decl, or if we
3067 are in Thumb mode. */
3068 if (decl == NULL || TARGET_THUMB)
3069 return false;
3071 /* Get the calling method. */
3072 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3073 call_type = CALL_SHORT;
3074 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3075 call_type = CALL_LONG;
3077 /* Cannot tail-call to long calls, since these are out of range of
3078 a branch instruction. However, if not compiling PIC, we know
3079 we can reach the symbol if it is in this compilation unit. */
3080 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3081 return false;
3083 /* If we are interworking and the function is not declared static
3084 then we can't tail-call it unless we know that it exists in this
3085 compilation unit (since it might be a Thumb routine). */
3086 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3087 return false;
3089 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3090 if (IS_INTERRUPT (arm_current_func_type ()))
3091 return false;
3093 /* Everything else is ok. */
3094 return true;
3098 /* Addressing mode support functions. */
3100 /* Return nonzero if X is a legitimate immediate operand when compiling
3101 for PIC. */
3102 int
3103 legitimate_pic_operand_p (rtx x)
3105 if (CONSTANT_P (x)
3106 && flag_pic
3107 && (GET_CODE (x) == SYMBOL_REF
3108 || (GET_CODE (x) == CONST
3109 && GET_CODE (XEXP (x, 0)) == PLUS
3110 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
3111 return 0;
3113 return 1;
3116 rtx
3117 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3119 if (GET_CODE (orig) == SYMBOL_REF
3120 || GET_CODE (orig) == LABEL_REF)
3122 #ifndef AOF_ASSEMBLER
3123 rtx pic_ref, address;
3124 #endif
3125 rtx insn;
3126 int subregs = 0;
3128 if (reg == 0)
3130 gcc_assert (!no_new_pseudos);
3131 reg = gen_reg_rtx (Pmode);
3133 subregs = 1;
3136 #ifdef AOF_ASSEMBLER
3137 /* The AOF assembler can generate relocations for these directly, and
3138 understands that the PIC register has to be added into the offset. */
3139 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3140 #else
3141 if (subregs)
3142 address = gen_reg_rtx (Pmode);
3143 else
3144 address = reg;
3146 if (TARGET_ARM)
3147 emit_insn (gen_pic_load_addr_arm (address, orig));
3148 else
3149 emit_insn (gen_pic_load_addr_thumb (address, orig));
3151 if ((GET_CODE (orig) == LABEL_REF
3152 || (GET_CODE (orig) == SYMBOL_REF
3153 && SYMBOL_REF_LOCAL_P (orig)))
3154 && NEED_GOT_RELOC)
3155 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3156 else
3158 pic_ref = gen_const_mem (Pmode,
3159 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3160 address));
3163 insn = emit_move_insn (reg, pic_ref);
3164 #endif
3165 current_function_uses_pic_offset_table = 1;
3166 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3167 by the loop optimizer. */
3168 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3169 REG_NOTES (insn));
3170 return reg;
3172 else if (GET_CODE (orig) == CONST)
3174 rtx base, offset;
3176 if (GET_CODE (XEXP (orig, 0)) == PLUS
3177 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3178 return orig;
3180 if (reg == 0)
3182 gcc_assert (!no_new_pseudos);
3183 reg = gen_reg_rtx (Pmode);
3186 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3188 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3189 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3190 base == reg ? 0 : reg);
3192 if (GET_CODE (offset) == CONST_INT)
3194 /* The base register doesn't really matter; we only want to
3195 test the index for the appropriate mode. */
3196 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3198 gcc_assert (!no_new_pseudos);
3199 offset = force_reg (Pmode, offset);
3202 if (GET_CODE (offset) == CONST_INT)
3203 return plus_constant (base, INTVAL (offset));
3206 if (GET_MODE_SIZE (mode) > 4
3207 && (GET_MODE_CLASS (mode) == MODE_INT
3208 || TARGET_SOFT_FLOAT))
3210 emit_insn (gen_addsi3 (reg, base, offset));
3211 return reg;
3214 return gen_rtx_PLUS (Pmode, base, offset);
3217 return orig;
3221 /* Find a spare low register to use during the prologue of a function. */
3223 static int
3224 thumb_find_work_register (unsigned long pushed_regs_mask)
3226 int reg;
3228 /* Check the argument registers first as these are call-used. The
3229 register allocation order means that sometimes r3 might be used
3230 but earlier argument registers might not, so check them all. */
3231 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3232 if (!regs_ever_live[reg])
3233 return reg;
3235 /* Before going on to check the call-saved registers we can try a couple
3236 more ways of deducing that r3 is available. The first is when we are
3237 pushing anonymous arguments onto the stack and we have fewer than 4
3238 registers' worth of fixed arguments (*). In this case r3 will be part of
3239 the variable argument list and so we can be sure that it will be
3240 pushed right at the start of the function. Hence it will be available
3241 for the rest of the prologue.
3242 (*): i.e. current_function_pretend_args_size is greater than 0. */
3243 if (cfun->machine->uses_anonymous_args
3244 && current_function_pretend_args_size > 0)
3245 return LAST_ARG_REGNUM;
3247 /* The other case is when we have fixed arguments but fewer than 4
3248 registers' worth. In this case r3 might be used in the body of the function, but
3249 it is not being used to convey an argument into the function. In theory
3250 we could just check current_function_args_size to see how many bytes are
3251 being passed in argument registers, but it seems that it is unreliable.
3252 Sometimes it will have the value 0 when in fact arguments are being
3253 passed. (See testcase execute/20021111-1.c for an example). So we also
3254 check the args_info.nregs field as well. The problem with this field is
3255 that it makes no allowances for arguments that are passed to the
3256 function but which are not used. Hence we could miss an opportunity
3257 when a function has an unused argument in r3. But it is better to be
3258 safe than to be sorry. */
3259 if (! cfun->machine->uses_anonymous_args
3260 && current_function_args_size >= 0
3261 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3262 && cfun->args_info.nregs < 4)
3263 return LAST_ARG_REGNUM;
3265 /* Otherwise look for a call-saved register that is going to be pushed. */
3266 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3267 if (pushed_regs_mask & (1 << reg))
3268 return reg;
3270 /* Something went wrong - thumb_compute_save_reg_mask()
3271 should have arranged for a suitable register to be pushed. */
3272 gcc_unreachable ();
3276 /* Generate code to load the PIC register. In Thumb mode SCRATCH is a
3277 low register. */
3279 void
3280 arm_load_pic_register (unsigned int scratch)
3282 #ifndef AOF_ASSEMBLER
3283 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3284 rtx global_offset_table;
3286 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3287 return;
3289 gcc_assert (flag_pic);
3291 l1 = gen_label_rtx ();
3293 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3294 /* On the ARM the PC register contains 'dot + 8' at the time of the
3295 addition; on the Thumb it is 'dot + 4'. */
3296 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3297 if (GOT_PCREL)
3298 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3299 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3300 else
3301 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3303 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3305 if (TARGET_ARM)
3307 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3308 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3310 else
3312 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3314 /* We will have pushed the pic register, so should always be
3315 able to find a work register. */
3316 pic_tmp = gen_rtx_REG (SImode, scratch);
3317 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3318 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3320 else
3321 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3322 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3325 /* Need to emit this whether or not we obey regdecls,
3326 since setjmp/longjmp can cause life info to screw up. */
3327 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3328 #endif /* AOF_ASSEMBLER */
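/* In ARM state the sequence emitted above therefore has the shape
   (illustrative; register and label names are invented):

       ldr   rP, L2              @ rP = &GOT - (L1 + 8)
   L1: add   rP, pc, rP          @ rP = &GOT
       ...
   L2: .word _GLOBAL_OFFSET_TABLE_ - (L1 + 8)

   relying on the PC reading as 'dot + 8' as noted above.  */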
3332 /* Return nonzero if X is valid as an ARM state addressing register. */
3333 static int
3334 arm_address_register_rtx_p (rtx x, int strict_p)
3336 int regno;
3338 if (GET_CODE (x) != REG)
3339 return 0;
3341 regno = REGNO (x);
3343 if (strict_p)
3344 return ARM_REGNO_OK_FOR_BASE_P (regno);
3346 return (regno <= LAST_ARM_REGNUM
3347 || regno >= FIRST_PSEUDO_REGISTER
3348 || regno == FRAME_POINTER_REGNUM
3349 || regno == ARG_POINTER_REGNUM);
3352 /* Return nonzero if X is a valid ARM state address operand. */
3353 int
3354 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3355 int strict_p)
3357 bool use_ldrd;
3358 enum rtx_code code = GET_CODE (x);
3360 if (arm_address_register_rtx_p (x, strict_p))
3361 return 1;
3363 use_ldrd = (TARGET_LDRD
3364 && (mode == DImode
3365 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3367 if (code == POST_INC || code == PRE_DEC
3368 || ((code == PRE_INC || code == POST_DEC)
3369 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3370 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3372 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3373 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3374 && GET_CODE (XEXP (x, 1)) == PLUS
3375 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3377 rtx addend = XEXP (XEXP (x, 1), 1);
3379 /* Don't allow ldrd post increment by register because it's hard
3380 to fix up invalid register choices. */
3381 if (use_ldrd
3382 && GET_CODE (x) == POST_MODIFY
3383 && GET_CODE (addend) == REG)
3384 return 0;
3386 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3387 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3390 /* After reload constants split into minipools will have addresses
3391 from a LABEL_REF. */
3392 else if (reload_completed
3393 && (code == LABEL_REF
3394 || (code == CONST
3395 && GET_CODE (XEXP (x, 0)) == PLUS
3396 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3397 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3398 return 1;
3400 else if (mode == TImode)
3401 return 0;
3403 else if (code == PLUS)
3405 rtx xop0 = XEXP (x, 0);
3406 rtx xop1 = XEXP (x, 1);
3408 return ((arm_address_register_rtx_p (xop0, strict_p)
3409 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3410 || (arm_address_register_rtx_p (xop1, strict_p)
3411 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3414 #if 0
3415 /* Reload currently can't handle MINUS, so disable this for now */
3416 else if (GET_CODE (x) == MINUS)
3418 rtx xop0 = XEXP (x, 0);
3419 rtx xop1 = XEXP (x, 1);
3421 return (arm_address_register_rtx_p (xop0, strict_p)
3422 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3424 #endif
3426 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3427 && code == SYMBOL_REF
3428 && CONSTANT_POOL_ADDRESS_P (x)
3429 && ! (flag_pic
3430 && symbol_mentioned_p (get_pool_constant (x))))
3431 return 1;
3433 return 0;
3436 /* Return nonzero if INDEX is valid for an address index operand in
3437 ARM state. */
3438 static int
3439 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3440 int strict_p)
3442 HOST_WIDE_INT range;
3443 enum rtx_code code = GET_CODE (index);
3445 /* Standard coprocessor addressing modes. */
3446 if (TARGET_HARD_FLOAT
3447 && (TARGET_FPA || TARGET_MAVERICK)
3448 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3449 || (TARGET_MAVERICK && mode == DImode)))
3450 return (code == CONST_INT && INTVAL (index) < 1024
3451 && INTVAL (index) > -1024
3452 && (INTVAL (index) & 3) == 0);
3454 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3455 return (code == CONST_INT
3456 && INTVAL (index) < 1024
3457 && INTVAL (index) > -1024
3458 && (INTVAL (index) & 3) == 0);
3460 if (arm_address_register_rtx_p (index, strict_p)
3461 && (GET_MODE_SIZE (mode) <= 4))
3462 return 1;
3464 if (mode == DImode || mode == DFmode)
3466 if (code == CONST_INT)
3468 HOST_WIDE_INT val = INTVAL (index);
3470 if (TARGET_LDRD)
3471 return val > -256 && val < 256;
3472 else
3473 return val > -4096 && val < 4092;
3476 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3479 if (GET_MODE_SIZE (mode) <= 4
3480 && ! (arm_arch4
3481 && (mode == HImode
3482 || (mode == QImode && outer == SIGN_EXTEND))))
3484 if (code == MULT)
3486 rtx xiop0 = XEXP (index, 0);
3487 rtx xiop1 = XEXP (index, 1);
3489 return ((arm_address_register_rtx_p (xiop0, strict_p)
3490 && power_of_two_operand (xiop1, SImode))
3491 || (arm_address_register_rtx_p (xiop1, strict_p)
3492 && power_of_two_operand (xiop0, SImode)));
3494 else if (code == LSHIFTRT || code == ASHIFTRT
3495 || code == ASHIFT || code == ROTATERT)
3497 rtx op = XEXP (index, 1);
3499 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3500 && GET_CODE (op) == CONST_INT
3501 && INTVAL (op) > 0
3502 && INTVAL (op) <= 31);
3506 /* For ARM v4 we may be doing a sign-extend operation during the
3507 load. */
3508 if (arm_arch4)
3510 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3511 range = 256;
3512 else
3513 range = 4096;
3515 else
3516 range = (mode == HImode) ? 4095 : 4096;
3518 return (code == CONST_INT
3519 && INTVAL (index) < range
3520 && INTVAL (index) > -range);
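/* Illustrative summary of the above (not exhaustive): a scaled
   register index such as "ldr r0, [r1, r2, asl #2]" is accepted via
   the MULT/shift cases, while immediate offsets are range-checked per
   mode, e.g. up to +/-4095 for SImode but only +/-255 for HImode on
   ARMv4, matching what ldr and ldrh can encode.  */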
3523 /* Return nonzero if X is valid as a Thumb state base register. */
3524 static int
3525 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3527 int regno;
3529 if (GET_CODE (x) != REG)
3530 return 0;
3532 regno = REGNO (x);
3534 if (strict_p)
3535 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3537 return (regno <= LAST_LO_REGNUM
3538 || regno > LAST_VIRTUAL_REGISTER
3539 || regno == FRAME_POINTER_REGNUM
3540 || (GET_MODE_SIZE (mode) >= 4
3541 && (regno == STACK_POINTER_REGNUM
3542 || regno >= FIRST_PSEUDO_REGISTER
3543 || x == hard_frame_pointer_rtx
3544 || x == arg_pointer_rtx)));
3547 /* Return nonzero if x is a legitimate index register. This is the case
3548 for any base register that can access a QImode object. */
3549 inline static int
3550 thumb_index_register_rtx_p (rtx x, int strict_p)
3552 return thumb_base_register_rtx_p (x, QImode, strict_p);
3555 /* Return nonzero if x is a legitimate Thumb-state address.
3557 The AP may be eliminated to either the SP or the FP, so we use the
3558 least common denominator, e.g. SImode, and offsets from 0 to 64.
3560 ??? Verify whether the above is the right approach.
3562 ??? Also, the FP may be eliminated to the SP, so perhaps that
3563 needs special handling also.
3565 ??? Look at how the mips16 port solves this problem. It probably uses
3566 better ways to solve some of these problems.
3568 Although it is not incorrect, we don't accept QImode and HImode
3569 addresses based on the frame pointer or arg pointer until the
3570 reload pass starts. This is so that eliminating such addresses
3571 into stack based ones won't produce impossible code. */
3572 int
3573 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3575 /* ??? Not clear if this is right. Experiment. */
3576 if (GET_MODE_SIZE (mode) < 4
3577 && !(reload_in_progress || reload_completed)
3578 && (reg_mentioned_p (frame_pointer_rtx, x)
3579 || reg_mentioned_p (arg_pointer_rtx, x)
3580 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3581 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3582 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3583 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3584 return 0;
3586 /* Accept any base register. SP only in SImode or larger. */
3587 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3588 return 1;
3590 /* This is PC relative data before arm_reorg runs. */
3591 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3592 && GET_CODE (x) == SYMBOL_REF
3593 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3594 return 1;
3596 /* This is PC relative data after arm_reorg runs. */
3597 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3598 && (GET_CODE (x) == LABEL_REF
3599 || (GET_CODE (x) == CONST
3600 && GET_CODE (XEXP (x, 0)) == PLUS
3601 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3602 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3603 return 1;
3605 /* Post-inc indexing only supported for SImode and larger. */
3606 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3607 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3608 return 1;
3610 else if (GET_CODE (x) == PLUS)
3612 /* REG+REG address can be any two index registers. */
3613 /* We disallow FRAME+REG addressing since we know that FRAME
3614 will be replaced with STACK, and SP relative addressing only
3615 permits SP+OFFSET. */
3616 if (GET_MODE_SIZE (mode) <= 4
3617 && XEXP (x, 0) != frame_pointer_rtx
3618 && XEXP (x, 1) != frame_pointer_rtx
3619 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3620 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3621 return 1;
3623 /* REG+const has 5-7 bit offset for non-SP registers. */
3624 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3625 || XEXP (x, 0) == arg_pointer_rtx)
3626 && GET_CODE (XEXP (x, 1)) == CONST_INT
3627 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3628 return 1;
3630 /* REG+const has 10 bit offset for SP, but only SImode and
3631 larger is supported. */
3632 /* ??? Should probably check for DI/DFmode overflow here
3633 just like GO_IF_LEGITIMATE_OFFSET does. */
3634 else if (GET_CODE (XEXP (x, 0)) == REG
3635 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3636 && GET_MODE_SIZE (mode) >= 4
3637 && GET_CODE (XEXP (x, 1)) == CONST_INT
3638 && INTVAL (XEXP (x, 1)) >= 0
3639 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3640 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3641 return 1;
3643 else if (GET_CODE (XEXP (x, 0)) == REG
3644 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3645 && GET_MODE_SIZE (mode) >= 4
3646 && GET_CODE (XEXP (x, 1)) == CONST_INT
3647 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3648 return 1;
3651 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3652 && GET_MODE_SIZE (mode) == 4
3653 && GET_CODE (x) == SYMBOL_REF
3654 && CONSTANT_POOL_ADDRESS_P (x)
3655 && !(flag_pic
3656 && symbol_mentioned_p (get_pool_constant (x))))
3657 return 1;
3659 return 0;
3662 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3663 instruction of mode MODE. */
3664 int
3665 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3667 switch (GET_MODE_SIZE (mode))
3669 case 1:
3670 return val >= 0 && val < 32;
3672 case 2:
3673 return val >= 0 && val < 64 && (val & 1) == 0;
3675 default:
3676 return (val >= 0
3677 && (val + GET_MODE_SIZE (mode)) <= 128
3678 && (val & 3) == 0);
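/* In concrete terms (illustrative): byte accesses accept offsets
   0..31, halfword accesses even offsets 0..62, and word or larger
   accesses multiples of four up to 128 minus the access size (0..124
   for SImode), mirroring the 5-bit scaled offset fields of the Thumb
   load/store instructions.  */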
3682 /* Try machine-dependent ways of modifying an illegitimate address
3683 to be legitimate. If we find one, return the new, valid address. */
3684 rtx
3685 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3687 if (GET_CODE (x) == PLUS)
3689 rtx xop0 = XEXP (x, 0);
3690 rtx xop1 = XEXP (x, 1);
3692 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3693 xop0 = force_reg (SImode, xop0);
3695 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3696 xop1 = force_reg (SImode, xop1);
3698 if (ARM_BASE_REGISTER_RTX_P (xop0)
3699 && GET_CODE (xop1) == CONST_INT)
3701 HOST_WIDE_INT n, low_n;
3702 rtx base_reg, val;
3703 n = INTVAL (xop1);
3705 /* VFP addressing modes actually allow greater offsets, but for
3706 now we just stick with the lowest common denominator. */
3707 if (mode == DImode
3708 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3710 low_n = n & 0x0f;
3711 n &= ~0x0f;
3712 if (low_n > 4)
3714 n += 16;
3715 low_n -= 16;
3718 else
3720 low_n = ((mode) == TImode ? 0
3721 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3722 n -= low_n;
3725 base_reg = gen_reg_rtx (SImode);
3726 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3727 GEN_INT (n)), NULL_RTX);
3728 emit_move_insn (base_reg, val);
3729 x = (low_n == 0 ? base_reg
3730 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3732 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3733 x = gen_rtx_PLUS (SImode, xop0, xop1);
3736 /* XXX We don't allow MINUS any more -- see comment in
3737 arm_legitimate_address_p (). */
3738 else if (GET_CODE (x) == MINUS)
3740 rtx xop0 = XEXP (x, 0);
3741 rtx xop1 = XEXP (x, 1);
3743 if (CONSTANT_P (xop0))
3744 xop0 = force_reg (SImode, xop0);
3746 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3747 xop1 = force_reg (SImode, xop1);
3749 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3750 x = gen_rtx_MINUS (SImode, xop0, xop1);
3753 if (flag_pic)
3755 /* We need to find and carefully transform any SYMBOL and LABEL
3756 references; so go back to the original address expression. */
3757 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3759 if (new_x != orig_x)
3760 x = new_x;
3763 return x;
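/* Editorial illustration, not part of GCC: the offset split performed
   above, as a standalone sketch.  DImode (and soft-float/VFP DFmode)
   accesses keep a small residue (the low 4 bits, biased toward zero);
   other modes keep a 12-bit residue matching the normal ARM immediate
   offset field.  The remainder is added to the base register.  */
#if 0
static void
split_offset_sketch (long n, int small_field, long *base_adj, long *residue)
{
  if (small_field)
    {
      *residue = n & 0x0f;
      *base_adj = n & ~0x0f;
      if (*residue > 4)		/* Bias the residue toward zero.  */
        {
          *base_adj += 16;
          *residue -= 16;
        }
    }
  else
    {
      *residue = n >= 0 ? (n & 0xfff) : -((-n) & 0xfff);
      *base_adj = n - *residue;
    }
}
#endif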
3767 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3768 to be legitimate. If we find one, return the new, valid address. */
3770 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3772 if (GET_CODE (x) == PLUS
3773 && GET_CODE (XEXP (x, 1)) == CONST_INT
3774 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3775 || INTVAL (XEXP (x, 1)) < 0))
3777 rtx xop0 = XEXP (x, 0);
3778 rtx xop1 = XEXP (x, 1);
3779 HOST_WIDE_INT offset = INTVAL (xop1);
3781 /* Try to fold the offset into a biasing of the base register and
3782 then offsetting that. Don't do this when optimizing for space
3783 since it can cause too many CSEs. */
3784 if (optimize_size && offset >= 0
3785 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3787 HOST_WIDE_INT delta;
3789 if (offset >= 256)
3790 delta = offset - (256 - GET_MODE_SIZE (mode));
3791 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3792 delta = 31 * GET_MODE_SIZE (mode);
3793 else
3794 delta = offset & (~31 * GET_MODE_SIZE (mode));
3796 xop0 = force_operand (plus_constant (xop0, offset - delta),
3797 NULL_RTX);
3798 x = plus_constant (xop0, delta);
3800 else if (offset < 0 && offset > -256)
3801 /* Small negative offsets are best done with a subtract before the
3802 dereference; forcing these into a register normally takes two
3803 instructions. */
3804 x = force_operand (x, NULL_RTX);
3805 else
3807 /* For the remaining cases, force the constant into a register. */
3808 xop1 = force_reg (SImode, xop1);
3809 x = gen_rtx_PLUS (SImode, xop0, xop1);
3812 else if (GET_CODE (x) == PLUS
3813 && s_register_operand (XEXP (x, 1), SImode)
3814 && !s_register_operand (XEXP (x, 0), SImode))
3816 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3818 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3821 if (flag_pic)
3823 /* We need to find and carefully transform any SYMBOL and LABEL
3824 references; so go back to the original address expression. */
3825 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3827 if (new_x != orig_x)
3828 x = new_x;
3831 return x;
3836 #define REG_OR_SUBREG_REG(X) \
3837 (GET_CODE (X) == REG \
3838 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3840 #define REG_OR_SUBREG_RTX(X) \
3841 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3843 #ifndef COSTS_N_INSNS
3844 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3845 #endif
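/* Editorial note: under the fallback definition above, COSTS_N_INSNS (1)
   is 2, COSTS_N_INSNS (2) is 6 and COSTS_N_INSNS (3) is 10, i.e. each
   additional insn adds 4 units.  Illustrative check (not compiled):  */
#if 0
static void
costs_n_insns_example (void)
{
  int one = COSTS_N_INSNS (1);	/* 1 * 4 - 2 == 2  */
  int two = COSTS_N_INSNS (2);	/* 2 * 4 - 2 == 6  */
  (void) one; (void) two;
}
#endif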
3846 static inline int
3847 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3849 enum machine_mode mode = GET_MODE (x);
3851 switch (code)
3853 case ASHIFT:
3854 case ASHIFTRT:
3855 case LSHIFTRT:
3856 case ROTATERT:
3857 case PLUS:
3858 case MINUS:
3859 case COMPARE:
3860 case NEG:
3861 case NOT:
3862 return COSTS_N_INSNS (1);
3864 case MULT:
3865 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3867 int cycles = 0;
3868 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3870 while (i)
3872 i >>= 2;
3873 cycles++;
3875 return COSTS_N_INSNS (2) + cycles;
3877 return COSTS_N_INSNS (1) + 16;
3879 case SET:
3880 return (COSTS_N_INSNS (1)
3881 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3882 + (GET_CODE (SET_DEST (x)) == MEM)));
3884 case CONST_INT:
3885 if (outer == SET)
3887 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3888 return 0;
3889 if (thumb_shiftable_const (INTVAL (x)))
3890 return COSTS_N_INSNS (2);
3891 return COSTS_N_INSNS (3);
3893 else if ((outer == PLUS || outer == COMPARE)
3894 && INTVAL (x) < 256 && INTVAL (x) > -256)
3895 return 0;
3896 else if (outer == AND
3897 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3898 return COSTS_N_INSNS (1);
3899 else if (outer == ASHIFT || outer == ASHIFTRT
3900 || outer == LSHIFTRT)
3901 return 0;
3902 return COSTS_N_INSNS (2);
3904 case CONST:
3905 case CONST_DOUBLE:
3906 case LABEL_REF:
3907 case SYMBOL_REF:
3908 return COSTS_N_INSNS (3);
3910 case UDIV:
3911 case UMOD:
3912 case DIV:
3913 case MOD:
3914 return 100;
3916 case TRUNCATE:
3917 return 99;
3919 case AND:
3920 case XOR:
3921 case IOR:
3922 /* XXX guess. */
3923 return 8;
3925 case MEM:
3926 /* XXX another guess. */
3927 /* Memory costs quite a lot for the first word, but subsequent words
3928 load at the equivalent of a single insn each. */
3929 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3930 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3931 ? 4 : 0));
3933 case IF_THEN_ELSE:
3934 /* XXX a guess. */
3935 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3936 return 14;
3937 return 2;
3939 case ZERO_EXTEND:
3940 /* XXX still guessing. */
3941 switch (GET_MODE (XEXP (x, 0)))
3943 case QImode:
3944 return (1 + (mode == DImode ? 4 : 0)
3945 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3947 case HImode:
3948 return (4 + (mode == DImode ? 4 : 0)
3949 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3951 case SImode:
3952 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3954 default:
3955 return 99;
3958 default:
3959 return 99;
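/* Editorial illustration, not part of GCC: the MULT case above charges
   COSTS_N_INSNS (2) plus one unit per two-bit chunk of the constant
   multiplier, modelling an early-terminating multiplier.  Standalone
   sketch of that chunk count:  */
#if 0
static int
thumb_mult_cycles_sketch (unsigned long i)
{
  int cycles = 0;
  while (i)
    {
      i >>= 2;			/* Two multiplier bits retire per step.  */
      cycles++;
    }
  return cycles;		/* E.g. 0x13 -> 3, 0xff -> 4.  */
}
#endif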
3964 /* Worker routine for arm_rtx_costs. */
3965 static inline int
3966 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3968 enum machine_mode mode = GET_MODE (x);
3969 enum rtx_code subcode;
3970 int extra_cost;
3972 switch (code)
3974 case MEM:
3975 /* Memory costs quite a lot for the first word, but subsequent words
3976 load at the equivalent of a single insn each. */
3977 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3978 + (GET_CODE (x) == SYMBOL_REF
3979 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3981 case DIV:
3982 case MOD:
3983 case UDIV:
3984 case UMOD:
3985 return optimize_size ? COSTS_N_INSNS (2) : 100;
3987 case ROTATE:
3988 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3989 return 4;
3990 /* Fall through */
3991 case ROTATERT:
3992 if (mode != SImode)
3993 return 8;
3994 /* Fall through */
3995 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3996 if (mode == DImode)
3997 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3998 + ((GET_CODE (XEXP (x, 0)) == REG
3999 || (GET_CODE (XEXP (x, 0)) == SUBREG
4000 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4001 ? 0 : 8));
4002 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4003 || (GET_CODE (XEXP (x, 0)) == SUBREG
4004 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4005 ? 0 : 4)
4006 + ((GET_CODE (XEXP (x, 1)) == REG
4007 || (GET_CODE (XEXP (x, 1)) == SUBREG
4008 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4009 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4010 ? 0 : 4));
4012 case MINUS:
4013 if (mode == DImode)
4014 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4015 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4016 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4017 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4018 ? 0 : 8));
4020 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4021 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4022 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4023 && arm_const_double_rtx (XEXP (x, 1))))
4024 ? 0 : 8)
4025 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4026 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4027 && arm_const_double_rtx (XEXP (x, 0))))
4028 ? 0 : 8));
4030 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4031 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4032 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4033 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4034 || subcode == ASHIFTRT || subcode == LSHIFTRT
4035 || subcode == ROTATE || subcode == ROTATERT
4036 || (subcode == MULT
4037 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4038 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4039 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4040 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4041 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4042 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4043 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4044 return 1;
4045 /* Fall through */
4047 case PLUS:
4048 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4049 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4050 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4051 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4052 && arm_const_double_rtx (XEXP (x, 1))))
4053 ? 0 : 8));
4055 /* Fall through */
4056 case AND: case XOR: case IOR:
4057 extra_cost = 0;
4059 /* Normally the frame registers will be split into reg+const during
4060 reload, so it is a bad idea to combine them with other instructions,
4061 since then they might not be moved outside of loops. As a compromise
4062 we allow integration with ops that have a constant as their second
4063 operand. */
4064 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4065 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4066 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4067 || (REG_OR_SUBREG_REG (XEXP (x, 1))
4068 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
4069 extra_cost = 4;
4071 if (mode == DImode)
4072 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4073 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4074 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4075 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4076 ? 0 : 8));
4078 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4079 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4080 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4081 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4082 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4083 ? 0 : 4));
4085 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4086 return (1 + extra_cost
4087 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4088 || subcode == LSHIFTRT || subcode == ASHIFTRT
4089 || subcode == ROTATE || subcode == ROTATERT
4090 || (subcode == MULT
4091 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4092 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4093 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4094 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4095 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4096 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4097 ? 0 : 4));
4099 return 8;
4101 case MULT:
4102 /* This should have been handled by the CPU specific routines. */
4103 gcc_unreachable ();
4105 case TRUNCATE:
4106 if (arm_arch3m && mode == SImode
4107 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4108 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4109 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4110 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4111 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4112 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4113 return 8;
4114 return 99;
4116 case NEG:
4117 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4118 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4119 /* Fall through */
4120 case NOT:
4121 if (mode == DImode)
4122 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4124 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4126 case IF_THEN_ELSE:
4127 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4128 return 14;
4129 return 2;
4131 case COMPARE:
4132 return 1;
4134 case ABS:
4135 return 4 + (mode == DImode ? 4 : 0);
4137 case SIGN_EXTEND:
4138 if (GET_MODE (XEXP (x, 0)) == QImode)
4139 return (4 + (mode == DImode ? 4 : 0)
4140 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4141 /* Fall through */
4142 case ZERO_EXTEND:
4143 switch (GET_MODE (XEXP (x, 0)))
4145 case QImode:
4146 return (1 + (mode == DImode ? 4 : 0)
4147 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4149 case HImode:
4150 return (4 + (mode == DImode ? 4 : 0)
4151 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4153 case SImode:
4154 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4156 case V8QImode:
4157 case V4HImode:
4158 case V2SImode:
4159 case V4QImode:
4160 case V2HImode:
4161 return 1;
4163 default:
4164 gcc_unreachable ();
4166 gcc_unreachable ();
4168 case CONST_INT:
4169 if (const_ok_for_arm (INTVAL (x)))
4170 return outer == SET ? 2 : -1;
4171 else if (outer == AND
4172 && const_ok_for_arm (~INTVAL (x)))
4173 return -1;
4174 else if ((outer == COMPARE
4175 || outer == PLUS || outer == MINUS)
4176 && const_ok_for_arm (-INTVAL (x)))
4177 return -1;
4178 else
4179 return 5;
4181 case CONST:
4182 case LABEL_REF:
4183 case SYMBOL_REF:
4184 return 6;
4186 case CONST_DOUBLE:
4187 if (arm_const_double_rtx (x))
4188 return outer == SET ? 2 : -1;
4189 else if ((outer == COMPARE || outer == PLUS)
4190 && neg_const_double_rtx_ok_for_fpa (x))
4191 return -1;
4192 return 7;
4194 default:
4195 return 99;
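/* Editorial illustration, not part of GCC: the MINUS/PLUS cases above
   treat (mult x 2^k) as free because it folds into a shifted operand;
   the test used is the usual n & (n - 1) == 0 idiom (which, as written
   there, also accepts 0).  Standalone form:  */
#if 0
static int
power_of_two_sketch (unsigned long n)
{
  return (n & (n - 1)) == 0;	/* True for 0, 1, 2, 4, 8, ...  */
}
#endif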
4199 /* RTX costs when optimizing for size. */
4200 static bool
4201 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4203 enum machine_mode mode = GET_MODE (x);
4205 if (TARGET_THUMB)
4207 /* XXX TBD. For now, use the standard costs. */
4208 *total = thumb_rtx_costs (x, code, outer_code);
4209 return true;
4212 switch (code)
4214 case MEM:
4215 /* A memory access costs 1 insn if the mode is small, or the address is
4216 a single register; otherwise it costs one insn per word. */
4217 if (REG_P (XEXP (x, 0)))
4218 *total = COSTS_N_INSNS (1);
4219 else
4220 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4221 return true;
4223 case DIV:
4224 case MOD:
4225 case UDIV:
4226 case UMOD:
4227 /* Needs a libcall, so it costs about this. */
4228 *total = COSTS_N_INSNS (2);
4229 return false;
4231 case ROTATE:
4232 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4234 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4235 return true;
4237 /* Fall through */
4238 case ROTATERT:
4239 case ASHIFT:
4240 case LSHIFTRT:
4241 case ASHIFTRT:
4242 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4244 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4245 return true;
4247 else if (mode == SImode)
4249 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4250 /* Slightly disparage register shifts, but not by much. */
4251 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4252 *total += 1 + rtx_cost (XEXP (x, 1), code);
4253 return true;
4256 /* Needs a libcall. */
4257 *total = COSTS_N_INSNS (2);
4258 return false;
4260 case MINUS:
4261 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4263 *total = COSTS_N_INSNS (1);
4264 return false;
4267 if (mode == SImode)
4269 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4270 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4272 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4273 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4274 || subcode1 == ROTATE || subcode1 == ROTATERT
4275 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4276 || subcode1 == ASHIFTRT)
4278 /* It's just the cost of the two operands. */
4279 *total = 0;
4280 return false;
4283 *total = COSTS_N_INSNS (1);
4284 return false;
4287 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4288 return false;
4290 case PLUS:
4291 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4293 *total = COSTS_N_INSNS (1);
4294 return false;
4297 /* Fall through */
4298 case AND: case XOR: case IOR:
4299 if (mode == SImode)
4301 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4303 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4304 || subcode == LSHIFTRT || subcode == ASHIFTRT
4305 || (code == AND && subcode == NOT))
4307 /* It's just the cost of the two operands. */
4308 *total = 0;
4309 return false;
4313 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4314 return false;
4316 case MULT:
4317 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4318 return false;
4320 case NEG:
4321 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4322 { *total = COSTS_N_INSNS (1); return false; }
4323 /* Fall through */
4324 case NOT:
4325 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4327 return false;
4329 case IF_THEN_ELSE:
4330 *total = 0;
4331 return false;
4333 case COMPARE:
4334 if (cc_register (XEXP (x, 0), VOIDmode))
4335 *total = 0;
4336 else
4337 *total = COSTS_N_INSNS (1);
4338 return false;
4340 case ABS:
4341 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4342 *total = COSTS_N_INSNS (1);
4343 else
4344 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4345 return false;
4347 case SIGN_EXTEND:
4348 *total = 0;
4349 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4351 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4352 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4354 if (mode == DImode)
4355 *total += COSTS_N_INSNS (1);
4356 return false;
4358 case ZERO_EXTEND:
4359 *total = 0;
4360 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4362 switch (GET_MODE (XEXP (x, 0)))
4364 case QImode:
4365 *total += COSTS_N_INSNS (1);
4366 break;
4368 case HImode:
4369 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4371 case SImode:
4372 break;
4374 default:
4375 *total += COSTS_N_INSNS (2);
4379 if (mode == DImode)
4380 *total += COSTS_N_INSNS (1);
4382 return false;
4384 case CONST_INT:
4385 if (const_ok_for_arm (INTVAL (x)))
4386 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4387 else if (const_ok_for_arm (~INTVAL (x)))
4388 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4389 else if (const_ok_for_arm (-INTVAL (x)))
4391 if (outer_code == COMPARE || outer_code == PLUS
4392 || outer_code == MINUS)
4393 *total = 0;
4394 else
4395 *total = COSTS_N_INSNS (1);
4397 else
4398 *total = COSTS_N_INSNS (2);
4399 return true;
4401 case CONST:
4402 case LABEL_REF:
4403 case SYMBOL_REF:
4404 *total = COSTS_N_INSNS (2);
4405 return true;
4407 case CONST_DOUBLE:
4408 *total = COSTS_N_INSNS (4);
4409 return true;
4411 default:
4412 if (mode != VOIDmode)
4413 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4414 else
4415 *total = COSTS_N_INSNS (4); /* Who knows? */
4416 return false;
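/* Editorial illustration, not part of GCC: the CONST_INT case above
   mirrors the three single-insn ways ARM can absorb a constant --
   directly (MOV), bit-inverted (MVN), or negated and folded into the
   surrounding CMP/ADD/SUB.  Hypothetical standalone sketch, with
   const_ok standing in for const_ok_for_arm:  */
#if 0
static int
const_int_insns_sketch (long v, int (*const_ok) (long))
{
  if (const_ok (v))		/* mov rd, #v  */
    return 1;
  if (const_ok (~v))		/* mvn rd, #~v  */
    return 1;
  if (const_ok (-v))		/* negate and fold into cmp/add/sub  */
    return 1;
  return 2;			/* needs two insns or a pool load  */
}
#endif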
4420 /* RTX costs for cores with a slow MUL implementation. */
4422 static bool
4423 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4425 enum machine_mode mode = GET_MODE (x);
4427 if (TARGET_THUMB)
4429 *total = thumb_rtx_costs (x, code, outer_code);
4430 return true;
4433 switch (code)
4435 case MULT:
4436 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4437 || mode == DImode)
4439 *total = 30;
4440 return true;
4443 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4445 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4446 & (unsigned HOST_WIDE_INT) 0xffffffff);
4447 int cost, const_ok = const_ok_for_arm (i);
4448 int j, booth_unit_size;
4450 /* Tune as appropriate. */
4451 cost = const_ok ? 4 : 8;
4452 booth_unit_size = 2;
4453 for (j = 0; i && j < 32; j += booth_unit_size)
4455 i >>= booth_unit_size;
4456 cost += 2;
4459 *total = cost;
4460 return true;
4463 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4464 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4465 return true;
4467 default:
4468 *total = arm_rtx_costs_1 (x, code, outer_code);
4469 return true;
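/* Editorial illustration, not part of GCC: the constant-multiply cost
   above models a stepping multiplier that retires `booth_unit_size'
   bits per step -- 2 here, 8 in arm_fastmul_rtx_costs below -- adding
   2 units per step until the remaining multiplier bits are zero.
   Standalone sketch:  */
#if 0
static int
stepped_mult_cost_sketch (unsigned long i, int booth_unit_size, int base_cost)
{
  int j, cost = base_cost;
  for (j = 0; i && j < 32; j += booth_unit_size)
    {
      i >>= booth_unit_size;
      cost += 2;
    }
  return cost;			/* Cheap constants terminate early.  */
}
#endif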
4474 /* RTX cost for cores with a fast multiply unit (M variants). */
4476 static bool
4477 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4479 enum machine_mode mode = GET_MODE (x);
4481 if (TARGET_THUMB)
4483 *total = thumb_rtx_costs (x, code, outer_code);
4484 return true;
4487 switch (code)
4489 case MULT:
4490 /* There is no point basing this on the tuning, since it is always the
4491 fast variant if it exists at all. */
4492 if (mode == DImode
4493 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4494 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4495 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4497 *total = 8;
4498 return true;
4502 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4503 || mode == DImode)
4505 *total = 30;
4506 return true;
4509 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4511 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4512 & (unsigned HOST_WIDE_INT) 0xffffffff);
4513 int cost, const_ok = const_ok_for_arm (i);
4514 int j, booth_unit_size;
4516 /* Tune as appropriate. */
4517 cost = const_ok ? 4 : 8;
4518 booth_unit_size = 8;
4519 for (j = 0; i && j < 32; j += booth_unit_size)
4521 i >>= booth_unit_size;
4522 cost += 2;
4525 *total = cost;
4526 return true;
4529 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4530 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4531 return true;
4533 default:
4534 *total = arm_rtx_costs_1 (x, code, outer_code);
4535 return true;
4540 /* RTX cost for XScale CPUs. */
4542 static bool
4543 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4545 enum machine_mode mode = GET_MODE (x);
4547 if (TARGET_THUMB)
4549 *total = thumb_rtx_costs (x, code, outer_code);
4550 return true;
4553 switch (code)
4555 case MULT:
4556 /* There is no point basing this on the tuning, since it is always the
4557 fast variant if it exists at all. */
4558 if (mode == DImode
4559 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4560 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4561 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4563 *total = 8;
4564 return true;
4568 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4569 || mode == DImode)
4571 *total = 30;
4572 return true;
4575 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4577 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4578 & (unsigned HOST_WIDE_INT) 0xffffffff);
4579 int cost, const_ok = const_ok_for_arm (i);
4580 unsigned HOST_WIDE_INT masked_const;
4582 /* The cost will be related to two insns.
4583 First a load of the constant (MOV or LDR), then a multiply. */
4584 cost = 2;
4585 if (! const_ok)
4586 cost += 1; /* LDR is probably more expensive because
4587 of longer result latency. */
4588 masked_const = i & 0xffff8000;
4589 if (masked_const != 0 && masked_const != 0xffff8000)
4591 masked_const = i & 0xf8000000;
4592 if (masked_const == 0 || masked_const == 0xf8000000)
4593 cost += 1;
4594 else
4595 cost += 2;
4597 *total = cost;
4598 return true;
4601 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4602 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4603 return true;
4605 case COMPARE:
4606 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4607 will stall until the multiplication is complete. */
4608 if (GET_CODE (XEXP (x, 0)) == MULT)
4609 *total = 4 + rtx_cost (XEXP (x, 0), code);
4610 else
4611 *total = arm_rtx_costs_1 (x, code, outer_code);
4612 return true;
4614 default:
4615 *total = arm_rtx_costs_1 (x, code, outer_code);
4616 return true;
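/* Editorial illustration, not part of GCC: the XScale MULT case above
   grades the constant by how many significant bits it carries: values
   that sign-extend from 16 bits add nothing, values that sign-extend
   from 28 bits add 1, anything wider adds 2.  Standalone sketch:  */
#if 0
static int
xscale_mult_extra_cost_sketch (unsigned long i)
{
  unsigned long masked = i & 0xffff8000;
  if (masked == 0 || masked == 0xffff8000)
    return 0;			/* fits a 16-bit signed value  */
  masked = i & 0xf8000000;
  if (masked == 0 || masked == 0xf8000000)
    return 1;
  return 2;
}
#endif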
4621 /* RTX costs for 9e (and later) cores. */
4623 static bool
4624 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4626 enum machine_mode mode = GET_MODE (x);
4627 int nonreg_cost;
4628 int cost;
4630 if (TARGET_THUMB)
4632 switch (code)
4634 case MULT:
4635 *total = COSTS_N_INSNS (3);
4636 return true;
4638 default:
4639 *total = thumb_rtx_costs (x, code, outer_code);
4640 return true;
4644 switch (code)
4646 case MULT:
4647 /* There is no point basing this on the tuning, since it is always the
4648 fast variant if it exists at all. */
4649 if (mode == DImode
4650 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4651 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4652 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4654 *total = 3;
4655 return true;
4659 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4661 *total = 30;
4662 return true;
4664 if (mode == DImode)
4666 cost = 7;
4667 nonreg_cost = 8;
4669 else
4671 cost = 2;
4672 nonreg_cost = 4;
4676 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4677 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4678 return true;
4680 default:
4681 *total = arm_rtx_costs_1 (x, code, outer_code);
4682 return true;
4685 /* All address computations that can be done are free, but rtx cost returns
4686 the same for practically all of them. So we weight the different types
4687 of address here in the order (most pref first):
4688 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4689 static inline int
4690 arm_arm_address_cost (rtx x)
4692 enum rtx_code c = GET_CODE (x);
4694 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4695 return 0;
4696 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4697 return 10;
4699 if (c == PLUS || c == MINUS)
4701 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4702 return 2;
4704 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4705 return 3;
4707 return 4;
4710 return 6;
4713 static inline int
4714 arm_thumb_address_cost (rtx x)
4716 enum rtx_code c = GET_CODE (x);
4718 if (c == REG)
4719 return 1;
4720 if (c == PLUS
4721 && GET_CODE (XEXP (x, 0)) == REG
4722 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4723 return 1;
4725 return 2;
4728 static int
4729 arm_address_cost (rtx x)
4731 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
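/* Editorial illustration, not part of GCC: a standalone recap of the
   weighting above.  Cheapest to dearest on ARM: auto-inc/dec, then
   sums (2-4 depending on the operands), then plain registers, then
   direct MEM/LABEL/SYMBOL references; Thumb simply prefers REG and
   REG+const over everything else.  */
#if 0
static int
arm_address_cost_sketch (int autoinc, int symbolic, int sum,
                         int const_first_op, int arith_subterm)
{
  if (autoinc)
    return 0;
  if (symbolic)
    return 10;
  if (sum)
    return const_first_op ? 2 : arith_subterm ? 3 : 4;
  return 6;			/* plain REG and anything else  */
}
#endif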
4734 static int
4735 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4737 rtx i_pat, d_pat;
4739 /* Some true dependencies can have a higher cost depending
4740 on precisely how certain input operands are used. */
4741 if (arm_tune_xscale
4742 && REG_NOTE_KIND (link) == 0
4743 && recog_memoized (insn) >= 0
4744 && recog_memoized (dep) >= 0)
4746 int shift_opnum = get_attr_shift (insn);
4747 enum attr_type attr_type = get_attr_type (dep);
4749 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4750 operand for INSN. If we have a shifted input operand and the
4751 instruction we depend on is another ALU instruction, then we may
4752 have to account for an additional stall. */
4753 if (shift_opnum != 0
4754 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4756 rtx shifted_operand;
4757 int opno;
4759 /* Get the shifted operand. */
4760 extract_insn (insn);
4761 shifted_operand = recog_data.operand[shift_opnum];
4763 /* Iterate over all the operands in DEP. If we write an operand
4764 that overlaps with SHIFTED_OPERAND, then we have to increase the
4765 cost of this dependency. */
4766 extract_insn (dep);
4767 preprocess_constraints ();
4768 for (opno = 0; opno < recog_data.n_operands; opno++)
4770 /* We can ignore strict inputs. */
4771 if (recog_data.operand_type[opno] == OP_IN)
4772 continue;
4774 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4775 shifted_operand))
4776 return 2;
4781 /* XXX This is not strictly true for the FPA. */
4782 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4783 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4784 return 0;
4786 /* Call insns don't incur a stall, even if they follow a load. */
4787 if (REG_NOTE_KIND (link) == 0
4788 && GET_CODE (insn) == CALL_INSN)
4789 return 1;
4791 if ((i_pat = single_set (insn)) != NULL
4792 && GET_CODE (SET_SRC (i_pat)) == MEM
4793 && (d_pat = single_set (dep)) != NULL
4794 && GET_CODE (SET_DEST (d_pat)) == MEM)
4796 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4797 /* This is a load after a store; there is no conflict if the load reads
4798 from a cached area. Assume that loads from the stack, and from the
4799 constant pool are cached, and that others will miss. This is a
4800 hack. */
4802 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4803 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4804 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4805 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4806 return 1;
4809 return cost;
4812 static int fp_consts_inited = 0;
4814 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4815 static const char * const strings_fp[8] =
4817 "0", "1", "2", "3",
4818 "4", "5", "0.5", "10"
4821 static REAL_VALUE_TYPE values_fp[8];
4823 static void
4824 init_fp_table (void)
4826 int i;
4827 REAL_VALUE_TYPE r;
4829 if (TARGET_VFP)
4830 fp_consts_inited = 1;
4831 else
4832 fp_consts_inited = 8;
4834 for (i = 0; i < fp_consts_inited; i++)
4836 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4837 values_fp[i] = r;
4841 /* Return TRUE if rtx X is a valid immediate FP constant. */
4843 arm_const_double_rtx (rtx x)
4845 REAL_VALUE_TYPE r;
4846 int i;
4848 if (!fp_consts_inited)
4849 init_fp_table ();
4851 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4852 if (REAL_VALUE_MINUS_ZERO (r))
4853 return 0;
4855 for (i = 0; i < fp_consts_inited; i++)
4856 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4857 return 1;
4859 return 0;
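/* Editorial illustration, not part of GCC: the same check over plain
   doubles.  Under FPA all eight table values are encodable; under VFP
   the init above leaves only the first entry (0.0) live.  -0.0 is
   always rejected, just as the REAL_VALUE_MINUS_ZERO test does.  */
#if 0
static int
fp_const_ok_sketch (double r, int vfp)
{
  static const double table[8] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 0.5, 10.0 };
  int i, n = vfp ? 1 : 8;

  if (r == 0.0 && 1.0 / r < 0.0)	/* reject -0.0  */
    return 0;
  for (i = 0; i < n; i++)
    if (r == table[i])
      return 1;
  return 0;
}
#endif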
4862 /* Return TRUE if the negation of rtx X is a valid immediate FPA constant. */
4864 neg_const_double_rtx_ok_for_fpa (rtx x)
4866 REAL_VALUE_TYPE r;
4867 int i;
4869 if (!fp_consts_inited)
4870 init_fp_table ();
4872 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4873 r = REAL_VALUE_NEGATE (r);
4874 if (REAL_VALUE_MINUS_ZERO (r))
4875 return 0;
4877 for (i = 0; i < 8; i++)
4878 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4879 return 1;
4881 return 0;
4884 /* Predicates for `match_operand' and `match_operator'. */
4886 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4888 cirrus_memory_offset (rtx op)
4890 /* Reject eliminable registers. */
4891 if (! (reload_in_progress || reload_completed)
4892 && ( reg_mentioned_p (frame_pointer_rtx, op)
4893 || reg_mentioned_p (arg_pointer_rtx, op)
4894 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4895 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4896 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4897 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4898 return 0;
4900 if (GET_CODE (op) == MEM)
4902 rtx ind;
4904 ind = XEXP (op, 0);
4906 /* Match: (mem (reg)). */
4907 if (GET_CODE (ind) == REG)
4908 return 1;
4910 /* Match:
4911 (mem (plus (reg)
4912 (const))). */
4913 if (GET_CODE (ind) == PLUS
4914 && GET_CODE (XEXP (ind, 0)) == REG
4915 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4916 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4917 return 1;
4920 return 0;
4923 /* Return TRUE if OP is a valid VFP memory address pattern.
4924 WB is true if writeback address modes are allowed. */
4927 arm_coproc_mem_operand (rtx op, bool wb)
4929 rtx ind;
4931 /* Reject eliminable registers. */
4932 if (! (reload_in_progress || reload_completed)
4933 && ( reg_mentioned_p (frame_pointer_rtx, op)
4934 || reg_mentioned_p (arg_pointer_rtx, op)
4935 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4936 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4937 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4938 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4939 return FALSE;
4941 /* Constants are converted into offsets from labels. */
4942 if (GET_CODE (op) != MEM)
4943 return FALSE;
4945 ind = XEXP (op, 0);
4947 if (reload_completed
4948 && (GET_CODE (ind) == LABEL_REF
4949 || (GET_CODE (ind) == CONST
4950 && GET_CODE (XEXP (ind, 0)) == PLUS
4951 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4952 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4953 return TRUE;
4955 /* Match: (mem (reg)). */
4956 if (GET_CODE (ind) == REG)
4957 return arm_address_register_rtx_p (ind, 0);
4959 /* Autoincrement addressing modes. */
4960 if (wb
4961 && (GET_CODE (ind) == PRE_INC
4962 || GET_CODE (ind) == POST_INC
4963 || GET_CODE (ind) == PRE_DEC
4964 || GET_CODE (ind) == POST_DEC))
4965 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4967 if (wb
4968 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4969 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4970 && GET_CODE (XEXP (ind, 1)) == PLUS
4971 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4972 ind = XEXP (ind, 1);
4974 /* Match:
4975 (plus (reg)
4976 (const)). */
4977 if (GET_CODE (ind) == PLUS
4978 && GET_CODE (XEXP (ind, 0)) == REG
4979 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4980 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4981 && INTVAL (XEXP (ind, 1)) > -1024
4982 && INTVAL (XEXP (ind, 1)) < 1024
4983 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4984 return TRUE;
4986 return FALSE;
4989 /* Return true if X is a register that will be eliminated later on. */
4991 arm_eliminable_register (rtx x)
4993 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4994 || REGNO (x) == ARG_POINTER_REGNUM
4995 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4996 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4999 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
5000 VFP registers. Otherwise return NO_REGS. */
5002 enum reg_class
5003 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5005 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5006 return NO_REGS;
5008 return GENERAL_REGS;
5011 /* Values which must be returned in the most-significant end of the return
5012 register. */
5014 static bool
5015 arm_return_in_msb (tree valtype)
5017 return (TARGET_AAPCS_BASED
5018 && BYTES_BIG_ENDIAN
5019 && (AGGREGATE_TYPE_P (valtype)
5020 || TREE_CODE (valtype) == COMPLEX_TYPE));
5023 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5024 Used by the Cirrus Maverick code, which has to work around
5025 a hardware bug triggered by such instructions. */
5026 static bool
5027 arm_memory_load_p (rtx insn)
5029 rtx body, lhs, rhs;
5031 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5032 return false;
5034 body = PATTERN (insn);
5036 if (GET_CODE (body) != SET)
5037 return false;
5039 lhs = XEXP (body, 0);
5040 rhs = XEXP (body, 1);
5042 lhs = REG_OR_SUBREG_RTX (lhs);
5044 /* If the destination is not a general purpose
5045 register we do not have to worry. */
5046 if (GET_CODE (lhs) != REG
5047 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5048 return false;
5050 /* As well as loads from memory we also have to react
5051 to loads of invalid constants which will be turned
5052 into loads from the minipool. */
5053 return (GET_CODE (rhs) == MEM
5054 || GET_CODE (rhs) == SYMBOL_REF
5055 || note_invalid_constants (insn, -1, false));
5058 /* Return TRUE if INSN is a Cirrus instruction. */
5059 static bool
5060 arm_cirrus_insn_p (rtx insn)
5062 enum attr_cirrus attr;
5064 /* get_attr cannot accept USE or CLOBBER. */
5065 if (!insn
5066 || GET_CODE (insn) != INSN
5067 || GET_CODE (PATTERN (insn)) == USE
5068 || GET_CODE (PATTERN (insn)) == CLOBBER)
5069 return 0;
5071 attr = get_attr_cirrus (insn);
5073 return attr != CIRRUS_NOT;
5076 /* Cirrus reorg for invalid instruction combinations. */
5077 static void
5078 cirrus_reorg (rtx first)
5080 enum attr_cirrus attr;
5081 rtx body = PATTERN (first);
5082 rtx t;
5083 int nops;
5085 /* Any branch must be followed by 2 non-Cirrus instructions. */
5086 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5088 nops = 0;
5089 t = next_nonnote_insn (first);
5091 if (arm_cirrus_insn_p (t))
5092 ++ nops;
5094 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5095 ++ nops;
5097 while (nops --)
5098 emit_insn_after (gen_nop (), first);
5100 return;
5103 /* (float (blah)) is in parallel with a clobber. */
5104 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5105 body = XVECEXP (body, 0, 0);
5107 if (GET_CODE (body) == SET)
5109 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5111 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5112 be followed by a non-Cirrus insn. */
5113 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5115 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5116 emit_insn_after (gen_nop (), first);
5118 return;
5120 else if (arm_memory_load_p (first))
5122 unsigned int arm_regno;
5124 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5125 ldr/cfmv64hr combination where the Rd field is the same
5126 in both instructions must be split with a non-Cirrus
5127 insn. Example:
5129 ldr r0, blah
5131 cfmvsr mvf0, r0. */
5133 /* Get Arm register number for ldr insn. */
5134 if (GET_CODE (lhs) == REG)
5135 arm_regno = REGNO (lhs);
5136 else
5138 gcc_assert (GET_CODE (rhs) == REG);
5139 arm_regno = REGNO (rhs);
5142 /* Next insn. */
5143 first = next_nonnote_insn (first);
5145 if (! arm_cirrus_insn_p (first))
5146 return;
5148 body = PATTERN (first);
5150 /* (float (blah)) is in parallel with a clobber. */
5151 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5152 body = XVECEXP (body, 0, 0);
5154 if (GET_CODE (body) == FLOAT)
5155 body = XEXP (body, 0);
5157 if (get_attr_cirrus (first) == CIRRUS_MOVE
5158 && GET_CODE (XEXP (body, 1)) == REG
5159 && arm_regno == REGNO (XEXP (body, 1)))
5160 emit_insn_after (gen_nop (), first);
5162 return;
5166 /* get_attr cannot accept USE or CLOBBER. */
5167 if (!first
5168 || GET_CODE (first) != INSN
5169 || GET_CODE (PATTERN (first)) == USE
5170 || GET_CODE (PATTERN (first)) == CLOBBER)
5171 return;
5173 attr = get_attr_cirrus (first);
5175 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5176 must be followed by a non-coprocessor instruction. */
5177 if (attr == CIRRUS_COMPARE)
5179 nops = 0;
5181 t = next_nonnote_insn (first);
5183 if (arm_cirrus_insn_p (t))
5184 ++ nops;
5186 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5187 ++ nops;
5189 while (nops --)
5190 emit_insn_after (gen_nop (), first);
5192 return;
5196 /* Return TRUE if X references a SYMBOL_REF. */
5198 symbol_mentioned_p (rtx x)
5200 const char * fmt;
5201 int i;
5203 if (GET_CODE (x) == SYMBOL_REF)
5204 return 1;
5206 fmt = GET_RTX_FORMAT (GET_CODE (x));
5208 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5210 if (fmt[i] == 'E')
5212 int j;
5214 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5215 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5216 return 1;
5218 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5219 return 1;
5222 return 0;
5225 /* Return TRUE if X references a LABEL_REF. */
5227 label_mentioned_p (rtx x)
5229 const char * fmt;
5230 int i;
5232 if (GET_CODE (x) == LABEL_REF)
5233 return 1;
5235 fmt = GET_RTX_FORMAT (GET_CODE (x));
5236 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5238 if (fmt[i] == 'E')
5240 int j;
5242 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5243 if (label_mentioned_p (XVECEXP (x, i, j)))
5244 return 1;
5246 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5247 return 1;
5250 return 0;
5253 enum rtx_code
5254 minmax_code (rtx x)
5256 enum rtx_code code = GET_CODE (x);
5258 switch (code)
5260 case SMAX:
5261 return GE;
5262 case SMIN:
5263 return LE;
5264 case UMIN:
5265 return LEU;
5266 case UMAX:
5267 return GEU;
5268 default:
5269 gcc_unreachable ();
5273 /* Return 1 if memory locations are adjacent. */
5275 adjacent_mem_locations (rtx a, rtx b)
5277 /* We don't guarantee to preserve the order of these memory refs. */
5278 if (volatile_refs_p (a) || volatile_refs_p (b))
5279 return 0;
5281 if ((GET_CODE (XEXP (a, 0)) == REG
5282 || (GET_CODE (XEXP (a, 0)) == PLUS
5283 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5284 && (GET_CODE (XEXP (b, 0)) == REG
5285 || (GET_CODE (XEXP (b, 0)) == PLUS
5286 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5288 HOST_WIDE_INT val0 = 0, val1 = 0;
5289 rtx reg0, reg1;
5290 int val_diff;
5292 if (GET_CODE (XEXP (a, 0)) == PLUS)
5294 reg0 = XEXP (XEXP (a, 0), 0);
5295 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5297 else
5298 reg0 = XEXP (a, 0);
5300 if (GET_CODE (XEXP (b, 0)) == PLUS)
5302 reg1 = XEXP (XEXP (b, 0), 0);
5303 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5305 else
5306 reg1 = XEXP (b, 0);
5308 /* Don't accept any offset that will require multiple
5309 instructions to handle, since this would cause the
5310 arith_adjacentmem pattern to output an overlong sequence. */
5311 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5312 return 0;
5314 /* Don't allow an eliminable register: register elimination can make
5315 the offset too large. */
5316 if (arm_eliminable_register (reg0))
5317 return 0;
5319 val_diff = val1 - val0;
5321 if (arm_ld_sched)
5323 /* If the target has load delay slots, then there's no benefit
5324 to using an ldm instruction unless the offset is zero and
5325 we are optimizing for size. */
5326 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5327 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5328 && (val_diff == 4 || val_diff == -4));
5331 return ((REGNO (reg0) == REGNO (reg1))
5332 && (val_diff == 4 || val_diff == -4));
5335 return 0;
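/* Editorial summary, not part of GCC: two word references pair up when
   they share a base register and their constant offsets differ by
   exactly 4 either way; on load-delay-slot cores (arm_ld_sched) the
   pairing is further restricted to offsets of 0 and 4 and to -Os.
   Standalone sketch of the core test:  */
#if 0
static int
adjacent_sketch (int reg0, long val0, int reg1, long val1)
{
  long diff = val1 - val0;
  return reg0 == reg1 && (diff == 4 || diff == -4);
}
#endif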
5339 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5340 HOST_WIDE_INT *load_offset)
5342 int unsorted_regs[4];
5343 HOST_WIDE_INT unsorted_offsets[4];
5344 int order[4];
5345 int base_reg = -1;
5346 int i;
5348 /* Can only handle 2, 3, or 4 insns at present,
5349 though could be easily extended if required. */
5350 gcc_assert (nops >= 2 && nops <= 4);
5352 /* Loop over the operands and check that the memory references are
5353 suitable (i.e. immediate offsets from the same base register). At
5354 the same time, extract the target register, and the memory
5355 offsets. */
5356 for (i = 0; i < nops; i++)
5358 rtx reg;
5359 rtx offset;
5361 /* Convert a subreg of a mem into the mem itself. */
5362 if (GET_CODE (operands[nops + i]) == SUBREG)
5363 operands[nops + i] = alter_subreg (operands + (nops + i));
5365 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5367 /* Don't reorder volatile memory references; it doesn't seem worth
5368 looking for the case where the order is ok anyway. */
5369 if (MEM_VOLATILE_P (operands[nops + i]))
5370 return 0;
5372 offset = const0_rtx;
5374 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5375 || (GET_CODE (reg) == SUBREG
5376 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5377 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5378 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5379 == REG)
5380 || (GET_CODE (reg) == SUBREG
5381 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5382 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5383 == CONST_INT)))
5385 if (i == 0)
5387 base_reg = REGNO (reg);
5388 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5389 ? REGNO (operands[i])
5390 : REGNO (SUBREG_REG (operands[i])));
5391 order[0] = 0;
5393 else
5395 if (base_reg != (int) REGNO (reg))
5396 /* Not addressed from the same base register. */
5397 return 0;
5399 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5400 ? REGNO (operands[i])
5401 : REGNO (SUBREG_REG (operands[i])));
5402 if (unsorted_regs[i] < unsorted_regs[order[0]])
5403 order[0] = i;
5406 /* If it isn't an integer register, or if it overwrites the
5407 base register but isn't the last insn in the list, then
5408 we can't do this. */
5409 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5410 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5411 return 0;
5413 unsorted_offsets[i] = INTVAL (offset);
5415 else
5416 /* Not a suitable memory address. */
5417 return 0;
5420 /* All the useful information has now been extracted from the
5421 operands into unsorted_regs and unsorted_offsets; additionally,
5422 order[0] has been set to the lowest numbered register in the
5423 list. Sort the registers into order, and check that the memory
5424 offsets are ascending and adjacent. */
5426 for (i = 1; i < nops; i++)
5428 int j;
5430 order[i] = order[i - 1];
5431 for (j = 0; j < nops; j++)
5432 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5433 && (order[i] == order[i - 1]
5434 || unsorted_regs[j] < unsorted_regs[order[i]]))
5435 order[i] = j;
5437 /* Have we found a suitable register? If not, one must be used more
5438 than once. */
5439 if (order[i] == order[i - 1])
5440 return 0;
5442 /* Is the memory address adjacent and ascending? */
5443 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5444 return 0;
5447 if (base)
5449 *base = base_reg;
5451 for (i = 0; i < nops; i++)
5452 regs[i] = unsorted_regs[order[i]];
5454 *load_offset = unsorted_offsets[order[0]];
5457 if (unsorted_offsets[order[0]] == 0)
5458 return 1; /* ldmia */
5460 if (unsorted_offsets[order[0]] == 4)
5461 return 2; /* ldmib */
5463 if (unsorted_offsets[order[nops - 1]] == 0)
5464 return 3; /* ldmda */
5466 if (unsorted_offsets[order[nops - 1]] == -4)
5467 return 4; /* ldmdb */
5469 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5470 if the offset isn't small enough. The reason 2 ldrs are faster
5471 is because these ARMs are able to do more than one cache access
5472 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5473 whilst the ARM8 has a double bandwidth cache. This means that
5474 these cores can do both an instruction fetch and a data fetch in
5475 a single cycle, so the trick of calculating the address into a
5476 scratch register (one of the result regs) and then doing a load
5477 multiple actually becomes slower (and no smaller in code size).
5478 That is the transformation
5480 ldr rd1, [rbase + offset]
5481 ldr rd2, [rbase + offset + 4]
5483 to
5485 add rd1, rbase, offset
5486 ldmia rd1, {rd1, rd2}
5488 produces worse code -- '3 cycles + any stalls on rd2' instead of
5489 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5490 access per cycle, the first sequence could never complete in less
5491 than 6 cycles, whereas the ldm sequence would only take 5 and
5492 would make better use of sequential accesses if not hitting the
5493 cache.
5495 We cheat here and test 'arm_ld_sched' which we currently know to
5496 only be true for the ARM8, ARM9 and StrongARM. If this ever
5497 changes, then the test below needs to be reworked. */
5498 if (nops == 2 && arm_ld_sched)
5499 return 0;
5501 /* Can't do it without setting up the offset, only do this if it takes
5502 no more than one insn. */
5503 return (const_ok_for_arm (unsorted_offsets[order[0]])
5504 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
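/* Editorial note: the nonzero return codes above select the ldm variant
   for the caller -- 1 ldmia, 2 ldmib, 3 ldmda, 4 ldmdb, 5 means compute
   the base address into a scratch register first.  The order[] scan is
   a selection sort over register numbers; an illustrative standalone
   version (not part of GCC), assuming order[0] already indexes the
   lowest-numbered register as arranged above:  */
#if 0
static int
sort_regs_sketch (const int regs[], int order[], int nops)
{
  int i, j;

  for (i = 1; i < nops; i++)
    {
      /* Pick the smallest register number strictly above the previous
         pick; if none exists, some register was used twice.  */
      order[i] = order[i - 1];
      for (j = 0; j < nops; j++)
        if (regs[j] > regs[order[i - 1]]
            && (order[i] == order[i - 1] || regs[j] < regs[order[i]]))
          order[i] = j;
      if (order[i] == order[i - 1])
        return 0;
    }
  return 1;
}
#endif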
5507 const char *
5508 emit_ldm_seq (rtx *operands, int nops)
5510 int regs[4];
5511 int base_reg;
5512 HOST_WIDE_INT offset;
5513 char buf[100];
5514 int i;
5516 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5518 case 1:
5519 strcpy (buf, "ldm%?ia\t");
5520 break;
5522 case 2:
5523 strcpy (buf, "ldm%?ib\t");
5524 break;
5526 case 3:
5527 strcpy (buf, "ldm%?da\t");
5528 break;
5530 case 4:
5531 strcpy (buf, "ldm%?db\t");
5532 break;
5534 case 5:
5535 if (offset >= 0)
5536 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5537 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5538 (long) offset);
5539 else
5540 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5541 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5542 (long) -offset);
5543 output_asm_insn (buf, operands);
5544 base_reg = regs[0];
5545 strcpy (buf, "ldm%?ia\t");
5546 break;
5548 default:
5549 gcc_unreachable ();
5552 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5553 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5555 for (i = 1; i < nops; i++)
5556 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5557 reg_names[regs[i]]);
5559 strcat (buf, "}\t%@ phole ldm");
5561 output_asm_insn (buf, operands);
5562 return "";
5566 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5567 HOST_WIDE_INT * load_offset)
5569 int unsorted_regs[4];
5570 HOST_WIDE_INT unsorted_offsets[4];
5571 int order[4];
5572 int base_reg = -1;
5573 int i;
5575 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5576 extended if required. */
5577 gcc_assert (nops >= 2 && nops <= 4);
5579 /* Loop over the operands and check that the memory references are
5580 suitable (i.e. immediate offsets from the same base register). At
5581 the same time, extract the target register, and the memory
5582 offsets. */
5583 for (i = 0; i < nops; i++)
5585 rtx reg;
5586 rtx offset;
5588 /* Convert a subreg of a mem into the mem itself. */
5589 if (GET_CODE (operands[nops + i]) == SUBREG)
5590 operands[nops + i] = alter_subreg (operands + (nops + i));
5592 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5594 /* Don't reorder volatile memory references; it doesn't seem worth
5595 looking for the case where the order is ok anyway. */
5596 if (MEM_VOLATILE_P (operands[nops + i]))
5597 return 0;
5599 offset = const0_rtx;
5601 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5602 || (GET_CODE (reg) == SUBREG
5603 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5604 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5605 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5606 == REG)
5607 || (GET_CODE (reg) == SUBREG
5608 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5609 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5610 == CONST_INT)))
5612 if (i == 0)
5614 base_reg = REGNO (reg);
5615 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5616 ? REGNO (operands[i])
5617 : REGNO (SUBREG_REG (operands[i])));
5618 order[0] = 0;
5620 else
5622 if (base_reg != (int) REGNO (reg))
5623 /* Not addressed from the same base register. */
5624 return 0;
5626 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5627 ? REGNO (operands[i])
5628 : REGNO (SUBREG_REG (operands[i])));
5629 if (unsorted_regs[i] < unsorted_regs[order[0]])
5630 order[0] = i;
5633 /* If it isn't an integer register, then we can't do this. */
5634 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5635 return 0;
5637 unsorted_offsets[i] = INTVAL (offset);
5639 else
5640 /* Not a suitable memory address. */
5641 return 0;
5644 /* All the useful information has now been extracted from the
5645 operands into unsorted_regs and unsorted_offsets; additionally,
5646 order[0] has been set to the lowest numbered register in the
5647 list. Sort the registers into order, and check that the memory
5648 offsets are ascending and adjacent. */
5650 for (i = 1; i < nops; i++)
5652 int j;
5654 order[i] = order[i - 1];
5655 for (j = 0; j < nops; j++)
5656 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5657 && (order[i] == order[i - 1]
5658 || unsorted_regs[j] < unsorted_regs[order[i]]))
5659 order[i] = j;
5661 /* Have we found a suitable register? If not, one must be used more
5662 than once. */
5663 if (order[i] == order[i - 1])
5664 return 0;
5666 /* Is the memory address adjacent and ascending? */
5667 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5668 return 0;
5671 if (base)
5673 *base = base_reg;
5675 for (i = 0; i < nops; i++)
5676 regs[i] = unsorted_regs[order[i]];
5678 *load_offset = unsorted_offsets[order[0]];
5681 if (unsorted_offsets[order[0]] == 0)
5682 return 1; /* stmia */
5684 if (unsorted_offsets[order[0]] == 4)
5685 return 2; /* stmib */
5687 if (unsorted_offsets[order[nops - 1]] == 0)
5688 return 3; /* stmda */
5690 if (unsorted_offsets[order[nops - 1]] == -4)
5691 return 4; /* stmdb */
5693 return 0;
5696 const char *
5697 emit_stm_seq (rtx *operands, int nops)
5699 int regs[4];
5700 int base_reg;
5701 HOST_WIDE_INT offset;
5702 char buf[100];
5703 int i;
5705 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5707 case 1:
5708 strcpy (buf, "stm%?ia\t");
5709 break;
5711 case 2:
5712 strcpy (buf, "stm%?ib\t");
5713 break;
5715 case 3:
5716 strcpy (buf, "stm%?da\t");
5717 break;
5719 case 4:
5720 strcpy (buf, "stm%?db\t");
5721 break;
5723 default:
5724 gcc_unreachable ();
5727 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5728 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5730 for (i = 1; i < nops; i++)
5731 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5732 reg_names[regs[i]]);
5734 strcat (buf, "}\t%@ phole stm");
5736 output_asm_insn (buf, operands);
5737 return "";
5741 /* Routines for use in generating RTL. */
5744 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5745 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5747 HOST_WIDE_INT offset = *offsetp;
5748 int i = 0, j;
5749 rtx result;
5750 int sign = up ? 1 : -1;
5751 rtx mem, addr;
5753 /* XScale has load-store double instructions, but they have stricter
5754 alignment requirements than load-store multiple, so we cannot
5755 use them.
5757 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5758 the pipeline until completion.
5760 NREGS CYCLES
5761 1 3
5762 2 4
5763 3 5
5764 4 6
5766 An ldr instruction takes 1-3 cycles, but does not block the
5767 pipeline.
5769 NREGS CYCLES
5770 1 1-3
5771 2 2-6
5772 3 3-9
5773 4 4-12
5775 Best case ldr will always win. However, the more ldr instructions
5776 we issue, the less likely we are to be able to schedule them well.
5777 Using ldr instructions also increases code size.
5779 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5780 for counts of 3 or 4 regs. */
5781 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5783 rtx seq;
5785 start_sequence ();
5787 for (i = 0; i < count; i++)
5789 addr = plus_constant (from, i * 4 * sign);
5790 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5791 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5792 offset += 4 * sign;
5795 if (write_back)
5797 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5798 *offsetp = offset;
5801 seq = get_insns ();
5802 end_sequence ();
5804 return seq;
5807 result = gen_rtx_PARALLEL (VOIDmode,
5808 rtvec_alloc (count + (write_back ? 1 : 0)));
5809 if (write_back)
5811 XVECEXP (result, 0, 0)
5812 = gen_rtx_SET (GET_MODE (from), from,
5813 plus_constant (from, count * 4 * sign));
5814 i = 1;
5815 count++;
5818 for (j = 0; i < count; i++, j++)
5820 addr = plus_constant (from, j * 4 * sign);
5821 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5822 XVECEXP (result, 0, i)
5823 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5824 offset += 4 * sign;
5827 if (write_back)
5828 *offsetp = offset;
5830 return result;
5834 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5835 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5837 HOST_WIDE_INT offset = *offsetp;
5838 int i = 0, j;
5839 rtx result;
5840 int sign = up ? 1 : -1;
5841 rtx mem, addr;
5843 /* See arm_gen_load_multiple for discussion of
5844 the pros/cons of ldm/stm usage for XScale. */
5845 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5847 rtx seq;
5849 start_sequence ();
5851 for (i = 0; i < count; i++)
5853 addr = plus_constant (to, i * 4 * sign);
5854 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5855 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5856 offset += 4 * sign;
5859 if (write_back)
5861 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5862 *offsetp = offset;
5865 seq = get_insns ();
5866 end_sequence ();
5868 return seq;
5871 result = gen_rtx_PARALLEL (VOIDmode,
5872 rtvec_alloc (count + (write_back ? 1 : 0)));
5873 if (write_back)
5875 XVECEXP (result, 0, 0)
5876 = gen_rtx_SET (GET_MODE (to), to,
5877 plus_constant (to, count * 4 * sign));
5878 i = 1;
5879 count++;
5882 for (j = 0; i < count; i++, j++)
5884 addr = plus_constant (to, j * 4 * sign);
5885 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5886 XVECEXP (result, 0, i)
5887 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5888 offset += 4 * sign;
5891 if (write_back)
5892 *offsetp = offset;
5894 return result;
5898 arm_gen_movmemqi (rtx *operands)
5900 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5901 HOST_WIDE_INT srcoffset, dstoffset;
5902 int i;
5903 rtx src, dst, srcbase, dstbase;
5904 rtx part_bytes_reg = NULL;
5905 rtx mem;
5907 if (GET_CODE (operands[2]) != CONST_INT
5908 || GET_CODE (operands[3]) != CONST_INT
5909 || INTVAL (operands[2]) > 64
5910 || INTVAL (operands[3]) & 3)
5911 return 0;
5913 dstbase = operands[0];
5914 srcbase = operands[1];
5916 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5917 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5919 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5920 out_words_to_go = INTVAL (operands[2]) / 4;
5921 last_bytes = INTVAL (operands[2]) & 3;
5922 dstoffset = srcoffset = 0;
5924 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5925 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5927 for (i = 0; in_words_to_go >= 2; i+=4)
5929 if (in_words_to_go > 4)
5930 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5931 srcbase, &srcoffset));
5932 else
5933 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5934 FALSE, srcbase, &srcoffset));
5936 if (out_words_to_go)
5938 if (out_words_to_go > 4)
5939 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5940 dstbase, &dstoffset));
5941 else if (out_words_to_go != 1)
5942 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5943 dst, TRUE,
5944 (last_bytes == 0
5945 ? FALSE : TRUE),
5946 dstbase, &dstoffset));
5947 else
5949 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5950 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5951 if (last_bytes != 0)
5953 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5954 dstoffset += 4;
5959 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5960 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5963 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5964 if (out_words_to_go)
5966 rtx sreg;
5968 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5969 sreg = copy_to_reg (mem);
5971 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5972 emit_move_insn (mem, sreg);
5973 in_words_to_go--;
5975 gcc_assert (!in_words_to_go); /* Sanity check */
5978 if (in_words_to_go)
5980 gcc_assert (in_words_to_go > 0);
5982 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5983 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5986 gcc_assert (!last_bytes || part_bytes_reg);
5988 if (BYTES_BIG_ENDIAN && last_bytes)
5990 rtx tmp = gen_reg_rtx (SImode);
5992 /* The bytes we want are in the top end of the word. */
5993 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5994 GEN_INT (8 * (4 - last_bytes))));
5995 part_bytes_reg = tmp;
5997 while (last_bytes)
5999 mem = adjust_automodify_address (dstbase, QImode,
6000 plus_constant (dst, last_bytes - 1),
6001 dstoffset + last_bytes - 1);
6002 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6004 if (--last_bytes)
6006 tmp = gen_reg_rtx (SImode);
6007 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6008 part_bytes_reg = tmp;
6013 else
6015 if (last_bytes > 1)
6017 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6018 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6019 last_bytes -= 2;
6020 if (last_bytes)
6022 rtx tmp = gen_reg_rtx (SImode);
6023 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6024 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6025 part_bytes_reg = tmp;
6026 dstoffset += 2;
6030 if (last_bytes)
6032 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6033 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6037 return 1;
6040 /* Generate a memory reference for a half word, such that it will be loaded
6041 into the top 16 bits of the word. We can assume that the address is
6042 known to be alignable and of the form reg, or plus (reg, const). */
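/* A small illustration: the offset & ~2 below rounds the address down to
   the containing word, so on a little-endian target a halfword at offset 6
   is fetched via the SImode word at offset 4 and already sits in bits
   16-31; the (rotate ... 16) form exists to move a halfword that would
   otherwise land in the low 16 bits up to the top. */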
6045 rtx arm_gen_rotated_half_load (rtx memref)
6047 HOST_WIDE_INT offset = 0;
6048 rtx base = XEXP (memref, 0);
6050 if (GET_CODE (base) == PLUS)
6052 offset = INTVAL (XEXP (base, 1));
6053 base = XEXP (base, 0);
6056 /* If we aren't allowed to generate unaligned addresses, then fail. */
6057 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
6058 return NULL;
6060 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
6062 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
6063 return base;
6065 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
6068 /* Select a dominance comparison mode if possible for a test of the general
6069 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6070 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6071 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6072 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6073 In all cases OP will be either EQ or NE, but we don't need to know which
6074 here. If we are unable to support a dominance comparison we return
6075 CC mode. This will then fail to match for the RTL expressions that
6076 generate this call. */
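/* A worked example (illustrative): for (EQ (x, y)) || (GE (x, y)) with
   COND_OR == DOM_CC_X_OR_Y, EQ dominates GE (x == y implies x >= y), so
   the pair collapses to CC_DGEmode and a single compare can feed both
   branches. */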
6077 enum machine_mode
6078 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6080 enum rtx_code cond1, cond2;
6081 int swapped = 0;
6083 /* Currently we will probably get the wrong result if the individual
6084 comparisons are not simple. This also ensures that it is safe to
6085 reverse a comparison if necessary. */
6086 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6087 != CCmode)
6088 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6089 != CCmode))
6090 return CCmode;
6092 /* The if_then_else variant of this tests the second condition if the
6093 first passes, but is true if the first fails. Reverse the first
6094 condition to get a true "inclusive-or" expression. */
6095 if (cond_or == DOM_CC_NX_OR_Y)
6096 cond1 = reverse_condition (cond1);
6098 /* If the comparisons are not equal, and one doesn't dominate the other,
6099 then we can't do this. */
6100 if (cond1 != cond2
6101 && !comparison_dominates_p (cond1, cond2)
6102 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6103 return CCmode;
6105 if (swapped)
6107 enum rtx_code temp = cond1;
6108 cond1 = cond2;
6109 cond2 = temp;
6112 switch (cond1)
6114 case EQ:
6115 if (cond_or == DOM_CC_X_AND_Y)
6116 return CC_DEQmode;
6118 switch (cond2)
6120 case EQ: return CC_DEQmode;
6121 case LE: return CC_DLEmode;
6122 case LEU: return CC_DLEUmode;
6123 case GE: return CC_DGEmode;
6124 case GEU: return CC_DGEUmode;
6125 default: gcc_unreachable ();
6128 case LT:
6129 if (cond_or == DOM_CC_X_AND_Y)
6130 return CC_DLTmode;
6132 switch (cond2)
6134 case LT:
6135 return CC_DLTmode;
6136 case LE:
6137 return CC_DLEmode;
6138 case NE:
6139 return CC_DNEmode;
6140 default:
6141 gcc_unreachable ();
6144 case GT:
6145 if (cond_or == DOM_CC_X_AND_Y)
6146 return CC_DGTmode;
6148 switch (cond2)
6150 case GT:
6151 return CC_DGTmode;
6152 case GE:
6153 return CC_DGEmode;
6154 case NE:
6155 return CC_DNEmode;
6156 default:
6157 gcc_unreachable ();
6160 case LTU:
6161 if (cond_or == DOM_CC_X_AND_Y)
6162 return CC_DLTUmode;
6164 switch (cond2)
6166 case LTU:
6167 return CC_DLTUmode;
6168 case LEU:
6169 return CC_DLEUmode;
6170 case NE:
6171 return CC_DNEmode;
6172 default:
6173 gcc_unreachable ();
6176 case GTU:
6177 if (cond_or == DOM_CC_X_AND_Y)
6178 return CC_DGTUmode;
6180 switch (cond2)
6182 case GTU:
6183 return CC_DGTUmode;
6184 case GEU:
6185 return CC_DGEUmode;
6186 case NE:
6187 return CC_DNEmode;
6188 default:
6189 gcc_unreachable ();
6192 /* The remaining cases only occur when both comparisons are the
6193 same. */
6194 case NE:
6195 gcc_assert (cond1 == cond2);
6196 return CC_DNEmode;
6198 case LE:
6199 gcc_assert (cond1 == cond2);
6200 return CC_DLEmode;
6202 case GE:
6203 gcc_assert (cond1 == cond2);
6204 return CC_DGEmode;
6206 case LEU:
6207 gcc_assert (cond1 == cond2);
6208 return CC_DLEUmode;
6210 case GEU:
6211 gcc_assert (cond1 == cond2);
6212 return CC_DGEUmode;
6214 default:
6215 gcc_unreachable ();
6219 enum machine_mode
6220 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6222 /* All floating point compares return CCFP if it is an equality
6223 comparison, and CCFPE otherwise. */
6224 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6226 switch (op)
6228 case EQ:
6229 case NE:
6230 case UNORDERED:
6231 case ORDERED:
6232 case UNLT:
6233 case UNLE:
6234 case UNGT:
6235 case UNGE:
6236 case UNEQ:
6237 case LTGT:
6238 return CCFPmode;
6240 case LT:
6241 case LE:
6242 case GT:
6243 case GE:
6244 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6245 return CCFPmode;
6246 return CCFPEmode;
6248 default:
6249 gcc_unreachable ();
6253 /* A compare with a shifted operand. Because of canonicalization, the
6254 comparison will have to be swapped when we emit the assembler. */
6255 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6256 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6257 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6258 || GET_CODE (x) == ROTATERT))
6259 return CC_SWPmode;
6261 /* This operation is performed swapped, but since we only rely on the Z
6262 flag we don't need an additional mode. */
6263 if (GET_MODE (y) == SImode && REG_P (y)
6264 && GET_CODE (x) == NEG
6265 && (op == EQ || op == NE))
6266 return CC_Zmode;
6268 /* This is a special case that is used by combine to allow a
6269 comparison of a shifted byte load to be split into a zero-extend
6270 followed by a comparison of the shifted integer (only valid for
6271 equalities and unsigned inequalities). */
6272 if (GET_MODE (x) == SImode
6273 && GET_CODE (x) == ASHIFT
6274 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6275 && GET_CODE (XEXP (x, 0)) == SUBREG
6276 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6277 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6278 && (op == EQ || op == NE
6279 || op == GEU || op == GTU || op == LTU || op == LEU)
6280 && GET_CODE (y) == CONST_INT)
6281 return CC_Zmode;
6283 /* A construct for a conditional compare: if the false arm contains
6284 0, then both conditions must be true; otherwise either condition
6285 must be true. Not all conditions are possible, so CCmode is
6286 returned if it can't be done. */
6287 if (GET_CODE (x) == IF_THEN_ELSE
6288 && (XEXP (x, 2) == const0_rtx
6289 || XEXP (x, 2) == const1_rtx)
6290 && COMPARISON_P (XEXP (x, 0))
6291 && COMPARISON_P (XEXP (x, 1)))
6292 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6293 INTVAL (XEXP (x, 2)));
6295 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6296 if (GET_CODE (x) == AND
6297 && COMPARISON_P (XEXP (x, 0))
6298 && COMPARISON_P (XEXP (x, 1)))
6299 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6300 DOM_CC_X_AND_Y);
6302 if (GET_CODE (x) == IOR
6303 && COMPARISON_P (XEXP (x, 0))
6304 && COMPARISON_P (XEXP (x, 1)))
6305 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6306 DOM_CC_X_OR_Y);
6308 /* An operation (on Thumb) where we want to test for a single bit.
6309 This is done by shifting that bit up into the top bit of a
6310 scratch register; we can then branch on the sign bit. */
6311 if (TARGET_THUMB
6312 && GET_MODE (x) == SImode
6313 && (op == EQ || op == NE)
6314 && (GET_CODE (x) == ZERO_EXTRACT))
6315 return CC_Nmode;
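/* Roughly (illustrative): a test of bit 3 of a register is matched as a
   left shift by 28 into a scratch register, which parks the bit in bit 31
   where a bmi/bpl branch can test it through the N flag. */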
6317 /* An operation that sets the condition codes as a side-effect: the
6318 V flag is not set correctly, so we can only use comparisons where
6319 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6320 instead.) */
6321 if (GET_MODE (x) == SImode
6322 && y == const0_rtx
6323 && (op == EQ || op == NE || op == LT || op == GE)
6324 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6325 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6326 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6327 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6328 || GET_CODE (x) == LSHIFTRT
6329 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6330 || GET_CODE (x) == ROTATERT
6331 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6332 return CC_NOOVmode;
6334 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6335 return CC_Zmode;
6337 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6338 && GET_CODE (x) == PLUS
6339 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6340 return CC_Cmode;
6342 return CCmode;
6345 /* X and Y are two things to compare using CODE. Emit the compare insn and
6346 return the rtx for register 0 in the proper mode. No separate FP flag is
6347 needed for a floating point compare; the mode selection handles it. */
6348 rtx
6349 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6351 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6352 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6354 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6355 gen_rtx_COMPARE (mode, x, y)));
6357 return cc_reg;
6360 /* Generate a sequence of insns that will generate the correct return
6361 address mask depending on the physical architecture that the program
6362 is running on. */
6363 rtx
6364 arm_gen_return_addr_mask (void)
6366 rtx reg = gen_reg_rtx (Pmode);
6368 emit_insn (gen_return_addr_mask (reg));
6369 return reg;
6372 void
6373 arm_reload_in_hi (rtx *operands)
6375 rtx ref = operands[1];
6376 rtx base, scratch;
6377 HOST_WIDE_INT offset = 0;
6379 if (GET_CODE (ref) == SUBREG)
6381 offset = SUBREG_BYTE (ref);
6382 ref = SUBREG_REG (ref);
6385 if (GET_CODE (ref) == REG)
6387 /* We have a pseudo which has been spilt onto the stack; there
6388 are two cases here: the first where there is a simple
6389 stack-slot replacement and a second where the stack-slot is
6390 out of range, or is used as a subreg. */
6391 if (reg_equiv_mem[REGNO (ref)])
6393 ref = reg_equiv_mem[REGNO (ref)];
6394 base = find_replacement (&XEXP (ref, 0));
6396 else
6397 /* The slot is out of range, or was dressed up in a SUBREG. */
6398 base = reg_equiv_address[REGNO (ref)];
6400 else
6401 base = find_replacement (&XEXP (ref, 0));
6403 /* Handle the case where the address is too complex to be offset by 1. */
6404 if (GET_CODE (base) == MINUS
6405 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6407 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6409 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6410 base = base_plus;
6412 else if (GET_CODE (base) == PLUS)
6414 /* The addend must be CONST_INT, or we would have dealt with it above. */
6415 HOST_WIDE_INT hi, lo;
6417 offset += INTVAL (XEXP (base, 1));
6418 base = XEXP (base, 0);
6420 /* Rework the address into a legal sequence of insns. */
6421 /* Valid range for lo is -4095 -> 4095 */
6422 lo = (offset >= 0
6423 ? (offset & 0xfff)
6424 : -((-offset) & 0xfff));
6426 /* Corner case: if lo is the max offset then we would be out of range
6427 once we have added the additional 1 below, so bump the msb into the
6428 pre-loading insn(s). */
6429 if (lo == 4095)
6430 lo &= 0x7ff;
6432 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6433 ^ (HOST_WIDE_INT) 0x80000000)
6434 - (HOST_WIDE_INT) 0x80000000);
6436 gcc_assert (hi + lo == offset);
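/* Worked example (illustrative): offset = 4095 gives lo = 2047 (after the
   masking above) and hi = 2048; offset = -4100 gives lo = -4 and
   hi = -4096. In both cases hi + lo == offset, hi is handled by the
   addsi3 below, and both lo and lo + 1 stay within the +/-4095 range. */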
6438 if (hi != 0)
6440 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6442 /* Get the base address; addsi3 knows how to handle constants
6443 that require more than one insn. */
6444 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6445 base = base_plus;
6446 offset = lo;
6450 /* Operands[2] may overlap operands[0] (though it won't overlap
6451 operands[1]); that's why we asked for a DImode reg -- so we can
6452 use the half that does not overlap. */
6453 if (REGNO (operands[2]) == REGNO (operands[0]))
6454 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6455 else
6456 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6458 emit_insn (gen_zero_extendqisi2 (scratch,
6459 gen_rtx_MEM (QImode,
6460 plus_constant (base,
6461 offset))));
6462 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6463 gen_rtx_MEM (QImode,
6464 plus_constant (base,
6465 offset + 1))));
6466 if (!BYTES_BIG_ENDIAN)
6467 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6468 gen_rtx_IOR (SImode,
6469 gen_rtx_ASHIFT
6470 (SImode,
6471 gen_rtx_SUBREG (SImode, operands[0], 0),
6472 GEN_INT (8)),
6473 scratch)));
6474 else
6475 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6476 gen_rtx_IOR (SImode,
6477 gen_rtx_ASHIFT (SImode, scratch,
6478 GEN_INT (8)),
6479 gen_rtx_SUBREG (SImode, operands[0],
6480 0))));
6483 /* Handle storing a half-word to memory during reload by synthesizing as two
6484 byte stores. Take care not to clobber the input values until after we
6485 have moved them somewhere safe. This code assumes that if the DImode
6486 scratch in operands[2] overlaps either the input value or output address
6487 in some way, then that value must die in this insn (we absolutely need
6488 two scratch registers for some corner cases). */
6489 void
6490 arm_reload_out_hi (rtx *operands)
6492 rtx ref = operands[0];
6493 rtx outval = operands[1];
6494 rtx base, scratch;
6495 HOST_WIDE_INT offset = 0;
6497 if (GET_CODE (ref) == SUBREG)
6499 offset = SUBREG_BYTE (ref);
6500 ref = SUBREG_REG (ref);
6503 if (GET_CODE (ref) == REG)
6505 /* We have a pseudo which has been spilt onto the stack; there
6506 are two cases here: the first where there is a simple
6507 stack-slot replacement and a second where the stack-slot is
6508 out of range, or is used as a subreg. */
6509 if (reg_equiv_mem[REGNO (ref)])
6511 ref = reg_equiv_mem[REGNO (ref)];
6512 base = find_replacement (&XEXP (ref, 0));
6514 else
6515 /* The slot is out of range, or was dressed up in a SUBREG. */
6516 base = reg_equiv_address[REGNO (ref)];
6518 else
6519 base = find_replacement (&XEXP (ref, 0));
6521 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6523 /* Handle the case where the address is too complex to be offset by 1. */
6524 if (GET_CODE (base) == MINUS
6525 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6527 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6529 /* Be careful not to destroy OUTVAL. */
6530 if (reg_overlap_mentioned_p (base_plus, outval))
6532 /* Updating base_plus might destroy outval; see if we can
6533 swap the scratch and base_plus. */
6534 if (!reg_overlap_mentioned_p (scratch, outval))
6536 rtx tmp = scratch;
6537 scratch = base_plus;
6538 base_plus = tmp;
6540 else
6542 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6544 /* Be conservative and copy OUTVAL into the scratch now;
6545 this should only be necessary if outval is a subreg
6546 of something larger than a word. */
6547 /* XXX Might this clobber base? I can't see how it can,
6548 since scratch is known to overlap with OUTVAL, and
6549 must be wider than a word. */
6550 emit_insn (gen_movhi (scratch_hi, outval));
6551 outval = scratch_hi;
6555 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6556 base = base_plus;
6558 else if (GET_CODE (base) == PLUS)
6560 /* The addend must be CONST_INT, or we would have dealt with it above. */
6561 HOST_WIDE_INT hi, lo;
6563 offset += INTVAL (XEXP (base, 1));
6564 base = XEXP (base, 0);
6566 /* Rework the address into a legal sequence of insns. */
6567 /* Valid range for lo is -4095 -> 4095 */
6568 lo = (offset >= 0
6569 ? (offset & 0xfff)
6570 : -((-offset) & 0xfff));
6572 /* Corner case: if lo is the max offset then we would be out of range
6573 once we have added the additional 1 below, so bump the msb into the
6574 pre-loading insn(s). */
6575 if (lo == 4095)
6576 lo &= 0x7ff;
6578 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6579 ^ (HOST_WIDE_INT) 0x80000000)
6580 - (HOST_WIDE_INT) 0x80000000);
6582 gcc_assert (hi + lo == offset);
6584 if (hi != 0)
6586 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6588 /* Be careful not to destroy OUTVAL. */
6589 if (reg_overlap_mentioned_p (base_plus, outval))
6591 /* Updating base_plus might destroy outval; see if we
6592 can swap the scratch and base_plus. */
6593 if (!reg_overlap_mentioned_p (scratch, outval))
6595 rtx tmp = scratch;
6596 scratch = base_plus;
6597 base_plus = tmp;
6599 else
6601 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6603 /* Be conservative and copy outval into scratch now;
6604 this should only be necessary if outval is a
6605 subreg of something larger than a word. */
6606 /* XXX Might this clobber base? I can't see how it
6607 can, since scratch is known to overlap with
6608 outval. */
6609 emit_insn (gen_movhi (scratch_hi, outval));
6610 outval = scratch_hi;
6614 /* Get the base address; addsi3 knows how to handle constants
6615 that require more than one insn. */
6616 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6617 base = base_plus;
6618 offset = lo;
6622 if (BYTES_BIG_ENDIAN)
6624 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6625 plus_constant (base, offset + 1)),
6626 gen_lowpart (QImode, outval)));
6627 emit_insn (gen_lshrsi3 (scratch,
6628 gen_rtx_SUBREG (SImode, outval, 0),
6629 GEN_INT (8)));
6630 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6631 gen_lowpart (QImode, scratch)));
6633 else
6635 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6636 gen_lowpart (QImode, outval)));
6637 emit_insn (gen_lshrsi3 (scratch,
6638 gen_rtx_SUBREG (SImode, outval, 0),
6639 GEN_INT (8)));
6640 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6641 plus_constant (base, offset + 1)),
6642 gen_lowpart (QImode, scratch)));
6646 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
6647 (padded to the size of a word) should be passed in a register. */
6649 static bool
6650 arm_must_pass_in_stack (enum machine_mode mode, tree type)
6652 if (TARGET_AAPCS_BASED)
6653 return must_pass_in_stack_var_size (mode, type);
6654 else
6655 return must_pass_in_stack_var_size_or_pad (mode, type);
6659 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
6660 Return true if an argument passed on the stack should be padded upwards,
6661 i.e. if the least-significant byte has useful data.
6662 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
6663 aggregate types are placed in the lowest memory address. */
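/* For instance (illustrative): on a big-endian AAPCS target a one-byte
   structure passed on the stack is padded upward, so its byte occupies the
   lowest address of the argument slot, while a plain 'char' is padded
   downward to match the register image of an integer. */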
6665 bool
6666 arm_pad_arg_upward (enum machine_mode mode, tree type)
6668 if (!TARGET_AAPCS_BASED)
6669 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
6671 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
6672 return false;
6674 return true;
6678 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
6679 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
6680 byte of the register has useful data, and return the opposite if the
6681 most significant byte does.
6682 For AAPCS, small aggregates and small complex types are always padded
6683 upwards. */
6685 bool
6686 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
6687 tree type, int first ATTRIBUTE_UNUSED)
6689 if (TARGET_AAPCS_BASED
6690 && BYTES_BIG_ENDIAN
6691 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
6692 && int_size_in_bytes (type) <= 4)
6693 return true;
6695 /* Otherwise, use default padding. */
6696 return !BYTES_BIG_ENDIAN;
6701 /* Print a symbolic form of X to the debug file, F. */
6702 static void
6703 arm_print_value (FILE *f, rtx x)
6705 switch (GET_CODE (x))
6707 case CONST_INT:
6708 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6709 return;
6711 case CONST_DOUBLE:
6712 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6713 return;
6715 case CONST_VECTOR:
6717 int i;
6719 fprintf (f, "<");
6720 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6722 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6723 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6724 fputc (',', f);
6726 fprintf (f, ">");
6728 return;
6730 case CONST_STRING:
6731 fprintf (f, "\"%s\"", XSTR (x, 0));
6732 return;
6734 case SYMBOL_REF:
6735 fprintf (f, "`%s'", XSTR (x, 0));
6736 return;
6738 case LABEL_REF:
6739 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6740 return;
6742 case CONST:
6743 arm_print_value (f, XEXP (x, 0));
6744 return;
6746 case PLUS:
6747 arm_print_value (f, XEXP (x, 0));
6748 fprintf (f, "+");
6749 arm_print_value (f, XEXP (x, 1));
6750 return;
6752 case PC:
6753 fprintf (f, "pc");
6754 return;
6756 default:
6757 fprintf (f, "????");
6758 return;
6762 /* Routines for manipulation of the constant pool. */
6764 /* Arm instructions cannot load a large constant directly into a
6765 register; they have to come from a pc relative load. The constant
6766 must therefore be placed in the addressable range of the pc
6767 relative load. Depending on the precise pc relative load
6768 instruction the range is somewhere between 256 bytes and 4k. This
6769 means that we often have to dump a constant inside a function, and
6770 generate code to branch around it.
6772 It is important to minimize this, since the branches will slow
6773 things down and make the code larger.
6775 Normally we can hide the table after an existing unconditional
6776 branch so that there is no interruption of the flow, but in the
6777 worst case the code looks like this:
6779 ldr rn, L1
6781 b L2
6782 align
6783 L1: .long value
6787 ldr rn, L3
6789 b L4
6790 align
6791 L3: .long value
6795 We fix this by performing a scan after scheduling, which notices
6796 which instructions need to have their operands fetched from the
6797 constant table and builds the table.
6799 The algorithm starts by building a table of all the constants that
6800 need fixing up and all the natural barriers in the function (places
6801 where a constant table can be dropped without breaking the flow).
6802 For each fixup we note how far the pc-relative replacement will be
6803 able to reach and the offset of the instruction into the function.
6805 Having built the table we then group the fixes together to form
6806 tables that are as large as possible (subject to addressing
6807 constraints) and emit each table of constants after the last
6808 barrier that is within range of all the instructions in the group.
6809 If a group does not contain a barrier, then we forcibly create one
6810 by inserting a jump instruction into the flow. Once the table has
6811 been inserted, the insns are then modified to reference the
6812 relevant entry in the pool.
6814 Possible enhancements to the algorithm (not implemented) are:
6816 1) For some processors and object formats, there may be benefit in
6817 aligning the pools to the start of cache lines; this alignment
6818 would need to be taken into account when calculating addressability
6819 of a pool. */
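/* As a reading aid for the code below: push_minipool_fix and
   push_minipool_barrier record the fixups and the natural barriers during
   the scan in arm_reorg; add_minipool_forward_ref and
   add_minipool_backward_ref group the fixes into pools; dump_minipool
   emits the finished table. */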
6821 /* These typedefs are located at the start of this file, so that
6822 they can be used in the prototypes there. This comment is to
6823 remind readers of that fact so that the following structures
6824 can be understood more easily.
6826 typedef struct minipool_node Mnode;
6827 typedef struct minipool_fixup Mfix; */
6829 struct minipool_node
6831 /* Doubly linked chain of entries. */
6832 Mnode * next;
6833 Mnode * prev;
6834 /* The maximum offset into the code at which this entry can be placed. While
6835 pushing fixes for forward references, all entries are sorted in order
6836 of increasing max_address. */
6837 HOST_WIDE_INT max_address;
6838 /* Similarly for an entry inserted for a backwards ref. */
6839 HOST_WIDE_INT min_address;
6840 /* The number of fixes referencing this entry. This can become zero
6841 if we "unpush" an entry. In this case we ignore the entry when we
6842 come to emit the code. */
6843 int refcount;
6844 /* The offset from the start of the minipool. */
6845 HOST_WIDE_INT offset;
6846 /* The value in the table. */
6847 rtx value;
6848 /* The mode of value. */
6849 enum machine_mode mode;
6850 /* The size of the value. With iWMMXt enabled,
6851 sizes > 4 also imply an alignment of 8 bytes. */
6852 int fix_size;
6855 struct minipool_fixup
6857 Mfix * next;
6858 rtx insn;
6859 HOST_WIDE_INT address;
6860 rtx * loc;
6861 enum machine_mode mode;
6862 int fix_size;
6863 rtx value;
6864 Mnode * minipool;
6865 HOST_WIDE_INT forwards;
6866 HOST_WIDE_INT backwards;
6869 /* Fixes less than a word need padding out to a word boundary. */
6870 #define MINIPOOL_FIX_SIZE(mode) \
6871 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
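/* For example, MINIPOOL_FIX_SIZE (QImode) == 4, while
   MINIPOOL_FIX_SIZE (DImode) == 8. */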
6873 static Mnode * minipool_vector_head;
6874 static Mnode * minipool_vector_tail;
6875 static rtx minipool_vector_label;
6877 /* The linked list of all minipool fixes required for this function. */
6878 Mfix * minipool_fix_head;
6879 Mfix * minipool_fix_tail;
6880 /* The fix entry for the current minipool, once it has been placed. */
6881 Mfix * minipool_barrier;
6883 /* Determines if INSN is the start of a jump table. Returns the end
6884 of the TABLE or NULL_RTX. */
6885 static rtx
6886 is_jump_table (rtx insn)
6888 rtx table;
6890 if (GET_CODE (insn) == JUMP_INSN
6891 && JUMP_LABEL (insn) != NULL
6892 && ((table = next_real_insn (JUMP_LABEL (insn)))
6893 == next_real_insn (insn))
6894 && table != NULL
6895 && GET_CODE (table) == JUMP_INSN
6896 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6897 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6898 return table;
6900 return NULL_RTX;
6903 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6904 #define JUMP_TABLES_IN_TEXT_SECTION 0
6905 #endif
6907 static HOST_WIDE_INT
6908 get_jump_table_size (rtx insn)
6910 /* ADDR_VECs only take room if read-only data goes into the text
6911 section. */
6912 if (JUMP_TABLES_IN_TEXT_SECTION
6913 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6914 || 1
6915 #endif
6918 rtx body = PATTERN (insn);
6919 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6921 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6924 return 0;
6927 /* Move a minipool fix MP from its current location to before MAX_MP.
6928 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6929 constraints may need updating. */
6930 static Mnode *
6931 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6932 HOST_WIDE_INT max_address)
6934 /* The code below assumes these are different. */
6935 gcc_assert (mp != max_mp);
6937 if (max_mp == NULL)
6939 if (max_address < mp->max_address)
6940 mp->max_address = max_address;
6942 else
6944 if (max_address > max_mp->max_address - mp->fix_size)
6945 mp->max_address = max_mp->max_address - mp->fix_size;
6946 else
6947 mp->max_address = max_address;
6949 /* Unlink MP from its current position. Since max_mp is non-null,
6950 mp->prev must be non-null. */
6951 mp->prev->next = mp->next;
6952 if (mp->next != NULL)
6953 mp->next->prev = mp->prev;
6954 else
6955 minipool_vector_tail = mp->prev;
6957 /* Re-insert it before MAX_MP. */
6958 mp->next = max_mp;
6959 mp->prev = max_mp->prev;
6960 max_mp->prev = mp;
6962 if (mp->prev != NULL)
6963 mp->prev->next = mp;
6964 else
6965 minipool_vector_head = mp;
6968 /* Save the new entry. */
6969 max_mp = mp;
6971 /* Scan over the preceding entries and adjust their addresses as
6972 required. */
6973 while (mp->prev != NULL
6974 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6976 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6977 mp = mp->prev;
6980 return max_mp;
6983 /* Add a constant to the minipool for a forward reference. Returns the
6984 node added or NULL if the constant will not fit in this pool. */
6985 static Mnode *
6986 add_minipool_forward_ref (Mfix *fix)
6988 /* If set, max_mp is the first pool_entry that has a lower
6989 constraint than the one we are trying to add. */
6990 Mnode * max_mp = NULL;
6991 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6992 Mnode * mp;
6994 /* If this fix's address is greater than the address of the first
6995 entry, then we can't put the fix in this pool. We subtract the
6996 size of the current fix to ensure that if the table is fully
6997 packed we still have enough room to insert this value by shuffling
6998 the other fixes forwards. */
6999 if (minipool_vector_head &&
7000 fix->address >= minipool_vector_head->max_address - fix->fix_size)
7001 return NULL;
7003 /* Scan the pool to see if a constant with the same value has
7004 already been added. While we are doing this, also note the
7005 location where we must insert the constant if it doesn't already
7006 exist. */
7007 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7009 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7010 && fix->mode == mp->mode
7011 && (GET_CODE (fix->value) != CODE_LABEL
7012 || (CODE_LABEL_NUMBER (fix->value)
7013 == CODE_LABEL_NUMBER (mp->value)))
7014 && rtx_equal_p (fix->value, mp->value))
7016 /* More than one fix references this entry. */
7017 mp->refcount++;
7018 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7021 /* Note the insertion point if necessary. */
7022 if (max_mp == NULL
7023 && mp->max_address > max_address)
7024 max_mp = mp;
7026 /* If we are inserting an 8-byte aligned quantity and
7027 we have not already found an insertion point, then
7028 make sure that all such 8-byte aligned quantities are
7029 placed at the start of the pool. */
7030 if (ARM_DOUBLEWORD_ALIGN
7031 && max_mp == NULL
7032 && fix->fix_size == 8
7033 && mp->fix_size != 8)
7035 max_mp = mp;
7036 max_address = mp->max_address;
7040 /* The value is not currently in the minipool, so we need to create
7041 a new entry for it. If MAX_MP is NULL, the entry will be put on
7042 the end of the list since the placement is less constrained than
7043 any existing entry. Otherwise, we insert the new fix before
7044 MAX_MP and, if necessary, adjust the constraints on the other
7045 entries. */
7046 mp = xmalloc (sizeof (* mp));
7047 mp->fix_size = fix->fix_size;
7048 mp->mode = fix->mode;
7049 mp->value = fix->value;
7050 mp->refcount = 1;
7051 /* Not yet required for a backwards ref. */
7052 mp->min_address = -65536;
7054 if (max_mp == NULL)
7056 mp->max_address = max_address;
7057 mp->next = NULL;
7058 mp->prev = minipool_vector_tail;
7060 if (mp->prev == NULL)
7062 minipool_vector_head = mp;
7063 minipool_vector_label = gen_label_rtx ();
7065 else
7066 mp->prev->next = mp;
7068 minipool_vector_tail = mp;
7070 else
7072 if (max_address > max_mp->max_address - mp->fix_size)
7073 mp->max_address = max_mp->max_address - mp->fix_size;
7074 else
7075 mp->max_address = max_address;
7077 mp->next = max_mp;
7078 mp->prev = max_mp->prev;
7079 max_mp->prev = mp;
7080 if (mp->prev != NULL)
7081 mp->prev->next = mp;
7082 else
7083 minipool_vector_head = mp;
7086 /* Save the new entry. */
7087 max_mp = mp;
7089 /* Scan over the preceding entries and adjust their addresses as
7090 required. */
7091 while (mp->prev != NULL
7092 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7094 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7095 mp = mp->prev;
7098 return max_mp;
7101 static Mnode *
7102 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7103 HOST_WIDE_INT min_address)
7105 HOST_WIDE_INT offset;
7107 /* The code below assumes these are different. */
7108 gcc_assert (mp != min_mp);
7110 if (min_mp == NULL)
7112 if (min_address > mp->min_address)
7113 mp->min_address = min_address;
7115 else
7117 /* We will adjust this below if it is too loose. */
7118 mp->min_address = min_address;
7120 /* Unlink MP from its current position. Since min_mp is non-null,
7121 mp->next must be non-null. */
7122 mp->next->prev = mp->prev;
7123 if (mp->prev != NULL)
7124 mp->prev->next = mp->next;
7125 else
7126 minipool_vector_head = mp->next;
7128 /* Reinsert it after MIN_MP. */
7129 mp->prev = min_mp;
7130 mp->next = min_mp->next;
7131 min_mp->next = mp;
7132 if (mp->next != NULL)
7133 mp->next->prev = mp;
7134 else
7135 minipool_vector_tail = mp;
7138 min_mp = mp;
7140 offset = 0;
7141 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7143 mp->offset = offset;
7144 if (mp->refcount > 0)
7145 offset += mp->fix_size;
7147 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7148 mp->next->min_address = mp->min_address + mp->fix_size;
7151 return min_mp;
7154 /* Add a constant to the minipool for a backward reference. Returns the
7155 node added or NULL if the constant will not fit in this pool.
7157 Note that the code for insertion for a backwards reference can be
7158 somewhat confusing because the calculated offsets for each fix do
7159 not take into account the size of the pool (which is still under
7160 construction). */
7161 static Mnode *
7162 add_minipool_backward_ref (Mfix *fix)
7164 /* If set, min_mp is the last pool_entry that has a lower constraint
7165 than the one we are trying to add. */
7166 Mnode *min_mp = NULL;
7167 /* This can be negative, since it is only a constraint. */
7168 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7169 Mnode *mp;
7171 /* If we can't reach the current pool from this insn, or if we can't
7172 insert this entry at the end of the pool without pushing other
7173 fixes out of range, then we don't try. This ensures that we
7174 can't fail later on. */
7175 if (min_address >= minipool_barrier->address
7176 || (minipool_vector_tail->min_address + fix->fix_size
7177 >= minipool_barrier->address))
7178 return NULL;
7180 /* Scan the pool to see if a constant with the same value has
7181 already been added. While we are doing this, also note the
7182 location where we must insert the constant if it doesn't already
7183 exist. */
7184 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7186 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7187 && fix->mode == mp->mode
7188 && (GET_CODE (fix->value) != CODE_LABEL
7189 || (CODE_LABEL_NUMBER (fix->value)
7190 == CODE_LABEL_NUMBER (mp->value)))
7191 && rtx_equal_p (fix->value, mp->value)
7192 /* Check that there is enough slack to move this entry to the
7193 end of the table (this is conservative). */
7194 && (mp->max_address
7195 > (minipool_barrier->address
7196 + minipool_vector_tail->offset
7197 + minipool_vector_tail->fix_size)))
7199 mp->refcount++;
7200 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7203 if (min_mp != NULL)
7204 mp->min_address += fix->fix_size;
7205 else
7207 /* Note the insertion point if necessary. */
7208 if (mp->min_address < min_address)
7210 /* For now, we do not allow the insertion of nodes that require
7211 8-byte alignment anywhere but at the start of the pool. */
7212 if (ARM_DOUBLEWORD_ALIGN
7213 && fix->fix_size == 8 && mp->fix_size != 8)
7214 return NULL;
7215 else
7216 min_mp = mp;
7218 else if (mp->max_address
7219 < minipool_barrier->address + mp->offset + fix->fix_size)
7221 /* Inserting before this entry would push the fix beyond
7222 its maximum address (which can happen if we have
7223 re-located a forwards fix); force the new fix to come
7224 after it. */
7225 min_mp = mp;
7226 min_address = mp->min_address + fix->fix_size;
7228 /* If we are inserting an 8-byte aligned quantity and
7229 we have not already found an insertion point, then
7230 make sure that all such 8-byte aligned quantities are
7231 placed at the start of the pool. */
7232 else if (ARM_DOUBLEWORD_ALIGN
7233 && min_mp == NULL
7234 && fix->fix_size == 8
7235 && mp->fix_size < 8)
7237 min_mp = mp;
7238 min_address = mp->min_address + fix->fix_size;
7243 /* We need to create a new entry. */
7244 mp = xmalloc (sizeof (* mp));
7245 mp->fix_size = fix->fix_size;
7246 mp->mode = fix->mode;
7247 mp->value = fix->value;
7248 mp->refcount = 1;
7249 mp->max_address = minipool_barrier->address + 65536;
7251 mp->min_address = min_address;
7253 if (min_mp == NULL)
7255 mp->prev = NULL;
7256 mp->next = minipool_vector_head;
7258 if (mp->next == NULL)
7260 minipool_vector_tail = mp;
7261 minipool_vector_label = gen_label_rtx ();
7263 else
7264 mp->next->prev = mp;
7266 minipool_vector_head = mp;
7268 else
7270 mp->next = min_mp->next;
7271 mp->prev = min_mp;
7272 min_mp->next = mp;
7274 if (mp->next != NULL)
7275 mp->next->prev = mp;
7276 else
7277 minipool_vector_tail = mp;
7280 /* Save the new entry. */
7281 min_mp = mp;
7283 if (mp->prev)
7284 mp = mp->prev;
7285 else
7286 mp->offset = 0;
7288 /* Scan over the following entries and adjust their offsets. */
7289 while (mp->next != NULL)
7291 if (mp->next->min_address < mp->min_address + mp->fix_size)
7292 mp->next->min_address = mp->min_address + mp->fix_size;
7294 if (mp->refcount)
7295 mp->next->offset = mp->offset + mp->fix_size;
7296 else
7297 mp->next->offset = mp->offset;
7299 mp = mp->next;
7302 return min_mp;
7305 static void
7306 assign_minipool_offsets (Mfix *barrier)
7308 HOST_WIDE_INT offset = 0;
7309 Mnode *mp;
7311 minipool_barrier = barrier;
7313 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7315 mp->offset = offset;
7317 if (mp->refcount > 0)
7318 offset += mp->fix_size;
7322 /* Output the literal table. */
7323 static void
7324 dump_minipool (rtx scan)
7326 Mnode * mp;
7327 Mnode * nmp;
7328 int align64 = 0;
7330 if (ARM_DOUBLEWORD_ALIGN)
7331 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7332 if (mp->refcount > 0 && mp->fix_size == 8)
7334 align64 = 1;
7335 break;
7338 if (dump_file)
7339 fprintf (dump_file,
7340 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7341 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7343 scan = emit_label_after (gen_label_rtx (), scan);
7344 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7345 scan = emit_label_after (minipool_vector_label, scan);
7347 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7349 if (mp->refcount > 0)
7351 if (dump_file)
7353 fprintf (dump_file,
7354 ";; Offset %u, min %ld, max %ld ",
7355 (unsigned) mp->offset, (unsigned long) mp->min_address,
7356 (unsigned long) mp->max_address);
7357 arm_print_value (dump_file, mp->value);
7358 fputc ('\n', dump_file);
7361 switch (mp->fix_size)
7363 #ifdef HAVE_consttable_1
7364 case 1:
7365 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7366 break;
7368 #endif
7369 #ifdef HAVE_consttable_2
7370 case 2:
7371 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7372 break;
7374 #endif
7375 #ifdef HAVE_consttable_4
7376 case 4:
7377 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7378 break;
7380 #endif
7381 #ifdef HAVE_consttable_8
7382 case 8:
7383 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7384 break;
7386 #endif
7387 default:
7388 gcc_unreachable ();
7392 nmp = mp->next;
7393 free (mp);
7396 minipool_vector_head = minipool_vector_tail = NULL;
7397 scan = emit_insn_after (gen_consttable_end (), scan);
7398 scan = emit_barrier_after (scan);
7401 /* Return the cost of forcibly inserting a barrier after INSN. */
7402 static int
7403 arm_barrier_cost (rtx insn)
7405 /* Basing the location of the pool on the loop depth is preferable,
7406 but at the moment, the basic block information seems to be
7407 corrupted by this stage of the compilation. */
7408 int base_cost = 50;
7409 rtx next = next_nonnote_insn (insn);
7411 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7412 base_cost -= 20;
7414 switch (GET_CODE (insn))
7416 case CODE_LABEL:
7417 /* It will always be better to place the table before the label, rather
7418 than after it. */
7419 return 50;
7421 case INSN:
7422 case CALL_INSN:
7423 return base_cost;
7425 case JUMP_INSN:
7426 return base_cost - 10;
7428 default:
7429 return base_cost + 10;
7433 /* Find the best place in the insn stream in the range
7434 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7435 Create the barrier by inserting a jump and add a new fix entry for
7436 it. */
7437 static Mfix *
7438 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7440 HOST_WIDE_INT count = 0;
7441 rtx barrier;
7442 rtx from = fix->insn;
7443 rtx selected = from;
7444 int selected_cost;
7445 HOST_WIDE_INT selected_address;
7446 Mfix * new_fix;
7447 HOST_WIDE_INT max_count = max_address - fix->address;
7448 rtx label = gen_label_rtx ();
7450 selected_cost = arm_barrier_cost (from);
7451 selected_address = fix->address;
7453 while (from && count < max_count)
7455 rtx tmp;
7456 int new_cost;
7458 /* This code shouldn't have been called if there was a natural barrier
7459 within range. */
7460 gcc_assert (GET_CODE (from) != BARRIER);
7462 /* Count the length of this insn. */
7463 count += get_attr_length (from);
7465 /* If there is a jump table, add its length. */
7466 tmp = is_jump_table (from);
7467 if (tmp != NULL)
7469 count += get_jump_table_size (tmp);
7471 /* Jump tables aren't in a basic block, so base the cost on
7472 the dispatch insn. If we select this location, we will
7473 still put the pool after the table. */
7474 new_cost = arm_barrier_cost (from);
7476 if (count < max_count && new_cost <= selected_cost)
7478 selected = tmp;
7479 selected_cost = new_cost;
7480 selected_address = fix->address + count;
7483 /* Continue after the dispatch table. */
7484 from = NEXT_INSN (tmp);
7485 continue;
7488 new_cost = arm_barrier_cost (from);
7490 if (count < max_count && new_cost <= selected_cost)
7492 selected = from;
7493 selected_cost = new_cost;
7494 selected_address = fix->address + count;
7497 from = NEXT_INSN (from);
7500 /* Create a new JUMP_INSN that branches around a barrier. */
7501 from = emit_jump_insn_after (gen_jump (label), selected);
7502 JUMP_LABEL (from) = label;
7503 barrier = emit_barrier_after (from);
7504 emit_label_after (label, barrier);
7506 /* Create a minipool barrier entry for the new barrier. */
7507 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7508 new_fix->insn = barrier;
7509 new_fix->address = selected_address;
7510 new_fix->next = fix->next;
7511 fix->next = new_fix;
7513 return new_fix;
7516 /* Record that there is a natural barrier in the insn stream at
7517 ADDRESS. */
7518 static void
7519 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7521 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7523 fix->insn = insn;
7524 fix->address = address;
7526 fix->next = NULL;
7527 if (minipool_fix_head != NULL)
7528 minipool_fix_tail->next = fix;
7529 else
7530 minipool_fix_head = fix;
7532 minipool_fix_tail = fix;
7535 /* Record INSN, which will need fixing up to load a value from the
7536 minipool. ADDRESS is the offset of the insn since the start of the
7537 function; LOC is a pointer to the part of the insn which requires
7538 fixing; VALUE is the constant that must be loaded, which is of type
7539 MODE. */
7540 static void
7541 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7542 enum machine_mode mode, rtx value)
7544 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7546 #ifdef AOF_ASSEMBLER
7547 /* PIC symbol references need to be converted into offsets into the
7548 based area. */
7549 /* XXX This shouldn't be done here. */
7550 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7551 value = aof_pic_entry (value);
7552 #endif /* AOF_ASSEMBLER */
7554 fix->insn = insn;
7555 fix->address = address;
7556 fix->loc = loc;
7557 fix->mode = mode;
7558 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7559 fix->value = value;
7560 fix->forwards = get_attr_pool_range (insn);
7561 fix->backwards = get_attr_neg_pool_range (insn);
7562 fix->minipool = NULL;
7564 /* If an insn doesn't have a range defined for it, then it isn't
7565 expecting to be reworked by this code. Better to stop now than
7566 to generate duff assembly code. */
7567 gcc_assert (fix->forwards || fix->backwards);
7569 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7570 So there might be an empty word before the start of the pool.
7571 Hence we reduce the forward range by 4 to allow for this
7572 possibility. */
7573 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7574 fix->forwards -= 4;
7576 if (dump_file)
7578 fprintf (dump_file,
7579 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7580 GET_MODE_NAME (mode),
7581 INSN_UID (insn), (unsigned long) address,
7582 -1 * (long)fix->backwards, (long)fix->forwards);
7583 arm_print_value (dump_file, fix->value);
7584 fprintf (dump_file, "\n");
7587 /* Add it to the chain of fixes. */
7588 fix->next = NULL;
7590 if (minipool_fix_head != NULL)
7591 minipool_fix_tail->next = fix;
7592 else
7593 minipool_fix_head = fix;
7595 minipool_fix_tail = fix;
7598 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7599 Returns the number of insns needed, or 99 if we don't know how to
7600 do it. */
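/* For example (illustrative): the DImode constant 0x100000001 costs 2,
   since each 32-bit half is the single-instruction constant 1. */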
7601 int
7602 arm_const_double_inline_cost (rtx val)
7604 rtx lowpart, highpart;
7605 enum machine_mode mode;
7607 mode = GET_MODE (val);
7609 if (mode == VOIDmode)
7610 mode = DImode;
7612 gcc_assert (GET_MODE_SIZE (mode) == 8);
7614 lowpart = gen_lowpart (SImode, val);
7615 highpart = gen_highpart_mode (SImode, mode, val);
7617 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7618 gcc_assert (GET_CODE (highpart) == CONST_INT);
7620 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7621 NULL_RTX, NULL_RTX, 0, 0)
7622 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7623 NULL_RTX, NULL_RTX, 0, 0));
7626 /* Return true if it is worthwhile to split a 64-bit constant into two
7627 32-bit operations. This is the case if optimizing for size, or
7628 if we have load delay slots, or if one 32-bit part can be done with
7629 a single data operation. */
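/* For example (illustrative): 0xff00000000000000 has a high part of
   0xff000000 -- a valid ARM immediate -- so it is worth building by parts
   even without load delay slots. */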
7630 bool
7631 arm_const_double_by_parts (rtx val)
7633 enum machine_mode mode = GET_MODE (val);
7634 rtx part;
7636 if (optimize_size || arm_ld_sched)
7637 return true;
7639 if (mode == VOIDmode)
7640 mode = DImode;
7642 part = gen_highpart_mode (SImode, mode, val);
7644 gcc_assert (GET_CODE (part) == CONST_INT);
7646 if (const_ok_for_arm (INTVAL (part))
7647 || const_ok_for_arm (~INTVAL (part)))
7648 return true;
7650 part = gen_lowpart (SImode, val);
7652 gcc_assert (GET_CODE (part) == CONST_INT);
7654 if (const_ok_for_arm (INTVAL (part))
7655 || const_ok_for_arm (~INTVAL (part)))
7656 return true;
7658 return false;
7661 /* Scan INSN and note any of its operands that need fixing.
7662 If DO_PUSHES is false we do not actually push any of the fixups
7663 needed. The function returns TRUE if any fixups were needed/pushed.
7664 This is used by arm_memory_load_p() which needs to know about loads
7665 of constants that will be converted into minipool loads. */
7666 static bool
7667 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7669 bool result = false;
7670 int opno;
7672 extract_insn (insn);
7674 if (!constrain_operands (1))
7675 fatal_insn_not_found (insn);
7677 if (recog_data.n_alternatives == 0)
7678 return false;
7680 /* Fill in recog_op_alt with information about the constraints of
7681 this insn. */
7682 preprocess_constraints ();
7684 for (opno = 0; opno < recog_data.n_operands; opno++)
7686 /* Things we need to fix can only occur in inputs. */
7687 if (recog_data.operand_type[opno] != OP_IN)
7688 continue;
7690 /* If this alternative is a memory reference, then any mention
7691 of constants in this alternative is really to fool reload
7692 into allowing us to accept one there. We need to fix them up
7693 now so that we output the right code. */
7694 if (recog_op_alt[opno][which_alternative].memory_ok)
7696 rtx op = recog_data.operand[opno];
7698 if (CONSTANT_P (op))
7700 if (do_pushes)
7701 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7702 recog_data.operand_mode[opno], op);
7703 result = true;
7705 else if (GET_CODE (op) == MEM
7706 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7707 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7709 if (do_pushes)
7711 rtx cop = avoid_constant_pool_reference (op);
7713 /* Casting the address of something to a mode narrower
7714 than a word can cause avoid_constant_pool_reference()
7715 to return the pool reference itself. That's no good to
7716 us here. Let's just hope that we can use the
7717 constant pool value directly. */
7718 if (op == cop)
7719 cop = get_pool_constant (XEXP (op, 0));
7721 push_minipool_fix (insn, address,
7722 recog_data.operand_loc[opno],
7723 recog_data.operand_mode[opno], cop);
7726 result = true;
7731 return result;
7734 /* GCC puts the pool in the wrong place for ARM, since we can only
7735 load addresses a limited distance around the pc. We do some
7736 special munging to move the constant pool values to the correct
7737 point in the code. */
7738 static void
7739 arm_reorg (void)
7741 rtx insn;
7742 HOST_WIDE_INT address = 0;
7743 Mfix * fix;
7745 minipool_fix_head = minipool_fix_tail = NULL;
7747 /* The first insn must always be a note, or the code below won't
7748 scan it properly. */
7749 insn = get_insns ();
7750 gcc_assert (GET_CODE (insn) == NOTE);
7752 /* Scan all the insns and record the operands that will need fixing. */
7753 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7755 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7756 && (arm_cirrus_insn_p (insn)
7757 || GET_CODE (insn) == JUMP_INSN
7758 || arm_memory_load_p (insn)))
7759 cirrus_reorg (insn);
7761 if (GET_CODE (insn) == BARRIER)
7762 push_minipool_barrier (insn, address);
7763 else if (INSN_P (insn))
7765 rtx table;
7767 note_invalid_constants (insn, address, true);
7768 address += get_attr_length (insn);
7770 /* If the insn is a vector jump, add the size of the table
7771 and skip the table. */
7772 if ((table = is_jump_table (insn)) != NULL)
7774 address += get_jump_table_size (table);
7775 insn = table;
7780 fix = minipool_fix_head;
7782 /* Now scan the fixups and perform the required changes. */
7783 while (fix)
7785 Mfix * ftmp;
7786 Mfix * fdel;
7787 Mfix * last_added_fix;
7788 Mfix * last_barrier = NULL;
7789 Mfix * this_fix;
7791 /* Skip any further barriers before the next fix. */
7792 while (fix && GET_CODE (fix->insn) == BARRIER)
7793 fix = fix->next;
7795 /* No more fixes. */
7796 if (fix == NULL)
7797 break;
7799 last_added_fix = NULL;
7801 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7803 if (GET_CODE (ftmp->insn) == BARRIER)
7805 if (ftmp->address >= minipool_vector_head->max_address)
7806 break;
7808 last_barrier = ftmp;
7810 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7811 break;
7813 last_added_fix = ftmp; /* Keep track of the last fix added. */
7816 /* If we found a barrier, drop back to that; any fixes that we
7817 could have reached but come after the barrier will now go in
7818 the next mini-pool. */
7819 if (last_barrier != NULL)
7821 /* Reduce the refcount for those fixes that won't go into this
7822 pool after all. */
7823 for (fdel = last_barrier->next;
7824 fdel && fdel != ftmp;
7825 fdel = fdel->next)
7827 fdel->minipool->refcount--;
7828 fdel->minipool = NULL;
7831 ftmp = last_barrier;
7833 else
7835 /* ftmp is the first fix that we can't fit into this pool and
7836 there are no natural barriers that we could use. Insert a
7837 new barrier in the code somewhere between the previous
7838 fix and this one, and arrange to jump around it. */
7839 HOST_WIDE_INT max_address;
7841 /* The last item on the list of fixes must be a barrier, so
7842 we can never run off the end of the list of fixes without
7843 last_barrier being set. */
7844 gcc_assert (ftmp);
7846 max_address = minipool_vector_head->max_address;
7847 /* Check that there isn't another fix that is in range that
7848 we couldn't fit into this pool because the pool was
7849 already too large: we need to put the pool before such an
7850 instruction. */
7851 if (ftmp->address < max_address)
7852 max_address = ftmp->address;
7854 last_barrier = create_fix_barrier (last_added_fix, max_address);
7857 assign_minipool_offsets (last_barrier);
7859 while (ftmp)
7861 if (GET_CODE (ftmp->insn) != BARRIER
7862 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7863 == NULL))
7864 break;
7866 ftmp = ftmp->next;
7869 /* Scan over the fixes we have identified for this pool, fixing them
7870 up and adding the constants to the pool itself. */
7871 for (this_fix = fix; this_fix && ftmp != this_fix;
7872 this_fix = this_fix->next)
7873 if (GET_CODE (this_fix->insn) != BARRIER)
7875 rtx addr
7876 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7877 minipool_vector_label),
7878 this_fix->minipool->offset);
7879 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7882 dump_minipool (last_barrier->insn);
7883 fix = ftmp;
7886 /* From now on we must synthesize any constants that we can't handle
7887 directly. This can happen if the RTL gets split during final
7888 instruction generation. */
7889 after_arm_reorg = 1;
7891 /* Free the minipool memory. */
7892 obstack_free (&minipool_obstack, minipool_startobj);
7895 /* Routines to output assembly language. */
7897 /* If the rtx holds one of the eight valid FP immediate constants, return
7898 the string of the number. In this way we can ensure that valid double
7899 constants are generated even when cross-compiling. */
7900 const char *
7901 fp_immediate_constant (rtx x)
7903 REAL_VALUE_TYPE r;
7904 int i;
7906 if (!fp_consts_inited)
7907 init_fp_table ();
7909 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7910 for (i = 0; i < 8; i++)
7911 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7912 return strings_fp[i];
7914 gcc_unreachable ();
7917 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7918 static const char *
7919 fp_const_from_val (REAL_VALUE_TYPE *r)
7921 int i;
7923 if (!fp_consts_inited)
7924 init_fp_table ();
7926 for (i = 0; i < 8; i++)
7927 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7928 return strings_fp[i];
7930 gcc_unreachable ();
7933 /* Output the operands of a LDM/STM instruction to STREAM.
7934 MASK is the ARM register set mask of which only bits 0-15 are important.
7935 REG is the base register, either the frame pointer or the stack pointer;
7936 INSTR is the possibly suffixed load or store instruction. */
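/* For example (illustrative, assuming INSTR is a format string along the
   lines of "stmfd\t%r!"): REG == SP with a mask selecting r4, r5 and lr
   would print "stmfd sp!, {r4, r5, lr}". */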
7938 static void
7939 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7940 unsigned long mask)
7942 unsigned i;
7943 bool not_first = FALSE;
7945 fputc ('\t', stream);
7946 asm_fprintf (stream, instr, reg);
7947 fputs (", {", stream);
7949 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7950 if (mask & (1 << i))
7952 if (not_first)
7953 fprintf (stream, ", ");
7955 asm_fprintf (stream, "%r", i);
7956 not_first = TRUE;
7959 fprintf (stream, "}\n");
7963 /* Output a FLDMX instruction to STREAM.
7964 BASE is the register containing the address.
7965 REG and COUNT specify the register range.
7966 Extra registers may be added to avoid hardware bugs. */
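/* For example (illustrative): BASE = SP, REG = 8, COUNT = 3 emits
   "fldmfdx sp!, {d8, d9, d10}". A COUNT of 2 on pre-ARMv6 VFP is widened
   to three registers by the workaround below. */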
7968 static void
7969 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7971 int i;
7973 /* Work around the ARM10 VFPr1 bug. */
7974 if (count == 2 && !arm_arch6)
7976 if (reg == 15)
7977 reg--;
7978 count++;
7981 fputc ('\t', stream);
7982 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7984 for (i = reg; i < reg + count; i++)
7986 if (i > reg)
7987 fputs (", ", stream);
7988 asm_fprintf (stream, "d%d", i);
7990 fputs ("}\n", stream);
7995 /* Output the assembly for a store multiple. */
7997 const char *
7998 vfp_output_fstmx (rtx * operands)
8000 char pattern[100];
8001 int p;
8002 int base;
8003 int i;
8005 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8006 p = strlen (pattern);
8008 gcc_assert (GET_CODE (operands[1]) == REG);
8010 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8011 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8013 p += sprintf (&pattern[p], ", d%d", base + i);
8015 strcpy (&pattern[p], "}");
8017 output_asm_insn (pattern, operands);
8018 return "";
8022 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
8023 number of bytes pushed. */
8025 static int
8026 vfp_emit_fstmx (int base_reg, int count)
8028 rtx par;
8029 rtx dwarf;
8030 rtx tmp, reg;
8031 int i;
8033 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8034 register pairs are stored by a store multiple insn. We avoid this
8035 by pushing an extra pair. */
8036 if (count == 2 && !arm_arch6)
8038 if (base_reg == LAST_VFP_REGNUM - 3)
8039 base_reg -= 2;
8040 count++;
8043 /* ??? The frame layout is implementation defined. We describe
8044 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8045 We really need some way of representing the whole block so that the
8046 unwinder can figure it out at runtime. */
8047 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8048 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8050 reg = gen_rtx_REG (DFmode, base_reg);
8051 base_reg += 2;
8053 XVECEXP (par, 0, 0)
8054 = gen_rtx_SET (VOIDmode,
8055 gen_rtx_MEM (BLKmode,
8056 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8057 gen_rtx_UNSPEC (BLKmode,
8058 gen_rtvec (1, reg),
8059 UNSPEC_PUSH_MULT));
8061 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8062 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8063 GEN_INT (-(count * 8 + 4))));
8064 RTX_FRAME_RELATED_P (tmp) = 1;
8065 XVECEXP (dwarf, 0, 0) = tmp;
8067 tmp = gen_rtx_SET (VOIDmode,
8068 gen_rtx_MEM (DFmode, stack_pointer_rtx),
8069 reg);
8070 RTX_FRAME_RELATED_P (tmp) = 1;
8071 XVECEXP (dwarf, 0, 1) = tmp;
8073 for (i = 1; i < count; i++)
8075 reg = gen_rtx_REG (DFmode, base_reg);
8076 base_reg += 2;
8077 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8079 tmp = gen_rtx_SET (VOIDmode,
8080 gen_rtx_MEM (DFmode,
8081 gen_rtx_PLUS (SImode,
8082 stack_pointer_rtx,
8083 GEN_INT (i * 8))),
8084 reg);
8085 RTX_FRAME_RELATED_P (tmp) = 1;
8086 XVECEXP (dwarf, 0, i + 1) = tmp;
8089 par = emit_insn (par);
8090 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8091 REG_NOTES (par));
8092 RTX_FRAME_RELATED_P (par) = 1;
8094 return count * 8 + 4;
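/* A sketch of the size accounting above: an FSTMX of COUNT double
   registers pushes 8 bytes per register plus one 4-byte format word, and
   on pre-v6 cores a block of exactly two registers is widened to three
   to dodge the ARM10 VFPr1 erratum.  Standalone illustration, guarded
   out of the build.  */
#if 0
static int
sketch_fstmx_bytes (int count, int is_arch6)
{
  if (count == 2 && !is_arch6)
    count++;			/* Avoid the two-pair erratum.  */
  return count * 8 + 4;		/* COUNT doubles plus the pad word.  */
}

/* sketch_fstmx_bytes (2, 0) == 28; sketch_fstmx_bytes (2, 1) == 20.  */
#endif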
8098 /* Output a 'call' insn. */
8099 const char *
8100 output_call (rtx *operands)
8102 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8104 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8105 if (REGNO (operands[0]) == LR_REGNUM)
8107 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8108 output_asm_insn ("mov%?\t%0, %|lr", operands);
8111 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8113 if (TARGET_INTERWORK || arm_arch4t)
8114 output_asm_insn ("bx%?\t%0", operands);
8115 else
8116 output_asm_insn ("mov%?\t%|pc, %0", operands);
8118 return "";
8121 /* Output a 'call' insn that is a reference in memory. */
8122 const char *
8123 output_call_mem (rtx *operands)
8125 if (TARGET_INTERWORK && !arm_arch5)
8127 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8128 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8129 output_asm_insn ("bx%?\t%|ip", operands);
8131 else if (regno_use_in (LR_REGNUM, operands[0]))
8133 /* LR is used in the memory address. We load the address in the
8134 first instruction. It's safe to use IP as the target of the
8135 load since the call will kill it anyway. */
8136 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8137 if (arm_arch5)
8138 output_asm_insn ("blx%?\t%|ip", operands);
8139 else
8141 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8142 if (arm_arch4t)
8143 output_asm_insn ("bx%?\t%|ip", operands);
8144 else
8145 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8148 else
8150 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8151 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8154 return "";
8158 /* Output a move from arm registers to an fpa register.
8159 OPERANDS[0] is an fpa register.
8160 OPERANDS[1] is the first register of an arm register pair. */
8161 const char *
8162 output_mov_long_double_fpa_from_arm (rtx *operands)
8164 int arm_reg0 = REGNO (operands[1]);
8165 rtx ops[3];
8167 gcc_assert (arm_reg0 != IP_REGNUM);
8169 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8170 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8171 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8173 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8174 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8176 return "";
8179 /* Output a move from an fpa register to arm registers.
8180 OPERANDS[0] is the first register of an arm register pair.
8181 OPERANDS[1] is an fpa register. */
8182 const char *
8183 output_mov_long_double_arm_from_fpa (rtx *operands)
8185 int arm_reg0 = REGNO (operands[0]);
8186 rtx ops[3];
8188 gcc_assert (arm_reg0 != IP_REGNUM);
8190 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8191 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8192 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8194 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8195 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8196 return "";
8199 /* Output a move from arm registers to arm registers of a long double.
8200 OPERANDS[0] is the destination.
8201 OPERANDS[1] is the source. */
8202 const char *
8203 output_mov_long_double_arm_from_arm (rtx *operands)
8205 /* We have to be careful here because the two might overlap. */
8206 int dest_start = REGNO (operands[0]);
8207 int src_start = REGNO (operands[1]);
8208 rtx ops[2];
8209 int i;
8211 if (dest_start < src_start)
8213 for (i = 0; i < 3; i++)
8215 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8216 ops[1] = gen_rtx_REG (SImode, src_start + i);
8217 output_asm_insn ("mov%?\t%0, %1", ops);
8220 else
8222 for (i = 2; i >= 0; i--)
8224 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8225 ops[1] = gen_rtx_REG (SImode, src_start + i);
8226 output_asm_insn ("mov%?\t%0, %1", ops);
8230 return "";
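/* A sketch of the overlap rule used just above: when copying a block of
   registers, copy upwards if the destination starts below the source and
   downwards otherwise, exactly as memmove must.  Plain C over an array
   standing in for the register file; guarded out of the build.  */
#if 0
static void
sketch_overlap_safe_copy (int *regs, int dest_start, int src_start, int n)
{
  int i;

  if (dest_start < src_start)
    for (i = 0; i < n; i++)		/* Copy low-to-high.  */
      regs[dest_start + i] = regs[src_start + i];
  else
    for (i = n - 1; i >= 0; i--)	/* Copy high-to-low.  */
      regs[dest_start + i] = regs[src_start + i];
}
#endif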
8234 /* Output a move from arm registers to an fpa register.
8235 OPERANDS[0] is an fpa register.
8236 OPERANDS[1] is the first register of an arm register pair. */
8237 const char *
8238 output_mov_double_fpa_from_arm (rtx *operands)
8240 int arm_reg0 = REGNO (operands[1]);
8241 rtx ops[2];
8243 gcc_assert (arm_reg0 != IP_REGNUM);
8245 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8246 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8247 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8248 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8249 return "";
8252 /* Output a move from an fpa register to arm registers.
8253 OPERANDS[0] is the first register of an arm register pair.
8254 OPERANDS[1] is an fpa register. */
8255 const char *
8256 output_mov_double_arm_from_fpa (rtx *operands)
8258 int arm_reg0 = REGNO (operands[0]);
8259 rtx ops[2];
8261 gcc_assert (arm_reg0 != IP_REGNUM);
8263 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8264 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8265 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8266 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8267 return "";
8270 /* Output a move between double words.
8271 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8272 or MEM<-REG, and all MEM addresses must be offsettable. */
8273 const char *
8274 output_move_double (rtx *operands)
8276 enum rtx_code code0 = GET_CODE (operands[0]);
8277 enum rtx_code code1 = GET_CODE (operands[1]);
8278 rtx otherops[3];
8280 if (code0 == REG)
8282 int reg0 = REGNO (operands[0]);
8284 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8286 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8288 switch (GET_CODE (XEXP (operands[1], 0)))
8290 case REG:
8291 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8292 break;
8294 case PRE_INC:
8295 gcc_assert (TARGET_LDRD);
8296 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8297 break;
8299 case PRE_DEC:
8300 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8301 break;
8303 case POST_INC:
8304 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8305 break;
8307 case POST_DEC:
8308 gcc_assert (TARGET_LDRD);
8309 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8310 break;
8312 case PRE_MODIFY:
8313 case POST_MODIFY:
8314 otherops[0] = operands[0];
8315 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8316 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8318 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8320 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8322 /* Registers overlap so split out the increment. */
8323 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8324 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8326 else
8327 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8329 else
8331 /* We only allow constant increments, so this is safe. */
8332 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8334 break;
8336 case LABEL_REF:
8337 case CONST:
8338 output_asm_insn ("adr%?\t%0, %1", operands);
8339 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8340 break;
8342 default:
8343 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8344 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8346 otherops[0] = operands[0];
8347 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8348 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8350 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8352 if (GET_CODE (otherops[2]) == CONST_INT)
8354 switch ((int) INTVAL (otherops[2]))
8356 case -8:
8357 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8358 return "";
8359 case -4:
8360 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8361 return "";
8362 case 4:
8363 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8364 return "";
8367 if (TARGET_LDRD
8368 && (GET_CODE (otherops[2]) == REG
8369 || (GET_CODE (otherops[2]) == CONST_INT
8370 && INTVAL (otherops[2]) > -256
8371 && INTVAL (otherops[2]) < 256)))
8373 if (reg_overlap_mentioned_p (otherops[0],
8374 otherops[2]))
8376 /* Swap base and index registers over to
8377 avoid a conflict. */
8378 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8379 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8382 /* If both registers conflict, it will usually
8383 have been fixed by a splitter. */
8384 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8386 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8387 output_asm_insn ("ldr%?d\t%0, [%1]",
8388 otherops);
8390 else
8391 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8392 return "";
8395 if (GET_CODE (otherops[2]) == CONST_INT)
8397 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8398 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8399 else
8400 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8402 else
8403 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8405 else
8406 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8408 return "ldm%?ia\t%0, %M0";
8410 else
8412 otherops[1] = adjust_address (operands[1], SImode, 4);
8413 /* Take care of overlapping base/data reg. */
8414 if (reg_mentioned_p (operands[0], operands[1]))
8416 output_asm_insn ("ldr%?\t%0, %1", otherops);
8417 output_asm_insn ("ldr%?\t%0, %1", operands);
8419 else
8421 output_asm_insn ("ldr%?\t%0, %1", operands);
8422 output_asm_insn ("ldr%?\t%0, %1", otherops);
8427 else
8429 /* Constraints should ensure this. */
8430 gcc_assert (code0 == MEM && code1 == REG);
8431 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8433 switch (GET_CODE (XEXP (operands[0], 0)))
8435 case REG:
8436 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8437 break;
8439 case PRE_INC:
8440 gcc_assert (TARGET_LDRD);
8441 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8442 break;
8444 case PRE_DEC:
8445 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8446 break;
8448 case POST_INC:
8449 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8450 break;
8452 case POST_DEC:
8453 gcc_assert (TARGET_LDRD);
8454 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8455 break;
8457 case PRE_MODIFY:
8458 case POST_MODIFY:
8459 otherops[0] = operands[1];
8460 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8461 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8463 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8464 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8465 else
8466 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8467 break;
8469 case PLUS:
8470 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8471 if (GET_CODE (otherops[2]) == CONST_INT)
8473 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8475 case -8:
8476 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8477 return "";
8479 case -4:
8480 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8481 return "";
8483 case 4:
8484 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8485 return "";
8488 if (TARGET_LDRD
8489 && (GET_CODE (otherops[2]) == REG
8490 || (GET_CODE (otherops[2]) == CONST_INT
8491 && INTVAL (otherops[2]) > -256
8492 && INTVAL (otherops[2]) < 256)))
8494 otherops[0] = operands[1];
8495 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8496 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8497 return "";
8499 /* Fall through */
8501 default:
8502 otherops[0] = adjust_address (operands[0], SImode, 4);
8503 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8504 output_asm_insn ("str%?\t%1, %0", operands);
8505 output_asm_insn ("str%?\t%1, %0", otherops);
8509 return "";
8512 /* Output an ADD r, s, #n where n may be too big for one instruction.
8513 If N is zero and the destination register equals the source, output nothing. */
8514 const char *
8515 output_add_immediate (rtx *operands)
8517 HOST_WIDE_INT n = INTVAL (operands[2]);
8519 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8521 if (n < 0)
8522 output_multi_immediate (operands,
8523 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8524 -n);
8525 else
8526 output_multi_immediate (operands,
8527 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8531 return "";
8534 /* Output a multiple immediate operation.
8535 OPERANDS is the vector of operands referred to in the output patterns.
8536 INSTR1 is the output pattern to use for the first constant.
8537 INSTR2 is the output pattern to use for subsequent constants.
8538 IMMED_OP is the index of the constant slot in OPERANDS.
8539 N is the constant value. */
8540 static const char *
8541 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8542 int immed_op, HOST_WIDE_INT n)
8544 #if HOST_BITS_PER_WIDE_INT > 32
8545 n &= 0xffffffff;
8546 #endif
8548 if (n == 0)
8550 /* Quick and easy output. */
8551 operands[immed_op] = const0_rtx;
8552 output_asm_insn (instr1, operands);
8554 else
8556 int i;
8557 const char * instr = instr1;
8559 /* Note that n is never zero here (which would give no output). */
8560 for (i = 0; i < 32; i += 2)
8562 if (n & (3 << i))
8564 operands[immed_op] = GEN_INT (n & (255 << i));
8565 output_asm_insn (instr, operands);
8566 instr = instr2;
8567 i += 6;
8572 return "";
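/* A sketch of the chunking loop above: scan the 32-bit constant two bits
   at a time and, whenever a set bit is found, peel off an 8-bit-wide
   chunk (which ARM can rotate into an immediate) and skip past it.
   Prints the chunks instead of emitting instructions; guarded out of the
   build.  */
#if 0
#include <stdio.h>

static void
sketch_split_immediate (unsigned long n)
{
  int i;

  n &= 0xffffffff;
  for (i = 0; i < 32; i += 2)
    if (n & (3UL << i))
      {
        printf ("chunk: %#lx\n", n & (255UL << i));
        i += 6;			/* The chunk covered bits I..I+7.  */
      }
}

/* sketch_split_immediate (0x12345678) prints four chunks (0x278, 0x5400,
   0x2340000, 0x10000000) whose sum is the original constant.  */
#endif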
8575 /* Return the appropriate ARM instruction for the operation code.
8576 The returned result should not be overwritten. OP is the rtx of the
8577 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8578 was shifted. */
8579 const char *
8580 arithmetic_instr (rtx op, int shift_first_arg)
8582 switch (GET_CODE (op))
8584 case PLUS:
8585 return "add";
8587 case MINUS:
8588 return shift_first_arg ? "rsb" : "sub";
8590 case IOR:
8591 return "orr";
8593 case XOR:
8594 return "eor";
8596 case AND:
8597 return "and";
8599 default:
8600 gcc_unreachable ();
8604 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8605 for the operation code. The returned result should not be overwritten.
8606 OP is the rtx code of the shift.
8607 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8608 constant shift amount otherwise. */
8609 static const char *
8610 shift_op (rtx op, HOST_WIDE_INT *amountp)
8612 const char * mnem;
8613 enum rtx_code code = GET_CODE (op);
8615 switch (GET_CODE (XEXP (op, 1)))
8617 case REG:
8618 case SUBREG:
8619 *amountp = -1;
8620 break;
8622 case CONST_INT:
8623 *amountp = INTVAL (XEXP (op, 1));
8624 break;
8626 default:
8627 gcc_unreachable ();
8630 switch (code)
8632 case ASHIFT:
8633 mnem = "asl";
8634 break;
8636 case ASHIFTRT:
8637 mnem = "asr";
8638 break;
8640 case LSHIFTRT:
8641 mnem = "lsr";
8642 break;
8644 case ROTATE:
8645 gcc_assert (*amountp != -1);
8646 *amountp = 32 - *amountp;
8648 /* Fall through. */
8650 case ROTATERT:
8651 mnem = "ror";
8652 break;
8654 case MULT:
8655 /* We never have to worry about the amount being other than a
8656 power of 2, since this case can never be reloaded from a reg. */
8657 gcc_assert (*amountp != -1);
8658 *amountp = int_log2 (*amountp);
8659 return "asl";
8661 default:
8662 gcc_unreachable ();
8665 if (*amountp != -1)
8667 /* This is not 100% correct, but follows from the desire to merge
8668 multiplication by a power of 2 with the recognizer for a
8669 shift. >=32 is not a valid shift for "asl", so we must try and
8670 output a shift that produces the correct arithmetical result.
8671 Using lsr #32 is identical except for the fact that the carry bit
8672 is not set correctly if we set the flags; but we never use the
8673 carry bit from such an operation, so we can ignore that. */
8674 if (code == ROTATERT)
8675 /* Rotate is just modulo 32. */
8676 *amountp &= 31;
8677 else if (*amountp != (*amountp & 31))
8679 if (code == ASHIFT)
8680 mnem = "lsr";
8681 *amountp = 32;
8684 /* Shifts of 0 are no-ops. */
8685 if (*amountp == 0)
8686 return NULL;
8689 return mnem;
8692 /* Obtain the shift count from POWER, which must be a power of two. */
8694 static HOST_WIDE_INT
8695 int_log2 (HOST_WIDE_INT power)
8697 HOST_WIDE_INT shift = 0;
8699 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8701 gcc_assert (shift <= 31);
8702 shift++;
8705 return shift;
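/* A sketch: int_log2 above simply advances the shift count until it finds
   the single set bit of POWER.  Equivalent plain C, guarded out of the
   build.  */
#if 0
static int
sketch_int_log2 (unsigned long power)
{
  int shift = 0;

  while (((1UL << shift) & power) == 0)
    shift++;			/* POWER must be a non-zero power of 2.  */
  return shift;			/* sketch_int_log2 (8) == 3.  */
}
#endif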
8708 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8709 because /bin/as is horribly restrictive. The judgement about
8710 whether or not each character is 'printable' (and can be output as
8711 is) or not (and must be printed with an octal escape) must be made
8712 with reference to the *host* character set -- the situation is
8713 similar to that discussed in the comments above pp_c_char in
8714 c-pretty-print.c. */
8716 #define MAX_ASCII_LEN 51
8718 void
8719 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8721 int i;
8722 int len_so_far = 0;
8724 fputs ("\t.ascii\t\"", stream);
8726 for (i = 0; i < len; i++)
8728 int c = p[i];
8730 if (len_so_far >= MAX_ASCII_LEN)
8732 fputs ("\"\n\t.ascii\t\"", stream);
8733 len_so_far = 0;
8736 if (ISPRINT (c))
8738 if (c == '\\' || c == '\"')
8740 putc ('\\', stream);
8741 len_so_far++;
8743 putc (c, stream);
8744 len_so_far++;
8746 else
8748 fprintf (stream, "\\%03o", c);
8749 len_so_far += 4;
8753 fputs ("\"\n", stream);
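/* A sketch of the escaping policy above, assuming plain ISO C: printable
   characters go out as-is (backslash and double-quote gain a backslash),
   everything else becomes a three-digit octal escape, and a fresh .ascii
   directive is started whenever the line grows past MAX characters.
   Guarded out of the build.  */
#if 0
#include <stdio.h>
#include <ctype.h>

static void
sketch_ascii_escape (FILE *stream, const unsigned char *p, int len, int max)
{
  int i, so_far = 0;

  fputs ("\t.ascii\t\"", stream);
  for (i = 0; i < len; i++)
    {
      if (so_far >= max)
        {
          fputs ("\"\n\t.ascii\t\"", stream);
          so_far = 0;
        }
      if (isprint (p[i]))
        {
          if (p[i] == '\\' || p[i] == '"')
            {
              fputc ('\\', stream);
              so_far++;
            }
          fputc (p[i], stream);
          so_far++;
        }
      else
        {
          fprintf (stream, "\\%03o", p[i]);
          so_far += 4;
        }
    }
  fputs ("\"\n", stream);
}
#endif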
8756 /* Compute the register save mask for registers 0 through 12
8757 inclusive. This code is used by arm_compute_save_reg_mask. */
8759 static unsigned long
8760 arm_compute_save_reg0_reg12_mask (void)
8762 unsigned long func_type = arm_current_func_type ();
8763 unsigned long save_reg_mask = 0;
8764 unsigned int reg;
8766 if (IS_INTERRUPT (func_type))
8768 unsigned int max_reg;
8769 /* Interrupt functions must not corrupt any registers,
8770 even call clobbered ones. If this is a leaf function
8771 we can just examine the registers used by the RTL, but
8772 otherwise we have to assume that whatever function is
8773 called might clobber anything, and so we have to save
8774 all the call-clobbered registers as well. */
8775 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8776 /* FIQ handlers have registers r8 - r12 banked, so
8777 we only need to check r0 - r7. Normal ISRs only
8778 bank r14 and r15, so we must check up to r12.
8779 r13 is the stack pointer which is always preserved,
8780 so we do not need to consider it here. */
8781 max_reg = 7;
8782 else
8783 max_reg = 12;
8785 for (reg = 0; reg <= max_reg; reg++)
8786 if (regs_ever_live[reg]
8787 || (! current_function_is_leaf && call_used_regs [reg]))
8788 save_reg_mask |= (1 << reg);
8790 /* Also save the pic base register if necessary. */
8791 if (flag_pic
8792 && !TARGET_SINGLE_PIC_BASE
8793 && current_function_uses_pic_offset_table)
8794 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8796 else
8798 /* In the normal case we only need to save those registers
8799 which are call saved and which are used by this function. */
8800 for (reg = 0; reg <= 10; reg++)
8801 if (regs_ever_live[reg] && ! call_used_regs [reg])
8802 save_reg_mask |= (1 << reg);
8804 /* Handle the frame pointer as a special case. */
8805 if (! TARGET_APCS_FRAME
8806 && ! frame_pointer_needed
8807 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8808 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8809 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8811 /* If we aren't loading the PIC register,
8812 don't stack it even though it may be live. */
8813 if (flag_pic
8814 && !TARGET_SINGLE_PIC_BASE
8815 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8816 || current_function_uses_pic_offset_table))
8817 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8820 /* Save registers so the exception handler can modify them. */
8821 if (current_function_calls_eh_return)
8823 unsigned int i;
8825 for (i = 0; ; i++)
8827 reg = EH_RETURN_DATA_REGNO (i);
8828 if (reg == INVALID_REGNUM)
8829 break;
8830 save_reg_mask |= 1 << reg;
8834 return save_reg_mask;
8837 /* Compute a bit mask of which registers need to be
8838 saved on the stack for the current function. */
8840 static unsigned long
8841 arm_compute_save_reg_mask (void)
8843 unsigned int save_reg_mask = 0;
8844 unsigned long func_type = arm_current_func_type ();
8846 if (IS_NAKED (func_type))
8847 /* This should never really happen. */
8848 return 0;
8850 /* If we are creating a stack frame, then we must save the frame pointer,
8851 IP (which will hold the old stack pointer), LR and the PC. */
8852 if (frame_pointer_needed)
8853 save_reg_mask |=
8854 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8855 | (1 << IP_REGNUM)
8856 | (1 << LR_REGNUM)
8857 | (1 << PC_REGNUM);
8859 /* Volatile functions do not return, so there
8860 is no need to save any other registers. */
8861 if (IS_VOLATILE (func_type))
8862 return save_reg_mask;
8864 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8866 /* Decide if we need to save the link register.
8867 Interrupt routines have their own banked link register,
8868 so they never need to save it.
8869 Otherwise if we do not use the link register we do not need to save
8870 it. If we are pushing other registers onto the stack however, we
8871 can save an instruction in the epilogue by pushing the link register
8872 now and then popping it back into the PC. This incurs extra memory
8873 accesses though, so we only do it when optimizing for size, and only
8874 if we know that we will not need a fancy return sequence. */
8875 if (regs_ever_live [LR_REGNUM]
8876 || (save_reg_mask
8877 && optimize_size
8878 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8879 && !current_function_calls_eh_return))
8880 save_reg_mask |= 1 << LR_REGNUM;
8882 if (cfun->machine->lr_save_eliminated)
8883 save_reg_mask &= ~ (1 << LR_REGNUM);
8885 if (TARGET_REALLY_IWMMXT
8886 && ((bit_count (save_reg_mask)
8887 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8889 unsigned int reg;
8891 /* The total number of registers that are going to be pushed
8892 onto the stack is odd. We need to ensure that the stack
8893 is 64-bit aligned before we start to save iWMMXt registers,
8894 and also before we start to create locals. (A local variable
8895 might be a double or long long which we will load/store using
8896 an iWMMXt instruction). Therefore we need to push another
8897 ARM register, so that the stack will be 64-bit aligned. We
8898 try to avoid using the arg registers (r0 - r3) as they might be
8899 used to pass values in a tail call. */
8900 for (reg = 4; reg <= 12; reg++)
8901 if ((save_reg_mask & (1 << reg)) == 0)
8902 break;
8904 if (reg <= 12)
8905 save_reg_mask |= (1 << reg);
8906 else
8908 cfun->machine->sibcall_blocked = 1;
8909 save_reg_mask |= (1 << 3);
8913 return save_reg_mask;
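/* A sketch of the 64-bit alignment fix-up above: if an odd number of
   words is about to be pushed, claim one more register, preferring a
   free register in r4-r12 and falling back to r3 with sibcalls disabled.
   The popcount is written out longhand; guarded out of the build.  */
#if 0
static unsigned long
sketch_pad_mask_to_even (unsigned long mask, int pretend_words,
                         int *sibcall_blocked)
{
  int bits = 0, reg;
  unsigned long m;

  for (m = mask; m != 0; m >>= 1)	/* Count pushed registers.  */
    bits += (int) (m & 1);

  if ((bits + pretend_words) % 2 != 0)
    {
      for (reg = 4; reg <= 12; reg++)	/* Prefer a free high register.  */
        if ((mask & (1UL << reg)) == 0)
          break;
      if (reg <= 12)
        mask |= 1UL << reg;
      else
        {
          *sibcall_blocked = 1;		/* r3 may carry an argument.  */
          mask |= 1UL << 3;
        }
    }
  return mask;
}
#endif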
8917 /* Compute a bit mask of which registers need to be
8918 saved on the stack for the current function. */
8919 static unsigned long
8920 thumb_compute_save_reg_mask (void)
8922 unsigned long mask;
8923 unsigned reg;
8925 mask = 0;
8926 for (reg = 0; reg < 12; reg ++)
8927 if (regs_ever_live[reg] && !call_used_regs[reg])
8928 mask |= 1 << reg;
8930 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8931 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8933 if (TARGET_SINGLE_PIC_BASE)
8934 mask &= ~(1 << arm_pic_register);
8936 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8937 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8938 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8940 /* LR will also be pushed if any lo regs are pushed. */
8941 if (mask & 0xff || thumb_force_lr_save ())
8942 mask |= (1 << LR_REGNUM);
8944 /* Make sure we have a low work register if we need one.
8945 We will need one if we are going to push a high register,
8946 but we are not currently intending to push a low register. */
8947 if ((mask & 0xff) == 0
8948 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8950 /* Use thumb_find_work_register to choose which register
8951 we will use. If the register is live then we will
8952 have to push it. Use LAST_LO_REGNUM as our fallback
8953 choice for the register to select. */
8954 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8956 if (! call_used_regs[reg])
8957 mask |= 1 << reg;
8960 return mask;
8964 /* Return the number of bytes required to save VFP registers. */
8965 static int
8966 arm_get_vfp_saved_size (void)
8968 unsigned int regno;
8969 int count;
8970 int saved;
8972 saved = 0;
8973 /* Space for saved VFP registers. */
8974 if (TARGET_HARD_FLOAT && TARGET_VFP)
8976 count = 0;
8977 for (regno = FIRST_VFP_REGNUM;
8978 regno < LAST_VFP_REGNUM;
8979 regno += 2)
8981 if ((!regs_ever_live[regno] || call_used_regs[regno])
8982 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8984 if (count > 0)
8986 /* Workaround ARM10 VFPr1 bug. */
8987 if (count == 2 && !arm_arch6)
8988 count++;
8989 saved += count * 8 + 4;
8991 count = 0;
8993 else
8994 count++;
8996 if (count > 0)
8998 if (count == 2 && !arm_arch6)
8999 count++;
9000 saved += count * 8 + 4;
9003 return saved;
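/* A sketch of the computation above: walk the VFP register pairs,
   accumulate maximal runs of pairs that need saving, and charge each run
   8 bytes per pair plus a 4-byte FSTMX format word, widening
   exactly-two-pair runs on pre-v6 cores.  LIVE[] stands in for the
   regs_ever_live/call_used_regs test; guarded out of the build.  */
#if 0
static int
sketch_vfp_saved_size (const int *live, int npairs, int is_arch6)
{
  int i, count = 0, saved = 0;

  for (i = 0; i <= npairs; i++)
    {
      if (i == npairs || !live[i])	/* End of a run (or of the bank).  */
        {
          if (count > 0)
            {
              if (count == 2 && !is_arch6)
                count++;		/* ARM10 VFPr1 workaround.  */
              saved += count * 8 + 4;
            }
          count = 0;
        }
      else
        count++;
    }
  return saved;
}
#endif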
9007 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9008 everything bar the final return instruction. */
9009 const char *
9010 output_return_instruction (rtx operand, int really_return, int reverse)
9012 char conditional[10];
9013 char instr[100];
9014 unsigned reg;
9015 unsigned long live_regs_mask;
9016 unsigned long func_type;
9017 arm_stack_offsets *offsets;
9019 func_type = arm_current_func_type ();
9021 if (IS_NAKED (func_type))
9022 return "";
9024 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9026 /* If this function was declared non-returning, and we have
9027 found a tail call, then we have to trust that the called
9028 function won't return. */
9029 if (really_return)
9031 rtx ops[2];
9033 /* Otherwise, trap an attempted return by aborting. */
9034 ops[0] = operand;
9035 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9036 : "abort");
9037 assemble_external_libcall (ops[1]);
9038 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9041 return "";
9044 gcc_assert (!current_function_calls_alloca || really_return);
9046 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9048 return_used_this_function = 1;
9050 live_regs_mask = arm_compute_save_reg_mask ();
9052 if (live_regs_mask)
9054 const char * return_reg;
9056 /* If we do not have any special requirements for function exit
9057 (e.g. interworking, or ISR) then we can load the return address
9058 directly into the PC. Otherwise we must load it into LR. */
9059 if (really_return
9060 && ! TARGET_INTERWORK)
9061 return_reg = reg_names[PC_REGNUM];
9062 else
9063 return_reg = reg_names[LR_REGNUM];
9065 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9067 /* There are three possible reasons for the IP register
9068 being saved. 1) a stack frame was created, in which case
9069 IP contains the old stack pointer, or 2) an ISR routine
9070 corrupted it, or 3) it was saved to align the stack on
9071 iWMMXt. In case 1, restore IP into SP, otherwise just
9072 restore IP. */
9073 if (frame_pointer_needed)
9075 live_regs_mask &= ~ (1 << IP_REGNUM);
9076 live_regs_mask |= (1 << SP_REGNUM);
9078 else
9079 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9082 /* On some ARM architectures it is faster to use LDR rather than
9083 LDM to load a single register. On other architectures, the
9084 cost is the same. In 26 bit mode, or for exception handlers,
9085 we have to use LDM to load the PC so that the CPSR is also
9086 restored. */
9087 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9088 if (live_regs_mask == (1U << reg))
9089 break;
9091 if (reg <= LAST_ARM_REGNUM
9092 && (reg != LR_REGNUM
9093 || ! really_return
9094 || ! IS_INTERRUPT (func_type)))
9096 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9097 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9099 else
9101 char *p;
9102 int first = 1;
9104 /* Generate the load multiple instruction to restore the
9105 registers. Note we can get here, even if
9106 frame_pointer_needed is true, but only if sp already
9107 points to the base of the saved core registers. */
9108 if (live_regs_mask & (1 << SP_REGNUM))
9110 unsigned HOST_WIDE_INT stack_adjust;
9112 offsets = arm_get_frame_offsets ();
9113 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9114 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9116 if (stack_adjust && arm_arch5)
9117 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9118 else
9120 /* If we can't use ldmib (SA110 bug),
9121 then try to pop r3 instead. */
9122 if (stack_adjust)
9123 live_regs_mask |= 1 << 3;
9124 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9127 else
9128 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9130 p = instr + strlen (instr);
9132 for (reg = 0; reg <= SP_REGNUM; reg++)
9133 if (live_regs_mask & (1 << reg))
9135 int l = strlen (reg_names[reg]);
9137 if (first)
9138 first = 0;
9139 else
9141 memcpy (p, ", ", 2);
9142 p += 2;
9145 memcpy (p, "%|", 2);
9146 memcpy (p + 2, reg_names[reg], l);
9147 p += l + 2;
9150 if (live_regs_mask & (1 << LR_REGNUM))
9152 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9153 /* If returning from an interrupt, restore the CPSR. */
9154 if (IS_INTERRUPT (func_type))
9155 strcat (p, "^");
9157 else
9158 strcpy (p, "}");
9161 output_asm_insn (instr, & operand);
9163 /* See if we need to generate an extra instruction to
9164 perform the actual function return. */
9165 if (really_return
9166 && func_type != ARM_FT_INTERWORKED
9167 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9169 /* The return has already been handled
9170 by loading the LR into the PC. */
9171 really_return = 0;
9175 if (really_return)
9177 switch ((int) ARM_FUNC_TYPE (func_type))
9179 case ARM_FT_ISR:
9180 case ARM_FT_FIQ:
9181 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9182 break;
9184 case ARM_FT_INTERWORKED:
9185 sprintf (instr, "bx%s\t%%|lr", conditional);
9186 break;
9188 case ARM_FT_EXCEPTION:
9189 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9190 break;
9192 default:
9193 /* Use bx if it's available. */
9194 if (arm_arch5 || arm_arch4t)
9195 sprintf (instr, "bx%s\t%%|lr", conditional);
9196 else
9197 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9198 break;
9201 output_asm_insn (instr, & operand);
9204 return "";
9207 /* Write the function name into the code section, directly preceding
9208 the function prologue.
9210 Code will be output similar to this:
9212 .ascii "arm_poke_function_name", 0
9213 .align
9215 .word 0xff000000 + (t1 - t0)
9216 arm_poke_function_name
9217 mov ip, sp
9218 stmfd sp!, {fp, ip, lr, pc}
9219 sub fp, ip, #4
9221 When performing a stack backtrace, code can inspect the value
9222 of 'pc' stored at 'fp' + 0. If the trace function then looks
9223 at location pc - 12 and the top 8 bits are set, then we know
9224 that there is a function name embedded immediately preceding this
9225 location, whose padded length is ((pc[-3]) & 0x00ffffff).
9227 We assume that pc is declared as a pointer to an unsigned long.
9229 It is of no benefit to output the function name if we are assembling
9230 a leaf function. These function types will not contain a stack
9231 backtrace structure, therefore it is not possible to determine the
9232 function name. */
9233 void
9234 arm_poke_function_name (FILE *stream, const char *name)
9236 unsigned long alignlength;
9237 unsigned long length;
9238 rtx x;
9240 length = strlen (name) + 1;
9241 alignlength = ROUND_UP_WORD (length);
9243 ASM_OUTPUT_ASCII (stream, name, length);
9244 ASM_OUTPUT_ALIGN (stream, 2);
9245 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9246 assemble_aligned_integer (UNITS_PER_WORD, x);
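/* A sketch of the consumer side described above: given the saved PC of a
   frame, a backtracer checks the marker word at pc[-3]; if its top byte
   is 0xff, the low 24 bits hold the padded name length and the name
   string sits immediately before the marker word.  Hypothetical helper,
   not part of GCC; guarded out of the build.  */
#if 0
static const char *
sketch_embedded_function_name (const unsigned long *pc)
{
  unsigned long marker = pc[-3];

  if ((marker & 0xff000000) != 0xff000000)
    return 0;				/* No name was embedded.  */
  /* The padded name ends where the marker word begins.  */
  return (const char *) (pc - 3) - (marker & 0x00ffffff);
}
#endif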
9249 /* Place some comments into the assembler stream
9250 describing the current function. */
9251 static void
9252 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9254 unsigned long func_type;
9256 if (!TARGET_ARM)
9258 thumb_output_function_prologue (f, frame_size);
9259 return;
9262 /* Sanity check. */
9263 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9265 func_type = arm_current_func_type ();
9267 switch ((int) ARM_FUNC_TYPE (func_type))
9269 default:
9270 case ARM_FT_NORMAL:
9271 break;
9272 case ARM_FT_INTERWORKED:
9273 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9274 break;
9275 case ARM_FT_ISR:
9276 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9277 break;
9278 case ARM_FT_FIQ:
9279 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9280 break;
9281 case ARM_FT_EXCEPTION:
9282 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9283 break;
9286 if (IS_NAKED (func_type))
9287 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9289 if (IS_VOLATILE (func_type))
9290 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9292 if (IS_NESTED (func_type))
9293 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9295 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9296 current_function_args_size,
9297 current_function_pretend_args_size, frame_size);
9299 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9300 frame_pointer_needed,
9301 cfun->machine->uses_anonymous_args);
9303 if (cfun->machine->lr_save_eliminated)
9304 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9306 if (current_function_calls_eh_return)
9307 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9309 #ifdef AOF_ASSEMBLER
9310 if (flag_pic)
9311 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9312 #endif
9314 return_used_this_function = 0;
9317 const char *
9318 arm_output_epilogue (rtx sibling)
9320 int reg;
9321 unsigned long saved_regs_mask;
9322 unsigned long func_type;
9323 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9324 frame that is $fp + 4 for a non-variadic function. */
9325 int floats_offset = 0;
9326 rtx operands[3];
9327 FILE * f = asm_out_file;
9328 unsigned int lrm_count = 0;
9329 int really_return = (sibling == NULL);
9330 int start_reg;
9331 arm_stack_offsets *offsets;
9333 /* If we have already generated the return instruction
9334 then it is futile to generate anything else. */
9335 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9336 return "";
9338 func_type = arm_current_func_type ();
9340 if (IS_NAKED (func_type))
9341 /* Naked functions don't have epilogues. */
9342 return "";
9344 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9346 rtx op;
9348 /* A volatile function should never return. Call abort. */
9349 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9350 assemble_external_libcall (op);
9351 output_asm_insn ("bl\t%a0", &op);
9353 return "";
9356 /* If we are throwing an exception, then we really must be doing a
9357 return, so we can't tail-call. */
9358 gcc_assert (!current_function_calls_eh_return || really_return);
9360 offsets = arm_get_frame_offsets ();
9361 saved_regs_mask = arm_compute_save_reg_mask ();
9363 if (TARGET_IWMMXT)
9364 lrm_count = bit_count (saved_regs_mask);
9366 floats_offset = offsets->saved_args;
9367 /* Compute how far away the floats will be. */
9368 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9369 if (saved_regs_mask & (1 << reg))
9370 floats_offset += 4;
9372 if (frame_pointer_needed)
9374 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9375 int vfp_offset = offsets->frame;
9377 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9379 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9380 if (regs_ever_live[reg] && !call_used_regs[reg])
9382 floats_offset += 12;
9383 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9384 reg, FP_REGNUM, floats_offset - vfp_offset);
9387 else
9389 start_reg = LAST_FPA_REGNUM;
9391 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9393 if (regs_ever_live[reg] && !call_used_regs[reg])
9395 floats_offset += 12;
9397 /* We can't unstack more than four registers at once. */
9398 if (start_reg - reg == 3)
9400 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9401 reg, FP_REGNUM, floats_offset - vfp_offset);
9402 start_reg = reg - 1;
9405 else
9407 if (reg != start_reg)
9408 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9409 reg + 1, start_reg - reg,
9410 FP_REGNUM, floats_offset - vfp_offset);
9411 start_reg = reg - 1;
9415 /* Just in case the last register checked also needs unstacking. */
9416 if (reg != start_reg)
9417 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9418 reg + 1, start_reg - reg,
9419 FP_REGNUM, floats_offset - vfp_offset);
9422 if (TARGET_HARD_FLOAT && TARGET_VFP)
9424 int saved_size;
9426 /* The fldmx insn does not have base+offset addressing modes,
9427 so we use IP to hold the address. */
9428 saved_size = arm_get_vfp_saved_size ();
9430 if (saved_size > 0)
9432 floats_offset += saved_size;
9433 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9434 FP_REGNUM, floats_offset - vfp_offset);
9436 start_reg = FIRST_VFP_REGNUM;
9437 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9439 if ((!regs_ever_live[reg] || call_used_regs[reg])
9440 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9442 if (start_reg != reg)
9443 arm_output_fldmx (f, IP_REGNUM,
9444 (start_reg - FIRST_VFP_REGNUM) / 2,
9445 (reg - start_reg) / 2);
9446 start_reg = reg + 2;
9449 if (start_reg != reg)
9450 arm_output_fldmx (f, IP_REGNUM,
9451 (start_reg - FIRST_VFP_REGNUM) / 2,
9452 (reg - start_reg) / 2);
9455 if (TARGET_IWMMXT)
9457 /* The frame pointer is guaranteed to be non-double-word aligned.
9458 This is because it is set to (old_stack_pointer - 4) and the
9459 old_stack_pointer was double word aligned. Thus the offset to
9460 the iWMMXt registers to be loaded must also be non-double-word
9461 sized, so that the resultant address *is* double-word aligned.
9462 We can ignore floats_offset since that was already included in
9463 the live_regs_mask. */
9464 lrm_count += (lrm_count % 2 ? 2 : 1);
9466 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9467 if (regs_ever_live[reg] && !call_used_regs[reg])
9469 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9470 reg, FP_REGNUM, lrm_count * 4);
9471 lrm_count += 2;
9475 /* saved_regs_mask should contain the IP, which at the time of stack
9476 frame generation actually contains the old stack pointer. So a
9477 quick way to unwind the stack is just to pop the IP register directly
9478 into the stack pointer. */
9479 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9480 saved_regs_mask &= ~ (1 << IP_REGNUM);
9481 saved_regs_mask |= (1 << SP_REGNUM);
9483 /* There are two registers left in saved_regs_mask - LR and PC. We
9484 only need to restore the LR register (the return address), but to
9485 save time we can load it directly into the PC, unless we need a
9486 special function exit sequence, or we are not really returning. */
9487 if (really_return
9488 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9489 && !current_function_calls_eh_return)
9490 /* Delete the LR from the register mask, so that the LR on
9491 the stack is loaded into the PC in the register mask. */
9492 saved_regs_mask &= ~ (1 << LR_REGNUM);
9493 else
9494 saved_regs_mask &= ~ (1 << PC_REGNUM);
9496 /* We must use SP as the base register, because SP is one of the
9497 registers being restored. If an interrupt or page fault
9498 happens in the ldm instruction, the SP might or might not
9499 have been restored. That would be bad, as then SP will no
9500 longer indicate the safe area of stack, and we can get stack
9501 corruption. Using SP as the base register means that it will
9502 be reset correctly to the original value, should an interrupt
9503 occur. If the stack pointer already points at the right
9504 place, then omit the subtraction. */
9505 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9506 || current_function_calls_alloca)
9507 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9508 4 * bit_count (saved_regs_mask));
9509 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9511 if (IS_INTERRUPT (func_type))
9512 /* Interrupt handlers will have pushed the
9513 IP onto the stack, so restore it now. */
9514 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9516 else
9518 /* Restore stack pointer if necessary. */
9519 if (offsets->outgoing_args != offsets->saved_regs)
9521 operands[0] = operands[1] = stack_pointer_rtx;
9522 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9523 output_add_immediate (operands);
9526 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9528 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9529 if (regs_ever_live[reg] && !call_used_regs[reg])
9530 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9531 reg, SP_REGNUM);
9533 else
9535 start_reg = FIRST_FPA_REGNUM;
9537 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9539 if (regs_ever_live[reg] && !call_used_regs[reg])
9541 if (reg - start_reg == 3)
9543 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9544 start_reg, SP_REGNUM);
9545 start_reg = reg + 1;
9548 else
9550 if (reg != start_reg)
9551 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9552 start_reg, reg - start_reg,
9553 SP_REGNUM);
9555 start_reg = reg + 1;
9559 /* Just in case the last register checked also needs unstacking. */
9560 if (reg != start_reg)
9561 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9562 start_reg, reg - start_reg, SP_REGNUM);
9565 if (TARGET_HARD_FLOAT && TARGET_VFP)
9567 start_reg = FIRST_VFP_REGNUM;
9568 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9570 if ((!regs_ever_live[reg] || call_used_regs[reg])
9571 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9573 if (start_reg != reg)
9574 arm_output_fldmx (f, SP_REGNUM,
9575 (start_reg - FIRST_VFP_REGNUM) / 2,
9576 (reg - start_reg) / 2);
9577 start_reg = reg + 2;
9580 if (start_reg != reg)
9581 arm_output_fldmx (f, SP_REGNUM,
9582 (start_reg - FIRST_VFP_REGNUM) / 2,
9583 (reg - start_reg) / 2);
9585 if (TARGET_IWMMXT)
9586 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9587 if (regs_ever_live[reg] && !call_used_regs[reg])
9588 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9590 /* If we can, restore the LR into the PC. */
9591 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9592 && really_return
9593 && current_function_pretend_args_size == 0
9594 && saved_regs_mask & (1 << LR_REGNUM)
9595 && !current_function_calls_eh_return)
9597 saved_regs_mask &= ~ (1 << LR_REGNUM);
9598 saved_regs_mask |= (1 << PC_REGNUM);
9601 /* Load the registers off the stack. If we only have one register
9602 to load, use the LDR instruction - it is faster. */
9603 if (saved_regs_mask == (1 << LR_REGNUM))
9605 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9607 else if (saved_regs_mask)
9609 if (saved_regs_mask & (1 << SP_REGNUM))
9610 /* Note - write back to the stack register is not enabled
9611 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9612 in the list of registers and if we add writeback the
9613 instruction becomes UNPREDICTABLE. */
9614 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9615 else
9616 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9619 if (current_function_pretend_args_size)
9621 /* Unwind the pre-pushed regs. */
9622 operands[0] = operands[1] = stack_pointer_rtx;
9623 operands[2] = GEN_INT (current_function_pretend_args_size);
9624 output_add_immediate (operands);
9628 /* We may have already restored PC directly from the stack. */
9629 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9630 return "";
9632 /* Stack adjustment for exception handler. */
9633 if (current_function_calls_eh_return)
9634 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9635 ARM_EH_STACKADJ_REGNUM);
9637 /* Generate the return instruction. */
9638 switch ((int) ARM_FUNC_TYPE (func_type))
9640 case ARM_FT_ISR:
9641 case ARM_FT_FIQ:
9642 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9643 break;
9645 case ARM_FT_EXCEPTION:
9646 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9647 break;
9649 case ARM_FT_INTERWORKED:
9650 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9651 break;
9653 default:
9654 if (arm_arch5 || arm_arch4t)
9655 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9656 else
9657 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9658 break;
9661 return "";
9664 static void
9665 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9666 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9668 arm_stack_offsets *offsets;
9670 if (TARGET_THUMB)
9672 int regno;
9674 /* Emit any call-via-reg trampolines that are needed for v4t support
9675 of call_reg and call_value_reg type insns. */
9676 for (regno = 0; regno < LR_REGNUM; regno++)
9678 rtx label = cfun->machine->call_via[regno];
9680 if (label != NULL)
9682 function_section (current_function_decl);
9683 targetm.asm_out.internal_label (asm_out_file, "L",
9684 CODE_LABEL_NUMBER (label));
9685 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9689 /* ??? Probably not safe to set this here, since it assumes that a
9690 function will be emitted as assembly immediately after we generate
9691 RTL for it. This does not happen for inline functions. */
9692 return_used_this_function = 0;
9694 else
9696 /* We need to take into account any stack-frame rounding. */
9697 offsets = arm_get_frame_offsets ();
9699 gcc_assert (!use_return_insn (FALSE, NULL)
9700 || !return_used_this_function
9701 || offsets->saved_regs == offsets->outgoing_args
9702 || frame_pointer_needed);
9704 /* Reset the ARM-specific per-function variables. */
9705 after_arm_reorg = 0;
9709 /* Generate and emit an insn that we will recognize as a push_multi.
9710 Unfortunately, since this insn does not reflect very well the actual
9711 semantics of the operation, we need to annotate the insn for the benefit
9712 of DWARF2 frame unwind information. */
9713 static rtx
9714 emit_multi_reg_push (unsigned long mask)
9716 int num_regs = 0;
9717 int num_dwarf_regs;
9718 int i, j;
9719 rtx par;
9720 rtx dwarf;
9721 int dwarf_par_index;
9722 rtx tmp, reg;
9724 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9725 if (mask & (1 << i))
9726 num_regs++;
9728 gcc_assert (num_regs && num_regs <= 16);
9730 /* We don't record the PC in the dwarf frame information. */
9731 num_dwarf_regs = num_regs;
9732 if (mask & (1 << PC_REGNUM))
9733 num_dwarf_regs--;
9735 /* For the body of the insn we are going to generate an UNSPEC in
9736 parallel with several USEs. This allows the insn to be recognized
9737 by the push_multi pattern in the arm.md file. The insn looks
9738 something like this:
9740 (parallel [
9741 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9742 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9743 (use (reg:SI 11 fp))
9744 (use (reg:SI 12 ip))
9745 (use (reg:SI 14 lr))
9746 (use (reg:SI 15 pc))
9749 For the frame note however, we try to be more explicit and actually
9750 show each register being stored into the stack frame, plus a (single)
9751 decrement of the stack pointer. We do it this way in order to be
9752 friendly to the stack unwinding code, which only wants to see a single
9753 stack decrement per instruction. The RTL we generate for the note looks
9754 something like this:
9756 (sequence [
9757 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9758 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9759 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9760 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9761 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9764 This sequence is used both by the code to support stack unwinding for
9765 exception handlers and the code to generate dwarf2 frame debugging. */
9767 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9768 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9769 dwarf_par_index = 1;
9771 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9773 if (mask & (1 << i))
9775 reg = gen_rtx_REG (SImode, i);
9777 XVECEXP (par, 0, 0)
9778 = gen_rtx_SET (VOIDmode,
9779 gen_rtx_MEM (BLKmode,
9780 gen_rtx_PRE_DEC (BLKmode,
9781 stack_pointer_rtx)),
9782 gen_rtx_UNSPEC (BLKmode,
9783 gen_rtvec (1, reg),
9784 UNSPEC_PUSH_MULT));
9786 if (i != PC_REGNUM)
9788 tmp = gen_rtx_SET (VOIDmode,
9789 gen_rtx_MEM (SImode, stack_pointer_rtx),
9790 reg);
9791 RTX_FRAME_RELATED_P (tmp) = 1;
9792 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9793 dwarf_par_index++;
9796 break;
9800 for (j = 1, i++; j < num_regs; i++)
9802 if (mask & (1 << i))
9804 reg = gen_rtx_REG (SImode, i);
9806 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9808 if (i != PC_REGNUM)
9810 tmp = gen_rtx_SET (VOIDmode,
9811 gen_rtx_MEM (SImode,
9812 plus_constant (stack_pointer_rtx,
9813 4 * j)),
9814 reg);
9815 RTX_FRAME_RELATED_P (tmp) = 1;
9816 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9819 j++;
9823 par = emit_insn (par);
9825 tmp = gen_rtx_SET (SImode,
9826 stack_pointer_rtx,
9827 gen_rtx_PLUS (SImode,
9828 stack_pointer_rtx,
9829 GEN_INT (-4 * num_regs)));
9830 RTX_FRAME_RELATED_P (tmp) = 1;
9831 XVECEXP (dwarf, 0, 0) = tmp;
9833 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9834 REG_NOTES (par));
9835 return par;
9838 /* Calculate the size of the return value that is passed in registers. */
9839 static int
9840 arm_size_return_regs (void)
9842 enum machine_mode mode;
9844 if (current_function_return_rtx != 0)
9845 mode = GET_MODE (current_function_return_rtx);
9846 else
9847 mode = DECL_MODE (DECL_RESULT (current_function_decl));
9849 return GET_MODE_SIZE (mode);
9852 static rtx
9853 emit_sfm (int base_reg, int count)
9855 rtx par;
9856 rtx dwarf;
9857 rtx tmp, reg;
9858 int i;
9860 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9861 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9863 reg = gen_rtx_REG (XFmode, base_reg++);
9865 XVECEXP (par, 0, 0)
9866 = gen_rtx_SET (VOIDmode,
9867 gen_rtx_MEM (BLKmode,
9868 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9869 gen_rtx_UNSPEC (BLKmode,
9870 gen_rtvec (1, reg),
9871 UNSPEC_PUSH_MULT));
9872 tmp = gen_rtx_SET (VOIDmode,
9873 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9874 RTX_FRAME_RELATED_P (tmp) = 1;
9875 XVECEXP (dwarf, 0, 1) = tmp;
9877 for (i = 1; i < count; i++)
9879 reg = gen_rtx_REG (XFmode, base_reg++);
9880 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9882 tmp = gen_rtx_SET (VOIDmode,
9883 gen_rtx_MEM (XFmode,
9884 plus_constant (stack_pointer_rtx,
9885 i * 12)),
9886 reg);
9887 RTX_FRAME_RELATED_P (tmp) = 1;
9888 XVECEXP (dwarf, 0, i + 1) = tmp;
9891 tmp = gen_rtx_SET (VOIDmode,
9892 stack_pointer_rtx,
9893 gen_rtx_PLUS (SImode,
9894 stack_pointer_rtx,
9895 GEN_INT (-12 * count)));
9896 RTX_FRAME_RELATED_P (tmp) = 1;
9897 XVECEXP (dwarf, 0, 0) = tmp;
9899 par = emit_insn (par);
9900 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9901 REG_NOTES (par));
9902 return par;
9906 /* Return true if the current function needs to save/restore LR. */
9908 static bool
9909 thumb_force_lr_save (void)
9911 return !cfun->machine->lr_save_eliminated
9912 && (!leaf_function_p ()
9913 || thumb_far_jump_used_p ()
9914 || regs_ever_live [LR_REGNUM]);
9918 /* Compute the distance from register FROM to register TO.
9919 These can be the arg pointer (26), the soft frame pointer (25),
9920 the stack pointer (13) or the hard frame pointer (11).
9921 In thumb mode r7 is used as the soft frame pointer, if needed.
9922 Typical stack layout looks like this:
9924 old stack pointer -> | |
9925 ----
9926 | | \
9927 | | saved arguments for
9928 | | vararg functions
9929 | | /
9931 hard FP & arg pointer -> | | \
9932 | | stack
9933 | | frame
9934 | | /
9936 | | \
9937 | | call saved
9938 | | registers
9939 soft frame pointer -> | | /
9941 | | \
9942 | | local
9943 | | variables
9944 | | /
9946 | | \
9947 | | outgoing
9948 | | arguments
9949 current stack pointer -> | | /
9952 For a given function some or all of these stack components
9953 may not be needed, giving rise to the possibility of
9954 eliminating some of the registers.
9956 The values returned by this function must reflect the behavior
9957 of arm_expand_prologue() and arm_compute_save_reg_mask().
9959 The sign of the number returned reflects the direction of stack
9960 growth, so the values are positive for all eliminations except
9961 from the soft frame pointer to the hard frame pointer.
9963 SFP may point just inside the local variables block to ensure correct
9964 alignment. */
9967 /* Calculate stack offsets. These are used to calculate register elimination
9968 offsets and in prologue/epilogue code. */
9970 static arm_stack_offsets *
9971 arm_get_frame_offsets (void)
9973 struct arm_stack_offsets *offsets;
9974 unsigned long func_type;
9975 int leaf;
9976 int saved;
9977 HOST_WIDE_INT frame_size;
9979 offsets = &cfun->machine->stack_offsets;
9981 /* We need to know if we are a leaf function. Unfortunately, it
9982 is possible to be called after start_sequence has been called,
9983 which causes get_insns to return the insns for the sequence,
9984 not the function, which will cause leaf_function_p to return
9985 the incorrect result.  To work around this, we cache the computed
9986 frame size.  This works because we will only be calling RTL expanders
9987 that need to know about leaf functions once reload has completed, and the
9988 frame size cannot be changed after that time, so we can safely
9989 use the cached value. */
9991 if (reload_completed)
9992 return offsets;
9994 /* Initially this is the size of the local variables. It will be translated
9995 into an offset once we have determined the size of preceding data. */
9996 frame_size = ROUND_UP_WORD (get_frame_size ());
9998 leaf = leaf_function_p ();
10000 /* Space for variadic functions. */
10001 offsets->saved_args = current_function_pretend_args_size;
10003 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10005 if (TARGET_ARM)
10007 unsigned int regno;
10009 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10011 /* We know that SP will be doubleword aligned on entry, and we must
10012 preserve that condition at any subroutine call. We also require the
10013 soft frame pointer to be doubleword aligned. */
10015 if (TARGET_REALLY_IWMMXT)
10017 /* Check for the call-saved iWMMXt registers. */
10018 for (regno = FIRST_IWMMXT_REGNUM;
10019 regno <= LAST_IWMMXT_REGNUM;
10020 regno++)
10021 if (regs_ever_live [regno] && ! call_used_regs [regno])
10022 saved += 8;
10025 func_type = arm_current_func_type ();
10026 if (! IS_VOLATILE (func_type))
10028 /* Space for saved FPA registers. */
10029 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10030 if (regs_ever_live[regno] && ! call_used_regs[regno])
10031 saved += 12;
10033 /* Space for saved VFP registers. */
10034 if (TARGET_HARD_FLOAT && TARGET_VFP)
10035 saved += arm_get_vfp_saved_size ();
10038 else /* TARGET_THUMB */
10040 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10041 if (TARGET_BACKTRACE)
10042 saved += 16;
10045 /* Saved registers include the stack frame. */
10046 offsets->saved_regs = offsets->saved_args + saved;
10047 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10048 /* A leaf function does not need any stack alignment if it has nothing
10049 on the stack. */
10050 if (leaf && frame_size == 0)
10052 offsets->outgoing_args = offsets->soft_frame;
10053 return offsets;
10056 /* Ensure SFP has the correct alignment. */
10057 if (ARM_DOUBLEWORD_ALIGN
10058 && (offsets->soft_frame & 7))
10059 offsets->soft_frame += 4;
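/* Worked example (illustrative): offsets are always word aligned, so
   a soft_frame offset of 20 has (20 & 7) == 4 and is bumped to 24,
   which is doubleword aligned.  */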
10061 offsets->outgoing_args = offsets->soft_frame + frame_size
10062 + current_function_outgoing_args_size;
10064 if (ARM_DOUBLEWORD_ALIGN)
10066 /* Ensure SP remains doubleword aligned. */
10067 if (offsets->outgoing_args & 7)
10068 offsets->outgoing_args += 4;
10069 gcc_assert (!(offsets->outgoing_args & 7));
10072 return offsets;
10076 /* Calculate the relative offsets for the different stack pointers. Positive
10077 offsets are in the direction of stack growth. */
10079 HOST_WIDE_INT
10080 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10082 arm_stack_offsets *offsets;
10084 offsets = arm_get_frame_offsets ();
10086 /* OK, now we have enough information to compute the distances.
10087 There must be an entry in these switch tables for each pair
10088 of registers in ELIMINABLE_REGS, even if some of the entries
10089 seem to be redundant or useless. */
10090 switch (from)
10092 case ARG_POINTER_REGNUM:
10093 switch (to)
10095 case THUMB_HARD_FRAME_POINTER_REGNUM:
10096 return 0;
10098 case FRAME_POINTER_REGNUM:
10099 /* This is the reverse of the soft frame pointer
10100 to hard frame pointer elimination below. */
10101 return offsets->soft_frame - offsets->saved_args;
10103 case ARM_HARD_FRAME_POINTER_REGNUM:
10104 /* If there is no stack frame then the hard
10105 frame pointer and the arg pointer coincide. */
10106 if (offsets->frame == offsets->saved_regs)
10107 return 0;
10108 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10109 return (frame_pointer_needed
10110 && cfun->static_chain_decl != NULL
10111 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10113 case STACK_POINTER_REGNUM:
10114 /* If nothing has been pushed on the stack at all
10115 then this will return -4. This *is* correct! */
10116 return offsets->outgoing_args - (offsets->saved_args + 4);
10118 default:
10119 gcc_unreachable ();
10121 gcc_unreachable ();
10123 case FRAME_POINTER_REGNUM:
10124 switch (to)
10126 case THUMB_HARD_FRAME_POINTER_REGNUM:
10127 return 0;
10129 case ARM_HARD_FRAME_POINTER_REGNUM:
10130 /* The hard frame pointer points to the top entry in the
10131 stack frame. The soft frame pointer to the bottom entry
10132 in the stack frame. If there is no stack frame at all,
10133 then they are identical. */
10135 return offsets->frame - offsets->soft_frame;
10137 case STACK_POINTER_REGNUM:
10138 return offsets->outgoing_args - offsets->soft_frame;
10140 default:
10141 gcc_unreachable ();
10143 gcc_unreachable ();
10145 default:
10146 /* You cannot eliminate from the stack pointer.
10147 In theory you could eliminate from the hard frame
10148 pointer to the stack pointer, but this will never
10149 happen, since if a stack frame is not needed the
10150 hard frame pointer will never be used. */
10151 gcc_unreachable ();
10156 /* Generate the prologue instructions for entry into an ARM function. */
10157 void
10158 arm_expand_prologue (void)
10160 int reg;
10161 rtx amount;
10162 rtx insn;
10163 rtx ip_rtx;
10164 unsigned long live_regs_mask;
10165 unsigned long func_type;
10166 int fp_offset = 0;
10167 int saved_pretend_args = 0;
10168 int saved_regs = 0;
10169 unsigned HOST_WIDE_INT args_to_push;
10170 arm_stack_offsets *offsets;
10172 func_type = arm_current_func_type ();
10174 /* Naked functions don't have prologues. */
10175 if (IS_NAKED (func_type))
10176 return;
10178 /* Make a copy of current_function_pretend_args_size, as we may need to modify it locally. */
10179 args_to_push = current_function_pretend_args_size;
10181 /* Compute which registers we will have to save onto the stack. */
10182 live_regs_mask = arm_compute_save_reg_mask ();
10184 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10186 if (frame_pointer_needed)
10188 if (IS_INTERRUPT (func_type))
10190 /* Interrupt functions must not corrupt any registers.
10191 Creating a frame pointer, however, corrupts the IP
10192 register, so we must push it first. */
10193 insn = emit_multi_reg_push (1 << IP_REGNUM);
10195 /* Do not set RTX_FRAME_RELATED_P on this insn.
10196 The dwarf stack unwinding code only wants to see one
10197 stack decrement per function, and this is not it. If
10198 this instruction is labeled as being part of the frame
10199 creation sequence then dwarf2out_frame_debug_expr will
10200 die when it encounters the assignment of IP to FP
10201 later on, since the use of SP here establishes SP as
10202 the CFA register and not IP.
10204 Anyway this instruction is not really part of the stack
10205 frame creation although it is part of the prologue. */
10207 else if (IS_NESTED (func_type))
10209 /* The static chain register is the same as the IP register,
10210 which is used as a scratch register during stack frame creation.
10211 To get around this we need to find somewhere to store IP
10212 whilst the frame is being created. We try the following
10213 places in order:
10215 1. The last argument register.
10216 2. A slot on the stack above the frame. (This only
10217 works if the function is not a varargs function).
10218 3. Register r3, after pushing the argument registers
10219 onto the stack.
10221 Note - we only need to tell the dwarf2 backend about the SP
10222 adjustment in the second variant; the static chain register
10223 doesn't need to be unwound, as it doesn't contain a value
10224 inherited from the caller. */
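/* A sketch of variant 1 (illustrative, not from the sources): when r3
   is free the prologue simply emits

	mov	r3, ip		@ save IP in the last argument register
	...			@ frame creation clobbers IP
	mov	ip, r3		@ IP is recovered below

   with no stack traffic and nothing for the unwinder to record.  */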
10226 if (regs_ever_live[3] == 0)
10228 insn = gen_rtx_REG (SImode, 3);
10229 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10230 insn = emit_insn (insn);
10232 else if (args_to_push == 0)
10234 rtx dwarf;
10235 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10236 insn = gen_rtx_MEM (SImode, insn);
10237 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10238 insn = emit_insn (insn);
10240 fp_offset = 4;
10242 /* Just tell the dwarf backend that we adjusted SP. */
10243 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10244 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10245 GEN_INT (-fp_offset)));
10246 RTX_FRAME_RELATED_P (insn) = 1;
10247 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10248 dwarf, REG_NOTES (insn));
10250 else
10252 /* Store the args on the stack. */
10253 if (cfun->machine->uses_anonymous_args)
10254 insn = emit_multi_reg_push
10255 ((0xf0 >> (args_to_push / 4)) & 0xf);
10256 else
10257 insn = emit_insn
10258 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10259 GEN_INT (- args_to_push)));
10261 RTX_FRAME_RELATED_P (insn) = 1;
10263 saved_pretend_args = 1;
10264 fp_offset = args_to_push;
10265 args_to_push = 0;
10267 /* Now reuse r3 to preserve IP. */
10268 insn = gen_rtx_REG (SImode, 3);
10269 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10270 (void) emit_insn (insn);
10274 if (fp_offset)
10276 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10277 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10279 else
10280 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10282 insn = emit_insn (insn);
10283 RTX_FRAME_RELATED_P (insn) = 1;
10286 if (args_to_push)
10288 /* Push the argument registers, or reserve space for them. */
10289 if (cfun->machine->uses_anonymous_args)
10290 insn = emit_multi_reg_push
10291 ((0xf0 >> (args_to_push / 4)) & 0xf);
10292 else
10293 insn = emit_insn
10294 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10295 GEN_INT (- args_to_push)));
10296 RTX_FRAME_RELATED_P (insn) = 1;
10299 /* If this is an interrupt service routine, and the link register
10300 is going to be pushed, and we are not creating a stack frame
10301 (which would involve an extra push of IP and a pop in the epilogue),
10302 then subtracting four from LR now will mean that the function return
10303 can be done with a single instruction. */
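/* Illustrative consequence: with LR already biased by -4, the ISR can
   return with the single instruction

	ldmfd	sp!, {..., pc}^

   instead of popping LR and then executing "subs pc, lr, #4".  */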
10304 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10305 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10306 && ! frame_pointer_needed)
10307 emit_insn (gen_rtx_SET (SImode,
10308 gen_rtx_REG (SImode, LR_REGNUM),
10309 gen_rtx_PLUS (SImode,
10310 gen_rtx_REG (SImode, LR_REGNUM),
10311 GEN_INT (-4))));
10313 if (live_regs_mask)
10315 insn = emit_multi_reg_push (live_regs_mask);
10316 saved_regs += bit_count (live_regs_mask) * 4;
10317 RTX_FRAME_RELATED_P (insn) = 1;
10320 if (TARGET_IWMMXT)
10321 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10322 if (regs_ever_live[reg] && ! call_used_regs [reg])
10324 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10325 insn = gen_rtx_MEM (V2SImode, insn);
10326 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10327 gen_rtx_REG (V2SImode, reg)));
10328 RTX_FRAME_RELATED_P (insn) = 1;
10329 saved_regs += 8;
10332 if (! IS_VOLATILE (func_type))
10334 int start_reg;
10336 /* Save any floating point call-saved registers used by this
10337 function. */
10338 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10340 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10341 if (regs_ever_live[reg] && !call_used_regs[reg])
10343 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10344 insn = gen_rtx_MEM (XFmode, insn);
10345 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10346 gen_rtx_REG (XFmode, reg)));
10347 RTX_FRAME_RELATED_P (insn) = 1;
10348 saved_regs += 12;
10351 else
10353 start_reg = LAST_FPA_REGNUM;
10355 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10357 if (regs_ever_live[reg] && !call_used_regs[reg])
10359 if (start_reg - reg == 3)
10361 insn = emit_sfm (reg, 4);
10362 RTX_FRAME_RELATED_P (insn) = 1;
10363 saved_regs += 48;
10364 start_reg = reg - 1;
10367 else
10369 if (start_reg != reg)
10371 insn = emit_sfm (reg + 1, start_reg - reg);
10372 RTX_FRAME_RELATED_P (insn) = 1;
10373 saved_regs += (start_reg - reg) * 12;
10375 start_reg = reg - 1;
10379 if (start_reg != reg)
10381 insn = emit_sfm (reg + 1, start_reg - reg);
10382 saved_regs += (start_reg - reg) * 12;
10383 RTX_FRAME_RELATED_P (insn) = 1;
10386 if (TARGET_HARD_FLOAT && TARGET_VFP)
10388 start_reg = FIRST_VFP_REGNUM;
10390 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10392 if ((!regs_ever_live[reg] || call_used_regs[reg])
10393 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10395 if (start_reg != reg)
10396 saved_regs += vfp_emit_fstmx (start_reg,
10397 (reg - start_reg) / 2);
10398 start_reg = reg + 2;
10401 if (start_reg != reg)
10402 saved_regs += vfp_emit_fstmx (start_reg,
10403 (reg - start_reg) / 2);
10407 if (frame_pointer_needed)
10409 /* Create the new frame pointer. */
10410 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10411 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10412 RTX_FRAME_RELATED_P (insn) = 1;
10414 if (IS_NESTED (func_type))
10416 /* Recover the static chain register. */
10417 if (regs_ever_live [3] == 0
10418 || saved_pretend_args)
10419 insn = gen_rtx_REG (SImode, 3);
10420 else /* if (current_function_pretend_args_size == 0) */
10422 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10423 GEN_INT (4));
10424 insn = gen_rtx_MEM (SImode, insn);
10427 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10428 /* Add a USE to stop propagate_one_insn() from barfing. */
10429 emit_insn (gen_prologue_use (ip_rtx));
10433 offsets = arm_get_frame_offsets ();
10434 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10436 /* This add can produce multiple insns for a large constant, so we
10437 need to get tricky. */
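/* For example (illustrative), an adjustment of 0x1008 bytes cannot be
   encoded as a single ARM immediate (8 bits rotated by an even
   amount), so the add below expands to two instructions; the loop
   that follows marks each of them RTX_FRAME_RELATED_P.  */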
10438 rtx last = get_last_insn ();
10440 amount = GEN_INT (offsets->saved_args + saved_regs
10441 - offsets->outgoing_args);
10443 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10444 amount));
10447 last = last ? NEXT_INSN (last) : get_insns ();
10448 RTX_FRAME_RELATED_P (last) = 1;
10450 while (last != insn);
10452 /* If the frame pointer is needed, emit a special barrier that
10453 will prevent the scheduler from moving stores to the frame
10454 before the stack adjustment. */
10455 if (frame_pointer_needed)
10456 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10457 hard_frame_pointer_rtx));
10461 if (flag_pic)
10462 arm_load_pic_register (INVALID_REGNUM);
10464 /* If we are profiling, make sure no instructions are scheduled before
10465 the call to mcount. Similarly if the user has requested no
10466 scheduling in the prologue. */
10467 if (current_function_profile || !TARGET_SCHED_PROLOG)
10468 emit_insn (gen_blockage ());
10470 /* If the link register is being kept alive, with the return address in it,
10471 then make sure that it does not get reused by the ce2 pass. */
10472 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10474 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10475 cfun->machine->lr_save_eliminated = 1;
10479 /* If CODE is 'd', then the X is a condition operand and the instruction
10480 should only be executed if the condition is true.
10481 If CODE is 'D', then the X is a condition operand and the instruction
10482 should only be executed if the condition is false: however, if the mode
10483 of the comparison is CCFPEmode, then always execute the instruction -- we
10484 do this because in these circumstances !GE does not necessarily imply LT;
10485 in these cases the instruction pattern will take care to make sure that
10486 an instruction containing %d will follow, thereby undoing the effects of
10487 doing this instruction unconditionally.
10488 If CODE is 'N' then X is a floating point operand that must be negated
10489 before output.
10490 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10491 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
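/* Illustrative use from a machine description (the template below is
   made up): a pattern such as "add%d3\t%0, %1, #1" prints the
   condition of operand 3 via the 'd' code, producing e.g.
   "addeq r0, r1, #1" when operand 3 is an EQ comparison.  */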
10492 void
10493 arm_print_operand (FILE *stream, rtx x, int code)
10495 switch (code)
10497 case '@':
10498 fputs (ASM_COMMENT_START, stream);
10499 return;
10501 case '_':
10502 fputs (user_label_prefix, stream);
10503 return;
10505 case '|':
10506 fputs (REGISTER_PREFIX, stream);
10507 return;
10509 case '?':
10510 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10512 if (TARGET_THUMB)
10514 output_operand_lossage ("predicated Thumb instruction");
10515 break;
10517 if (current_insn_predicate != NULL)
10519 output_operand_lossage
10520 ("predicated instruction in conditional sequence");
10521 break;
10524 fputs (arm_condition_codes[arm_current_cc], stream);
10526 else if (current_insn_predicate)
10528 enum arm_cond_code code;
10530 if (TARGET_THUMB)
10532 output_operand_lossage ("predicated Thumb instruction");
10533 break;
10536 code = get_arm_condition_code (current_insn_predicate);
10537 fputs (arm_condition_codes[code], stream);
10539 return;
10541 case 'N':
10543 REAL_VALUE_TYPE r;
10544 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10545 r = REAL_VALUE_NEGATE (r);
10546 fprintf (stream, "%s", fp_const_from_val (&r));
10548 return;
10550 case 'B':
10551 if (GET_CODE (x) == CONST_INT)
10553 HOST_WIDE_INT val;
10554 val = ARM_SIGN_EXTEND (~INTVAL (x));
10555 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10557 else
10559 putc ('~', stream);
10560 output_addr_const (stream, x);
10562 return;
10564 case 'i':
10565 fprintf (stream, "%s", arithmetic_instr (x, 1));
10566 return;
10568 /* Truncate Cirrus shift counts. */
10569 case 's':
10570 if (GET_CODE (x) == CONST_INT)
10572 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10573 return;
10575 arm_print_operand (stream, x, 0);
10576 return;
10578 case 'I':
10579 fprintf (stream, "%s", arithmetic_instr (x, 0));
10580 return;
10582 case 'S':
10584 HOST_WIDE_INT val;
10585 const char * shift = shift_op (x, &val);
10587 if (shift)
10589 fprintf (stream, ", %s ", shift_op (x, &val));
10590 if (val == -1)
10591 arm_print_operand (stream, XEXP (x, 1), 0);
10592 else
10593 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10596 return;
10598 /* An explanation of the 'Q', 'R' and 'H' register operands:
10600 In a pair of registers containing a DI or DF value the 'Q'
10601 operand returns the register number of the register containing
10602 the least significant part of the value. The 'R' operand returns
10603 the register number of the register containing the most
10604 significant part of the value.
10606 The 'H' operand returns the higher of the two register numbers.
10607 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10608 same as the 'Q' operand, since the most significant part of the
10609 value is held in the lower number register. The reverse is true
10610 on systems where WORDS_BIG_ENDIAN is false.
10612 The purpose of these operands is to distinguish between cases
10613 where the endian-ness of the values is important (for example
10614 when they are added together), and cases where the endian-ness
10615 is irrelevant, but the order of register operations is important.
10616 For example when loading a value from memory into a register
10617 pair, the endian-ness does not matter. Provided that the value
10618 from the lower memory address is put into the lower numbered
10619 register, and the value from the higher address is put into the
10620 higher numbered register, the load will work regardless of whether
10621 the value being loaded is big-wordian or little-wordian. The
10622 order of the two register loads can matter however, if the address
10623 of the memory location is actually held in one of the registers
10624 being overwritten by the load. */
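/* Concrete case (illustrative): for a DImode value in {r0, r1} on a
   little-endian target, %Q prints r0, %R prints r1 and %H prints r1;
   on a big-endian target %Q and %H both print r1 while %R prints
   r0.  */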
10625 case 'Q':
10626 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10628 output_operand_lossage ("invalid operand for code '%c'", code);
10629 return;
10632 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10633 return;
10635 case 'R':
10636 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10638 output_operand_lossage ("invalid operand for code '%c'", code);
10639 return;
10642 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10643 return;
10645 case 'H':
10646 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10648 output_operand_lossage ("invalid operand for code '%c'", code);
10649 return;
10652 asm_fprintf (stream, "%r", REGNO (x) + 1);
10653 return;
10655 case 'm':
10656 asm_fprintf (stream, "%r",
10657 GET_CODE (XEXP (x, 0)) == REG
10658 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10659 return;
10661 case 'M':
10662 asm_fprintf (stream, "{%r-%r}",
10663 REGNO (x),
10664 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10665 return;
10667 case 'd':
10668 /* CONST_TRUE_RTX means always -- that's the default. */
10669 if (x == const_true_rtx)
10670 return;
10672 if (!COMPARISON_P (x))
10674 output_operand_lossage ("invalid operand for code '%c'", code);
10675 return;
10678 fputs (arm_condition_codes[get_arm_condition_code (x)],
10679 stream);
10680 return;
10682 case 'D':
10683 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10684 want to do that. */
10685 if (x == const_true_rtx)
10687 output_operand_lossage ("instruction never executed");
10688 return;
10690 if (!COMPARISON_P (x))
10692 output_operand_lossage ("invalid operand for code '%c'", code);
10693 return;
10696 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10697 (get_arm_condition_code (x))],
10698 stream);
10699 return;
10701 /* Cirrus registers can be accessed in a variety of ways:
10702 single floating point (f)
10703 double floating point (d)
10704 32bit integer (fx)
10705 64bit integer (dx). */
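/* So, assuming the register's printed name is "mv5" (illustrative),
   'W' prints "mvf5", 'X' prints "mvd5", 'Y' prints "mvfx5" and 'Z'
   prints "mvdx5".  */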
10706 case 'W': /* Cirrus register in F mode. */
10707 case 'X': /* Cirrus register in D mode. */
10708 case 'Y': /* Cirrus register in FX mode. */
10709 case 'Z': /* Cirrus register in DX mode. */
10710 gcc_assert (GET_CODE (x) == REG
10711 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
10713 fprintf (stream, "mv%s%s",
10714 code == 'W' ? "f"
10715 : code == 'X' ? "d"
10716 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10718 return;
10720 /* Print a Cirrus register in the mode specified by the register's mode. */
10721 case 'V':
10723 int mode = GET_MODE (x);
10725 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10727 output_operand_lossage ("invalid operand for code '%c'", code);
10728 return;
10731 fprintf (stream, "mv%s%s",
10732 mode == DFmode ? "d"
10733 : mode == SImode ? "fx"
10734 : mode == DImode ? "dx"
10735 : "f", reg_names[REGNO (x)] + 2);
10737 return;
10740 case 'U':
10741 if (GET_CODE (x) != REG
10742 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10743 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10744 /* Bad value for wCG register number. */
10746 output_operand_lossage ("invalid operand for code '%c'", code);
10747 return;
10750 else
10751 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10752 return;
10754 /* Print an iWMMXt control register name. */
10755 case 'w':
10756 if (GET_CODE (x) != CONST_INT
10757 || INTVAL (x) < 0
10758 || INTVAL (x) >= 16)
10759 /* Bad value for wC register number. */
10761 output_operand_lossage ("invalid operand for code '%c'", code);
10762 return;
10765 else
10767 static const char * wc_reg_names [16] =
10769 "wCID", "wCon", "wCSSF", "wCASF",
10770 "wC4", "wC5", "wC6", "wC7",
10771 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10772 "wC12", "wC13", "wC14", "wC15"
10775 fprintf (stream, wc_reg_names [INTVAL (x)]);
10777 return;
10779 /* Print a VFP double precision register name. */
10780 case 'P':
10782 int mode = GET_MODE (x);
10783 int num;
10785 if (mode != DImode && mode != DFmode)
10787 output_operand_lossage ("invalid operand for code '%c'", code);
10788 return;
10791 if (GET_CODE (x) != REG
10792 || !IS_VFP_REGNUM (REGNO (x)))
10794 output_operand_lossage ("invalid operand for code '%c'", code);
10795 return;
10798 num = REGNO(x) - FIRST_VFP_REGNUM;
10799 if (num & 1)
10801 output_operand_lossage ("invalid operand for code '%c'", code);
10802 return;
10805 fprintf (stream, "d%d", num >> 1);
10807 return;
10809 default:
10810 if (x == 0)
10812 output_operand_lossage ("missing operand");
10813 return;
10816 switch (GET_CODE (x))
10818 case REG:
10819 asm_fprintf (stream, "%r", REGNO (x));
10820 break;
10822 case MEM:
10823 output_memory_reference_mode = GET_MODE (x);
10824 output_address (XEXP (x, 0));
10825 break;
10827 case CONST_DOUBLE:
10828 fprintf (stream, "#%s", fp_immediate_constant (x));
10829 break;
10831 default:
10832 gcc_assert (GET_CODE (x) != NEG);
10833 fputc ('#', stream);
10834 output_addr_const (stream, x);
10835 break;
10840 #ifndef AOF_ASSEMBLER
10841 /* Target hook for assembling integer objects. The ARM version needs to
10842 handle word-sized values specially. */
10843 static bool
10844 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10846 if (size == UNITS_PER_WORD && aligned_p)
10848 fputs ("\t.word\t", asm_out_file);
10849 output_addr_const (asm_out_file, x);
10851 /* Mark symbols as position independent. We only do this in the
10852 .text segment, not in the .data segment. */
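/* Illustrative output (the symbol names here are made up): a
   constant-pool or local symbol is emitted as

	.word	.LC3(GOTOFF)

   while a non-local one gets

	.word	foo(GOT)  */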
10853 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10854 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10856 if (GET_CODE (x) == SYMBOL_REF
10857 && (CONSTANT_POOL_ADDRESS_P (x)
10858 || SYMBOL_REF_LOCAL_P (x)))
10859 fputs ("(GOTOFF)", asm_out_file);
10860 else if (GET_CODE (x) == LABEL_REF)
10861 fputs ("(GOTOFF)", asm_out_file);
10862 else
10863 fputs ("(GOT)", asm_out_file);
10865 fputc ('\n', asm_out_file);
10866 return true;
10869 if (arm_vector_mode_supported_p (GET_MODE (x)))
10871 int i, units;
10873 gcc_assert (GET_CODE (x) == CONST_VECTOR);
10875 units = CONST_VECTOR_NUNITS (x);
10877 switch (GET_MODE (x))
10879 case V2SImode: size = 4; break;
10880 case V4HImode: size = 2; break;
10881 case V8QImode: size = 1; break;
10882 default:
10883 gcc_unreachable ();
10886 for (i = 0; i < units; i++)
10888 rtx elt;
10890 elt = CONST_VECTOR_ELT (x, i);
10891 assemble_integer
10892 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10895 return true;
10898 return default_assemble_integer (x, size, aligned_p);
10902 /* Add a function to the list of static constructors. */
10904 static void
10905 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10907 if (!TARGET_AAPCS_BASED)
10909 default_named_section_asm_out_constructor (symbol, priority);
10910 return;
10913 /* Put these in the .init_array section, using a special relocation. */
10914 ctors_section ();
10915 assemble_align (POINTER_SIZE);
10916 fputs ("\t.word\t", asm_out_file);
10917 output_addr_const (asm_out_file, symbol);
10918 fputs ("(target1)\n", asm_out_file);
10920 #endif
10922 /* A finite state machine takes care of noticing whether or not instructions
10923 can be conditionally executed, and thus decreases execution time and code
10924 size by deleting branch instructions. The fsm is controlled by
10925 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10927 /* The states of the fsm controlling condition codes are:
10928 0: normal, do nothing special
10929 1: make ASM_OUTPUT_OPCODE not output this instruction
10930 2: make ASM_OUTPUT_OPCODE not output this instruction
10931 3: make instructions conditional
10932 4: make instructions conditional
10934 State transitions (state->state by whom under condition):
10935 0 -> 1 final_prescan_insn if the `target' is a label
10936 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10937 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10938 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10939 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10940 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10941 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10942 (the target insn is arm_target_insn).
10944 If the jump clobbers the conditions then we use states 2 and 4.
10946 A similar thing can be done with conditional return insns.
10948 XXX In case the `target' is an unconditional branch, this conditionalising
10949 of the instructions always reduces code size, but not always execution
10950 time. But then, I want to reduce the code size to somewhere near what
10951 /bin/cc produces. */
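/* An illustrative transformation (not taken from the sources): the
   fsm lets final turn

	cmp	r0, #0
	beq	.L2
	add	r1, r1, #1
   .L2:

   into

	cmp	r0, #0
	addne	r1, r1, #1

   deleting the branch and conditionalizing the skipped insn.  */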
10953 /* Returns the index of the ARM condition code string in
10954 `arm_condition_codes'. COMPARISON should be an rtx like
10955 `(eq (...) (...))'. */
10956 static enum arm_cond_code
10957 get_arm_condition_code (rtx comparison)
10959 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10960 int code;
10961 enum rtx_code comp_code = GET_CODE (comparison);
10963 if (GET_MODE_CLASS (mode) != MODE_CC)
10964 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10965 XEXP (comparison, 1));
10967 switch (mode)
10969 case CC_DNEmode: code = ARM_NE; goto dominance;
10970 case CC_DEQmode: code = ARM_EQ; goto dominance;
10971 case CC_DGEmode: code = ARM_GE; goto dominance;
10972 case CC_DGTmode: code = ARM_GT; goto dominance;
10973 case CC_DLEmode: code = ARM_LE; goto dominance;
10974 case CC_DLTmode: code = ARM_LT; goto dominance;
10975 case CC_DGEUmode: code = ARM_CS; goto dominance;
10976 case CC_DGTUmode: code = ARM_HI; goto dominance;
10977 case CC_DLEUmode: code = ARM_LS; goto dominance;
10978 case CC_DLTUmode: code = ARM_CC;
10980 dominance:
10981 gcc_assert (comp_code == EQ || comp_code == NE);
10983 if (comp_code == EQ)
10984 return ARM_INVERSE_CONDITION_CODE (code);
10985 return code;
10987 case CC_NOOVmode:
10988 switch (comp_code)
10990 case NE: return ARM_NE;
10991 case EQ: return ARM_EQ;
10992 case GE: return ARM_PL;
10993 case LT: return ARM_MI;
10994 default: gcc_unreachable ();
10997 case CC_Zmode:
10998 switch (comp_code)
11000 case NE: return ARM_NE;
11001 case EQ: return ARM_EQ;
11002 default: gcc_unreachable ();
11005 case CC_Nmode:
11006 switch (comp_code)
11008 case NE: return ARM_MI;
11009 case EQ: return ARM_PL;
11010 default: gcc_unreachable ();
11013 case CCFPEmode:
11014 case CCFPmode:
11015 /* These encodings assume that AC=1 in the FPA system control
11016 byte. This allows us to handle all cases except UNEQ and
11017 LTGT. */
11018 switch (comp_code)
11020 case GE: return ARM_GE;
11021 case GT: return ARM_GT;
11022 case LE: return ARM_LS;
11023 case LT: return ARM_MI;
11024 case NE: return ARM_NE;
11025 case EQ: return ARM_EQ;
11026 case ORDERED: return ARM_VC;
11027 case UNORDERED: return ARM_VS;
11028 case UNLT: return ARM_LT;
11029 case UNLE: return ARM_LE;
11030 case UNGT: return ARM_HI;
11031 case UNGE: return ARM_PL;
11032 /* UNEQ and LTGT do not have a representation. */
11033 case UNEQ: /* Fall through. */
11034 case LTGT: /* Fall through. */
11035 default: gcc_unreachable ();
11038 case CC_SWPmode:
11039 switch (comp_code)
11041 case NE: return ARM_NE;
11042 case EQ: return ARM_EQ;
11043 case GE: return ARM_LE;
11044 case GT: return ARM_LT;
11045 case LE: return ARM_GE;
11046 case LT: return ARM_GT;
11047 case GEU: return ARM_LS;
11048 case GTU: return ARM_CC;
11049 case LEU: return ARM_CS;
11050 case LTU: return ARM_HI;
11051 default: gcc_unreachable ();
11054 case CC_Cmode:
11055 switch (comp_code)
11057 case LTU: return ARM_CS;
11058 case GEU: return ARM_CC;
11059 default: gcc_unreachable ();
11062 case CCmode:
11063 switch (comp_code)
11065 case NE: return ARM_NE;
11066 case EQ: return ARM_EQ;
11067 case GE: return ARM_GE;
11068 case GT: return ARM_GT;
11069 case LE: return ARM_LE;
11070 case LT: return ARM_LT;
11071 case GEU: return ARM_CS;
11072 case GTU: return ARM_HI;
11073 case LEU: return ARM_LS;
11074 case LTU: return ARM_CC;
11075 default: gcc_unreachable ();
11078 default: gcc_unreachable ();
11082 void
11083 arm_final_prescan_insn (rtx insn)
11085 /* BODY will hold the body of INSN. */
11086 rtx body = PATTERN (insn);
11088 /* This will be 1 if trying to repeat the trick, and things need to be
11089 reversed if it appears to fail. */
11090 int reverse = 0;
11092 /* If JUMP_CLOBBERS is one, the condition codes are clobbered if a branch
11093 is taken, even if the rtl suggests otherwise. It also
11094 means that we have to grub around within the jump expression to find
11095 out what the conditions are when the jump isn't taken. */
11096 int jump_clobbers = 0;
11098 /* If we start with a return insn, we only succeed if we find another one. */
11099 int seeking_return = 0;
11101 /* START_INSN will hold the insn from where we start looking. This is the
11102 first insn after the following code_label if REVERSE is true. */
11103 rtx start_insn = insn;
11105 /* If in state 4, check if the target branch is reached, in order to
11106 change back to state 0. */
11107 if (arm_ccfsm_state == 4)
11109 if (insn == arm_target_insn)
11111 arm_target_insn = NULL;
11112 arm_ccfsm_state = 0;
11114 return;
11117 /* If in state 3, it is possible to repeat the trick, if this insn is an
11118 unconditional branch to a label, and immediately following this branch
11119 is the previous target label which is only used once, and the label this
11120 branch jumps to is not too far off. */
11121 if (arm_ccfsm_state == 3)
11123 if (simplejump_p (insn))
11125 start_insn = next_nonnote_insn (start_insn);
11126 if (GET_CODE (start_insn) == BARRIER)
11128 /* XXX Isn't this always a barrier? */
11129 start_insn = next_nonnote_insn (start_insn);
11131 if (GET_CODE (start_insn) == CODE_LABEL
11132 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11133 && LABEL_NUSES (start_insn) == 1)
11134 reverse = TRUE;
11135 else
11136 return;
11138 else if (GET_CODE (body) == RETURN)
11140 start_insn = next_nonnote_insn (start_insn);
11141 if (GET_CODE (start_insn) == BARRIER)
11142 start_insn = next_nonnote_insn (start_insn);
11143 if (GET_CODE (start_insn) == CODE_LABEL
11144 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11145 && LABEL_NUSES (start_insn) == 1)
11147 reverse = TRUE;
11148 seeking_return = 1;
11150 else
11151 return;
11153 else
11154 return;
11157 gcc_assert (!arm_ccfsm_state || reverse);
11158 if (GET_CODE (insn) != JUMP_INSN)
11159 return;
11161 /* This jump might be paralleled with a clobber of the condition codes;
11162 the jump should always come first. */
11163 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11164 body = XVECEXP (body, 0, 0);
11166 if (reverse
11167 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11168 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11170 int insns_skipped;
11171 int fail = FALSE, succeed = FALSE;
11172 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11173 int then_not_else = TRUE;
11174 rtx this_insn = start_insn, label = 0;
11176 /* If the jump cannot be done with one instruction, we cannot
11177 conditionally execute the instruction in the inverse case. */
11178 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11180 jump_clobbers = 1;
11181 return;
11184 /* Register the insn jumped to. */
11185 if (reverse)
11187 if (!seeking_return)
11188 label = XEXP (SET_SRC (body), 0);
11190 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11191 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11192 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11194 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11195 then_not_else = FALSE;
11197 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11198 seeking_return = 1;
11199 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11201 seeking_return = 1;
11202 then_not_else = FALSE;
11204 else
11205 gcc_unreachable ();
11207 /* See how many insns this branch skips, and what kind of insns. If all
11208 insns are okay, and the label or unconditional branch to the same
11209 label is not too far away, succeed. */
11210 for (insns_skipped = 0;
11211 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11213 rtx scanbody;
11215 this_insn = next_nonnote_insn (this_insn);
11216 if (!this_insn)
11217 break;
11219 switch (GET_CODE (this_insn))
11221 case CODE_LABEL:
11222 /* Succeed if it is the target label, otherwise fail since
11223 control falls in from somewhere else. */
11224 if (this_insn == label)
11226 if (jump_clobbers)
11228 arm_ccfsm_state = 2;
11229 this_insn = next_nonnote_insn (this_insn);
11231 else
11232 arm_ccfsm_state = 1;
11233 succeed = TRUE;
11235 else
11236 fail = TRUE;
11237 break;
11239 case BARRIER:
11240 /* Succeed if the following insn is the target label.
11241 Otherwise fail.
11242 If return insns are used then the last insn in a function
11243 will be a barrier. */
11244 this_insn = next_nonnote_insn (this_insn);
11245 if (this_insn && this_insn == label)
11247 if (jump_clobbers)
11249 arm_ccfsm_state = 2;
11250 this_insn = next_nonnote_insn (this_insn);
11252 else
11253 arm_ccfsm_state = 1;
11254 succeed = TRUE;
11256 else
11257 fail = TRUE;
11258 break;
11260 case CALL_INSN:
11261 /* The AAPCS says that conditional calls should not be
11262 used since they make interworking inefficient (the
11263 linker can't transform BL<cond> into BLX). That's
11264 only a problem if the machine has BLX. */
11265 if (arm_arch5)
11267 fail = TRUE;
11268 break;
11271 /* Succeed if the following insn is the target label, or
11272 if the following two insns are a barrier and the
11273 target label. */
11274 this_insn = next_nonnote_insn (this_insn);
11275 if (this_insn && GET_CODE (this_insn) == BARRIER)
11276 this_insn = next_nonnote_insn (this_insn);
11278 if (this_insn && this_insn == label
11279 && insns_skipped < max_insns_skipped)
11281 if (jump_clobbers)
11283 arm_ccfsm_state = 2;
11284 this_insn = next_nonnote_insn (this_insn);
11286 else
11287 arm_ccfsm_state = 1;
11288 succeed = TRUE;
11290 else
11291 fail = TRUE;
11292 break;
11294 case JUMP_INSN:
11295 /* If this is an unconditional branch to the same label, succeed.
11296 If it is to another label, do nothing. If it is conditional,
11297 fail. */
11298 /* XXX Probably, the tests for SET and the PC are
11299 unnecessary. */
11301 scanbody = PATTERN (this_insn);
11302 if (GET_CODE (scanbody) == SET
11303 && GET_CODE (SET_DEST (scanbody)) == PC)
11305 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11306 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11308 arm_ccfsm_state = 2;
11309 succeed = TRUE;
11311 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11312 fail = TRUE;
11314 /* Fail if a conditional return is undesirable (e.g. on a
11315 StrongARM), but still allow this if optimizing for size. */
11316 else if (GET_CODE (scanbody) == RETURN
11317 && !use_return_insn (TRUE, NULL)
11318 && !optimize_size)
11319 fail = TRUE;
11320 else if (GET_CODE (scanbody) == RETURN
11321 && seeking_return)
11323 arm_ccfsm_state = 2;
11324 succeed = TRUE;
11326 else if (GET_CODE (scanbody) == PARALLEL)
11328 switch (get_attr_conds (this_insn))
11330 case CONDS_NOCOND:
11331 break;
11332 default:
11333 fail = TRUE;
11334 break;
11337 else
11338 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11340 break;
11342 case INSN:
11343 /* Instructions using or affecting the condition codes make it
11344 fail. */
11345 scanbody = PATTERN (this_insn);
11346 if (!(GET_CODE (scanbody) == SET
11347 || GET_CODE (scanbody) == PARALLEL)
11348 || get_attr_conds (this_insn) != CONDS_NOCOND)
11349 fail = TRUE;
11351 /* A conditional Cirrus instruction must be followed by
11352 a non-Cirrus instruction. However, since we
11353 conditionalize instructions in this function, and since by
11354 the time we get here we cannot add instructions
11355 (nops), because shorten_branches() has already been
11356 called, we disable conditionalizing Cirrus
11357 instructions, to be safe. */
11358 if (GET_CODE (scanbody) != USE
11359 && GET_CODE (scanbody) != CLOBBER
11360 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11361 fail = TRUE;
11362 break;
11364 default:
11365 break;
11368 if (succeed)
11370 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11371 arm_target_label = CODE_LABEL_NUMBER (label);
11372 else
11374 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11376 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11378 this_insn = next_nonnote_insn (this_insn);
11379 gcc_assert (!this_insn
11380 || (GET_CODE (this_insn) != BARRIER
11381 && GET_CODE (this_insn) != CODE_LABEL));
11383 if (!this_insn)
11385 /* Oh dear!  We ran off the end; give up. */
11386 recog (PATTERN (insn), insn, NULL);
11387 arm_ccfsm_state = 0;
11388 arm_target_insn = NULL;
11389 return;
11391 arm_target_insn = this_insn;
11393 if (jump_clobbers)
11395 gcc_assert (!reverse);
11396 arm_current_cc =
11397 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11398 0), 0), 1));
11399 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11400 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11401 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11402 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11404 else
11406 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11407 what it was. */
11408 if (!reverse)
11409 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11410 0));
11413 if (reverse || then_not_else)
11414 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11417 /* Restore recog_data (getting the attributes of other insns can
11418 destroy this array, but final.c assumes that it remains intact
11419 across this call; since the insn has been recognized already we
11420 call recog directly). */
11421 recog (PATTERN (insn), insn, NULL);
11425 /* Returns true if REGNO is a valid register
11426 for holding a quantity of type MODE. */
11427 int
11428 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11430 if (GET_MODE_CLASS (mode) == MODE_CC)
11431 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11433 if (TARGET_THUMB)
11434 /* For the Thumb we only allow values bigger than SImode in
11435 registers 0 - 6, so that there is always a second low
11436 register available to hold the upper part of the value.
11437 We probably ought to ensure that the register is the
11438 start of an even numbered register pair. */
11439 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11441 if (IS_CIRRUS_REGNUM (regno))
11442 /* We have outlawed SI values in Cirrus registers because they
11443 reside in the lower 32 bits, but SF values reside in the
11444 upper 32 bits. This causes gcc all sorts of grief. We can't
11445 even split the registers into pairs because Cirrus SI values
11446 get sign extended to 64 bits. -- aldyh. */
11447 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11449 if (IS_VFP_REGNUM (regno))
11451 if (mode == SFmode || mode == SImode)
11452 return TRUE;
11454 /* DFmode values are only valid in even register pairs. */
11455 if (mode == DFmode)
11456 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11457 return FALSE;
11460 if (IS_IWMMXT_GR_REGNUM (regno))
11461 return mode == SImode;
11463 if (IS_IWMMXT_REGNUM (regno))
11464 return VALID_IWMMXT_REG_MODE (mode);
11466 /* We allow any value to be stored in the general registers.
11467 Restrict doubleword quantities to even register pairs so that we can
11468 use ldrd. */
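/* E.g. (illustrative) with TARGET_LDRD a DImode value may live in
   {r0, r1} (even base register) but not in {r1, r2}, so ldrd/strd
   remain usable for doubleword moves.  */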
11469 if (regno <= LAST_ARM_REGNUM)
11470 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11472 if ( regno == FRAME_POINTER_REGNUM
11473 || regno == ARG_POINTER_REGNUM)
11474 /* We only allow integers in the fake hard registers. */
11475 return GET_MODE_CLASS (mode) == MODE_INT;
11477 /* The only registers left are the FPA registers
11478 which we only allow to hold FP values. */
11479 return GET_MODE_CLASS (mode) == MODE_FLOAT
11480 && regno >= FIRST_FPA_REGNUM
11481 && regno <= LAST_FPA_REGNUM;
11484 int
11485 arm_regno_class (int regno)
11487 if (TARGET_THUMB)
11489 if (regno == STACK_POINTER_REGNUM)
11490 return STACK_REG;
11491 if (regno == CC_REGNUM)
11492 return CC_REG;
11493 if (regno < 8)
11494 return LO_REGS;
11495 return HI_REGS;
11498 if ( regno <= LAST_ARM_REGNUM
11499 || regno == FRAME_POINTER_REGNUM
11500 || regno == ARG_POINTER_REGNUM)
11501 return GENERAL_REGS;
11503 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11504 return NO_REGS;
11506 if (IS_CIRRUS_REGNUM (regno))
11507 return CIRRUS_REGS;
11509 if (IS_VFP_REGNUM (regno))
11510 return VFP_REGS;
11512 if (IS_IWMMXT_REGNUM (regno))
11513 return IWMMXT_REGS;
11515 if (IS_IWMMXT_GR_REGNUM (regno))
11516 return IWMMXT_GR_REGS;
11518 return FPA_REGS;
11521 /* Handle a special case when computing the offset
11522 of an argument from the frame pointer. */
11523 int
11524 arm_debugger_arg_offset (int value, rtx addr)
11526 rtx insn;
11528 /* We are only interested if dbxout_parms() failed to compute the offset. */
11529 if (value != 0)
11530 return 0;
11532 /* We can only cope with the case where the address is held in a register. */
11533 if (GET_CODE (addr) != REG)
11534 return 0;
11536 /* If we are using the frame pointer to point at the argument, then
11537 an offset of 0 is correct. */
11538 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11539 return 0;
11541 /* If we are using the stack pointer to point at the
11542 argument, then an offset of 0 is correct. */
11543 if ((TARGET_THUMB || !frame_pointer_needed)
11544 && REGNO (addr) == SP_REGNUM)
11545 return 0;
11547 /* Oh dear. The argument is pointed to by a register rather
11548 than being held in a register, or being stored at a known
11549 offset from the frame pointer. Since GDB only understands
11550 those two kinds of argument we must translate the address
11551 held in the register into an offset from the frame pointer.
11552 We do this by searching through the insns for the function
11553 looking to see where this register gets its value. If the
11554 register is initialized from the frame pointer plus an offset
11555 then we are in luck and we can continue, otherwise we give up.
11557 This code is exercised by producing debugging information
11558 for a function with arguments like this:
11560 double func (double a, double b, int c, double d) {return d;}
11562 Without this code the stab for parameter 'd' will be set to
11563 an offset of 0 from the frame pointer, rather than 8. */
11565 /* The if() statement says:
11567 If the insn is a normal instruction
11568 and if the insn is setting the value in a register
11569 and if the register being set is the register holding the address of the argument
11570 and if the address is computed by an addition
11571 that involves adding to a register
11572 which is the frame pointer
11573 a constant integer
11575 then... */
11577 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11579 if ( GET_CODE (insn) == INSN
11580 && GET_CODE (PATTERN (insn)) == SET
11581 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11582 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11583 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11584 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11585 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11588 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11590 break;
11594 if (value == 0)
11596 debug_rtx (addr);
11597 warning (0, "unable to compute real location of stacked parameter");
11598 value = 8; /* XXX magic hack */
11601 return value;
11604 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11605 do \
11607 if ((MASK) & insn_flags) \
11608 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11609 BUILT_IN_MD, NULL, NULL_TREE); \
11611 while (0)
11613 struct builtin_description
11615 const unsigned int mask;
11616 const enum insn_code icode;
11617 const char * const name;
11618 const enum arm_builtins code;
11619 const enum rtx_code comparison;
11620 const unsigned int flag;
11623 static const struct builtin_description bdesc_2arg[] =
11625 #define IWMMXT_BUILTIN(code, string, builtin) \
11626 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11627 ARM_BUILTIN_##builtin, 0, 0 },
11629 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11630 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11631 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11632 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11633 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11634 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11635 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11636 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11637 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11638 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11639 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11640 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11641 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11642 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11643 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11644 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11645 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11646 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11647 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11648 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11649 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11650 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11651 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11652 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11653 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11654 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11655 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11656 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11657 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11658 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11659 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11660 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11661 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11662 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11663 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11664 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11665 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11666 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11667 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11668 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11669 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11670 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11671 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11672 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11673 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11674 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11675 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11676 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11677 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11678 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11679 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11680 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11681 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11682 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11683 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11684 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11685 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11686 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11688 #define IWMMXT_BUILTIN2(code, builtin) \
11689 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11691 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11692 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11693 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11694 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11695 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11696 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11697 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11698 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11699 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11700 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11701 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11702 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11703 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11704 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11705 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11706 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11707 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11708 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11709 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11710 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11711 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11712 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11713 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11714 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11715 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11716 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11717 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11718 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11719 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11720 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11721 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11722 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11725 static const struct builtin_description bdesc_1arg[] =
11727 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11728 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11729 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11730 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11731 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11732 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11733 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11734 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11735 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11736 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11737 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11738 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11739 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11740 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11741 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11742 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11743 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11744 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11747 /* Set up all the iWMMXt builtins. This is
11748 not called if TARGET_IWMMXT is zero. */
11750 static void
11751 arm_init_iwmmxt_builtins (void)
11753 const struct builtin_description * d;
11754 size_t i;
11755 tree endlink = void_list_node;
11757 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11758 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11759 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11761 tree int_ftype_int
11762 = build_function_type (integer_type_node,
11763 tree_cons (NULL_TREE, integer_type_node, endlink));
11764 tree v8qi_ftype_v8qi_v8qi_int
11765 = build_function_type (V8QI_type_node,
11766 tree_cons (NULL_TREE, V8QI_type_node,
11767 tree_cons (NULL_TREE, V8QI_type_node,
11768 tree_cons (NULL_TREE,
11769 integer_type_node,
11770 endlink))));
11771 tree v4hi_ftype_v4hi_int
11772 = build_function_type (V4HI_type_node,
11773 tree_cons (NULL_TREE, V4HI_type_node,
11774 tree_cons (NULL_TREE, integer_type_node,
11775 endlink)));
11776 tree v2si_ftype_v2si_int
11777 = build_function_type (V2SI_type_node,
11778 tree_cons (NULL_TREE, V2SI_type_node,
11779 tree_cons (NULL_TREE, integer_type_node,
11780 endlink)));
11781 tree v2si_ftype_di_di
11782 = build_function_type (V2SI_type_node,
11783 tree_cons (NULL_TREE, long_long_integer_type_node,
11784 tree_cons (NULL_TREE, long_long_integer_type_node,
11785 endlink)));
11786 tree di_ftype_di_int
11787 = build_function_type (long_long_integer_type_node,
11788 tree_cons (NULL_TREE, long_long_integer_type_node,
11789 tree_cons (NULL_TREE, integer_type_node,
11790 endlink)));
11791 tree di_ftype_di_int_int
11792 = build_function_type (long_long_integer_type_node,
11793 tree_cons (NULL_TREE, long_long_integer_type_node,
11794 tree_cons (NULL_TREE, integer_type_node,
11795 tree_cons (NULL_TREE,
11796 integer_type_node,
11797 endlink))));
11798 tree int_ftype_v8qi
11799 = build_function_type (integer_type_node,
11800 tree_cons (NULL_TREE, V8QI_type_node,
11801 endlink));
11802 tree int_ftype_v4hi
11803 = build_function_type (integer_type_node,
11804 tree_cons (NULL_TREE, V4HI_type_node,
11805 endlink));
11806 tree int_ftype_v2si
11807 = build_function_type (integer_type_node,
11808 tree_cons (NULL_TREE, V2SI_type_node,
11809 endlink));
11810 tree int_ftype_v8qi_int
11811 = build_function_type (integer_type_node,
11812 tree_cons (NULL_TREE, V8QI_type_node,
11813 tree_cons (NULL_TREE, integer_type_node,
11814 endlink)));
11815 tree int_ftype_v4hi_int
11816 = build_function_type (integer_type_node,
11817 tree_cons (NULL_TREE, V4HI_type_node,
11818 tree_cons (NULL_TREE, integer_type_node,
11819 endlink)));
11820 tree int_ftype_v2si_int
11821 = build_function_type (integer_type_node,
11822 tree_cons (NULL_TREE, V2SI_type_node,
11823 tree_cons (NULL_TREE, integer_type_node,
11824 endlink)));
11825 tree v8qi_ftype_v8qi_int_int
11826 = build_function_type (V8QI_type_node,
11827 tree_cons (NULL_TREE, V8QI_type_node,
11828 tree_cons (NULL_TREE, integer_type_node,
11829 tree_cons (NULL_TREE,
11830 integer_type_node,
11831 endlink))));
11832 tree v4hi_ftype_v4hi_int_int
11833 = build_function_type (V4HI_type_node,
11834 tree_cons (NULL_TREE, V4HI_type_node,
11835 tree_cons (NULL_TREE, integer_type_node,
11836 tree_cons (NULL_TREE,
11837 integer_type_node,
11838 endlink))));
11839 tree v2si_ftype_v2si_int_int
11840 = build_function_type (V2SI_type_node,
11841 tree_cons (NULL_TREE, V2SI_type_node,
11842 tree_cons (NULL_TREE, integer_type_node,
11843 tree_cons (NULL_TREE,
11844 integer_type_node,
11845 endlink))));
11846 /* Miscellaneous. */
11847 tree v8qi_ftype_v4hi_v4hi
11848 = build_function_type (V8QI_type_node,
11849 tree_cons (NULL_TREE, V4HI_type_node,
11850 tree_cons (NULL_TREE, V4HI_type_node,
11851 endlink)));
11852 tree v4hi_ftype_v2si_v2si
11853 = build_function_type (V4HI_type_node,
11854 tree_cons (NULL_TREE, V2SI_type_node,
11855 tree_cons (NULL_TREE, V2SI_type_node,
11856 endlink)));
11857 tree v2si_ftype_v4hi_v4hi
11858 = build_function_type (V2SI_type_node,
11859 tree_cons (NULL_TREE, V4HI_type_node,
11860 tree_cons (NULL_TREE, V4HI_type_node,
11861 endlink)));
11862 tree v2si_ftype_v8qi_v8qi
11863 = build_function_type (V2SI_type_node,
11864 tree_cons (NULL_TREE, V8QI_type_node,
11865 tree_cons (NULL_TREE, V8QI_type_node,
11866 endlink)));
11867 tree v4hi_ftype_v4hi_di
11868 = build_function_type (V4HI_type_node,
11869 tree_cons (NULL_TREE, V4HI_type_node,
11870 tree_cons (NULL_TREE,
11871 long_long_integer_type_node,
11872 endlink)));
11873 tree v2si_ftype_v2si_di
11874 = build_function_type (V2SI_type_node,
11875 tree_cons (NULL_TREE, V2SI_type_node,
11876 tree_cons (NULL_TREE,
11877 long_long_integer_type_node,
11878 endlink)));
11879 tree void_ftype_int_int
11880 = build_function_type (void_type_node,
11881 tree_cons (NULL_TREE, integer_type_node,
11882 tree_cons (NULL_TREE, integer_type_node,
11883 endlink)));
11884 tree di_ftype_void
11885 = build_function_type (long_long_unsigned_type_node, endlink);
11886 tree di_ftype_v8qi
11887 = build_function_type (long_long_integer_type_node,
11888 tree_cons (NULL_TREE, V8QI_type_node,
11889 endlink));
11890 tree di_ftype_v4hi
11891 = build_function_type (long_long_integer_type_node,
11892 tree_cons (NULL_TREE, V4HI_type_node,
11893 endlink));
11894 tree di_ftype_v2si
11895 = build_function_type (long_long_integer_type_node,
11896 tree_cons (NULL_TREE, V2SI_type_node,
11897 endlink));
11898 tree v2si_ftype_v4hi
11899 = build_function_type (V2SI_type_node,
11900 tree_cons (NULL_TREE, V4HI_type_node,
11901 endlink));
11902 tree v4hi_ftype_v8qi
11903 = build_function_type (V4HI_type_node,
11904 tree_cons (NULL_TREE, V8QI_type_node,
11905 endlink));
11907 tree di_ftype_di_v4hi_v4hi
11908 = build_function_type (long_long_unsigned_type_node,
11909 tree_cons (NULL_TREE,
11910 long_long_unsigned_type_node,
11911 tree_cons (NULL_TREE, V4HI_type_node,
11912 tree_cons (NULL_TREE,
11913 V4HI_type_node,
11914 endlink))));
11916 tree di_ftype_v4hi_v4hi
11917 = build_function_type (long_long_unsigned_type_node,
11918 tree_cons (NULL_TREE, V4HI_type_node,
11919 tree_cons (NULL_TREE, V4HI_type_node,
11920 endlink)));
11922 /* Normal vector binops. */
11923 tree v8qi_ftype_v8qi_v8qi
11924 = build_function_type (V8QI_type_node,
11925 tree_cons (NULL_TREE, V8QI_type_node,
11926 tree_cons (NULL_TREE, V8QI_type_node,
11927 endlink)));
11928 tree v4hi_ftype_v4hi_v4hi
11929 = build_function_type (V4HI_type_node,
11930 tree_cons (NULL_TREE, V4HI_type_node,
11931 tree_cons (NULL_TREE, V4HI_type_node,
11932 endlink)));
11933 tree v2si_ftype_v2si_v2si
11934 = build_function_type (V2SI_type_node,
11935 tree_cons (NULL_TREE, V2SI_type_node,
11936 tree_cons (NULL_TREE, V2SI_type_node,
11937 endlink)));
11938 tree di_ftype_di_di
11939 = build_function_type (long_long_unsigned_type_node,
11940 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11941 tree_cons (NULL_TREE,
11942 long_long_unsigned_type_node,
11943 endlink)));
11945 /* Add all builtins that are more or less simple operations on two
11946 operands. */
11947 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11949 /* Use one of the operands; the target can have a different mode for
11950 mask-generating compares. */
11951 enum machine_mode mode;
11952 tree type;
11954 if (d->name == 0)
11955 continue;
11957 mode = insn_data[d->icode].operand[1].mode;
11959 switch (mode)
11961 case V8QImode:
11962 type = v8qi_ftype_v8qi_v8qi;
11963 break;
11964 case V4HImode:
11965 type = v4hi_ftype_v4hi_v4hi;
11966 break;
11967 case V2SImode:
11968 type = v2si_ftype_v2si_v2si;
11969 break;
11970 case DImode:
11971 type = di_ftype_di_di;
11972 break;
11974 default:
11975 gcc_unreachable ();
11978 def_mbuiltin (d->mask, d->name, type, d->code);
11981 /* Add the remaining MMX insns with somewhat more complicated types. */
11982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11990 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11991 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11995 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11997 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12002 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12005 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12009 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12010 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12011 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12016 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12019 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12022 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12023 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12024 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12025 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12026 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12027 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12028 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12029 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12031 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12032 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12033 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12035 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12036 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12037 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12039 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12040 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12041 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12042 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12043 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12044 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12046 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12047 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12048 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12049 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12050 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12051 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12052 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12053 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12054 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12055 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12056 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12057 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12059 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12060 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12061 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12062 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12064 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12065 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12066 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12067 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12068 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12069 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12070 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
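/* For illustration only (not part of the original source): once
   registered, the builtins above are directly callable from user
   code compiled with iWMMXt enabled.  A minimal sketch, assuming the
   usual GCC vector-extension typedef:

	typedef short v4hi __attribute__ ((vector_size (8)));

	v4hi
	shift_halfwords_left (v4hi x)
	{
	  return __builtin_arm_wsllhi (x, 2);	(v4hi_ftype_v4hi_int)
	}

   Expansion of such calls is handled by arm_expand_builtin and its
   helpers below.  */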
12073 static void
12074 arm_init_builtins (void)
12076 if (TARGET_REALLY_IWMMXT)
12077 arm_init_iwmmxt_builtins ();
12080 /* Errors in the source file can cause expand_expr to return const0_rtx
12081 where we expect a vector. To avoid crashing, use one of the vector
12082 clear instructions. */
12084 static rtx
12085 safe_vector_operand (rtx x, enum machine_mode mode)
12087 if (x != const0_rtx)
12088 return x;
12089 x = gen_reg_rtx (mode);
12091 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12092 : gen_rtx_SUBREG (DImode, x, 0)));
12093 return x;
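/* A worked example of the guard above (illustrative): calling
   safe_vector_operand (const0_rtx, V4HImode) returns a fresh V4HImode
   pseudo that has been cleared through the DImode SUBREG arm of the
   gen_iwmmxt_clrdi call, so the expanders below never hand const0_rtx
   to an insn predicate that expects a vector register.  */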
12096 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12098 static rtx
12099 arm_expand_binop_builtin (enum insn_code icode,
12100 tree arglist, rtx target)
12102 rtx pat;
12103 tree arg0 = TREE_VALUE (arglist);
12104 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12105 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12106 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12107 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12108 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12109 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12111 if (VECTOR_MODE_P (mode0))
12112 op0 = safe_vector_operand (op0, mode0);
12113 if (VECTOR_MODE_P (mode1))
12114 op1 = safe_vector_operand (op1, mode1);
12116 if (! target
12117 || GET_MODE (target) != tmode
12118 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12119 target = gen_reg_rtx (tmode);
12121 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12123 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12124 op0 = copy_to_mode_reg (mode0, op0);
12125 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12126 op1 = copy_to_mode_reg (mode1, op1);
12128 pat = GEN_FCN (icode) (target, op0, op1);
12129 if (! pat)
12130 return 0;
12131 emit_insn (pat);
12132 return target;
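/* Illustrative call, mirroring the WSADB case in arm_expand_builtin
   below (no new functionality):

	return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb,
					 arglist, target);

   The helper routes const0_rtx vector operands through
   safe_vector_operand, copies operands into registers when the insn
   predicates reject them, and returns 0 if the generator yields no
   pattern.  */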
12135 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12137 static rtx
12138 arm_expand_unop_builtin (enum insn_code icode,
12139 tree arglist, rtx target, int do_load)
12141 rtx pat;
12142 tree arg0 = TREE_VALUE (arglist);
12143 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12144 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12145 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12147 if (! target
12148 || GET_MODE (target) != tmode
12149 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12150 target = gen_reg_rtx (tmode);
12151 if (do_load)
12152 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12153 else
12155 if (VECTOR_MODE_P (mode0))
12156 op0 = safe_vector_operand (op0, mode0);
12158 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12159 op0 = copy_to_mode_reg (mode0, op0);
12162 pat = GEN_FCN (icode) (target, op0);
12163 if (! pat)
12164 return 0;
12165 emit_insn (pat);
12166 return target;
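/* Note on DO_LOAD (a summary of the code above, not new behavior):
   when nonzero, the single operand is treated as an address; it is
   forced into a Pmode register and wrapped in a MEM of the insn's
   operand mode.  The bdesc_1arg fallthrough at the end of
   arm_expand_builtin always passes DO_LOAD == 0.  */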
12169 /* Expand an expression EXP that calls a built-in function,
12170 with result going to TARGET if that's convenient
12171 (and in mode MODE if that's convenient).
12172 SUBTARGET may be used as the target for computing one of EXP's operands.
12173 IGNORE is nonzero if the value is to be ignored. */
12175 static rtx
12176 arm_expand_builtin (tree exp,
12177 rtx target,
12178 rtx subtarget ATTRIBUTE_UNUSED,
12179 enum machine_mode mode ATTRIBUTE_UNUSED,
12180 int ignore ATTRIBUTE_UNUSED)
12182 const struct builtin_description * d;
12183 enum insn_code icode;
12184 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12185 tree arglist = TREE_OPERAND (exp, 1);
12186 tree arg0;
12187 tree arg1;
12188 tree arg2;
12189 rtx op0;
12190 rtx op1;
12191 rtx op2;
12192 rtx pat;
12193 int fcode = DECL_FUNCTION_CODE (fndecl);
12194 size_t i;
12195 enum machine_mode tmode;
12196 enum machine_mode mode0;
12197 enum machine_mode mode1;
12198 enum machine_mode mode2;
12200 switch (fcode)
12202 case ARM_BUILTIN_TEXTRMSB:
12203 case ARM_BUILTIN_TEXTRMUB:
12204 case ARM_BUILTIN_TEXTRMSH:
12205 case ARM_BUILTIN_TEXTRMUH:
12206 case ARM_BUILTIN_TEXTRMSW:
12207 case ARM_BUILTIN_TEXTRMUW:
12208 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12209 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12210 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12211 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12212 : CODE_FOR_iwmmxt_textrmw);
12214 arg0 = TREE_VALUE (arglist);
12215 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12216 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12217 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12218 tmode = insn_data[icode].operand[0].mode;
12219 mode0 = insn_data[icode].operand[1].mode;
12220 mode1 = insn_data[icode].operand[2].mode;
12222 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12223 op0 = copy_to_mode_reg (mode0, op0);
12224 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12226 /* @@@ better error message */
12227 error ("selector must be an immediate");
12228 return gen_reg_rtx (tmode);
12230 if (target == 0
12231 || GET_MODE (target) != tmode
12232 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12233 target = gen_reg_rtx (tmode);
12234 pat = GEN_FCN (icode) (target, op0, op1);
12235 if (! pat)
12236 return 0;
12237 emit_insn (pat);
12238 return target;
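/* Example of the immediate requirement above (illustrative, with an
   assumed vector typedef):

	typedef signed char v8qi __attribute__ ((vector_size (8)));

	int extract3 (v8qi v)
	{
	  return __builtin_arm_textrmsb (v, 3);
	}

   extracts (sign-extended) byte 3, whereas a non-constant selector
   reaches the error path, since operand 2 of the textrm patterns
   only matches a constant.  */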
12240 case ARM_BUILTIN_TINSRB:
12241 case ARM_BUILTIN_TINSRH:
12242 case ARM_BUILTIN_TINSRW:
12243 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12244 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12245 : CODE_FOR_iwmmxt_tinsrw);
12246 arg0 = TREE_VALUE (arglist);
12247 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12248 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12249 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12250 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12251 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12252 tmode = insn_data[icode].operand[0].mode;
12253 mode0 = insn_data[icode].operand[1].mode;
12254 mode1 = insn_data[icode].operand[2].mode;
12255 mode2 = insn_data[icode].operand[3].mode;
12257 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12258 op0 = copy_to_mode_reg (mode0, op0);
12259 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12260 op1 = copy_to_mode_reg (mode1, op1);
12261 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12263 /* @@@ better error message */
12264 error ("selector must be an immediate");
12265 return const0_rtx;
12267 if (target == 0
12268 || GET_MODE (target) != tmode
12269 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12270 target = gen_reg_rtx (tmode);
12271 pat = GEN_FCN (icode) (target, op0, op1, op2);
12272 if (! pat)
12273 return 0;
12274 emit_insn (pat);
12275 return target;
12277 case ARM_BUILTIN_SETWCX:
12278 arg0 = TREE_VALUE (arglist);
12279 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12280 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12281 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12282 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12283 return 0;
12285 case ARM_BUILTIN_GETWCX:
12286 arg0 = TREE_VALUE (arglist);
12287 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12288 target = gen_reg_rtx (SImode);
12289 emit_insn (gen_iwmmxt_tmrc (target, op0));
12290 return target;
12292 case ARM_BUILTIN_WSHUFH:
12293 icode = CODE_FOR_iwmmxt_wshufh;
12294 arg0 = TREE_VALUE (arglist);
12295 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12296 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12297 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12298 tmode = insn_data[icode].operand[0].mode;
12299 mode1 = insn_data[icode].operand[1].mode;
12300 mode2 = insn_data[icode].operand[2].mode;
12302 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12303 op0 = copy_to_mode_reg (mode1, op0);
12304 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12306 /* @@@ better error message */
12307 error ("mask must be an immediate");
12308 return const0_rtx;
12310 if (target == 0
12311 || GET_MODE (target) != tmode
12312 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12313 target = gen_reg_rtx (tmode);
12314 pat = GEN_FCN (icode) (target, op0, op1);
12315 if (! pat)
12316 return 0;
12317 emit_insn (pat);
12318 return target;
12320 case ARM_BUILTIN_WSADB:
12321 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12322 case ARM_BUILTIN_WSADH:
12323 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12324 case ARM_BUILTIN_WSADBZ:
12325 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12326 case ARM_BUILTIN_WSADHZ:
12327 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12329 /* Several three-argument builtins. */
12330 case ARM_BUILTIN_WMACS:
12331 case ARM_BUILTIN_WMACU:
12332 case ARM_BUILTIN_WALIGN:
12333 case ARM_BUILTIN_TMIA:
12334 case ARM_BUILTIN_TMIAPH:
12335 case ARM_BUILTIN_TMIATT:
12336 case ARM_BUILTIN_TMIATB:
12337 case ARM_BUILTIN_TMIABT:
12338 case ARM_BUILTIN_TMIABB:
12339 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12340 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12341 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12342 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12343 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12344 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12345 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12346 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12347 : CODE_FOR_iwmmxt_walign);
12348 arg0 = TREE_VALUE (arglist);
12349 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12350 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12351 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12352 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12353 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12354 tmode = insn_data[icode].operand[0].mode;
12355 mode0 = insn_data[icode].operand[1].mode;
12356 mode1 = insn_data[icode].operand[2].mode;
12357 mode2 = insn_data[icode].operand[3].mode;
12359 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12360 op0 = copy_to_mode_reg (mode0, op0);
12361 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12362 op1 = copy_to_mode_reg (mode1, op1);
12363 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12364 op2 = copy_to_mode_reg (mode2, op2);
12365 if (target == 0
12366 || GET_MODE (target) != tmode
12367 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12368 target = gen_reg_rtx (tmode);
12369 pat = GEN_FCN (icode) (target, op0, op1, op2);
12370 if (! pat)
12371 return 0;
12372 emit_insn (pat);
12373 return target;
12375 case ARM_BUILTIN_WZERO:
12376 target = gen_reg_rtx (DImode);
12377 emit_insn (gen_iwmmxt_clrdi (target));
12378 return target;
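/* Illustrative use (not from the original source):

	unsigned long long z = __builtin_arm_wzero ();

   clears a full 64-bit iWMMXt register via the clrdi pattern, as
   expanded directly above.  */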
12380 default:
12381 break;
12384 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12385 if (d->code == (const enum arm_builtins) fcode)
12386 return arm_expand_binop_builtin (d->icode, arglist, target);
12388 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12389 if (d->code == (const enum arm_builtins) fcode)
12390 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12392 /* @@@ Should really do something sensible here. */
12393 return NULL_RTX;
12396 /* Return the number (counting from 0) of
12397 the least significant set bit in MASK. */
12399 inline static int
12400 number_of_first_bit_set (unsigned mask)
12402 int bit;
12404 for (bit = 0;
12405 (mask & (1 << bit)) == 0;
12406 ++bit)
12407 continue;
12409 return bit;
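/* For example, number_of_first_bit_set (0x28) == 3 and
   number_of_first_bit_set (1) == 0.  Callers must guarantee that MASK
   is nonzero; a zero mask would send the loop above off the end of
   the word.  */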
12412 /* Emit code to push or pop registers to or from the stack. F is the
12413 assembly file. MASK is the registers to push or pop. PUSH is
12414 nonzero if we should push, and zero if we should pop. For debugging
12415 output, if pushing, adjust CFA_OFFSET by the amount of space added
12416 to the stack. REAL_REGS should have the same number of bits set as
12417 MASK, and will be used instead (in the same order) to describe which
12418 registers were saved - this is used to mark the save slots when we
12419 push high registers after moving them to low registers. */
12420 static void
12421 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12422 unsigned long real_regs)
12424 int regno;
12425 int lo_mask = mask & 0xFF;
12426 int pushed_words = 0;
12428 gcc_assert (mask);
12430 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12432 /* Special case.  Do not generate a POP PC statement here; do it in
12433 thumb_exit().  */
12434 thumb_exit (f, -1);
12435 return;
12438 if (ARM_EABI_UNWIND_TABLES && push)
12440 fprintf (f, "\t.save\t{");
12441 for (regno = 0; regno < 15; regno++)
12443 if (real_regs & (1 << regno))
12445 if (real_regs & ((1 << regno) -1))
12446 fprintf (f, ", ");
12447 asm_fprintf (f, "%r", regno);
12450 fprintf (f, "}\n");
12453 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12455 /* Look at the low registers first. */
12456 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12458 if (lo_mask & 1)
12460 asm_fprintf (f, "%r", regno);
12462 if ((lo_mask & ~1) != 0)
12463 fprintf (f, ", ");
12465 pushed_words++;
12469 if (push && (mask & (1 << LR_REGNUM)))
12471 /* Catch pushing the LR. */
12472 if (mask & 0xFF)
12473 fprintf (f, ", ");
12475 asm_fprintf (f, "%r", LR_REGNUM);
12477 pushed_words++;
12479 else if (!push && (mask & (1 << PC_REGNUM)))
12481 /* Catch popping the PC. */
12482 if (TARGET_INTERWORK || TARGET_BACKTRACE
12483 || current_function_calls_eh_return)
12486 /* The PC is never popped directly; instead
12487 it is popped into r3 and then BX is used. */
12487 fprintf (f, "}\n");
12489 thumb_exit (f, -1);
12491 return;
12493 else
12495 if (mask & 0xFF)
12496 fprintf (f, ", ");
12498 asm_fprintf (f, "%r", PC_REGNUM);
12502 fprintf (f, "}\n");
12504 if (push && pushed_words && dwarf2out_do_frame ())
12506 char *l = dwarf2out_cfi_label ();
12507 int pushed_mask = real_regs;
12509 *cfa_offset += pushed_words * 4;
12510 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12512 pushed_words = 0;
12513 pushed_mask = real_regs;
12514 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12516 if (pushed_mask & 1)
12517 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
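/* Example of the output (illustrative): thumb_pushpop (f, 0x90, 1,
   &offset, 0x90) emits

	push	{r4, r7}

   preceded by ".save {r4, r7}" when ARM_EABI_UNWIND_TABLES is set,
   and advances *offset by 8 for the two words pushed.  */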
12522 /* Generate code to return from a thumb function.
12523 If 'reg_containing_return_addr' is -1, then the return address is
12524 actually on the stack, at the stack pointer. */
12525 static void
12526 thumb_exit (FILE *f, int reg_containing_return_addr)
12528 unsigned regs_available_for_popping;
12529 unsigned regs_to_pop;
12530 int pops_needed;
12531 unsigned available;
12532 unsigned required;
12533 int mode;
12534 int size;
12535 int restore_a4 = FALSE;
12537 /* Compute the registers we need to pop. */
12538 regs_to_pop = 0;
12539 pops_needed = 0;
12541 if (reg_containing_return_addr == -1)
12543 regs_to_pop |= 1 << LR_REGNUM;
12544 ++pops_needed;
12547 if (TARGET_BACKTRACE)
12549 /* Restore the (ARM) frame pointer and stack pointer. */
12550 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12551 pops_needed += 2;
12554 /* If there is nothing to pop then just emit the BX instruction and
12555 return. */
12556 if (pops_needed == 0)
12558 if (current_function_calls_eh_return)
12559 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12561 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12562 return;
12564 /* Otherwise if we are not supporting interworking and we have not created
12565 a backtrace structure and the function was not entered in ARM mode then
12566 just pop the return address straight into the PC. */
12567 else if (!TARGET_INTERWORK
12568 && !TARGET_BACKTRACE
12569 && !is_called_in_ARM_mode (current_function_decl)
12570 && !current_function_calls_eh_return)
12572 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12573 return;
12576 /* Find out how many of the (return) argument registers we can corrupt. */
12577 regs_available_for_popping = 0;
12579 /* If returning via __builtin_eh_return, the bottom three registers
12580 all contain information needed for the return. */
12581 if (current_function_calls_eh_return)
12582 size = 12;
12583 else
12585 /* We can deduce the registers used from the function's
12586 return value.  This is more reliable than examining
12587 regs_ever_live[] because that will be set if the register is
12588 ever used in the function, not just if the register is used
12589 to hold a return value. */
12591 if (current_function_return_rtx != 0)
12592 mode = GET_MODE (current_function_return_rtx);
12593 else
12594 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12596 size = GET_MODE_SIZE (mode);
12598 if (size == 0)
12600 /* In a void function we can use any argument register.
12601 In a function that returns a structure on the stack
12602 we can use the second and third argument registers. */
12603 if (mode == VOIDmode)
12604 regs_available_for_popping =
12605 (1 << ARG_REGISTER (1))
12606 | (1 << ARG_REGISTER (2))
12607 | (1 << ARG_REGISTER (3));
12608 else
12609 regs_available_for_popping =
12610 (1 << ARG_REGISTER (2))
12611 | (1 << ARG_REGISTER (3));
12613 else if (size <= 4)
12614 regs_available_for_popping =
12615 (1 << ARG_REGISTER (2))
12616 | (1 << ARG_REGISTER (3));
12617 else if (size <= 8)
12618 regs_available_for_popping =
12619 (1 << ARG_REGISTER (3));
12622 /* Match registers to be popped with registers into which we pop them. */
12623 for (available = regs_available_for_popping,
12624 required = regs_to_pop;
12625 required != 0 && available != 0;
12626 available &= ~(available & - available),
12627 required &= ~(required & - required))
12628 -- pops_needed;
12630 /* If we have any popping registers left over, remove them. */
12631 if (available > 0)
12632 regs_available_for_popping &= ~available;
12634 /* Otherwise if we need another popping register we can use
12635 the fourth argument register. */
12636 else if (pops_needed)
12638 /* If we have not found any free argument registers and
12639 reg a4 contains the return address, we must move it. */
12640 if (regs_available_for_popping == 0
12641 && reg_containing_return_addr == LAST_ARG_REGNUM)
12643 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12644 reg_containing_return_addr = LR_REGNUM;
12646 else if (size > 12)
12648 /* Register a4 is being used to hold part of the return value,
12649 but we have dire need of a free, low register. */
12650 restore_a4 = TRUE;
12652 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12655 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12657 /* The fourth argument register is available. */
12658 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12660 --pops_needed;
12664 /* Pop as many registers as we can. */
12665 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12666 regs_available_for_popping);
12668 /* Process the registers we popped. */
12669 if (reg_containing_return_addr == -1)
12671 /* The return address was popped into the lowest numbered register. */
12672 regs_to_pop &= ~(1 << LR_REGNUM);
12674 reg_containing_return_addr =
12675 number_of_first_bit_set (regs_available_for_popping);
12677 /* Remove this register from the mask of available registers, so that
12678 the return address will not be corrupted by further pops. */
12679 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12682 /* If we popped other registers then handle them here. */
12683 if (regs_available_for_popping)
12685 int frame_pointer;
12687 /* Work out which register currently contains the frame pointer. */
12688 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12690 /* Move it into the correct place. */
12691 asm_fprintf (f, "\tmov\t%r, %r\n",
12692 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12694 /* (Temporarily) remove it from the mask of popped registers. */
12695 regs_available_for_popping &= ~(1 << frame_pointer);
12696 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12698 if (regs_available_for_popping)
12700 int stack_pointer;
12702 /* We popped the stack pointer as well,
12703 find the register that contains it. */
12704 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12706 /* Move it into the stack register. */
12707 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12709 /* At this point we have popped all necessary registers, so
12710 do not worry about restoring regs_available_for_popping
12711 to its correct value:
12713 assert (pops_needed == 0)
12714 assert (regs_available_for_popping == (1 << frame_pointer))
12715 assert (regs_to_pop == (1 << STACK_POINTER)) */
12717 else
12719 /* Since we have just moved the popped value into the frame
12720 pointer, the popping register is available for reuse, and
12721 we know that we still have the stack pointer left to pop. */
12722 regs_available_for_popping |= (1 << frame_pointer);
12726 /* If we still have registers left on the stack, but we no longer have
12727 any registers into which we can pop them, then we must move the return
12728 address into the link register and make available the register that
12729 contained it. */
12730 if (regs_available_for_popping == 0 && pops_needed > 0)
12732 regs_available_for_popping |= 1 << reg_containing_return_addr;
12734 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12735 reg_containing_return_addr);
12737 reg_containing_return_addr = LR_REGNUM;
12740 /* If we have registers left on the stack then pop some more.
12741 We know that at most we will want to pop FP and SP. */
12742 if (pops_needed > 0)
12744 int popped_into;
12745 int move_to;
12747 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12748 regs_available_for_popping);
12750 /* We have popped either FP or SP.
12751 Move whichever one it is into the correct register. */
12752 popped_into = number_of_first_bit_set (regs_available_for_popping);
12753 move_to = number_of_first_bit_set (regs_to_pop);
12755 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12757 regs_to_pop &= ~(1 << move_to);
12759 --pops_needed;
12762 /* If we still have not popped everything then we must have only
12763 had one register available to us and we are now popping the SP. */
12764 if (pops_needed > 0)
12766 int popped_into;
12768 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12769 regs_available_for_popping);
12771 popped_into = number_of_first_bit_set (regs_available_for_popping);
12773 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12775 /* assert (regs_to_pop == (1 << STACK_POINTER))
12776 assert (pops_needed == 1) */
12780 /* If necessary restore the a4 register. */
12781 if (restore_a4)
12783 if (reg_containing_return_addr != LR_REGNUM)
12785 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12786 reg_containing_return_addr = LR_REGNUM;
12789 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12792 if (current_function_calls_eh_return)
12793 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12795 /* Return to caller. */
12796 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
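/* In the simplest case (return address on the stack, no
   interworking, no backtrace structure, not entered in ARM mode and
   no eh_return), the early exit above boils the whole epilogue down
   to a single

	pop	{pc}  */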
12800 void
12801 thumb_final_prescan_insn (rtx insn)
12803 if (flag_print_asm_name)
12804 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12805 INSN_ADDRESSES (INSN_UID (insn)));
12808 int
12809 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12811 unsigned HOST_WIDE_INT mask = 0xff;
12812 int i;
12814 if (val == 0) /* XXX */
12815 return 0;
12817 for (i = 0; i < 25; i++)
12818 if ((val & (mask << i)) == val)
12819 return 1;
12821 return 0;
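/* For example, 0xff0 is shiftable (0xff << 4), while 0x101 is not:
   no contiguous 8-bit field covers both set bits.  */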
12824 /* Returns nonzero if the current function contains,
12825 or might contain a far jump. */
12826 static int
12827 thumb_far_jump_used_p (void)
12829 rtx insn;
12831 /* This test is only important for leaf functions. */
12832 /* assert (!leaf_function_p ()); */
12834 /* If we have already decided that far jumps may be used,
12835 do not bother checking again, and always return true even if
12836 it turns out that they are not being used. Once we have made
12837 the decision that far jumps are present (and that hence the link
12838 register will be pushed onto the stack) we cannot go back on it. */
12839 if (cfun->machine->far_jump_used)
12840 return 1;
12842 /* If this function is not being called from the prologue/epilogue
12843 generation code then it must be being called from the
12844 INITIAL_ELIMINATION_OFFSET macro. */
12845 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12847 /* In this case we know that we are being asked about the elimination
12848 of the arg pointer register. If that register is not being used,
12849 then there are no arguments on the stack, and we do not have to
12850 worry that a far jump might force the prologue to push the link
12851 register, changing the stack offsets. In this case we can just
12852 return false, since the presence of far jumps in the function will
12853 not affect stack offsets.
12855 If the arg pointer is live (or if it was live, but has now been
12856 eliminated and so set to dead) then we do have to test to see if
12857 the function might contain a far jump. This test can lead to some
12858 false negatives, since before reload is completed, the length of
12859 branch instructions is not known, so gcc defaults to returning their
12860 longest length, which in turn sets the far jump attribute to true.
12862 A false negative will not result in bad code being generated, but it
12863 will result in a needless push and pop of the link register. We
12864 hope that this does not occur too often.
12866 If we need doubleword stack alignment this could affect the other
12867 elimination offsets so we can't risk getting it wrong. */
12868 if (regs_ever_live [ARG_POINTER_REGNUM])
12869 cfun->machine->arg_pointer_live = 1;
12870 else if (!cfun->machine->arg_pointer_live)
12871 return 0;
12874 /* Check to see if the function contains a branch
12875 insn with the far jump attribute set. */
12876 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12878 if (GET_CODE (insn) == JUMP_INSN
12879 /* Ignore tablejump patterns. */
12880 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12881 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12882 && get_attr_far_jump (insn) == FAR_JUMP_YES
12885 /* Record the fact that we have decided that
12886 the function does use far jumps. */
12887 cfun->machine->far_jump_used = 1;
12888 return 1;
12892 return 0;
12895 /* Return nonzero if FUNC must be entered in ARM mode. */
12896 int
12897 is_called_in_ARM_mode (tree func)
12899 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
12901 /* Ignore the problem about functions whose address is taken. */
12902 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12903 return TRUE;
12905 #ifdef ARM_PE
12906 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12907 #else
12908 return FALSE;
12909 #endif
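/* Under ARM_PE (illustrative), ARM-mode entry can be forced
   explicitly with the attribute tested above:

	void __attribute__ ((interfacearm)) called_from_arm (void);  */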
12912 /* The bits which aren't usefully expanded as rtl. */
12913 const char *
12914 thumb_unexpanded_epilogue (void)
12916 int regno;
12917 unsigned long live_regs_mask = 0;
12918 int high_regs_pushed = 0;
12919 int had_to_push_lr;
12920 int size;
12922 if (return_used_this_function)
12923 return "";
12925 if (IS_NAKED (arm_current_func_type ()))
12926 return "";
12928 live_regs_mask = thumb_compute_save_reg_mask ();
12929 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12931 /* We can deduce the registers used from the function's return value.
12932 This is more reliable than examining regs_ever_live[] because that
12933 will be set if the register is ever used in the function, not just if
12934 the register is used to hold a return value. */
12935 size = arm_size_return_regs ();
12937 /* The prologue may have pushed some high registers to use as
12938 work registers. e.g. the testsuite file:
12939 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12940 compiles to produce:
12941 push {r4, r5, r6, r7, lr}
12942 mov r7, r9
12943 mov r6, r8
12944 push {r6, r7}
12945 as part of the prologue. We have to undo that pushing here. */
12947 if (high_regs_pushed)
12949 unsigned long mask = live_regs_mask & 0xff;
12950 int next_hi_reg;
12952 /* The available low registers depend on the size of the value we are
12953 returning. */
12954 if (size <= 12)
12955 mask |= 1 << 3;
12956 if (size <= 8)
12957 mask |= 1 << 2;
12959 if (mask == 0)
12960 /* Oh dear! We have no low registers into which we can pop
12961 high registers! */
12962 internal_error
12963 ("no low registers available for popping high registers");
12965 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12966 if (live_regs_mask & (1 << next_hi_reg))
12967 break;
12969 while (high_regs_pushed)
12971 /* Find lo register(s) into which the high register(s) can
12972 be popped. */
12973 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12975 if (mask & (1 << regno))
12976 high_regs_pushed--;
12977 if (high_regs_pushed == 0)
12978 break;
12981 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12983 /* Pop the values into the low register(s). */
12984 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12986 /* Move the value(s) into the high registers. */
12987 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12989 if (mask & (1 << regno))
12991 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12992 regno);
12994 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12995 if (live_regs_mask & (1 << next_hi_reg))
12996 break;
13000 live_regs_mask &= ~0x0f00;
13003 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13004 live_regs_mask &= 0xff;
13006 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13008 /* Pop the return address into the PC. */
13009 if (had_to_push_lr)
13010 live_regs_mask |= 1 << PC_REGNUM;
13012 /* Either no argument registers were pushed or a backtrace
13013 structure was created which includes an adjusted stack
13014 pointer, so just pop everything. */
13015 if (live_regs_mask)
13016 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13017 live_regs_mask);
13019 /* We have either just popped the return address into the
13020 PC or it was kept in LR for the entire function. */
13021 if (!had_to_push_lr)
13022 thumb_exit (asm_out_file, LR_REGNUM);
13024 else
13026 /* Pop everything but the return address. */
13027 if (live_regs_mask)
13028 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13029 live_regs_mask);
13031 if (had_to_push_lr)
13033 if (size > 12)
13035 /* We have no free low regs, so save one. */
13036 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13037 LAST_ARG_REGNUM);
13040 /* Get the return address into a temporary register. */
13041 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13042 1 << LAST_ARG_REGNUM);
13044 if (size > 12)
13046 /* Move the return address to lr. */
13047 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13048 LAST_ARG_REGNUM);
13049 /* Restore the low register. */
13050 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13051 IP_REGNUM);
13052 regno = LR_REGNUM;
13054 else
13055 regno = LAST_ARG_REGNUM;
13057 else
13058 regno = LR_REGNUM;
13060 /* Remove the argument registers that were pushed onto the stack. */
13061 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13062 SP_REGNUM, SP_REGNUM,
13063 current_function_pretend_args_size);
13065 thumb_exit (asm_out_file, regno);
13068 return "";
13071 /* Functions to save and restore machine-specific function data. */
13072 static struct machine_function *
13073 arm_init_machine_status (void)
13075 struct machine_function *machine;
13076 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13078 #if ARM_FT_UNKNOWN != 0
13079 machine->func_type = ARM_FT_UNKNOWN;
13080 #endif
13081 return machine;
13084 /* Return an RTX indicating where the return address to the
13085 calling function can be found. */
13086 rtx
13087 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13089 if (count != 0)
13090 return NULL_RTX;
13092 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13095 /* Do anything needed before RTL is emitted for each function. */
13096 void
13097 arm_init_expanders (void)
13099 /* Arrange to initialize and mark the machine per-function status. */
13100 init_machine_status = arm_init_machine_status;
13102 /* This is to stop the combine pass optimizing away the alignment
13103 adjustment of va_arg. */
13104 /* ??? It is claimed that this should not be necessary. */
13105 if (cfun)
13106 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13110 /* Like arm_compute_initial_elimination_offset.  Simpler because
13111 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13113 HOST_WIDE_INT
13114 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13116 arm_stack_offsets *offsets;
13118 offsets = arm_get_frame_offsets ();
13120 switch (from)
13122 case ARG_POINTER_REGNUM:
13123 switch (to)
13125 case STACK_POINTER_REGNUM:
13126 return offsets->outgoing_args - offsets->saved_args;
13128 case FRAME_POINTER_REGNUM:
13129 return offsets->soft_frame - offsets->saved_args;
13131 case THUMB_HARD_FRAME_POINTER_REGNUM:
13132 case ARM_HARD_FRAME_POINTER_REGNUM:
13133 return offsets->saved_regs - offsets->saved_args;
13135 default:
13136 gcc_unreachable ();
13138 break;
13140 case FRAME_POINTER_REGNUM:
13141 switch (to)
13143 case STACK_POINTER_REGNUM:
13144 return offsets->outgoing_args - offsets->soft_frame;
13146 case THUMB_HARD_FRAME_POINTER_REGNUM:
13147 case ARM_HARD_FRAME_POINTER_REGNUM:
13148 return offsets->saved_regs - offsets->soft_frame;
13150 default:
13151 gcc_unreachable ();
13153 break;
13155 default:
13156 gcc_unreachable ();
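/* Reading the switch above (a summary, not new behavior): eliminating
   ARG_POINTER_REGNUM to STACK_POINTER_REGNUM yields the full static
   frame size (outgoing_args - saved_args), while eliminating to the
   hard frame pointer spans only the register-save area
   (saved_regs - saved_args).  */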
13161 /* Generate the rest of a function's prologue. */
13162 void
13163 thumb_expand_prologue (void)
13165 rtx insn, dwarf;
13167 HOST_WIDE_INT amount;
13168 arm_stack_offsets *offsets;
13169 unsigned long func_type;
13170 int regno;
13171 unsigned long live_regs_mask;
13173 func_type = arm_current_func_type ();
13175 /* Naked functions don't have prologues. */
13176 if (IS_NAKED (func_type))
13177 return;
13179 if (IS_INTERRUPT (func_type))
13181 error ("interrupt Service Routines cannot be coded in Thumb mode");
13182 return;
13185 live_regs_mask = thumb_compute_save_reg_mask ();
13186 /* Load the pic register before setting the frame pointer,
13187 so we can use r7 as a temporary work register. */
13188 if (flag_pic)
13189 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13191 offsets = arm_get_frame_offsets ();
13193 if (frame_pointer_needed)
13195 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13196 stack_pointer_rtx));
13197 RTX_FRAME_RELATED_P (insn) = 1;
13199 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13200 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13201 stack_pointer_rtx);
13203 amount = offsets->outgoing_args - offsets->saved_regs;
13204 if (amount)
13206 if (amount < 512)
13208 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13209 GEN_INT (- amount)));
13210 RTX_FRAME_RELATED_P (insn) = 1;
13212 else
13214 rtx reg;
13216 /* The stack decrement is too big for an immediate value in a single
13217 insn. In theory we could issue multiple subtracts, but after
13218 three of them it becomes more space efficient to place the full
13219 value in the constant pool and load into a register. (Also the
13220 ARM debugger really likes to see only one stack decrement per
13221 function). So instead we look for a scratch register into which
13222 we can load the decrement, and then we subtract this from the
13223 stack pointer. Unfortunately on the thumb the only available
13224 scratch registers are the argument registers, and we cannot use
13225 these as they may hold arguments to the function. Instead we
13226 attempt to locate a call preserved register which is used by this
13227 function. If we can find one, then we know that it will have
13228 been pushed at the start of the prologue and so we can corrupt
13229 it now. */
13230 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13231 if (live_regs_mask & (1 << regno)
13232 && !(frame_pointer_needed
13233 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13234 break;
13236 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13238 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13240 /* Choose an arbitrary, non-argument low register. */
13241 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13243 /* Save it by copying it into a high, scratch register. */
13244 emit_insn (gen_movsi (spare, reg));
13245 /* Add a USE to stop propagate_one_insn() from barfing. */
13246 emit_insn (gen_prologue_use (spare));
13248 /* Decrement the stack. */
13249 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13250 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13251 stack_pointer_rtx, reg));
13252 RTX_FRAME_RELATED_P (insn) = 1;
13253 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13254 plus_constant (stack_pointer_rtx,
13255 -amount));
13256 RTX_FRAME_RELATED_P (dwarf) = 1;
13257 REG_NOTES (insn)
13258 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13259 REG_NOTES (insn));
13261 /* Restore the low register's original value. */
13262 emit_insn (gen_movsi (reg, spare));
13264 /* Emit a USE of the restored scratch register, so that flow
13265 analysis will not consider the restore redundant. The
13266 register won't be used again in this function and isn't
13267 restored by the epilogue. */
13268 emit_insn (gen_prologue_use (reg));
13270 else
13272 reg = gen_rtx_REG (SImode, regno);
13274 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13276 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13277 stack_pointer_rtx, reg));
13278 RTX_FRAME_RELATED_P (insn) = 1;
13279 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13280 plus_constant (stack_pointer_rtx,
13281 -amount));
13282 RTX_FRAME_RELATED_P (dwarf) = 1;
13283 REG_NOTES (insn)
13284 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13285 REG_NOTES (insn));
13288 /* If the frame pointer is needed, emit a special barrier that
13289 will prevent the scheduler from moving stores to the frame
13290 before the stack adjustment. */
13291 if (frame_pointer_needed)
13292 emit_insn (gen_stack_tie (stack_pointer_rtx,
13293 hard_frame_pointer_rtx));
13296 if (current_function_profile || !TARGET_SCHED_PROLOG)
13297 emit_insn (gen_blockage ());
13299 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13300 if (live_regs_mask & 0xff)
13301 cfun->machine->lr_save_eliminated = 0;
13303 /* If the link register is being kept alive, with the return address in it,
13304 then make sure that it does not get reused by the ce2 pass. */
13305 if (cfun->machine->lr_save_eliminated)
13306 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13310 void
13311 thumb_expand_epilogue (void)
13313 HOST_WIDE_INT amount;
13314 arm_stack_offsets *offsets;
13315 int regno;
13317 /* Naked functions don't have epilogues. */
13318 if (IS_NAKED (arm_current_func_type ()))
13319 return;
13321 offsets = arm_get_frame_offsets ();
13322 amount = offsets->outgoing_args - offsets->saved_regs;
13324 if (frame_pointer_needed)
13325 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13326 else if (amount)
13328 if (amount < 512)
13329 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13330 GEN_INT (amount)));
13331 else
13333 /* r3 is always free in the epilogue. */
13334 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13336 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13337 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13341 /* Emit a USE (stack_pointer_rtx), so that
13342 the stack adjustment will not be deleted. */
13343 emit_insn (gen_prologue_use (stack_pointer_rtx));
13345 if (current_function_profile || !TARGET_SCHED_PROLOG)
13346 emit_insn (gen_blockage ());
13348 /* Emit a clobber for each insn that will be restored in the epilogue,
13349 so that flow2 will get register lifetimes correct. */
13350 for (regno = 0; regno < 13; regno++)
13351 if (regs_ever_live[regno] && !call_used_regs[regno])
13352 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13354 if (! regs_ever_live[LR_REGNUM])
13355 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13358 static void
13359 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13361 unsigned long live_regs_mask = 0;
13362 unsigned long l_mask;
13363 unsigned high_regs_pushed = 0;
13364 int cfa_offset = 0;
13365 int regno;
13367 if (IS_NAKED (arm_current_func_type ()))
13368 return;
13370 if (is_called_in_ARM_mode (current_function_decl))
13372 const char * name;
13374 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13375 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13376 == SYMBOL_REF);
13377 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13379 /* Generate code sequence to switch us into Thumb mode. */
13380 /* The .code 32 directive has already been emitted by
13381 ASM_DECLARE_FUNCTION_NAME. */
13382 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13383 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13385 /* Generate a label, so that the debugger will notice the
13386 change in instruction sets. This label is also used by
13387 the assembler to bypass the ARM code when this function
13388 is called from a Thumb encoded function elsewhere in the
13389 same file. Hence the definition of STUB_NAME here must
13390 agree with the definition in gas/config/tc-arm.c. */
13392 #define STUB_NAME ".real_start_of"
13394 fprintf (f, "\t.code\t16\n");
13395 #ifdef ARM_PE
13396 if (arm_dllexport_name_p (name))
13397 name = arm_strip_name_encoding (name);
13398 #endif
13399 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13400 fprintf (f, "\t.thumb_func\n");
13401 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13404 if (current_function_pretend_args_size)
13406 /* Output unwind directive for the stack adjustment. */
13407 if (ARM_EABI_UNWIND_TABLES)
13408 fprintf (f, "\t.pad #%d\n",
13409 current_function_pretend_args_size);
13411 if (cfun->machine->uses_anonymous_args)
13413 int num_pushes;
13415 fprintf (f, "\tpush\t{");
13417 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13419 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13420 regno <= LAST_ARG_REGNUM;
13421 regno++)
13422 asm_fprintf (f, "%r%s", regno,
13423 regno == LAST_ARG_REGNUM ? "" : ", ");
13425 fprintf (f, "}\n");
13427 else
13428 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13429 SP_REGNUM, SP_REGNUM,
13430 current_function_pretend_args_size);
13432 /* We don't need to record the stores for unwinding (would it
13433 help the debugger any if we did?), but record the change in
13434 the stack pointer. */
13435 if (dwarf2out_do_frame ())
13437 char *l = dwarf2out_cfi_label ();
13439 cfa_offset = cfa_offset + current_function_pretend_args_size;
13440 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13444 /* Get the registers we are going to push. */
13445 live_regs_mask = thumb_compute_save_reg_mask ();
13446 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13447 l_mask = live_regs_mask & 0x40ff;
13448 /* Then count how many other high registers will need to be pushed. */
13449 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13451 if (TARGET_BACKTRACE)
13453 unsigned offset;
13454 unsigned work_register;
13456 /* We have been asked to create a stack backtrace structure.
13457 The code looks like this:
13459 0 .align 2
13460 0 func:
13461 0 sub SP, #16 Reserve space for 4 registers.
13462 2 push {R7} Push low registers.
13463 4 add R7, SP, #20 Get the stack pointer before the push.
13464 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13465 8 mov R7, PC Get hold of the start of this code plus 12.
13466 10 str R7, [SP, #16] Store it.
13467 12 mov R7, FP Get hold of the current frame pointer.
13468 14 str R7, [SP, #4] Store it.
13469 16 mov R7, LR Get hold of the current return address.
13470 18 str R7, [SP, #12] Store it.
13471 20 add R7, SP, #16 Point at the start of the backtrace structure.
13472 22 mov FP, R7 Put this value into the frame pointer. */
13474 work_register = thumb_find_work_register (live_regs_mask);
13476 if (ARM_EABI_UNWIND_TABLES)
13477 asm_fprintf (f, "\t.pad #16\n");
13479 asm_fprintf
13480 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13481 SP_REGNUM, SP_REGNUM);
13483 if (dwarf2out_do_frame ())
13485 char *l = dwarf2out_cfi_label ();
13487 cfa_offset = cfa_offset + 16;
13488 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13491 if (l_mask)
13493 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13494 offset = bit_count (l_mask) * UNITS_PER_WORD;
13496 else
13497 offset = 0;
13499 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13500 offset + 16 + current_function_pretend_args_size);
13502 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13503 offset + 4);
13505 /* Make sure that the instruction fetching the PC is in the right place
13506 to calculate "start of backtrace creation code + 12". */
13507 if (l_mask)
13509 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13510 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13511 offset + 12);
13512 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13513 ARM_HARD_FRAME_POINTER_REGNUM);
13514 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13515 offset);
13517 else
13519 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13520 ARM_HARD_FRAME_POINTER_REGNUM);
13521 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13522 offset);
13523 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13524 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13525 offset + 12);
13528 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13529 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13530 offset + 8);
13531 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13532 offset + 12);
13533 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13534 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13536 /* Optimization: If we are not pushing any low registers but we are going
13537 to push some high registers then delay our first push. This will just
13538 be a push of LR and we can combine it with the push of the first high
13539 register. */
13540 else if ((l_mask & 0xff) != 0
13541 || (high_regs_pushed == 0 && l_mask))
13542 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13544 if (high_regs_pushed)
13546 unsigned pushable_regs;
13547 unsigned next_hi_reg;
13549 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13550 if (live_regs_mask & (1 << next_hi_reg))
13551 break;
13553 pushable_regs = l_mask & 0xff;
13555 if (pushable_regs == 0)
13556 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13558 while (high_regs_pushed > 0)
13560 unsigned long real_regs_mask = 0;
13562 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13564 if (pushable_regs & (1 << regno))
13566 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13568 high_regs_pushed --;
13569 real_regs_mask |= (1 << next_hi_reg);
13571 if (high_regs_pushed)
13573 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13574 next_hi_reg --)
13575 if (live_regs_mask & (1 << next_hi_reg))
13576 break;
13578 else
13580 pushable_regs &= ~((1 << regno) - 1);
13581 break;
13586 /* If we had to find a work register and we have not yet
13587 saved the LR then add it to the list of regs to push. */
13588 if (l_mask == (1 << LR_REGNUM))
13590 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13591 1, &cfa_offset,
13592 real_regs_mask | (1 << LR_REGNUM));
13593 l_mask = 0;
13595 else
13596 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13601 /* Handle the case of a double word load into a low register from
13602 a computed memory address. The computed address may involve a
13603 register which is overwritten by the load. */
13604 const char *
13605 thumb_load_double_from_address (rtx *operands)
13607 rtx addr;
13608 rtx base;
13609 rtx offset;
13610 rtx arg1;
13611 rtx arg2;
13613 gcc_assert (GET_CODE (operands[0]) == REG);
13614 gcc_assert (GET_CODE (operands[1]) == MEM);
13616 /* Get the memory address. */
13617 addr = XEXP (operands[1], 0);
13619 /* Work out how the memory address is computed. */
13620 switch (GET_CODE (addr))
13622 case REG:
13623 operands[2] = gen_rtx_MEM (SImode,
13624 plus_constant (XEXP (operands[1], 0), 4));
13626 if (REGNO (operands[0]) == REGNO (addr))
13628 output_asm_insn ("ldr\t%H0, %2", operands);
13629 output_asm_insn ("ldr\t%0, %1", operands);
13631 else
13633 output_asm_insn ("ldr\t%0, %1", operands);
13634 output_asm_insn ("ldr\t%H0, %2", operands);
13636 break;
13638 case CONST:
13639 /* Compute <address> + 4 for the high order load. */
13640 operands[2] = gen_rtx_MEM (SImode,
13641 plus_constant (XEXP (operands[1], 0), 4));
13643 output_asm_insn ("ldr\t%0, %1", operands);
13644 output_asm_insn ("ldr\t%H0, %2", operands);
13645 break;
13647 case PLUS:
13648 arg1 = XEXP (addr, 0);
13649 arg2 = XEXP (addr, 1);
13651 if (CONSTANT_P (arg1))
13652 base = arg2, offset = arg1;
13653 else
13654 base = arg1, offset = arg2;
13656 gcc_assert (GET_CODE (base) == REG);
13658 /* Catch the case of <address> = <reg> + <reg> */
13659 if (GET_CODE (offset) == REG)
13661 int reg_offset = REGNO (offset);
13662 int reg_base = REGNO (base);
13663 int reg_dest = REGNO (operands[0]);
13665 /* Add the base and offset registers together into the
13666 higher destination register. */
13667 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r\n",
13668 reg_dest + 1, reg_base, reg_offset);
13670 /* Load the lower destination register from the address in
13671 the higher destination register. */
13672 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]\n",
13673 reg_dest, reg_dest + 1);
13675 /* Load the higher destination register from its own address
13676 plus 4. */
13677 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]\n",
13678 reg_dest + 1, reg_dest + 1);
13680 else
13682 /* Compute <address> + 4 for the high order load. */
13683 operands[2] = gen_rtx_MEM (SImode,
13684 plus_constant (XEXP (operands[1], 0), 4));
13686 /* If the computed address is held in the low order register
13687 then load the high order register first, otherwise always
13688 load the low order register first. */
13689 if (REGNO (operands[0]) == REGNO (base))
13691 output_asm_insn ("ldr\t%H0, %2", operands);
13692 output_asm_insn ("ldr\t%0, %1", operands);
13694 else
13696 output_asm_insn ("ldr\t%0, %1", operands);
13697 output_asm_insn ("ldr\t%H0, %2", operands);
13700 break;
13702 case LABEL_REF:
13703 /* With no registers to worry about we can just load the value
13704 directly. */
13705 operands[2] = gen_rtx_MEM (SImode,
13706 plus_constant (XEXP (operands[1], 0), 4));
13708 output_asm_insn ("ldr\t%H0, %2", operands);
13709 output_asm_insn ("ldr\t%0, %1", operands);
13710 break;
13712 default:
13713 gcc_unreachable ();
13716 return "";
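/* Illustrative example for the <reg> + <reg> case above (register numbers
   hypothetical, not from the original source): loading the doubleword at
   r0 + r1 into the pair r2/r3 emits

	add	r3, r0, r1
	ldr	r2, [r3, #0]
	ldr	r3, [r3, #4]

   The high half of the destination doubles as the address register, so the
   base registers are never clobbered before the low word is fetched.  */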
13719 const char *
13720 thumb_output_move_mem_multiple (int n, rtx *operands)
13722 rtx tmp;
13724 switch (n)
13726 case 2:
13727 if (REGNO (operands[4]) > REGNO (operands[5]))
13729 tmp = operands[4];
13730 operands[4] = operands[5];
13731 operands[5] = tmp;
13733 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13734 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13735 break;
13737 case 3:
13738 if (REGNO (operands[4]) > REGNO (operands[5]))
13740 tmp = operands[4];
13741 operands[4] = operands[5];
13742 operands[5] = tmp;
13744 if (REGNO (operands[5]) > REGNO (operands[6]))
13746 tmp = operands[5];
13747 operands[5] = operands[6];
13748 operands[6] = tmp;
13750 if (REGNO (operands[4]) > REGNO (operands[5]))
13752 tmp = operands[4];
13753 operands[4] = operands[5];
13754 operands[5] = tmp;
13757 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13758 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13759 break;
13761 default:
13762 gcc_unreachable ();
13765 return "";
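/* Illustrative usage (not in the original source): for n == 3 with scratch
   registers r5, r4, r6 in operands 4..6, the swaps above sort them into
   ascending order and the routine emits

	ldmia	r1!, {r4, r5, r6}
	stmia	r0!, {r4, r5, r6}

   ldmia/stmia require ascending register lists, hence the sorting.  */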
13768 /* Output a call-via instruction for thumb state. */
13769 const char *
13770 thumb_call_via_reg (rtx reg)
13772 int regno = REGNO (reg);
13773 rtx *labelp;
13775 gcc_assert (regno < LR_REGNUM);
13777 /* If we are in the normal text section we can use a single instance
13778 per compilation unit. If we are doing function sections, then we need
13779 an entry per section, since we can't rely on reachability. */
13780 if (in_text_section ())
13782 thumb_call_reg_needed = 1;
13784 if (thumb_call_via_label[regno] == NULL)
13785 thumb_call_via_label[regno] = gen_label_rtx ();
13786 labelp = thumb_call_via_label + regno;
13788 else
13790 if (cfun->machine->call_via[regno] == NULL)
13791 cfun->machine->call_via[regno] = gen_label_rtx ();
13792 labelp = cfun->machine->call_via + regno;
13795 output_asm_insn ("bl\t%a0", labelp);
13796 return "";
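/* Illustrative sketch (label name hypothetical, not from the original
   source): a call through r3 emits "bl .L<n>" here, and arm_file_end (or
   the per-section table) later materializes the shared stub

	.L<n>:
		bx	r3

   so at most one stub per register is needed for the text section.  */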
13799 /* Routines for generating rtl. */
13800 void
13801 thumb_expand_movmemqi (rtx *operands)
13803 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13804 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13805 HOST_WIDE_INT len = INTVAL (operands[2]);
13806 HOST_WIDE_INT offset = 0;
13808 while (len >= 12)
13810 emit_insn (gen_movmem12b (out, in, out, in));
13811 len -= 12;
13814 if (len >= 8)
13816 emit_insn (gen_movmem8b (out, in, out, in));
13817 len -= 8;
13820 if (len >= 4)
13822 rtx reg = gen_reg_rtx (SImode);
13823 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13824 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13825 len -= 4;
13826 offset += 4;
13829 if (len >= 2)
13831 rtx reg = gen_reg_rtx (HImode);
13832 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13833 plus_constant (in, offset))));
13834 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13835 reg));
13836 len -= 2;
13837 offset += 2;
13840 if (len)
13842 rtx reg = gen_reg_rtx (QImode);
13843 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13844 plus_constant (in, offset))));
13845 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13846 reg));
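/* Illustrative decomposition (not in the original source): a 27-byte copy
   expands to two 12-byte ldmia/stmia blocks (whose patterns post-increment
   both pointers), then a halfword move at offset 0 and a byte move at
   offset 2 from the updated pointers.  */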
13850 void
13851 thumb_reload_out_hi (rtx *operands)
13853 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13856 /* Handle reading a half-word from memory during reload. */
13857 void
13858 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13860 gcc_unreachable ();
13863 /* Return the length of a function name prefix
13864 that starts with the character 'c'. */
13865 static int
13866 arm_get_strip_length (int c)
13868 switch (c)
13870 ARM_NAME_ENCODING_LENGTHS
13871 default: return 0;
13875 /* Return a pointer to a function's name with any
13876 and all prefix encodings stripped from it. */
13877 const char *
13878 arm_strip_name_encoding (const char *name)
13880 int skip;
13882 while ((skip = arm_get_strip_length (* name)))
13883 name += skip;
13885 return name;
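/* Illustrative example (the prefix characters come from
   ARM_NAME_ENCODING_LENGTHS and are assumed here): if '*' is a
   one-character prefix, arm_strip_name_encoding ("*foo") returns "foo",
   stripping repeatedly until no known prefix remains.  */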
13888 /* If there is a '*' anywhere in the name's prefix, then
13889 emit the stripped name verbatim, otherwise prepend an
13890 underscore if leading underscores are being used. */
13891 void
13892 arm_asm_output_labelref (FILE *stream, const char *name)
13894 int skip;
13895 int verbatim = 0;
13897 while ((skip = arm_get_strip_length (* name)))
13899 verbatim |= (*name == '*');
13900 name += skip;
13903 if (verbatim)
13904 fputs (name, stream);
13905 else
13906 asm_fprintf (stream, "%U%s", name);
13909 static void
13910 arm_file_end (void)
13912 int regno;
13914 if (! thumb_call_reg_needed)
13915 return;
13917 text_section ();
13918 asm_fprintf (asm_out_file, "\t.code 16\n");
13919 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13921 for (regno = 0; regno < LR_REGNUM; regno++)
13923 rtx label = thumb_call_via_label[regno];
13925 if (label != 0)
13927 targetm.asm_out.internal_label (asm_out_file, "L",
13928 CODE_LABEL_NUMBER (label));
13929 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
13934 rtx aof_pic_label;
13936 #ifdef AOF_ASSEMBLER
13937 /* Special functions only needed when producing AOF syntax assembler. */
13939 struct pic_chain
13941 struct pic_chain * next;
13942 const char * symname;
13945 static struct pic_chain * aof_pic_chain = NULL;
13947 rtx
13948 aof_pic_entry (rtx x)
13950 struct pic_chain ** chainp;
13951 int offset;
13953 if (aof_pic_label == NULL_RTX)
13955 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13958 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13959 offset += 4, chainp = &(*chainp)->next)
13960 if ((*chainp)->symname == XSTR (x, 0))
13961 return plus_constant (aof_pic_label, offset);
13963 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13964 (*chainp)->next = NULL;
13965 (*chainp)->symname = XSTR (x, 0);
13966 return plus_constant (aof_pic_label, offset);
13969 void
13970 aof_dump_pic_table (FILE *f)
13972 struct pic_chain * chain;
13974 if (aof_pic_chain == NULL)
13975 return;
13977 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13978 PIC_OFFSET_TABLE_REGNUM,
13979 PIC_OFFSET_TABLE_REGNUM);
13980 fputs ("|x$adcons|\n", f);
13982 for (chain = aof_pic_chain; chain; chain = chain->next)
13984 fputs ("\tDCD\t", f);
13985 assemble_name (f, chain->symname);
13986 fputs ("\n", f);
13990 int arm_text_section_count = 1;
13992 char *
13993 aof_text_section (void)
13995 static char buf[100];
13996 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13997 arm_text_section_count++);
13998 if (flag_pic)
13999 strcat (buf, ", PIC, REENTRANT");
14000 return buf;
14003 static int arm_data_section_count = 1;
14005 char *
14006 aof_data_section (void)
14008 static char buf[100];
14009 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
14010 return buf;
14013 /* The AOF assembler is religiously strict about declarations of
14014 imported and exported symbols, so that it is impossible to declare
14015 a function as imported near the beginning of the file, and then to
14016 export it later on. It is, however, possible to delay the decision
14017 until all the functions in the file have been compiled. To get
14018 around this, we maintain a list of the imports and exports, and
14019 delete from it any that are subsequently defined. At the end of
14020 compilation we spit the remainder of the list out before the END
14021 directive. */
14023 struct import
14025 struct import * next;
14026 const char * name;
14029 static struct import * imports_list = NULL;
14031 void
14032 aof_add_import (const char *name)
14034 struct import * new;
14036 for (new = imports_list; new; new = new->next)
14037 if (new->name == name)
14038 return;
14040 new = (struct import *) xmalloc (sizeof (struct import));
14041 new->next = imports_list;
14042 imports_list = new;
14043 new->name = name;
14046 void
14047 aof_delete_import (const char *name)
14049 struct import ** old;
14051 for (old = &imports_list; *old; old = & (*old)->next)
14053 if ((*old)->name == name)
14055 *old = (*old)->next;
14056 return;
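/* Illustrative flow (not in the original source): for a file that first
   calls bar() and later defines it, the sequence is

	aof_add_import ("bar");     -- call site seen, assume imported
	aof_delete_import ("bar");  -- definition seen, drop it again

   leaving only genuinely external names for aof_dump_imports to emit as
   IMPORT directives before the END directive.  */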
14061 int arm_main_function = 0;
14063 static void
14064 aof_dump_imports (FILE *f)
14066 /* The AOF assembler needs this to cause the startup code to be extracted
14067 from the library. Bringing in __main causes the whole thing to work
14068 automagically. */
14069 if (arm_main_function)
14071 text_section ();
14072 fputs ("\tIMPORT __main\n", f);
14073 fputs ("\tDCD __main\n", f);
14076 /* Now dump the remaining imports. */
14077 while (imports_list)
14079 fprintf (f, "\tIMPORT\t");
14080 assemble_name (f, imports_list->name);
14081 fputc ('\n', f);
14082 imports_list = imports_list->next;
14086 static void
14087 aof_globalize_label (FILE *stream, const char *name)
14089 default_globalize_label (stream, name);
14090 if (! strcmp (name, "main"))
14091 arm_main_function = 1;
14094 static void
14095 aof_file_start (void)
14097 fputs ("__r0\tRN\t0\n", asm_out_file);
14098 fputs ("__a1\tRN\t0\n", asm_out_file);
14099 fputs ("__a2\tRN\t1\n", asm_out_file);
14100 fputs ("__a3\tRN\t2\n", asm_out_file);
14101 fputs ("__a4\tRN\t3\n", asm_out_file);
14102 fputs ("__v1\tRN\t4\n", asm_out_file);
14103 fputs ("__v2\tRN\t5\n", asm_out_file);
14104 fputs ("__v3\tRN\t6\n", asm_out_file);
14105 fputs ("__v4\tRN\t7\n", asm_out_file);
14106 fputs ("__v5\tRN\t8\n", asm_out_file);
14107 fputs ("__v6\tRN\t9\n", asm_out_file);
14108 fputs ("__sl\tRN\t10\n", asm_out_file);
14109 fputs ("__fp\tRN\t11\n", asm_out_file);
14110 fputs ("__ip\tRN\t12\n", asm_out_file);
14111 fputs ("__sp\tRN\t13\n", asm_out_file);
14112 fputs ("__lr\tRN\t14\n", asm_out_file);
14113 fputs ("__pc\tRN\t15\n", asm_out_file);
14114 fputs ("__f0\tFN\t0\n", asm_out_file);
14115 fputs ("__f1\tFN\t1\n", asm_out_file);
14116 fputs ("__f2\tFN\t2\n", asm_out_file);
14117 fputs ("__f3\tFN\t3\n", asm_out_file);
14118 fputs ("__f4\tFN\t4\n", asm_out_file);
14119 fputs ("__f5\tFN\t5\n", asm_out_file);
14120 fputs ("__f6\tFN\t6\n", asm_out_file);
14121 fputs ("__f7\tFN\t7\n", asm_out_file);
14122 text_section ();
14125 static void
14126 aof_file_end (void)
14128 if (flag_pic)
14129 aof_dump_pic_table (asm_out_file);
14130 arm_file_end ();
14131 aof_dump_imports (asm_out_file);
14132 fputs ("\tEND\n", asm_out_file);
14134 #endif /* AOF_ASSEMBLER */
14136 #ifndef ARM_PE
14137 /* Symbols in the text segment can be accessed without indirecting via the
14138 constant pool; it may take an extra binary operation, but this is still
14139 faster than indirecting via memory. Don't do this when not optimizing,
14140 since we won't be calculating all of the offsets necessary to do this
14141 simplification. */
14143 static void
14144 arm_encode_section_info (tree decl, rtx rtl, int first)
14146 /* This doesn't work with AOF syntax, since the string table may be in
14147 a different AREA. */
14148 #ifndef AOF_ASSEMBLER
14149 if (optimize > 0 && TREE_CONSTANT (decl))
14150 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14151 #endif
14153 /* If we are referencing a function that is weak then encode a long call
14154 flag in the function name, otherwise if the function is static
14155 or known to be defined in this file then encode a short call flag. */
14156 if (first && DECL_P (decl))
14158 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14159 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14160 else if (! TREE_PUBLIC (decl))
14161 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14164 #endif /* !ARM_PE */
14166 static void
14167 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14169 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14170 && !strcmp (prefix, "L"))
14172 arm_ccfsm_state = 0;
14173 arm_target_insn = NULL;
14175 default_internal_label (stream, prefix, labelno);
14178 /* Output code to add DELTA to the first argument, and then jump
14179 to FUNCTION. Used for C++ multiple inheritance. */
14180 static void
14181 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14182 HOST_WIDE_INT delta,
14183 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14184 tree function)
14186 static int thunk_label = 0;
14187 char label[256];
14188 int mi_delta = delta;
14189 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14190 int shift = 0;
14191 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14192 ? 1 : 0);
14193 if (mi_delta < 0)
14194 mi_delta = - mi_delta;
14195 if (TARGET_THUMB)
14197 int labelno = thunk_label++;
14198 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14199 fputs ("\tldr\tr12, ", file);
14200 assemble_name (file, label);
14201 fputc ('\n', file);
14203 while (mi_delta != 0)
14205 if ((mi_delta & (3 << shift)) == 0)
14206 shift += 2;
14207 else
14209 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14210 mi_op, this_regno, this_regno,
14211 mi_delta & (0xff << shift));
14212 mi_delta &= ~(0xff << shift);
14213 shift += 8;
14216 if (TARGET_THUMB)
14218 fprintf (file, "\tbx\tr12\n");
14219 ASM_OUTPUT_ALIGN (file, 2);
14220 assemble_name (file, label);
14221 fputs (":\n", file);
14222 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14224 else
14226 fputs ("\tb\t", file);
14227 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14228 if (NEED_PLT_RELOC)
14229 fputs ("(PLT)", file);
14230 fputc ('\n', file);
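/* Illustrative output (not from the original source): an ARM-state thunk
   with delta == 0x1001 has the adjustment split into 8-bit chunks by the
   loop above, giving roughly

	add	r0, r0, #1
	add	r0, r0, #4096
	b	target_function

   (r0 assumes a non-aggregate return value; otherwise `this' is in r1).  */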
14234 int
14235 arm_emit_vector_const (FILE *file, rtx x)
14237 int i;
14238 const char * pattern;
14240 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14242 switch (GET_MODE (x))
14244 case V2SImode: pattern = "%08x"; break;
14245 case V4HImode: pattern = "%04x"; break;
14246 case V8QImode: pattern = "%02x"; break;
14247 default: gcc_unreachable ();
14250 fprintf (file, "0x");
14251 for (i = CONST_VECTOR_NUNITS (x); i--;)
14253 rtx element;
14255 element = CONST_VECTOR_ELT (x, i);
14256 fprintf (file, pattern, INTVAL (element));
14259 return 1;
14262 const char *
14263 arm_output_load_gr (rtx *operands)
14265 rtx reg;
14266 rtx offset;
14267 rtx wcgr;
14268 rtx sum;
14270 if (GET_CODE (operands [1]) != MEM
14271 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14272 || GET_CODE (reg = XEXP (sum, 0)) != REG
14273 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14274 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14275 return "wldrw%?\t%0, %1";
14277 /* Fix up an out-of-range load of a GR register. */
14278 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14279 wcgr = operands[0];
14280 operands[0] = reg;
14281 output_asm_insn ("ldr%?\t%0, %1", operands);
14283 operands[0] = wcgr;
14284 operands[1] = reg;
14285 output_asm_insn ("tmcr%?\t%0, %1", operands);
14286 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14288 return "";
14291 static rtx
14292 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14293 int incoming ATTRIBUTE_UNUSED)
14295 #if 0
14296 /* FIXME: The ARM backend has special code to handle structure
14297 returns, and will reserve its own hidden first argument. So
14298 if this macro is enabled a *second* hidden argument will be
14299 reserved, which will break binary compatibility with old
14300 toolchains and also thunk handling. One day this should be
14301 fixed. */
14302 return 0;
14303 #else
14304 /* Register in which address to store a structure value
14305 is passed to a function. */
14306 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14307 #endif
14310 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14312 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14313 named arg and all anonymous args onto the stack.
14314 XXX I know the prologue shouldn't be pushing registers, but it is faster
14315 that way. */
14317 static void
14318 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14319 enum machine_mode mode ATTRIBUTE_UNUSED,
14320 tree type ATTRIBUTE_UNUSED,
14321 int *pretend_size,
14322 int second_time ATTRIBUTE_UNUSED)
14324 cfun->machine->uses_anonymous_args = 1;
14325 if (cum->nregs < NUM_ARG_REGS)
14326 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
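/* Illustrative arithmetic (not in the original source): if the named
   arguments consume one of the four argument registers (cum->nregs == 1),
   *pretend_size becomes (4 - 1) * 4 == 12, so the prologue pushes r1-r3
   for the anonymous arguments.  */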
14329 /* Return nonzero if the CONSUMER instruction (a store) does not need
14330 PRODUCER's value to calculate the address. */
14332 int
14333 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14335 rtx value = PATTERN (producer);
14336 rtx addr = PATTERN (consumer);
14338 if (GET_CODE (value) == COND_EXEC)
14339 value = COND_EXEC_CODE (value);
14340 if (GET_CODE (value) == PARALLEL)
14341 value = XVECEXP (value, 0, 0);
14342 value = XEXP (value, 0);
14343 if (GET_CODE (addr) == COND_EXEC)
14344 addr = COND_EXEC_CODE (addr);
14345 if (GET_CODE (addr) == PARALLEL)
14346 addr = XVECEXP (addr, 0, 0);
14347 addr = XEXP (addr, 0);
14349 return !reg_overlap_mentioned_p (value, addr);
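/* Illustrative example (not in the original source): if PRODUCER sets r0
   and CONSUMER is "str r1, [r0, #4]", the address mentions r0 and the
   function returns 0; for "str r0, [r2]" only the stored value depends on
   the producer, so it returns nonzero.  */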
14352 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14353 have an early register shift value or amount dependency on the
14354 result of PRODUCER. */
14356 int
14357 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14359 rtx value = PATTERN (producer);
14360 rtx op = PATTERN (consumer);
14361 rtx early_op;
14363 if (GET_CODE (value) == COND_EXEC)
14364 value = COND_EXEC_CODE (value);
14365 if (GET_CODE (value) == PARALLEL)
14366 value = XVECEXP (value, 0, 0);
14367 value = XEXP (value, 0);
14368 if (GET_CODE (op) == COND_EXEC)
14369 op = COND_EXEC_CODE (op);
14370 if (GET_CODE (op) == PARALLEL)
14371 op = XVECEXP (op, 0, 0);
14372 op = XEXP (op, 1);
14374 early_op = XEXP (op, 0);
14375 /* This is either an actual independent shift, or a shift applied to
14376 the first operand of another operation. We want the whole shift
14377 operation. */
14378 if (GET_CODE (early_op) == REG)
14379 early_op = op;
14381 return !reg_overlap_mentioned_p (value, early_op);
14384 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14385 have an early register shift value dependency on the result of
14386 PRODUCER. */
14388 int
14389 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14391 rtx value = PATTERN (producer);
14392 rtx op = PATTERN (consumer);
14393 rtx early_op;
14395 if (GET_CODE (value) == COND_EXEC)
14396 value = COND_EXEC_CODE (value);
14397 if (GET_CODE (value) == PARALLEL)
14398 value = XVECEXP (value, 0, 0);
14399 value = XEXP (value, 0);
14400 if (GET_CODE (op) == COND_EXEC)
14401 op = COND_EXEC_CODE (op);
14402 if (GET_CODE (op) == PARALLEL)
14403 op = XVECEXP (op, 0, 0);
14404 op = XEXP (op, 1);
14406 early_op = XEXP (op, 0);
14408 /* This is either an actual independent shift, or a shift applied to
14409 the first operand of another operation. We want the value being
14410 shifted, in either case. */
14411 if (GET_CODE (early_op) != REG)
14412 early_op = XEXP (early_op, 0);
14414 return !reg_overlap_mentioned_p (value, early_op);
14417 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14418 have an early register mult dependency on the result of
14419 PRODUCER. */
14421 int
14422 arm_no_early_mul_dep (rtx producer, rtx consumer)
14424 rtx value = PATTERN (producer);
14425 rtx op = PATTERN (consumer);
14427 if (GET_CODE (value) == COND_EXEC)
14428 value = COND_EXEC_CODE (value);
14429 if (GET_CODE (value) == PARALLEL)
14430 value = XVECEXP (value, 0, 0);
14431 value = XEXP (value, 0);
14432 if (GET_CODE (op) == COND_EXEC)
14433 op = COND_EXEC_CODE (op);
14434 if (GET_CODE (op) == PARALLEL)
14435 op = XVECEXP (op, 0, 0);
14436 op = XEXP (op, 1);
14438 return (GET_CODE (op) == PLUS
14439 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14443 /* We can't rely on the caller doing the proper promotion when
14444 using APCS or ATPCS. */
14446 static bool
14447 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14449 return !TARGET_AAPCS_BASED;
14453 /* AAPCS based ABIs use short enums by default. */
14455 static bool
14456 arm_default_short_enums (void)
14458 return TARGET_AAPCS_BASED;
14462 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14464 static bool
14465 arm_align_anon_bitfield (void)
14467 return TARGET_AAPCS_BASED;
14471 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14473 static tree
14474 arm_cxx_guard_type (void)
14476 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14480 /* The EABI says test the least significant bit of a guard variable. */
14482 static bool
14483 arm_cxx_guard_mask_bit (void)
14485 return TARGET_AAPCS_BASED;
14489 /* The EABI specifies that all array cookies are 8 bytes long. */
14491 static tree
14492 arm_get_cookie_size (tree type)
14494 tree size;
14496 if (!TARGET_AAPCS_BASED)
14497 return default_cxx_get_cookie_size (type);
14499 size = build_int_cst (sizetype, 8);
14500 return size;
14504 /* The EABI says that array cookies should also contain the element size. */
14506 static bool
14507 arm_cookie_has_size (void)
14509 return TARGET_AAPCS_BASED;
14513 /* The EABI says constructors and destructors should return a pointer to
14514 the object constructed/destroyed. */
14516 static bool
14517 arm_cxx_cdtor_returns_this (void)
14519 return TARGET_AAPCS_BASED;
14522 /* The EABI says that an inline function may never be the key
14523 method. */
14525 static bool
14526 arm_cxx_key_method_may_be_inline (void)
14528 return !TARGET_AAPCS_BASED;
14531 static void
14532 arm_cxx_determine_class_data_visibility (tree decl)
14534 if (!TARGET_AAPCS_BASED)
14535 return;
14537 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
14538 is exported. However, on systems without dynamic vague linkage,
14539 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
14540 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
14541 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
14542 else
14543 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
14544 DECL_VISIBILITY_SPECIFIED (decl) = 1;
14547 static bool
14548 arm_cxx_class_data_always_comdat (void)
14550 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
14551 vague linkage if the class has no key function. */
14552 return !TARGET_AAPCS_BASED;
14556 /* The EABI says __aeabi_atexit should be used to register static
14557 destructors. */
14559 static bool
14560 arm_cxx_use_aeabi_atexit (void)
14562 return TARGET_AAPCS_BASED;
14566 void
14567 arm_set_return_address (rtx source, rtx scratch)
14569 arm_stack_offsets *offsets;
14570 HOST_WIDE_INT delta;
14571 rtx addr;
14572 unsigned long saved_regs;
14574 saved_regs = arm_compute_save_reg_mask ();
14576 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14577 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14578 else
14580 if (frame_pointer_needed)
14581 addr = plus_constant(hard_frame_pointer_rtx, -4);
14582 else
14584 /* LR will be the first saved register. */
14585 offsets = arm_get_frame_offsets ();
14586 delta = offsets->outgoing_args - (offsets->frame + 4);
14589 if (delta >= 4096)
14591 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14592 GEN_INT (delta & ~4095)));
14593 addr = scratch;
14594 delta &= 4095;
14596 else
14597 addr = stack_pointer_rtx;
14599 addr = plus_constant (addr, delta);
14601 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14606 void
14607 thumb_set_return_address (rtx source, rtx scratch)
14609 arm_stack_offsets *offsets;
14610 HOST_WIDE_INT delta;
14611 int reg;
14612 rtx addr;
14613 unsigned long mask;
14615 emit_insn (gen_rtx_USE (VOIDmode, source));
14617 mask = thumb_compute_save_reg_mask ();
14618 if (mask & (1 << LR_REGNUM))
14620 offsets = arm_get_frame_offsets ();
14622 /* Find the saved regs. */
14623 if (frame_pointer_needed)
14625 delta = offsets->soft_frame - offsets->saved_args;
14626 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14628 else
14630 delta = offsets->outgoing_args - offsets->saved_args;
14631 reg = SP_REGNUM;
14633 /* Allow for the stack frame. */
14634 if (TARGET_BACKTRACE)
14635 delta -= 16;
14636 /* The link register is always the first saved register. */
14637 delta -= 4;
14639 /* Construct the address. */
14640 addr = gen_rtx_REG (SImode, reg);
14641 if ((reg != SP_REGNUM && delta >= 128)
14642 || delta >= 1024)
14644 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14645 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14646 addr = scratch;
14648 else
14649 addr = plus_constant (addr, delta);
14651 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14653 else
14654 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14657 /* Implements target hook vector_mode_supported_p. */
14658 bool
14659 arm_vector_mode_supported_p (enum machine_mode mode)
14661 if ((mode == V2SImode)
14662 || (mode == V4HImode)
14663 || (mode == V8QImode))
14664 return true;
14666 return false;
14669 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14670 ARM insns and therefore guarantee that the shift count is modulo 256.
14671 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14672 guarantee no particular behavior for out-of-range counts. */
14674 static unsigned HOST_WIDE_INT
14675 arm_shift_truncation_mask (enum machine_mode mode)
14677 return mode == SImode ? 255 : 0;
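/* Illustrative consequence (not in the original source): a mask of 255
   lets the middle end simplify "x << (n & 255)" to "x << n" for SImode,
   while the 0 returned for DImode forbids any such folding.  */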
14681 /* Map internal gcc register numbers to DWARF2 register numbers. */
14683 unsigned int
14684 arm_dbx_register_number (unsigned int regno)
14686 if (regno < 16)
14687 return regno;
14689 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14690 compatibility. The EABI defines them as registers 96-103. */
14691 if (IS_FPA_REGNUM (regno))
14692 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14694 if (IS_VFP_REGNUM (regno))
14695 return 64 + regno - FIRST_VFP_REGNUM;
14697 if (IS_IWMMXT_GR_REGNUM (regno))
14698 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14700 if (IS_IWMMXT_REGNUM (regno))
14701 return 112 + regno - FIRST_IWMMXT_REGNUM;
14703 gcc_unreachable ();
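/* Illustrative mappings (not in the original source): r0-r15 map to DWARF
   registers 0-15 unchanged; the first VFP register maps to 64; an FPA
   register maps from 96 upwards under AAPCS but into the legacy 16-23
   range otherwise.  */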
14707 #ifdef TARGET_UNWIND_INFO
14708 /* Emit unwind directives for a store-multiple instruction. This should
14709 only ever be generated by the function prologue code, so we expect it
14710 to have a particular form. */
14712 static void
14713 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
14715 int i;
14716 HOST_WIDE_INT offset;
14717 HOST_WIDE_INT nregs;
14718 int reg_size;
14719 unsigned reg;
14720 unsigned lastreg;
14721 rtx e;
14723 /* First insn will adjust the stack pointer. */
14724 e = XVECEXP (p, 0, 0);
14725 if (GET_CODE (e) != SET
14726 || GET_CODE (XEXP (e, 0)) != REG
14727 || REGNO (XEXP (e, 0)) != SP_REGNUM
14728 || GET_CODE (XEXP (e, 1)) != PLUS)
14729 abort ();
14731 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
14732 nregs = XVECLEN (p, 0) - 1;
14734 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
14735 if (reg < 16)
14737 /* The function prologue may also push pc, but not annotate it as it is
14738 never restored. We turn this into a stack pointer adjustment. */
14739 if (nregs * 4 == offset - 4)
14741 fprintf (asm_out_file, "\t.pad #4\n");
14742 offset -= 4;
14744 reg_size = 4;
14746 else if (IS_VFP_REGNUM (reg))
14748 /* VFP register saves use the FSTMX format, which stores an additional word. */
14749 offset -= 4;
14750 reg_size = 8;
14752 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
14754 /* FPA registers are done differently. */
14755 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
14756 return;
14758 else
14759 /* Unknown register type. */
14760 abort ();
14762 /* If the stack increment doesn't match the size of the saved registers,
14763 something has gone horribly wrong. */
14764 if (offset != nregs * reg_size)
14765 abort ();
14767 fprintf (asm_out_file, "\t.save {");
14769 offset = 0;
14770 lastreg = 0;
14771 /* The remaining insns will describe the stores. */
14772 for (i = 1; i <= nregs; i++)
14774 /* Expect (set (mem <addr>) (reg)).
14775 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
14776 e = XVECEXP (p, 0, i);
14777 if (GET_CODE (e) != SET
14778 || GET_CODE (XEXP (e, 0)) != MEM
14779 || GET_CODE (XEXP (e, 1)) != REG)
14780 abort ();
14782 reg = REGNO (XEXP (e, 1));
14783 if (reg < lastreg)
14784 abort ();
14786 if (i != 1)
14787 fprintf (asm_out_file, ", ");
14788 /* We can't use %r for vfp because we need to use the
14789 double precision register names. */
14790 if (IS_VFP_REGNUM (reg))
14791 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
14792 else
14793 asm_fprintf (asm_out_file, "%r", reg);
14795 #ifdef ENABLE_CHECKING
14796 /* Check that the addresses are consecutive. */
14797 e = XEXP (XEXP (e, 0), 0);
14798 if (GET_CODE (e) == PLUS)
14800 offset += reg_size;
14801 if (GET_CODE (XEXP (e, 0)) != REG
14802 || REGNO (XEXP (e, 0)) != SP_REGNUM
14803 || GET_CODE (XEXP (e, 1)) != CONST_INT
14804 || offset != INTVAL (XEXP (e, 1)))
14805 abort ();
14807 else if (i != 1
14808 || GET_CODE (e) != REG
14809 || REGNO (e) != SP_REGNUM)
14810 abort ();
14811 #endif
14813 fprintf (asm_out_file, "}\n");
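/* Illustrative output (not in the original source): a prologue insn
   describing "push {r4, r5, lr}" is annotated as

	.save {r4, r5, lr}

   while a push that also saves pc for a backtrace structure is preceded by
   ".pad #4" for the slot that is never restored.  */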
14816 /* Emit unwind directives for a SET. */
14818 static void
14819 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
14821 rtx e0;
14822 rtx e1;
14824 e0 = XEXP (p, 0);
14825 e1 = XEXP (p, 1);
14826 switch (GET_CODE (e0))
14828 case MEM:
14829 /* Pushing a single register. */
14830 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
14831 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
14832 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
14833 abort ();
14835 asm_fprintf (asm_out_file, "\t.save ");
14836 if (IS_VFP_REGNUM (REGNO (e1)))
14837 asm_fprintf (asm_out_file, "{d%d}\n",
14838 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
14839 else
14840 asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
14841 break;
14843 case REG:
14844 if (REGNO (e0) == SP_REGNUM)
14846 /* A stack increment. */
14847 if (GET_CODE (e1) != PLUS
14848 || GET_CODE (XEXP (e1, 0)) != REG
14849 || REGNO (XEXP (e1, 0)) != SP_REGNUM
14850 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
14851 abort ();
14853 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
14854 -INTVAL (XEXP (e1, 1)));
14856 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
14858 HOST_WIDE_INT offset;
14859 unsigned reg;
14861 if (GET_CODE (e1) == PLUS)
14863 if (GET_CODE (XEXP (e1, 0)) != REG
14864 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
14865 abort ();
14866 reg = REGNO (XEXP (e1, 0));
14867 offset = INTVAL (XEXP (e1, 1));
14868 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
14869 HARD_FRAME_POINTER_REGNUM, reg,
14870 INTVAL (XEXP (e1, 1)));
14872 else if (GET_CODE (e1) == REG)
14874 reg = REGNO (e1);
14875 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
14876 HARD_FRAME_POINTER_REGNUM, reg);
14878 else
14879 abort ();
14881 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
14883 /* Move from sp to reg. */
14884 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
14886 else
14887 abort ();
14888 break;
14890 default:
14891 abort ();
14896 /* Emit unwind directives for the given insn. */
14898 static void
14899 arm_unwind_emit (FILE * asm_out_file, rtx insn)
14901 rtx pat;
14903 if (!ARM_EABI_UNWIND_TABLES)
14904 return;
14906 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
14907 return;
14909 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
14910 if (pat)
14911 pat = XEXP (pat, 0);
14912 else
14913 pat = PATTERN (insn);
14915 switch (GET_CODE (pat))
14917 case SET:
14918 arm_unwind_emit_set (asm_out_file, pat);
14919 break;
14921 case SEQUENCE:
14922 /* Store multiple. */
14923 arm_unwind_emit_stm (asm_out_file, pat);
14924 break;
14926 default:
14927 abort();
14932 /* Output a reference from a function exception table to the type_info
14933 object X. The EABI specifies that the symbol should be relocated by
14934 an R_ARM_TARGET2 relocation. */
14936 static bool
14937 arm_output_ttype (rtx x)
14939 fputs ("\t.word\t", asm_out_file);
14940 output_addr_const (asm_out_file, x);
14941 /* Use special relocations for symbol references. */
14942 if (GET_CODE (x) != CONST_INT)
14943 fputs ("(TARGET2)", asm_out_file);
14944 fputc ('\n', asm_out_file);
14946 return TRUE;
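/* Illustrative output (mangled name hypothetical): a catch clause for a
   type T emits

	.word	_ZTI1T(TARGET2)

   so the linker applies the R_ARM_TARGET2 relocation to the type_info
   reference.  */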
14948 #endif /* TARGET_UNWIND_INFO */
14951 /* Output unwind directives for the start/end of a function. */
14953 void
14954 arm_output_fn_unwind (FILE * f, bool prologue)
14956 if (!ARM_EABI_UNWIND_TABLES)
14957 return;
14959 if (prologue)
14960 fputs ("\t.fnstart\n", f);
14961 else
14962 fputs ("\t.fnend\n", f);