/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];
/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static rtx is_jump_table (rtx);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static rtx emit_set_insn (rtx, rtx);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
static void aof_asm_init_sections (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
static bool arm_cannot_copy_insn_p (rtx);
static bool arm_tls_symbol_p (rtx x);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef TARGET_NARROW_VOLATILE_BITFIELD
#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false

#undef TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE *asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Which thread pointer model to use.  */
enum arm_tp_type target_thread_pointer = TP_AUTO;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;
/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus.  */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply.  */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support.  */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support.  */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4.  */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5.  */
#define FL_THUMB      (1 << 6)        /* Thumb aware.  */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary.  */
#define FL_STRONG     (1 << 8)        /* StrongARM.  */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5.  */
#define FL_XSCALE     (1 << 10)       /* XScale.  */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */
#define FL_ARCH6K     (1 << 15)       /* Architecture rel 6 K extensions.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   (FL_FOR_ARCH6 | FL_ARCH6K)
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6K
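
/* For example, FL_FOR_ARCH5TE expands through the chain above to
   (FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB):
   each architecture level inherits the capabilities of its
   predecessors and adds its own.  */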
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip supports the ARM 6K extensions.  */
int arm_arch6k = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
unsigned arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};
#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,         "-mcpu=",       all_cores  },
  { NULL,         "-march=",      all_architectures },
  { NULL,         "-mtune=",      all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2
/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";
struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};


/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};
/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};


struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};


struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};


/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",    ARM_ABI_APCS},
  {"atpcs",       ARM_ABI_ATPCS},
  {"aapcs",       ARM_ABI_AAPCS},
  {"iwmmxt",      ARM_ABI_IWMMXT},
  {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
};

/* Supported TLS relocations.  */

enum tls_reloc {
  TLS_GD32,
  TLS_LDM32,
  TLS_LDO32,
  TLS_IE32,
  TLS_LE32
};
/* Emit an insn that's a simple single-set.  Both the operands must be known
   to be valid.  */
inline static rtx
emit_set_insn (rtx x, rtx y)
{
  return emit_insn (gen_rtx_SET (VOIDmode, x, y));
}
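
/* For example, emit_set_insn (reg, GEN_INT (42)) queues the insn
   (set (reg) (const_int 42)) on the current insn sequence.  */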
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
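
/* Example: for value = 0x29 (binary 101001) the loop runs three times,
   clearing bits 0, 3 and 5 in turn, and returns 3.  */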
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");

  /* For SImode division the ABI provides div-without-mod routines,
     which are faster.  */
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
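
/* As an illustration, on a BPABI target a source-level `r = a % b' in
   SImode therefore becomes a call to __aeabi_idivmod: a arrives in r0
   and b in r1, the quotient comes back in r0 and the remainder in r1,
   and the remainder is then used as the result.  */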
/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;
  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switch that requires certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              gcc_assert (best_fit);
              sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }
  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking");
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic && TARGET_SINGLE_PIC_BASE)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");
  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");
  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified an FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }
  if (target_float_abi_name != NULL)
    {
      /* The user specified an FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* FPA and iWMMXt are incompatible because the insn encodings overlap.
     VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
     will ever exist.  GCC makes no attempt to support this combination.  */
  if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
    sorry ("iWMMXt and hardware floating point");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;
  if (target_thread_switch)
    {
      if (strcmp (target_thread_switch, "soft") == 0)
        target_thread_pointer = TP_SOFT;
      else if (strcmp (target_thread_switch, "auto") == 0)
        target_thread_pointer = TP_AUTO;
      else if (strcmp (target_thread_switch, "cp15") == 0)
        target_thread_pointer = TP_CP15;
      else
        error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
    }

  /* Use the cp15 method if it is available.  */
  if (target_thread_pointer == TP_AUTO)
    {
      if (arm_arch6k && !TARGET_THUMB)
        target_thread_pointer = TP_CP15;
      else
        target_thread_pointer = TP_SOFT;
    }

  if (TARGET_HARD_TP && TARGET_THUMB)
    error ("can not use -mtp=cp15 with -mthumb");

  /* Override the default structure alignment for AAPCS ABI.  */
  if (TARGET_AAPCS_BASED)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }
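
  /* For instance, with the default boundary of 32 a structure containing
     a single `char' is still rounded up to a full 32-bit word; lowering
     the boundary to 8 with -mstructure-size-boundary=8 lets it occupy a
     single byte, at the cost of binary incompatibility with code built
     using the default setting.  */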
  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
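
/* For example, a handler declared as

     void fiq_handler (void) __attribute__ ((interrupt ("FIQ")));

   reaches this function with ARGUMENT pointing at the string "FIQ"
   and is classified as ARM_FT_FIQ.  */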
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && (TREE_NOTHROW (current_function_decl)
          || !(flag_unwind_tables
               || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ...  */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic
          && arm_pic_register != INVALID_REGNUM
          && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
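
/* When this returns 1 the whole epilogue collapses to one instruction,
   e.g. "ldmfd sp!, {r4, r5, pc}" for a frame that saved r4, r5 and lr,
   or just "bx lr" / "mov pc, lr" when nothing was stacked.  */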
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
           && ((i & ~0xc000003f) == 0
               || (i & ~0xf000000f) == 0
               || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}
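
/* ARM data-processing immediates are an 8-bit value rotated right by an
   even amount, so e.g. 0x0000ff00 (0xff << 8) and 0xc0000034 (0xd3
   rotated right by 2) are valid, while 0x00000101 is not.  The last
   three masks above catch rotations that wrap around the top of the
   word.  */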
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
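
/* For example, AND with 0xffffff00 is not directly encodable, but its
   complement 0xff is, so the test above succeeds and the operation can
   be emitted as "bic rd, rn, #255".  Likewise a PLUS of -1 becomes a
   subtract of 1.  */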
1655 /* Emit a sequence of insns to handle a large constant.
1656 CODE is the code of the operation required, it can be any of SET, PLUS,
1657 IOR, AND, XOR, MINUS;
1658 MODE is the mode in which the operation is being performed;
1659 VAL is the integer to operate on;
1660 SOURCE is the other operand (a register, or a null-pointer for SET);
1661 SUBTARGETS means it is safe to create scratch registers if that will
1662 either produce a simpler sequence, or we will want to cse the values.
1663 Return value is the number of insns emitted. */
1666 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1667 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1669 rtx cond;
1671 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1672 cond = COND_EXEC_TEST (PATTERN (insn));
1673 else
1674 cond = NULL_RTX;
1676 if (subtargets || code == SET
1677 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1678 && REGNO (target) != REGNO (source)))
1680 /* After arm_reorg has been called, we can't fix up expensive
1681 constants by pushing them into memory so we must synthesize
1682 them in-line, regardless of the cost. This is only likely to
1683 be more costly on chips that have load delay slots and we are
1684 compiling without running the scheduler (so no splitting
1685 occurred before the final instruction emission).
1687 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1689 if (!after_arm_reorg
1690 && !cond
1691 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1692 1, 0)
1693 > arm_constant_limit + (code != SET)))
1695 if (code == SET)
1697 /* Currently SET is the only monadic operation for CODE; all
1698 the rest are dyadic. */
1699 emit_set_insn (target, GEN_INT (val));
1700 return 1;
1702 else
1704 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1706 emit_set_insn (temp, GEN_INT (val));
1707 /* For MINUS the constant is the minuend (we compute VAL - SOURCE);
1708 (SOURCE - VAL) is always passed as (SOURCE + -VAL) instead. */
1709 if (code == MINUS)
1710 emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
1711 else
1712 emit_set_insn (target,
1713 gen_rtx_fmt_ee (code, mode, source, temp));
1714 return 2;
1719 return arm_gen_constant (code, mode, cond, val, target, source, subtargets, 1);
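/* Count the insns needed to synthesize REMAINDER when emission starts
   at bit I; this mirrors the chunk walk in the emission loop of
   arm_gen_constant below, taking 8-bit windows at even bit positions.
   For example 0x12345678 splits into the four valid immediates
   0x12000000, 0x00340000, 0x00005600 and 0x00000078, so it counts as
   four insns.  */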
1723 static int
1724 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1726 HOST_WIDE_INT temp1;
1727 int num_insns = 0;
1728 do
1730 int end;
1732 if (i <= 0)
1733 i += 32;
1734 if (remainder & (3 << (i - 2)))
1736 end = i - 8;
1737 if (end < 0)
1738 end += 32;
1739 temp1 = remainder & ((0x0ff << end)
1740 | ((i < end) ? (0xff >> (32 - end)) : 0));
1741 remainder &= ~temp1;
1742 num_insns++;
1743 i -= 6;
1745 i -= 2;
1746 } while (remainder);
1747 return num_insns;
1750 /* Emit an instruction with the indicated PATTERN. If COND is
1751 non-NULL, conditionalize the execution of the instruction on COND
1752 being true. */
1754 static void
1755 emit_constant_insn (rtx cond, rtx pattern)
1757 if (cond)
1758 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1759 emit_insn (pattern);
1762 /* As above, but with an extra parameter GENERATE which, if clear,
1763 suppresses RTL generation. */
1765 static int
1766 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1767 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1768 int generate)
1770 int can_invert = 0;
1771 int can_negate = 0;
1772 int can_negate_initial = 0;
1773 int can_shift = 0;
1774 int i;
1775 int num_bits_set = 0;
1776 int set_sign_bit_copies = 0;
1777 int clear_sign_bit_copies = 0;
1778 int clear_zero_bit_copies = 0;
1779 int set_zero_bit_copies = 0;
1780 int insns = 0;
1781 unsigned HOST_WIDE_INT temp1, temp2;
1782 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1784 /* Find out which operations are safe for a given CODE. Also do a quick
1785 check for degenerate cases; these can occur when DImode operations
1786 are split. */
1787 switch (code)
1789 case SET:
1790 can_invert = 1;
1791 can_shift = 1;
1792 can_negate = 1;
1793 break;
1795 case PLUS:
1796 can_negate = 1;
1797 can_negate_initial = 1;
1798 break;
1800 case IOR:
1801 if (remainder == 0xffffffff)
1803 if (generate)
1804 emit_constant_insn (cond,
1805 gen_rtx_SET (VOIDmode, target,
1806 GEN_INT (ARM_SIGN_EXTEND (val))));
1807 return 1;
1809 if (remainder == 0)
1811 if (reload_completed && rtx_equal_p (target, source))
1812 return 0;
1813 if (generate)
1814 emit_constant_insn (cond,
1815 gen_rtx_SET (VOIDmode, target, source));
1816 return 1;
1818 break;
1820 case AND:
1821 if (remainder == 0)
1823 if (generate)
1824 emit_constant_insn (cond,
1825 gen_rtx_SET (VOIDmode, target, const0_rtx));
1826 return 1;
1828 if (remainder == 0xffffffff)
1830 if (reload_completed && rtx_equal_p (target, source))
1831 return 0;
1832 if (generate)
1833 emit_constant_insn (cond,
1834 gen_rtx_SET (VOIDmode, target, source));
1835 return 1;
1837 can_invert = 1;
1838 break;
1840 case XOR:
1841 if (remainder == 0)
1843 if (reload_completed && rtx_equal_p (target, source))
1844 return 0;
1845 if (generate)
1846 emit_constant_insn (cond,
1847 gen_rtx_SET (VOIDmode, target, source));
1848 return 1;
1851 /* We don't know how to handle other cases yet. */
1852 gcc_assert (remainder == 0xffffffff);
1854 if (generate)
1855 emit_constant_insn (cond,
1856 gen_rtx_SET (VOIDmode, target,
1857 gen_rtx_NOT (mode, source)));
1858 return 1;
1860 case MINUS:
1861 /* We treat MINUS as (val - source), since (source - val) is always
1862 passed as (source + (-val)). */
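/* Hence when VAL is itself a valid immediate this becomes a single
   RSB, e.g. (minus 0x100 r1) turns into "rsb rD, r1, #0x100"
   (rD being an illustrative destination register).  */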
1863 if (remainder == 0)
1865 if (generate)
1866 emit_constant_insn (cond,
1867 gen_rtx_SET (VOIDmode, target,
1868 gen_rtx_NEG (mode, source)));
1869 return 1;
1871 if (const_ok_for_arm (val))
1873 if (generate)
1874 emit_constant_insn (cond,
1875 gen_rtx_SET (VOIDmode, target,
1876 gen_rtx_MINUS (mode, GEN_INT (val),
1877 source)));
1878 return 1;
1880 can_negate = 1;
1882 break;
1884 default:
1885 gcc_unreachable ();
1888 /* If we can do it in one insn get out quickly. */
1889 if (const_ok_for_arm (val)
1890 || (can_negate_initial && const_ok_for_arm (-val))
1891 || (can_invert && const_ok_for_arm (~val)))
1893 if (generate)
1894 emit_constant_insn (cond,
1895 gen_rtx_SET (VOIDmode, target,
1896 (source
1897 ? gen_rtx_fmt_ee (code, mode, source,
1898 GEN_INT (val))
1899 : GEN_INT (val))));
1900 return 1;
1903 /* Calculate a few attributes that may be useful for specific
1904 optimizations. */
1905 for (i = 31; i >= 0; i--)
1907 if ((remainder & (1 << i)) == 0)
1908 clear_sign_bit_copies++;
1909 else
1910 break;
1913 for (i = 31; i >= 0; i--)
1915 if ((remainder & (1 << i)) != 0)
1916 set_sign_bit_copies++;
1917 else
1918 break;
1921 for (i = 0; i <= 31; i++)
1923 if ((remainder & (1 << i)) == 0)
1924 clear_zero_bit_copies++;
1925 else
1926 break;
1929 for (i = 0; i <= 31; i++)
1931 if ((remainder & (1 << i)) != 0)
1932 set_zero_bit_copies++;
1933 else
1934 break;
1937 switch (code)
1939 case SET:
1940 /* See if we can do this by sign_extending a constant that is known
1941 to be negative. This is a good way of doing it, since the shift
1942 may well merge into a subsequent insn. */
1943 if (set_sign_bit_copies > 1)
1945 if (const_ok_for_arm
1946 (temp1 = ARM_SIGN_EXTEND (remainder
1947 << (set_sign_bit_copies - 1))))
1949 if (generate)
1951 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1952 emit_constant_insn (cond,
1953 gen_rtx_SET (VOIDmode, new_src,
1954 GEN_INT (temp1)));
1955 emit_constant_insn (cond,
1956 gen_ashrsi3 (target, new_src,
1957 GEN_INT (set_sign_bit_copies - 1)));
1959 return 2;
1961 /* For an inverted constant, we will need to set the low bits;
1962 these will be shifted out of harm's way. */
1963 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1964 if (const_ok_for_arm (~temp1))
1966 if (generate)
1968 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1969 emit_constant_insn (cond,
1970 gen_rtx_SET (VOIDmode, new_src,
1971 GEN_INT (temp1)));
1972 emit_constant_insn (cond,
1973 gen_ashrsi3 (target, new_src,
1974 GEN_INT (set_sign_bit_copies - 1)));
1976 return 2;
1980 /* See if we can calculate the value as the difference between two
1981 valid immediates. */
1982 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1984 int topshift = clear_sign_bit_copies & ~1;
1986 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1987 & (0xff000000 >> topshift));
1989 /* If temp1 is zero, then that means the 9 most significant
1990 bits of remainder were 1 and we've caused it to overflow.
1991 When topshift is 0 we don't need to do anything since we
1992 can borrow from 'bit 32'. */
1993 if (temp1 == 0 && topshift != 0)
1994 temp1 = 0x80000000 >> (topshift - 1);
1996 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
1998 if (const_ok_for_arm (temp2))
2000 if (generate)
2002 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2003 emit_constant_insn (cond,
2004 gen_rtx_SET (VOIDmode, new_src,
2005 GEN_INT (temp1)));
2006 emit_constant_insn (cond,
2007 gen_addsi3 (target, new_src,
2008 GEN_INT (-temp2)));
2011 return 2;
2015 /* See if we can generate this by setting the bottom (or the top)
2016 16 bits, and then shifting these into the other half of the
2017 word. We only look for the simplest cases; to do more would cost
2018 too much. Be careful, however, not to generate this when the
2019 alternative would take fewer insns. */
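/* For example 0x01010101: the low half 0x0101 is not a valid
   immediate, so it is synthesized first (two insns) and then copied
   into the top half with "orr rD, rS, rS, lsl #16", for three insns
   in total.  */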
2020 if (val & 0xffff0000)
2022 temp1 = remainder & 0xffff0000;
2023 temp2 = remainder & 0x0000ffff;
2025 /* Overlaps outside this range are best done using other methods. */
2026 for (i = 9; i < 24; i++)
2028 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2029 && !const_ok_for_arm (temp2))
2031 rtx new_src = (subtargets
2032 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2033 : target);
2034 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2035 source, subtargets, generate);
2036 source = new_src;
2037 if (generate)
2038 emit_constant_insn
2039 (cond,
2040 gen_rtx_SET
2041 (VOIDmode, target,
2042 gen_rtx_IOR (mode,
2043 gen_rtx_ASHIFT (mode, source,
2044 GEN_INT (i)),
2045 source)));
2046 return insns + 1;
2050 /* Don't duplicate cases already considered. */
2051 for (i = 17; i < 24; i++)
2053 if (((temp1 | (temp1 >> i)) == remainder)
2054 && !const_ok_for_arm (temp1))
2056 rtx new_src = (subtargets
2057 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2058 : target);
2059 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
2060 source, subtargets, generate);
2061 source = new_src;
2062 if (generate)
2063 emit_constant_insn
2064 (cond,
2065 gen_rtx_SET (VOIDmode, target,
2066 gen_rtx_IOR
2067 (mode,
2068 gen_rtx_LSHIFTRT (mode, source,
2069 GEN_INT (i)),
2070 source)));
2071 return insns + 1;
2075 break;
2077 case IOR:
2078 case XOR:
2079 /* If we have IOR or XOR, and the constant can be loaded in a
2080 single instruction, and we can find a temporary to put it in,
2081 then this can be done in two instructions instead of 3-4. */
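/* For example "x | 0xffffff00": the constant is not a valid immediate,
   but its inverse 0xff is, so "mvn rT, #0xff" followed by
   "orr rD, rS, rT" beats synthesizing 0xffffff00 from scratch.  */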
2082 if (subtargets
2083 /* TARGET can't be NULL if SUBTARGETS is 0. */
2084 || (reload_completed && !reg_mentioned_p (target, source)))
2086 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2088 if (generate)
2090 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2092 emit_constant_insn (cond,
2093 gen_rtx_SET (VOIDmode, sub,
2094 GEN_INT (val)));
2095 emit_constant_insn (cond,
2096 gen_rtx_SET (VOIDmode, target,
2097 gen_rtx_fmt_ee (code, mode,
2098 source, sub)));
2100 return 2;
2104 if (code == XOR)
2105 break;
2107 if (set_sign_bit_copies > 8
2108 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2110 if (generate)
2112 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2113 rtx shift = GEN_INT (set_sign_bit_copies);
2115 emit_constant_insn
2116 (cond,
2117 gen_rtx_SET (VOIDmode, sub,
2118 gen_rtx_NOT (mode,
2119 gen_rtx_ASHIFT (mode,
2120 source,
2121 shift))));
2122 emit_constant_insn
2123 (cond,
2124 gen_rtx_SET (VOIDmode, target,
2125 gen_rtx_NOT (mode,
2126 gen_rtx_LSHIFTRT (mode, sub,
2127 shift))));
2129 return 2;
2132 if (set_zero_bit_copies > 8
2133 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2135 if (generate)
2137 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2138 rtx shift = GEN_INT (set_zero_bit_copies);
2140 emit_constant_insn
2141 (cond,
2142 gen_rtx_SET (VOIDmode, sub,
2143 gen_rtx_NOT (mode,
2144 gen_rtx_LSHIFTRT (mode,
2145 source,
2146 shift))));
2147 emit_constant_insn
2148 (cond,
2149 gen_rtx_SET (VOIDmode, target,
2150 gen_rtx_NOT (mode,
2151 gen_rtx_ASHIFT (mode, sub,
2152 shift))));
2154 return 2;
2157 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2159 if (generate)
2161 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2162 emit_constant_insn (cond,
2163 gen_rtx_SET (VOIDmode, sub,
2164 gen_rtx_NOT (mode, source)));
2165 source = sub;
2166 if (subtargets)
2167 sub = gen_reg_rtx (mode);
2168 emit_constant_insn (cond,
2169 gen_rtx_SET (VOIDmode, sub,
2170 gen_rtx_AND (mode, source,
2171 GEN_INT (temp1))));
2172 emit_constant_insn (cond,
2173 gen_rtx_SET (VOIDmode, target,
2174 gen_rtx_NOT (mode, sub)));
2176 return 3;
2178 break;
2180 case AND:
2181 /* See if two shifts will do 2 or more insns' worth of work. */
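/* For example "x & 0x0000ffff" has 16 clear sign bits, so
   "mov rD, rS, lsl #16" followed by "mov rD, rD, lsr #16" clears the
   top half without needing the constant at all.  */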
2182 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2184 HOST_WIDE_INT shift_mask = ((0xffffffff
2185 << (32 - clear_sign_bit_copies))
2186 & 0xffffffff);
2188 if ((remainder | shift_mask) != 0xffffffff)
2190 if (generate)
2192 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2193 insns = arm_gen_constant (AND, mode, cond,
2194 remainder | shift_mask,
2195 new_src, source, subtargets, 1);
2196 source = new_src;
2198 else
2200 rtx targ = subtargets ? NULL_RTX : target;
2201 insns = arm_gen_constant (AND, mode, cond,
2202 remainder | shift_mask,
2203 targ, source, subtargets, 0);
2207 if (generate)
2209 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2210 rtx shift = GEN_INT (clear_sign_bit_copies);
2212 emit_insn (gen_ashlsi3 (new_src, source, shift));
2213 emit_insn (gen_lshrsi3 (target, new_src, shift));
2216 return insns + 2;
2219 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2221 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2223 if ((remainder | shift_mask) != 0xffffffff)
2225 if (generate)
2227 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2229 insns = arm_gen_constant (AND, mode, cond,
2230 remainder | shift_mask,
2231 new_src, source, subtargets, 1);
2232 source = new_src;
2234 else
2236 rtx targ = subtargets ? NULL_RTX : target;
2238 insns = arm_gen_constant (AND, mode, cond,
2239 remainder | shift_mask,
2240 targ, source, subtargets, 0);
2244 if (generate)
2246 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2247 rtx shift = GEN_INT (clear_zero_bit_copies);
2249 emit_insn (gen_lshrsi3 (new_src, source, shift));
2250 emit_insn (gen_ashlsi3 (target, new_src, shift));
2253 return insns + 2;
2256 break;
2258 default:
2259 break;
2262 for (i = 0; i < 32; i++)
2263 if (remainder & (1 << i))
2264 num_bits_set++;
2266 if (code == AND || (can_invert && num_bits_set > 16))
2267 remainder = (~remainder) & 0xffffffff;
2268 else if (code == PLUS && num_bits_set > 16)
2269 remainder = (-remainder) & 0xffffffff;
2270 else
2272 can_invert = 0;
2273 can_negate = 0;
2276 /* Now try to find a way of doing the job in either two or three
2277 instructions.
2278 We start by looking for the largest block of zeros that is aligned on
2279 a 2-bit boundary; we then fill up the temps, wrapping around to the
2280 top of the word when we drop off the bottom.
2281 In the worst case this code should produce no more than four insns. */
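/* For example, for SET the value 0x0000f0f0 comes out as
   "mov rD, #0xf000" followed by "add rD, rD, #0xf0"; the chunks are
   disjoint 8-bit rotated immediates, so the ADD acts as an ORR.  */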
2283 int best_start = 0;
2284 int best_consecutive_zeros = 0;
2286 for (i = 0; i < 32; i += 2)
2288 int consecutive_zeros = 0;
2290 if (!(remainder & (3 << i)))
2292 while ((i < 32) && !(remainder & (3 << i)))
2294 consecutive_zeros += 2;
2295 i += 2;
2297 if (consecutive_zeros > best_consecutive_zeros)
2299 best_consecutive_zeros = consecutive_zeros;
2300 best_start = i - consecutive_zeros;
2302 i -= 2;
2306 /* So long as it won't require any more insns to do so, it's
2307 desirable to emit a small constant (in bits 0...9) in the last
2308 insn. This way there is more chance that it can be combined with
2309 a later addressing insn to form a pre-indexed load or store
2310 operation. Consider:
2312 *((volatile int *)0xe0000100) = 1;
2313 *((volatile int *)0xe0000110) = 2;
2315 We want this to wind up as:
2317 mov rA, #0xe0000000
2318 mov rB, #1
2319 str rB, [rA, #0x100]
2320 mov rB, #2
2321 str rB, [rA, #0x110]
2323 rather than having to synthesize both large constants from scratch.
2325 Therefore, we calculate how many insns would be required to emit
2326 the constant starting from `best_start', and also starting from
2327 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2328 yield a shorter sequence, we may as well use zero. */
2329 if (best_start != 0
2330 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2331 && (count_insns_for_constant (remainder, 0) <=
2332 count_insns_for_constant (remainder, best_start)))
2333 best_start = 0;
2335 /* Now start emitting the insns. */
2336 i = best_start;
2337 do
2339 int end;
2341 if (i <= 0)
2342 i += 32;
2343 if (remainder & (3 << (i - 2)))
2345 end = i - 8;
2346 if (end < 0)
2347 end += 32;
2348 temp1 = remainder & ((0x0ff << end)
2349 | ((i < end) ? (0xff >> (32 - end)) : 0));
2350 remainder &= ~temp1;
2352 if (generate)
2354 rtx new_src, temp1_rtx;
2356 if (code == SET || code == MINUS)
2358 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2359 if (can_invert && code != MINUS)
2360 temp1 = ~temp1;
2362 else
2364 if (remainder && subtargets)
2365 new_src = gen_reg_rtx (mode);
2366 else
2367 new_src = target;
2368 if (can_invert)
2369 temp1 = ~temp1;
2370 else if (can_negate)
2371 temp1 = -temp1;
2374 temp1 = trunc_int_for_mode (temp1, mode);
2375 temp1_rtx = GEN_INT (temp1);
2377 if (code == SET)
2378 ;
2379 else if (code == MINUS)
2380 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2381 else
2382 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2384 emit_constant_insn (cond,
2385 gen_rtx_SET (VOIDmode, new_src,
2386 temp1_rtx));
2387 source = new_src;
2390 if (code == SET)
2392 can_invert = 0;
2393 code = PLUS;
2395 else if (code == MINUS)
2396 code = PLUS;
2398 insns++;
2399 i -= 6;
2401 i -= 2;
2403 while (remainder);
2406 return insns;
2409 /* Canonicalize a comparison so that we are more likely to recognize it.
2410 This can be done for a few constant compares, where we can make the
2411 immediate value easier to load. */
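/* For example "x > 0xffffff" would need the unencodable constant
   0xffffff, but the equivalent "x >= 0x1000000" (GT -> GE with
   OP1 + 1) uses a single valid immediate.  */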
2413 enum rtx_code
2414 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2415 rtx * op1)
2417 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2418 unsigned HOST_WIDE_INT maxval;
2419 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2421 switch (code)
2423 case EQ:
2424 case NE:
2425 return code;
2427 case GT:
2428 case LE:
2429 if (i != maxval
2430 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2432 *op1 = GEN_INT (i + 1);
2433 return code == GT ? GE : LT;
2435 break;
2437 case GE:
2438 case LT:
2439 if (i != ~maxval
2440 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2442 *op1 = GEN_INT (i - 1);
2443 return code == GE ? GT : LE;
2445 break;
2447 case GTU:
2448 case LEU:
2449 if (i != ~((unsigned HOST_WIDE_INT) 0)
2450 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2452 *op1 = GEN_INT (i + 1);
2453 return code == GTU ? GEU : LTU;
2455 break;
2457 case GEU:
2458 case LTU:
2459 if (i != 0
2460 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2462 *op1 = GEN_INT (i - 1);
2463 return code == GEU ? GTU : LEU;
2465 break;
2467 default:
2468 gcc_unreachable ();
2471 return code;
2475 /* Define how to find the value returned by a function. */
2478 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2480 enum machine_mode mode;
2481 int unsignedp ATTRIBUTE_UNUSED;
2482 rtx r ATTRIBUTE_UNUSED;
2484 mode = TYPE_MODE (type);
2485 /* Promote integer types. */
2486 if (INTEGRAL_TYPE_P (type))
2487 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2489 /* Promote small structs returned in a register to full-word size
2490 for big-endian AAPCS. */
2491 if (arm_return_in_msb (type))
2493 HOST_WIDE_INT size = int_size_in_bytes (type);
2494 if (size % UNITS_PER_WORD != 0)
2496 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2497 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2501 return LIBCALL_VALUE(mode);
2504 /* Determine the amount of memory needed to store the possible return
2505 registers of an untyped call. */
2507 arm_apply_result_size (void)
2509 int size = 16;
2511 if (TARGET_ARM)
2513 if (TARGET_HARD_FLOAT_ABI)
2515 if (TARGET_FPA)
2516 size += 12;
2517 if (TARGET_MAVERICK)
2518 size += 8;
2520 if (TARGET_IWMMXT_ABI)
2521 size += 8;
2524 return size;
2527 /* Decide whether a type should be returned in memory (true)
2528 or in a register (false). This is called by the macro
2529 RETURN_IN_MEMORY. */
2531 arm_return_in_memory (tree type)
2533 HOST_WIDE_INT size;
2535 if (!AGGREGATE_TYPE_P (type) &&
2536 (TREE_CODE (type) != VECTOR_TYPE) &&
2537 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2538 /* All simple types are returned in registers.
2539 For AAPCS, complex types are treated the same as aggregates. */
2540 return 0;
2542 size = int_size_in_bytes (type);
2544 if (arm_abi != ARM_ABI_APCS)
2546 /* ATPCS and later return aggregate types in memory only if they are
2547 larger than a word (or are variable size). */
2548 return (size < 0 || size > UNITS_PER_WORD);
2551 /* To maximize backwards compatibility with previous versions of gcc,
2552 return vectors up to 4 words in registers. */
2553 if (TREE_CODE (type) == VECTOR_TYPE)
2554 return (size < 0 || size > (4 * UNITS_PER_WORD));
2556 /* For the arm-wince targets we choose to be compatible with Microsoft's
2557 ARM and Thumb compilers, which always return aggregates in memory. */
2558 #ifndef ARM_WINCE
2559 /* All structures/unions bigger than one word are returned in memory.
2560 Also catch the case where int_size_in_bytes returns -1. In this case
2561 the aggregate is either huge or of variable size, and in either case
2562 we will want to return it via memory and not in a register. */
2563 if (size < 0 || size > UNITS_PER_WORD)
2564 return 1;
2566 if (TREE_CODE (type) == RECORD_TYPE)
2568 tree field;
2570 /* For a struct the APCS says that we only return in a register
2571 if the type is 'integer like' and every addressable element
2572 has an offset of zero. For practical purposes this means
2573 that the structure can have at most one non bit-field element
2574 and that this element must be the first one in the structure. */
2576 /* Find the first field, ignoring non FIELD_DECL things which will
2577 have been created by C++. */
2578 for (field = TYPE_FIELDS (type);
2579 field && TREE_CODE (field) != FIELD_DECL;
2580 field = TREE_CHAIN (field))
2581 continue;
2583 if (field == NULL)
2584 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2586 /* Check that the first field is valid for returning in a register. */
2588 /* ... Floats are not allowed. */
2589 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2590 return 1;
2592 /* ... Aggregates that are not themselves valid for returning in
2593 a register are not allowed. */
2594 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2595 return 1;
2597 /* Now check the remaining fields, if any. Only bitfields are allowed,
2598 since they are not addressable. */
2599 for (field = TREE_CHAIN (field);
2600 field;
2601 field = TREE_CHAIN (field))
2603 if (TREE_CODE (field) != FIELD_DECL)
2604 continue;
2606 if (!DECL_BIT_FIELD_TYPE (field))
2607 return 1;
2610 return 0;
2613 if (TREE_CODE (type) == UNION_TYPE)
2615 tree field;
2617 /* Unions can be returned in registers if every element is
2618 integral, or can be returned in an integer register. */
2619 for (field = TYPE_FIELDS (type);
2620 field;
2621 field = TREE_CHAIN (field))
2623 if (TREE_CODE (field) != FIELD_DECL)
2624 continue;
2626 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2627 return 1;
2629 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2630 return 1;
2633 return 0;
2635 #endif /* not ARM_WINCE */
2637 /* Return all other types in memory. */
2638 return 1;
2641 /* Indicate whether or not words of a double are in big-endian order. */
2644 arm_float_words_big_endian (void)
2646 if (TARGET_MAVERICK)
2647 return 0;
2649 /* For FPA, float words are always big-endian. For VFP, float words
2650 follow the memory system mode. */
2652 if (TARGET_FPA)
2654 return 1;
2657 if (TARGET_VFP)
2658 return (TARGET_BIG_END ? 1 : 0);
2660 return 1;
2663 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2664 for a call to a function whose data type is FNTYPE.
2665 For a library call, FNTYPE is NULL. */
2666 void
2667 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2668 rtx libname ATTRIBUTE_UNUSED,
2669 tree fndecl ATTRIBUTE_UNUSED)
2671 /* On the ARM, the offset starts at 0. */
2672 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2673 pcum->iwmmxt_nregs = 0;
2674 pcum->can_split = true;
2676 pcum->call_cookie = CALL_NORMAL;
2678 if (TARGET_LONG_CALLS)
2679 pcum->call_cookie = CALL_LONG;
2681 /* Check for long call/short call attributes. The attributes
2682 override any command line option. */
2683 if (fntype)
2685 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2686 pcum->call_cookie = CALL_SHORT;
2687 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2688 pcum->call_cookie = CALL_LONG;
2691 /* Varargs vectors are treated the same as long long.
2692 named_count avoids having to change the way arm handles 'named'. */
2693 pcum->named_count = 0;
2694 pcum->nargs = 0;
2696 if (TARGET_REALLY_IWMMXT && fntype)
2698 tree fn_arg;
2700 for (fn_arg = TYPE_ARG_TYPES (fntype);
2701 fn_arg;
2702 fn_arg = TREE_CHAIN (fn_arg))
2703 pcum->named_count += 1;
2705 if (! pcum->named_count)
2706 pcum->named_count = INT_MAX;
2711 /* Return true if MODE/TYPE needs doubleword alignment. */
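/* For example, under AAPCS a DImode argument has 64-bit alignment
   while PARM_BOUNDARY is 32, so a long long following a single int
   goes in r2/r3 rather than r1/r2; arm_function_arg below performs
   the even-register adjustment.  */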
2712 bool
2713 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2715 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2716 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2720 /* Determine where to put an argument to a function.
2721 Value is zero to push the argument on the stack,
2722 or a hard register in which to store the argument.
2724 MODE is the argument's machine mode.
2725 TYPE is the data type of the argument (as a tree).
2726 This is null for libcalls where that information may
2727 not be available.
2728 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2729 the preceding args and about the function being called.
2730 NAMED is nonzero if this argument is a named parameter
2731 (otherwise it is an extra parameter matching an ellipsis). */
2734 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2735 tree type, int named)
2737 int nregs;
2739 /* Varargs vectors are treated the same as long long.
2740 named_count avoids having to change the way arm handles 'named'. */
2741 if (TARGET_IWMMXT_ABI
2742 && arm_vector_mode_supported_p (mode)
2743 && pcum->named_count > pcum->nargs + 1)
2745 if (pcum->iwmmxt_nregs <= 9)
2746 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2747 else
2749 pcum->can_split = false;
2750 return NULL_RTX;
2754 /* Put doubleword aligned quantities in even register pairs. */
2755 if (pcum->nregs & 1
2756 && ARM_DOUBLEWORD_ALIGN
2757 && arm_needs_doubleword_align (mode, type))
2758 pcum->nregs++;
2760 if (mode == VOIDmode)
2761 /* Compute operand 2 of the call insn. */
2762 return GEN_INT (pcum->call_cookie);
2764 /* Only allow splitting an arg between regs and memory if all preceding
2765 args were allocated to regs. For args passed by reference we only count
2766 the reference pointer. */
2767 if (pcum->can_split)
2768 nregs = 1;
2769 else
2770 nregs = ARM_NUM_REGS2 (mode, type);
2772 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2773 return NULL_RTX;
2775 return gen_rtx_REG (mode, pcum->nregs);
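/* Return the number of bytes of an argument that travel in registers
   when the remainder spills to the stack.  For example, with only r3
   free (pcum->nregs == 3) a DImode argument is split: the 4 bytes
   returned here go in r3 and the rest is passed on the stack.  */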
2778 static int
2779 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2780 tree type, bool named ATTRIBUTE_UNUSED)
2782 int nregs = pcum->nregs;
2784 if (arm_vector_mode_supported_p (mode))
2785 return 0;
2787 if (NUM_ARG_REGS > nregs
2788 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2789 && pcum->can_split)
2790 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2792 return 0;
2795 /* Variable sized types are passed by reference. This is a GCC
2796 extension to the ARM ABI. */
2798 static bool
2799 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2800 enum machine_mode mode ATTRIBUTE_UNUSED,
2801 tree type, bool named ATTRIBUTE_UNUSED)
2803 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2806 /* Encode the current state of the #pragma [no_]long_calls. */
2807 typedef enum
2809 OFF, /* No #pragma [no_]long_calls is in effect. */
2810 LONG, /* #pragma long_calls is in effect. */
2811 SHORT /* #pragma no_long_calls is in effect. */
2812 } arm_pragma_enum;
2814 static arm_pragma_enum arm_pragma_long_calls = OFF;
2816 void
2817 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2819 arm_pragma_long_calls = LONG;
2822 void
2823 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2825 arm_pragma_long_calls = SHORT;
2828 void
2829 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2831 arm_pragma_long_calls = OFF;
2834 /* Table of machine attributes. */
2835 const struct attribute_spec arm_attribute_table[] =
2837 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2838 /* Function calls made to this symbol must be done indirectly, because
2839 it may lie outside of the 26 bit addressing range of a normal function
2840 call. */
2841 { "long_call", 0, 0, false, true, true, NULL },
2842 /* Whereas these functions are always known to reside within the 26 bit
2843 addressing range. */
2844 { "short_call", 0, 0, false, true, true, NULL },
2845 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2846 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2847 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2848 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2849 #ifdef ARM_PE
2850 /* ARM/PE has three new attributes:
2851 interfacearm - ?
2852 dllexport - for exporting a function/variable that will live in a dll
2853 dllimport - for importing a function/variable from a dll
2855 Microsoft allows multiple declspecs in one __declspec, separating
2856 them with spaces. We do NOT support this. Instead, use __declspec
2857 multiple times.
2859 { "dllimport", 0, 0, true, false, false, NULL },
2860 { "dllexport", 0, 0, true, false, false, NULL },
2861 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2862 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2863 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2864 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2865 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2866 #endif
2867 { NULL, 0, 0, false, false, false, NULL }
2870 /* Handle an attribute requiring a FUNCTION_DECL;
2871 arguments as in struct attribute_spec.handler. */
2872 static tree
2873 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2874 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2876 if (TREE_CODE (*node) != FUNCTION_DECL)
2878 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2879 IDENTIFIER_POINTER (name));
2880 *no_add_attrs = true;
2883 return NULL_TREE;
2886 /* Handle an "interrupt" or "isr" attribute;
2887 arguments as in struct attribute_spec.handler. */
2888 static tree
2889 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2890 bool *no_add_attrs)
2892 if (DECL_P (*node))
2894 if (TREE_CODE (*node) != FUNCTION_DECL)
2896 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2897 IDENTIFIER_POINTER (name));
2898 *no_add_attrs = true;
2900 /* FIXME: the argument if any is checked for type attributes;
2901 should it be checked for decl ones? */
2903 else
2905 if (TREE_CODE (*node) == FUNCTION_TYPE
2906 || TREE_CODE (*node) == METHOD_TYPE)
2908 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2910 warning (OPT_Wattributes, "%qs attribute ignored",
2911 IDENTIFIER_POINTER (name));
2912 *no_add_attrs = true;
2915 else if (TREE_CODE (*node) == POINTER_TYPE
2916 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2917 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2918 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2920 *node = build_variant_type_copy (*node);
2921 TREE_TYPE (*node) = build_type_attribute_variant
2922 (TREE_TYPE (*node),
2923 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2924 *no_add_attrs = true;
2926 else
2928 /* Possibly pass this attribute on from the type to a decl. */
2929 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2930 | (int) ATTR_FLAG_FUNCTION_NEXT
2931 | (int) ATTR_FLAG_ARRAY_NEXT))
2933 *no_add_attrs = true;
2934 return tree_cons (name, args, NULL_TREE);
2936 else
2938 warning (OPT_Wattributes, "%qs attribute ignored",
2939 IDENTIFIER_POINTER (name));
2944 return NULL_TREE;
2947 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2948 /* Handle the "notshared" attribute. This attribute is another way of
2949 requesting hidden visibility. ARM's compiler supports
2950 "__declspec(notshared)"; we support the same thing via an
2951 attribute. */
2953 static tree
2954 arm_handle_notshared_attribute (tree *node,
2955 tree name ATTRIBUTE_UNUSED,
2956 tree args ATTRIBUTE_UNUSED,
2957 int flags ATTRIBUTE_UNUSED,
2958 bool *no_add_attrs)
2960 tree decl = TYPE_NAME (*node);
2962 if (decl)
2964 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2965 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2966 *no_add_attrs = false;
2968 return NULL_TREE;
2970 #endif
2972 /* Return 0 if the attributes for two types are incompatible, 1 if they
2973 are compatible, and 2 if they are nearly compatible (which causes a
2974 warning to be generated). */
2975 static int
2976 arm_comp_type_attributes (tree type1, tree type2)
2978 int l1, l2, s1, s2;
2980 /* Check for mismatch of non-default calling convention. */
2981 if (TREE_CODE (type1) != FUNCTION_TYPE)
2982 return 1;
2984 /* Check for mismatched call attributes. */
2985 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2986 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2987 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2988 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2990 /* Only bother to check if an attribute is defined. */
2991 if (l1 | l2 | s1 | s2)
2993 /* If one type has an attribute, the other must have the same attribute. */
2994 if ((l1 != l2) || (s1 != s2))
2995 return 0;
2997 /* Disallow mixed attributes. */
2998 if ((l1 & s2) || (l2 & s1))
2999 return 0;
3002 /* Check for mismatched ISR attribute. */
3003 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
3004 if (! l1)
3005 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
3006 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
3007 if (! l2)
3008 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
3009 if (l1 != l2)
3010 return 0;
3012 return 1;
3015 /* Encode long_call or short_call attribute by prefixing
3016 symbol name in DECL with a special character FLAG. */
3017 void
3018 arm_encode_call_attribute (tree decl, int flag)
3020 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
3021 int len = strlen (str);
3022 char * newstr;
3024 /* Do not allow weak functions to be treated as short call. */
3025 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
3026 return;
3028 newstr = alloca (len + 2);
3029 newstr[0] = flag;
3030 strcpy (newstr + 1, str);
3032 newstr = (char *) ggc_alloc_string (newstr, len + 1);
3033 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
3036 /* Assign default attributes to a newly defined type. This is used to
3037 set short_call/long_call attributes for function types of
3038 functions defined inside corresponding #pragma scopes. */
3039 static void
3040 arm_set_default_type_attributes (tree type)
3042 /* Add __attribute__ ((long_call)) to all functions when inside
3043 #pragma long_calls, or __attribute__ ((short_call)) when inside
3044 #pragma no_long_calls. */
3045 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
3047 tree type_attr_list, attr_name;
3048 type_attr_list = TYPE_ATTRIBUTES (type);
3050 if (arm_pragma_long_calls == LONG)
3051 attr_name = get_identifier ("long_call");
3052 else if (arm_pragma_long_calls == SHORT)
3053 attr_name = get_identifier ("short_call");
3054 else
3055 return;
3057 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
3058 TYPE_ATTRIBUTES (type) = type_attr_list;
3062 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
3063 defined within the current compilation unit. If this cannot be
3064 determined, then 0 is returned. */
3065 static int
3066 current_file_function_operand (rtx sym_ref)
3068 /* This is a bit of a fib. A function will have a short call flag
3069 applied to its name if it has the short call attribute, or if it has
3070 already been defined within the current compilation unit. */
3071 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
3072 return 1;
3074 /* The current function is always defined within the current compilation
3075 unit. If it is a weak definition, however, then this may not be the real
3076 definition of the function, and so we have to say no. */
3077 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
3078 && !DECL_WEAK (current_function_decl))
3079 return 1;
3081 /* We cannot make the determination - default to returning 0. */
3082 return 0;
3085 /* Return nonzero if a 32 bit "long_call" should be generated for
3086 this call. We generate a long_call if the function:
3088 a. has an __attribute__((long_call))
3089 or b. is within the scope of a #pragma long_calls
3090 or c. the -mlong-calls command line switch has been specified
3091 and either:
3092 1. -ffunction-sections is in effect
3093 or 2. the current function has __attribute__ ((section))
3094 or 3. the target function has __attribute__ ((section))
3096 However we do not generate a long call if the function:
3098 d. has an __attribute__ ((short_call))
3099 or e. is inside the scope of a #pragma no_long_calls
3100 or f. is defined within the current compilation unit.
3102 This function will be called by C fragments contained in the machine
3103 description file. SYM_REF and CALL_COOKIE correspond to the matched
3104 rtl operands. CALL_SYMBOL is used to distinguish between
3105 two different callers of the function. It is set to 1 in the
3106 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3107 and "call_value" patterns. This is because of the difference in the
3108 SYM_REFs passed by these patterns. */
3110 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3112 if (!call_symbol)
3114 if (GET_CODE (sym_ref) != MEM)
3115 return 0;
3117 sym_ref = XEXP (sym_ref, 0);
3120 if (GET_CODE (sym_ref) != SYMBOL_REF)
3121 return 0;
3123 if (call_cookie & CALL_SHORT)
3124 return 0;
3126 if (TARGET_LONG_CALLS)
3128 if (flag_function_sections
3129 || DECL_SECTION_NAME (current_function_decl))
3130 /* c.3 is handled by the definition of the
3131 ARM_DECLARE_FUNCTION_SIZE macro. */
3132 return 1;
3135 if (current_file_function_operand (sym_ref))
3136 return 0;
3138 return (call_cookie & CALL_LONG)
3139 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3140 || TARGET_LONG_CALLS;
3143 /* Return nonzero if it is ok to make a tail-call to DECL. */
3144 static bool
3145 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3147 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3149 if (cfun->machine->sibcall_blocked)
3150 return false;
3152 /* Never tailcall something for which we have no decl, or if we
3153 are in Thumb mode. */
3154 if (decl == NULL || TARGET_THUMB)
3155 return false;
3157 /* Get the calling method. */
3158 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3159 call_type = CALL_SHORT;
3160 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3161 call_type = CALL_LONG;
3163 /* Cannot tail-call to long calls, since these are out of range of
3164 a branch instruction. However, if not compiling PIC, we know
3165 we can reach the symbol if it is in this compilation unit. */
3166 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3167 return false;
3169 /* If we are interworking and the function is not declared static
3170 then we can't tail-call it unless we know that it exists in this
3171 compilation unit (since it might be a Thumb routine). */
3172 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3173 return false;
3175 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3176 if (IS_INTERRUPT (arm_current_func_type ()))
3177 return false;
3179 /* Everything else is ok. */
3180 return true;
3184 /* Addressing mode support functions. */
3186 /* Return nonzero if X is a legitimate immediate operand when compiling
3187 for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
3189 legitimate_pic_operand_p (rtx x)
3191 if (GET_CODE (x) == SYMBOL_REF
3192 || (GET_CODE (x) == CONST
3193 && GET_CODE (XEXP (x, 0)) == PLUS
3194 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
3195 return 0;
3197 return 1;
3201 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3203 if (GET_CODE (orig) == SYMBOL_REF
3204 || GET_CODE (orig) == LABEL_REF)
3206 #ifndef AOF_ASSEMBLER
3207 rtx pic_ref, address;
3208 #endif
3209 rtx insn;
3210 int subregs = 0;
3212 /* If this function doesn't have a pic register, create one now.
3213 A lot of the logic here is made obscure by the fact that this
3214 routine gets called as part of the rtx cost estimation
3215 process. We don't want those calls to affect any assumptions
3216 about the real function; and further, we can't call
3217 entry_of_function() until we start the real expansion
3218 process. */
3219 if (!current_function_uses_pic_offset_table)
3221 gcc_assert (!no_new_pseudos);
3222 if (arm_pic_register != INVALID_REGNUM)
3224 cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
3226 /* Play games to avoid marking the function as needing pic
3227 if we are being called as part of the cost-estimation
3228 process. */
3229 if (!ir_type())
3230 current_function_uses_pic_offset_table = 1;
3232 else
3234 rtx seq;
3236 cfun->machine->pic_reg = gen_reg_rtx (Pmode);
3238 /* Play games to avoid marking the function as needing pic
3239 if we are being called as part of the cost-estimation
3240 process. */
3241 if (!ir_type())
3243 current_function_uses_pic_offset_table = 1;
3244 start_sequence ();
3246 arm_load_pic_register (0UL);
3248 seq = get_insns ();
3249 end_sequence ();
3250 emit_insn_after (seq, entry_of_function ());
3255 if (reg == 0)
3257 gcc_assert (!no_new_pseudos);
3258 reg = gen_reg_rtx (Pmode);
3260 subregs = 1;
3263 #ifdef AOF_ASSEMBLER
3264 /* The AOF assembler can generate relocations for these directly, and
3265 understands that the PIC register has to be added into the offset. */
3266 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3267 #else
3268 if (subregs)
3269 address = gen_reg_rtx (Pmode);
3270 else
3271 address = reg;
3273 if (TARGET_ARM)
3274 emit_insn (gen_pic_load_addr_arm (address, orig));
3275 else
3276 emit_insn (gen_pic_load_addr_thumb (address, orig));
3278 if ((GET_CODE (orig) == LABEL_REF
3279 || (GET_CODE (orig) == SYMBOL_REF &&
3280 SYMBOL_REF_LOCAL_P (orig)))
3281 && NEED_GOT_RELOC)
3282 pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
3283 else
3285 pic_ref = gen_const_mem (Pmode,
3286 gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
3287 address));
3290 insn = emit_move_insn (reg, pic_ref);
3291 #endif
3292 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3293 by loop. */
3294 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3295 REG_NOTES (insn));
3296 return reg;
3298 else if (GET_CODE (orig) == CONST)
3300 rtx base, offset;
3302 if (GET_CODE (XEXP (orig, 0)) == PLUS
3303 && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
3304 return orig;
3306 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
3307 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
3308 return orig;
3310 if (reg == 0)
3312 gcc_assert (!no_new_pseudos);
3313 reg = gen_reg_rtx (Pmode);
3316 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3318 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3319 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3320 base == reg ? 0 : reg);
3322 if (GET_CODE (offset) == CONST_INT)
3324 /* The base register doesn't really matter; we only want to
3325 test the index for the appropriate mode. */
3326 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3328 gcc_assert (!no_new_pseudos);
3329 offset = force_reg (Pmode, offset);
3332 if (GET_CODE (offset) == CONST_INT)
3333 return plus_constant (base, INTVAL (offset));
3336 if (GET_MODE_SIZE (mode) > 4
3337 && (GET_MODE_CLASS (mode) == MODE_INT
3338 || TARGET_SOFT_FLOAT))
3340 emit_insn (gen_addsi3 (reg, base, offset));
3341 return reg;
3344 return gen_rtx_PLUS (Pmode, base, offset);
3347 return orig;
3351 /* Find a spare low register to use during the prologue of a function. */
3353 static int
3354 thumb_find_work_register (unsigned long pushed_regs_mask)
3356 int reg;
3358 /* Check the argument registers first as these are call-used. The
3359 register allocation order means that sometimes r3 might be used
3360 but earlier argument registers might not, so check them all. */
3361 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3362 if (!regs_ever_live[reg])
3363 return reg;
3365 /* Before going on to check the call-saved registers we can try a couple
3366 more ways of deducing that r3 is available. The first is when we are
3367 pushing anonymous arguments onto the stack and we have fewer than 4
3368 registers' worth of fixed arguments(*). In this case r3 will be part of
3369 the variable argument list and so we can be sure that it will be
3370 pushed right at the start of the function. Hence it will be available
3371 for the rest of the prologue.
3372 (*): i.e. current_function_pretend_args_size is greater than 0. */
3373 if (cfun->machine->uses_anonymous_args
3374 && current_function_pretend_args_size > 0)
3375 return LAST_ARG_REGNUM;
3377 /* The other case is when we have fixed arguments but fewer than 4 registers'
3378 worth. In this case r3 might be used in the body of the function, but
3379 it is not being used to convey an argument into the function. In theory
3380 we could just check current_function_args_size to see how many bytes are
3381 being passed in argument registers, but it seems that it is unreliable.
3382 Sometimes it will have the value 0 when in fact arguments are being
3383 passed. (See testcase execute/20021111-1.c for an example.) So we
3384 also check the args_info.nregs field. The problem with this field is
3385 that it makes no allowances for arguments that are passed to the
3386 function but which are not used. Hence we could miss an opportunity
3387 when a function has an unused argument in r3. But it is better to be
3388 safe than to be sorry. */
3389 if (! cfun->machine->uses_anonymous_args
3390 && current_function_args_size >= 0
3391 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3392 && cfun->args_info.nregs < 4)
3393 return LAST_ARG_REGNUM;
3395 /* Otherwise look for a call-saved register that is going to be pushed. */
3396 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3397 if (pushed_regs_mask & (1 << reg))
3398 return reg;
3400 /* Something went wrong - thumb_compute_save_reg_mask()
3401 should have arranged for a suitable register to be pushed. */
3402 gcc_unreachable ();
3405 static GTY(()) int pic_labelno;
3407 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3408 low register. */
3410 void
3411 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3413 #ifndef AOF_ASSEMBLER
3414 rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx;
3415 rtx global_offset_table;
3417 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3418 return;
3420 gcc_assert (flag_pic);
3422 /* We use an UNSPEC rather than a LABEL_REF because this label never appears
3423 in the code stream. */
3425 labelno = GEN_INT (pic_labelno++);
3426 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3427 l1 = gen_rtx_CONST (VOIDmode, l1);
3429 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3430 /* On the ARM the PC register contains 'dot + 8' at the time of the
3431 addition; on the Thumb it is 'dot + 4'. */
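/* Schematically, the ARM-state sequence emitted below is

        ldr   rPIC, .Loff      @ .Loff: .word _GLOBAL_OFFSET_TABLE_-(.L1+8)
   .L1: add   rPIC, pc, rPIC   @ pc reads as .L1 + 8 in ARM state

   where rPIC stands for the PIC register; this is why 8 (or 4 for
   Thumb) is folded into the offset constant.  */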
3432 pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
3433 if (GOT_PCREL)
3434 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3435 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3436 else
3437 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3439 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3441 if (TARGET_ARM)
3443 emit_insn (gen_pic_load_addr_arm (cfun->machine->pic_reg, pic_rtx));
3444 emit_insn (gen_pic_add_dot_plus_eight (cfun->machine->pic_reg,
3445 cfun->machine->pic_reg, labelno));
3447 else
3449 if (arm_pic_register != INVALID_REGNUM
3450 && REGNO (cfun->machine->pic_reg) > LAST_LO_REGNUM)
3452 /* We will have pushed the pic register, so we should always be
3453 able to find a work register. */
3454 pic_tmp = gen_rtx_REG (SImode,
3455 thumb_find_work_register (saved_regs));
3456 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3457 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3459 else
3460 emit_insn (gen_pic_load_addr_thumb (cfun->machine->pic_reg, pic_rtx));
3461 emit_insn (gen_pic_add_dot_plus_four (cfun->machine->pic_reg,
3462 cfun->machine->pic_reg, labelno));
3465 /* Need to emit this whether or not we obey regdecls,
3466 since setjmp/longjmp can cause life info to screw up. */
3467 emit_insn (gen_rtx_USE (VOIDmode, cfun->machine->pic_reg));
3468 #endif /* AOF_ASSEMBLER */
3472 /* Return nonzero if X is valid as an ARM state addressing register. */
3473 static int
3474 arm_address_register_rtx_p (rtx x, int strict_p)
3476 int regno;
3478 if (GET_CODE (x) != REG)
3479 return 0;
3481 regno = REGNO (x);
3483 if (strict_p)
3484 return ARM_REGNO_OK_FOR_BASE_P (regno);
3486 return (regno <= LAST_ARM_REGNUM
3487 || regno >= FIRST_PSEUDO_REGISTER
3488 || regno == FRAME_POINTER_REGNUM
3489 || regno == ARG_POINTER_REGNUM);
3492 /* Return TRUE if this rtx is the difference of a symbol and a label,
3493 and will reduce to a PC-relative relocation in the object file.
3494 Expressions like this can be left alone when generating PIC, rather
3495 than forced through the GOT. */
3496 static int
3497 pcrel_constant_p (rtx x)
3499 if (GET_CODE (x) == MINUS)
3500 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
3502 return FALSE;
3505 /* Return nonzero if X is a valid ARM state address operand. */
3507 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3508 int strict_p)
3510 bool use_ldrd;
3511 enum rtx_code code = GET_CODE (x);
3513 if (arm_address_register_rtx_p (x, strict_p))
3514 return 1;
3516 use_ldrd = (TARGET_LDRD
3517 && (mode == DImode
3518 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3520 if (code == POST_INC || code == PRE_DEC
3521 || ((code == PRE_INC || code == POST_DEC)
3522 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3523 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3525 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3526 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3527 && GET_CODE (XEXP (x, 1)) == PLUS
3528 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3530 rtx addend = XEXP (XEXP (x, 1), 1);
3532 /* Don't allow ldrd post increment by register because it's hard
3533 to fix up invalid register choices. */
3534 if (use_ldrd
3535 && GET_CODE (x) == POST_MODIFY
3536 && GET_CODE (addend) == REG)
3537 return 0;
3539 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3540 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3543 /* After reload constants split into minipools will have addresses
3544 from a LABEL_REF. */
3545 else if (reload_completed
3546 && (code == LABEL_REF
3547 || (code == CONST
3548 && GET_CODE (XEXP (x, 0)) == PLUS
3549 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3550 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3551 return 1;
3553 else if (mode == TImode)
3554 return 0;
3556 else if (code == PLUS)
3558 rtx xop0 = XEXP (x, 0);
3559 rtx xop1 = XEXP (x, 1);
3561 return ((arm_address_register_rtx_p (xop0, strict_p)
3562 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3563 || (arm_address_register_rtx_p (xop1, strict_p)
3564 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3567 #if 0
3568 /* Reload currently can't handle MINUS, so disable this for now */
3569 else if (GET_CODE (x) == MINUS)
3571 rtx xop0 = XEXP (x, 0);
3572 rtx xop1 = XEXP (x, 1);
3574 return (arm_address_register_rtx_p (xop0, strict_p)
3575 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3577 #endif
3579 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3580 && code == SYMBOL_REF
3581 && CONSTANT_POOL_ADDRESS_P (x)
3582 && ! (flag_pic
3583 && symbol_mentioned_p (get_pool_constant (x))
3584 && ! pcrel_constant_p (get_pool_constant (x))))
3585 return 1;
3587 return 0;
3590 /* Return nonzero if INDEX is valid for an address index operand in
3591 ARM state. */
3592 static int
3593 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3594 int strict_p)
3596 HOST_WIDE_INT range;
3597 enum rtx_code code = GET_CODE (index);
3599 /* Standard coprocessor addressing modes. */
3600 if (TARGET_HARD_FLOAT
3601 && (TARGET_FPA || TARGET_MAVERICK)
3602 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3603 || (TARGET_MAVERICK && mode == DImode)))
3604 return (code == CONST_INT && INTVAL (index) < 1024
3605 && INTVAL (index) > -1024
3606 && (INTVAL (index) & 3) == 0);
3608 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3609 return (code == CONST_INT
3610 && INTVAL (index) < 1024
3611 && INTVAL (index) > -1024
3612 && (INTVAL (index) & 3) == 0);
3614 if (arm_address_register_rtx_p (index, strict_p)
3615 && (GET_MODE_SIZE (mode) <= 4))
3616 return 1;
3618 if (mode == DImode || mode == DFmode)
3620 if (code == CONST_INT)
3622 HOST_WIDE_INT val = INTVAL (index);
3624 if (TARGET_LDRD)
3625 return val > -256 && val < 256;
3626 else
3627 return val > -4096 && val < 4092;
3630 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3633 if (GET_MODE_SIZE (mode) <= 4
3634 && ! (arm_arch4
3635 && (mode == HImode
3636 || (mode == QImode && outer == SIGN_EXTEND))))
3638 if (code == MULT)
3640 rtx xiop0 = XEXP (index, 0);
3641 rtx xiop1 = XEXP (index, 1);
3643 return ((arm_address_register_rtx_p (xiop0, strict_p)
3644 && power_of_two_operand (xiop1, SImode))
3645 || (arm_address_register_rtx_p (xiop1, strict_p)
3646 && power_of_two_operand (xiop0, SImode)));
3648 else if (code == LSHIFTRT || code == ASHIFTRT
3649 || code == ASHIFT || code == ROTATERT)
3651 rtx op = XEXP (index, 1);
3653 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3654 && GET_CODE (op) == CONST_INT
3655 && INTVAL (op) > 0
3656 && INTVAL (op) <= 31);
3660 /* For ARM v4 we may be doing a sign-extend operation during the
3661 load. */
3662 if (arm_arch4)
3664 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3665 range = 256;
3666 else
3667 range = 4096;
3669 else
3670 range = (mode == HImode) ? 4095 : 4096;
3672 return (code == CONST_INT
3673 && INTVAL (index) < range
3674 && INTVAL (index) > -range);
3677 /* Return nonzero if X is valid as a Thumb state base register. */
3678 static int
3679 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3681 int regno;
3683 if (GET_CODE (x) != REG)
3684 return 0;
3686 regno = REGNO (x);
3688 if (strict_p)
3689 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3691 return (regno <= LAST_LO_REGNUM
3692 || regno > LAST_VIRTUAL_REGISTER
3693 || regno == FRAME_POINTER_REGNUM
3694 || (GET_MODE_SIZE (mode) >= 4
3695 && (regno == STACK_POINTER_REGNUM
3696 || regno >= FIRST_PSEUDO_REGISTER
3697 || x == hard_frame_pointer_rtx
3698 || x == arg_pointer_rtx)));
3701 /* Return nonzero if x is a legitimate index register. This is the case
3702 for any base register that can access a QImode object. */
3703 inline static int
3704 thumb_index_register_rtx_p (rtx x, int strict_p)
3706 return thumb_base_register_rtx_p (x, QImode, strict_p);
3709 /* Return nonzero if x is a legitimate Thumb-state address.
3711 The AP may be eliminated to either the SP or the FP, so we use the
3712 least common denominator, e.g. SImode, and offsets from 0 to 64.
3714 ??? Verify whether the above is the right approach.
3716 ??? Also, the FP may be eliminated to the SP, so perhaps that
3717 needs special handling also.
3719 ??? Look at how the mips16 port solves this problem. It probably uses
3720 better ways to solve some of these problems.
3722 Although it is not incorrect, we don't accept QImode and HImode
3723 addresses based on the frame pointer or arg pointer until the
3724 reload pass starts. This is so that eliminating such addresses
3725 into stack based ones won't produce impossible code. */
3727 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3729 /* ??? Not clear if this is right. Experiment. */
3730 if (GET_MODE_SIZE (mode) < 4
3731 && !(reload_in_progress || reload_completed)
3732 && (reg_mentioned_p (frame_pointer_rtx, x)
3733 || reg_mentioned_p (arg_pointer_rtx, x)
3734 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3735 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3736 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3737 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3738 return 0;
3740 /* Accept any base register. SP only in SImode or larger. */
3741 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3742 return 1;
3744 /* This is PC relative data before arm_reorg runs. */
3745 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3746 && GET_CODE (x) == SYMBOL_REF
3747 && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
3748 return 1;
3750 /* This is PC relative data after arm_reorg runs. */
3751 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3752 && (GET_CODE (x) == LABEL_REF
3753 || (GET_CODE (x) == CONST
3754 && GET_CODE (XEXP (x, 0)) == PLUS
3755 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3756 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3757 return 1;
3759 /* Post-inc indexing only supported for SImode and larger. */
3760 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3761 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3762 return 1;
3764 else if (GET_CODE (x) == PLUS)
3766 /* REG+REG address can be any two index registers. */
3767 /* We disallow FRAME+REG addressing since we know that FRAME
3768 will be replaced with STACK, and SP relative addressing only
3769 permits SP+OFFSET. */
3770 if (GET_MODE_SIZE (mode) <= 4
3771 && XEXP (x, 0) != frame_pointer_rtx
3772 && XEXP (x, 1) != frame_pointer_rtx
3773 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3774 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3775 return 1;
3777 /* REG+const has 5-7 bit offset for non-SP registers. */
3778 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3779 || XEXP (x, 0) == arg_pointer_rtx)
3780 && GET_CODE (XEXP (x, 1)) == CONST_INT
3781 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3782 return 1;
3784 /* REG+const has 10 bit offset for SP, but only SImode and
3785 larger are supported. */
3786 /* ??? Should probably check for DI/DFmode overflow here
3787 just like GO_IF_LEGITIMATE_OFFSET does. */
3788 else if (GET_CODE (XEXP (x, 0)) == REG
3789 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3790 && GET_MODE_SIZE (mode) >= 4
3791 && GET_CODE (XEXP (x, 1)) == CONST_INT
3792 && INTVAL (XEXP (x, 1)) >= 0
3793 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3794 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3795 return 1;
3797 else if (GET_CODE (XEXP (x, 0)) == REG
3798 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3799 && GET_MODE_SIZE (mode) >= 4
3800 && GET_CODE (XEXP (x, 1)) == CONST_INT
3801 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3802 return 1;
3805 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3806 && GET_MODE_SIZE (mode) == 4
3807 && GET_CODE (x) == SYMBOL_REF
3808 && CONSTANT_POOL_ADDRESS_P (x)
3809 && ! (flag_pic
3810 && symbol_mentioned_p (get_pool_constant (x))
3811 && ! pcrel_constant_p (get_pool_constant (x))))
3812 return 1;
3814 return 0;
3817 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3818 instruction of mode MODE. */
3819 int
3820 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3822 switch (GET_MODE_SIZE (mode))
3824 case 1:
3825 return val >= 0 && val < 32;
3827 case 2:
3828 return val >= 0 && val < 64 && (val & 1) == 0;
3830 default:
3831 return (val >= 0
3832 && (val + GET_MODE_SIZE (mode)) <= 128
3833 && (val & 3) == 0);
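/* A few worked values (illustrative only, not from the original
   source):

     thumb_legitimate_offset_p (QImode, 31)  -> 1  (0..31)
     thumb_legitimate_offset_p (HImode, 62)  -> 1  (even, 0..62)
     thumb_legitimate_offset_p (HImode, 63)  -> 0  (misaligned)
     thumb_legitimate_offset_p (SImode, 124) -> 1  (124 + 4 <= 128)
     thumb_legitimate_offset_p (SImode, 125) -> 0  (misaligned)  */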
3837 /* Build the SYMBOL_REF for __tls_get_addr. */
3839 static GTY(()) rtx tls_get_addr_libfunc;
3841 static rtx
3842 get_tls_get_addr (void)
3844 if (!tls_get_addr_libfunc)
3845 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
3846 return tls_get_addr_libfunc;
3849 static rtx
3850 arm_load_tp (rtx target)
3852 if (!target)
3853 target = gen_reg_rtx (SImode);
3855 if (TARGET_HARD_TP)
3857 /* Can return in any reg. */
3858 emit_insn (gen_load_tp_hard (target));
3860 else
3862 /* Always returned in r0. Immediately copy the result into a pseudo,
3863 otherwise other uses of r0 (e.g. setting up function arguments) may
3864 clobber the value. */
3866 rtx tmp;
3868 emit_insn (gen_load_tp_soft ());
3870 tmp = gen_rtx_REG (SImode, 0);
3871 emit_move_insn (target, tmp);
3873 return target;
3876 static rtx
3877 load_tls_operand (rtx x, rtx reg)
3879 rtx tmp;
3881 if (reg == NULL_RTX)
3882 reg = gen_reg_rtx (SImode);
3884 tmp = gen_rtx_CONST (SImode, x);
3886 emit_move_insn (reg, tmp);
3888 return reg;
3891 static rtx
3892 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
3894 rtx insns, label, labelno, sum;
3896 start_sequence ();
3898 labelno = GEN_INT (pic_labelno++);
3899 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3900 label = gen_rtx_CONST (VOIDmode, label);
3902 sum = gen_rtx_UNSPEC (Pmode,
3903 gen_rtvec (4, x, GEN_INT (reloc), label,
3904 GEN_INT (TARGET_ARM ? 8 : 4)),
3905 UNSPEC_TLS);
3906 reg = load_tls_operand (sum, reg);
3908 if (TARGET_ARM)
3909 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
3910 else
3911 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3913 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
3914 Pmode, 1, reg, Pmode);
3916 insns = get_insns ();
3917 end_sequence ();
3919 return insns;
3922 rtx
3923 legitimize_tls_address (rtx x, rtx reg)
3925 rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
3926 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
3928 switch (model)
3930 case TLS_MODEL_GLOBAL_DYNAMIC:
3931 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
3932 dest = gen_reg_rtx (Pmode);
3933 emit_libcall_block (insns, dest, ret, x);
3934 return dest;
3936 case TLS_MODEL_LOCAL_DYNAMIC:
3937 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
3939 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3940 share the LDM result with other LD model accesses. */
3941 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
3942 UNSPEC_TLS);
3943 dest = gen_reg_rtx (Pmode);
3944 emit_libcall_block (insns, dest, ret, eqv);
3946 /* Load the addend. */
3947 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
3948 UNSPEC_TLS);
3949 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
3950 return gen_rtx_PLUS (Pmode, dest, addend);
3952 case TLS_MODEL_INITIAL_EXEC:
3953 labelno = GEN_INT (pic_labelno++);
3954 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3955 label = gen_rtx_CONST (VOIDmode, label);
3956 sum = gen_rtx_UNSPEC (Pmode,
3957 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
3958 GEN_INT (TARGET_ARM ? 8 : 4)),
3959 UNSPEC_TLS);
3960 reg = load_tls_operand (sum, reg);
3962 if (TARGET_ARM)
3963 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
3964 else
3966 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3967 emit_move_insn (reg, gen_const_mem (SImode, reg));
3970 tp = arm_load_tp (NULL_RTX);
3972 return gen_rtx_PLUS (Pmode, tp, reg);
3974 case TLS_MODEL_LOCAL_EXEC:
3975 tp = arm_load_tp (NULL_RTX);
3977 reg = gen_rtx_UNSPEC (Pmode,
3978 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
3979 UNSPEC_TLS);
3980 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
3982 return gen_rtx_PLUS (Pmode, tp, reg);
3984 default:
3985 abort ();
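/* To summarize the cases above (an illustrative gloss, not original
   commentary): global-dynamic emits a __tls_get_addr call on a
   TLS_GD32 descriptor; local-dynamic makes one TLS_LDM32 call per
   module and adds a per-symbol TLS_LDO32 offset to its result;
   initial-exec loads the symbol's offset from the GOT via TLS_IE32
   and adds the thread pointer; local-exec adds a TLS_LE32 offset to
   the thread pointer directly, with no call and no GOT load.  */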
3989 /* Try machine-dependent ways of modifying an illegitimate address
3990 to be legitimate. If we find one, return the new, valid address. */
3991 rtx
3992 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3994 if (arm_tls_symbol_p (x))
3995 return legitimize_tls_address (x, NULL_RTX);
3997 if (GET_CODE (x) == PLUS)
3999 rtx xop0 = XEXP (x, 0);
4000 rtx xop1 = XEXP (x, 1);
4002 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
4003 xop0 = force_reg (SImode, xop0);
4005 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
4006 xop1 = force_reg (SImode, xop1);
4008 if (ARM_BASE_REGISTER_RTX_P (xop0)
4009 && GET_CODE (xop1) == CONST_INT)
4011 HOST_WIDE_INT n, low_n;
4012 rtx base_reg, val;
4013 n = INTVAL (xop1);
4015 /* VFP addressing modes actually allow greater offsets, but for
4016 now we just stick with the lowest common denominator. */
4017 if (mode == DImode
4018 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
4020 low_n = n & 0x0f;
4021 n &= ~0x0f;
4022 if (low_n > 4)
4024 n += 16;
4025 low_n -= 16;
4028 else
4030 low_n = ((mode) == TImode ? 0
4031 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
4032 n -= low_n;
4035 base_reg = gen_reg_rtx (SImode);
4036 val = force_operand (plus_constant (xop0, n), NULL_RTX);
4037 emit_move_insn (base_reg, val);
4038 x = plus_constant (base_reg, low_n);
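/* For example (a sketch, not from the original source): legitimizing
   the DImode address r1 + 0x3007 gives low_n = 7, n = 0x3000; since
   low_n > 4 the pair is rebiased to n = 0x3010, low_n = -9, so we
   emit base = r1 + 0x3010 and address the data as base - 9, keeping
   the residual offset small enough for a doubleword access.  */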
4040 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4041 x = gen_rtx_PLUS (SImode, xop0, xop1);
4044 /* XXX We don't allow MINUS any more -- see comment in
4045 arm_legitimate_address_p (). */
4046 else if (GET_CODE (x) == MINUS)
4048 rtx xop0 = XEXP (x, 0);
4049 rtx xop1 = XEXP (x, 1);
4051 if (CONSTANT_P (xop0))
4052 xop0 = force_reg (SImode, xop0);
4054 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
4055 xop1 = force_reg (SImode, xop1);
4057 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4058 x = gen_rtx_MINUS (SImode, xop0, xop1);
4061 /* Make sure to take full advantage of the pre-indexed addressing mode
4062 with absolute addresses, which often allows the base register to
4063 be factorized for multiple adjacent memory references, and might
4064 even allow the minipool to be avoided entirely. */
4065 else if (GET_CODE (x) == CONST_INT && optimize > 0)
4067 unsigned int bits;
4068 HOST_WIDE_INT mask, base, index;
4069 rtx base_reg;
4071 /* ldr and ldrb can use a 12 bit index, ldrsb and the rest can only
4072 use an 8 bit index. So let's use a 12 bit index for SImode only and
4073 hope that arm_gen_constant will enable ldrb to use more bits. */
4074 bits = (mode == SImode) ? 12 : 8;
4075 mask = (1 << bits) - 1;
4076 base = INTVAL (x) & ~mask;
4077 index = INTVAL (x) & mask;
4078 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
4080 /* It'll most probably be more efficient to generate the base
4081 with more bits set and use a negative index instead. */
4082 base |= mask;
4083 index -= mask;
4085 base_reg = force_reg (SImode, GEN_INT (base));
4086 x = plus_constant (base_reg, index);
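/* Worked example (illustrative): for an SImode load from absolute
   address 0x00fff004 we get bits = 12, mask = 0xfff, base =
   0x00fff000 and index = 4.  bit_count (0x00fff000) = 12 exceeds
   (32 - 12) / 2 = 10, so the base is rounded up instead: base =
   0x00ffffff (a single MVN), index = -4091.  */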
4089 if (flag_pic)
4091 /* We need to find and carefully transform any SYMBOL and LABEL
4092 references; so go back to the original address expression. */
4093 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4095 if (new_x != orig_x)
4096 x = new_x;
4099 return x;
4103 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4104 to be legitimate. If we find one, return the new, valid address. */
4105 rtx
4106 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4108 if (arm_tls_symbol_p (x))
4109 return legitimize_tls_address (x, NULL_RTX);
4111 if (GET_CODE (x) == PLUS
4112 && GET_CODE (XEXP (x, 1)) == CONST_INT
4113 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
4114 || INTVAL (XEXP (x, 1)) < 0))
4116 rtx xop0 = XEXP (x, 0);
4117 rtx xop1 = XEXP (x, 1);
4118 HOST_WIDE_INT offset = INTVAL (xop1);
4120 /* Try and fold the offset into a biasing of the base register and
4121 then offsetting that. Don't do this when optimizing for space
4122 since it can cause too many CSEs. */
4123 if (optimize_size && offset >= 0
4124 && offset < 256 + 31 * GET_MODE_SIZE (mode))
4126 HOST_WIDE_INT delta;
4128 if (offset >= 256)
4129 delta = offset - (256 - GET_MODE_SIZE (mode));
4130 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
4131 delta = 31 * GET_MODE_SIZE (mode);
4132 else
4133 delta = offset & (~31 * GET_MODE_SIZE (mode));
4135 xop0 = force_operand (plus_constant (xop0, offset - delta),
4136 NULL_RTX);
4137 x = plus_constant (xop0, delta);
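/* Example (a sketch, not original commentary): for an SImode access
   at r2 + 300 when optimizing for size, offset >= 256 gives
   delta = 300 - (256 - 4) = 48, so we emit tmp = r2 + 252 and
   address tmp + 48, which fits the 5-bit scaled immediate offset
   field.  */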
4139 else if (offset < 0 && offset > -256)
4140 /* Small negative offsets are best done with a subtract before the
4141 dereference, since forcing these into a register normally takes two
4142 instructions. */
4143 x = force_operand (x, NULL_RTX);
4144 else
4146 /* For the remaining cases, force the constant into a register. */
4147 xop1 = force_reg (SImode, xop1);
4148 x = gen_rtx_PLUS (SImode, xop0, xop1);
4151 else if (GET_CODE (x) == PLUS
4152 && s_register_operand (XEXP (x, 1), SImode)
4153 && !s_register_operand (XEXP (x, 0), SImode))
4155 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
4157 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
4160 if (flag_pic)
4162 /* We need to find and carefully transform any SYMBOL and LABEL
4163 references; so go back to the original address expression. */
4164 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4166 if (new_x != orig_x)
4167 x = new_x;
4170 return x;
4173 rtx
4174 thumb_legitimize_reload_address (rtx *x_p,
4175 enum machine_mode mode,
4176 int opnum, int type,
4177 int ind_levels ATTRIBUTE_UNUSED)
4179 rtx x = *x_p;
4181 if (GET_CODE (x) == PLUS
4182 && GET_MODE_SIZE (mode) < 4
4183 && REG_P (XEXP (x, 0))
4184 && XEXP (x, 0) == stack_pointer_rtx
4185 && GET_CODE (XEXP (x, 1)) == CONST_INT
4186 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4188 rtx orig_x = x;
4190 x = copy_rtx (x);
4191 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4192 Pmode, VOIDmode, 0, 0, opnum, type);
4193 return x;
4196 /* If both registers are hi-regs, then it's better to reload the
4197 entire expression rather than each register individually. That
4198 only requires one reload register rather than two. */
4199 if (GET_CODE (x) == PLUS
4200 && REG_P (XEXP (x, 0))
4201 && REG_P (XEXP (x, 1))
4202 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
4203 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
4205 rtx orig_x = x;
4207 x = copy_rtx (x);
4208 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4209 Pmode, VOIDmode, 0, 0, opnum, type);
4210 return x;
4213 return NULL;
4216 /* Test for various thread-local symbols. */
4218 /* Return TRUE if X is a thread-local symbol. */
4220 static bool
4221 arm_tls_symbol_p (rtx x)
4223 if (! TARGET_HAVE_TLS)
4224 return false;
4226 if (GET_CODE (x) != SYMBOL_REF)
4227 return false;
4229 return SYMBOL_REF_TLS_MODEL (x) != 0;
4232 /* Helper for arm_tls_referenced_p. */
4234 static int
4235 arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4237 if (GET_CODE (*x) == SYMBOL_REF)
4238 return SYMBOL_REF_TLS_MODEL (*x) != 0;
4240 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
4241 TLS offsets, not real symbol references. */
4242 if (GET_CODE (*x) == UNSPEC
4243 && XINT (*x, 1) == UNSPEC_TLS)
4244 return -1;
4246 return 0;
4249 /* Return TRUE if X contains any TLS symbol references. */
4251 bool
4252 arm_tls_referenced_p (rtx x)
4254 if (! TARGET_HAVE_TLS)
4255 return false;
4257 return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
4260 #define REG_OR_SUBREG_REG(X) \
4261 (GET_CODE (X) == REG \
4262 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
4264 #define REG_OR_SUBREG_RTX(X) \
4265 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
4267 #ifndef COSTS_N_INSNS
4268 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
4269 #endif
4270 static inline int
4271 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
4273 enum machine_mode mode = GET_MODE (x);
4275 switch (code)
4277 case ASHIFT:
4278 case ASHIFTRT:
4279 case LSHIFTRT:
4280 case ROTATERT:
4281 case PLUS:
4282 case MINUS:
4283 case COMPARE:
4284 case NEG:
4285 case NOT:
4286 return COSTS_N_INSNS (1);
4288 case MULT:
4289 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4291 int cycles = 0;
4292 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
4294 while (i)
4296 i >>= 2;
4297 cycles++;
4299 return COSTS_N_INSNS (2) + cycles;
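/* E.g. (illustrative only): a multiply by 53 (0x35) retires two
   bits of the constant per step, 0x35 -> 0xd -> 0x3 -> 0, so
   cycles = 3 and the cost is COSTS_N_INSNS (2) + 3.  */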
4301 return COSTS_N_INSNS (1) + 16;
4303 case SET:
4304 return (COSTS_N_INSNS (1)
4305 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
4306 + (GET_CODE (SET_DEST (x)) == MEM)));
4308 case CONST_INT:
4309 if (outer == SET)
4311 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
4312 return 0;
4313 if (thumb_shiftable_const (INTVAL (x)))
4314 return COSTS_N_INSNS (2);
4315 return COSTS_N_INSNS (3);
4317 else if ((outer == PLUS || outer == COMPARE)
4318 && INTVAL (x) < 256 && INTVAL (x) > -256)
4319 return 0;
4320 else if (outer == AND
4321 && INTVAL (x) < 256 && INTVAL (x) >= -256)
4322 return COSTS_N_INSNS (1);
4323 else if (outer == ASHIFT || outer == ASHIFTRT
4324 || outer == LSHIFTRT)
4325 return 0;
4326 return COSTS_N_INSNS (2);
4328 case CONST:
4329 case CONST_DOUBLE:
4330 case LABEL_REF:
4331 case SYMBOL_REF:
4332 return COSTS_N_INSNS (3);
4334 case UDIV:
4335 case UMOD:
4336 case DIV:
4337 case MOD:
4338 return 100;
4340 case TRUNCATE:
4341 return 99;
4343 case AND:
4344 case XOR:
4345 case IOR:
4346 /* XXX guess. */
4347 return 8;
4349 case MEM:
4350 /* XXX another guess. */
4351 /* Memory costs quite a lot for the first word, but subsequent words
4352 load at the equivalent of a single insn each. */
4353 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4354 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4355 ? 4 : 0));
4357 case IF_THEN_ELSE:
4358 /* XXX a guess. */
4359 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4360 return 14;
4361 return 2;
4363 case ZERO_EXTEND:
4364 /* XXX still guessing. */
4365 switch (GET_MODE (XEXP (x, 0)))
4367 case QImode:
4368 return (1 + (mode == DImode ? 4 : 0)
4369 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4371 case HImode:
4372 return (4 + (mode == DImode ? 4 : 0)
4373 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4375 case SImode:
4376 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4378 default:
4379 return 99;
4382 default:
4383 return 99;
4388 /* Worker routine for arm_rtx_costs. */
4389 static inline int
4390 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4392 enum machine_mode mode = GET_MODE (x);
4393 enum rtx_code subcode;
4394 int extra_cost;
4396 switch (code)
4398 case MEM:
4399 /* Memory costs quite a lot for the first word, but subsequent words
4400 load at the equivalent of a single insn each. */
4401 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4402 + (GET_CODE (x) == SYMBOL_REF
4403 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4405 case DIV:
4406 case MOD:
4407 case UDIV:
4408 case UMOD:
4409 return optimize_size ? COSTS_N_INSNS (2) : 100;
4411 case ROTATE:
4412 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4413 return 4;
4414 /* Fall through */
4415 case ROTATERT:
4416 if (mode != SImode)
4417 return 8;
4418 /* Fall through */
4419 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4420 if (mode == DImode)
4421 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4422 + ((GET_CODE (XEXP (x, 0)) == REG
4423 || (GET_CODE (XEXP (x, 0)) == SUBREG
4424 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4425 ? 0 : 8));
4426 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4427 || (GET_CODE (XEXP (x, 0)) == SUBREG
4428 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4429 ? 0 : 4)
4430 + ((GET_CODE (XEXP (x, 1)) == REG
4431 || (GET_CODE (XEXP (x, 1)) == SUBREG
4432 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4433 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4434 ? 0 : 4));
4436 case MINUS:
4437 if (mode == DImode)
4438 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4439 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4440 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4441 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4442 ? 0 : 8));
4444 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4445 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4446 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4447 && arm_const_double_rtx (XEXP (x, 1))))
4448 ? 0 : 8)
4449 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4450 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4451 && arm_const_double_rtx (XEXP (x, 0))))
4452 ? 0 : 8));
4454 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4455 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4456 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4457 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4458 || subcode == ASHIFTRT || subcode == LSHIFTRT
4459 || subcode == ROTATE || subcode == ROTATERT
4460 || (subcode == MULT
4461 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4462 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4463 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4464 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4465 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4466 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4467 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4468 return 1;
4469 /* Fall through */
4471 case PLUS:
4472 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4473 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4474 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4475 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4476 && arm_const_double_rtx (XEXP (x, 1))))
4477 ? 0 : 8));
4479 /* Fall through */
4480 case AND: case XOR: case IOR:
4481 extra_cost = 0;
4483 /* Normally the frame registers will be spilt into reg+const during
4484 reload, so it is a bad idea to combine them with other instructions,
4485 since then they might not be moved outside of loops. As a compromise
4486 we allow integration with ops that have a constant as their second
4487 operand. */
4488 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4489 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4490 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4491 || (REG_OR_SUBREG_REG (XEXP (x, 1))
4492 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
4493 extra_cost = 4;
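/* For instance (not original commentary): (plus frame_ptr (reg r1))
   is penalized with extra_cost = 4, while (plus frame_ptr
   (const_int 8)) is not, since the latter matches the reg+const
   form that reload will create anyway.  */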
4495 if (mode == DImode)
4496 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4497 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4498 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4499 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4500 ? 0 : 8));
4502 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4503 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4504 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4505 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4506 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4507 ? 0 : 4));
4509 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4510 return (1 + extra_cost
4511 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4512 || subcode == LSHIFTRT || subcode == ASHIFTRT
4513 || subcode == ROTATE || subcode == ROTATERT
4514 || (subcode == MULT
4515 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4516 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4517 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4518 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4519 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4520 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4521 ? 0 : 4));
4523 return 8;
4525 case MULT:
4526 /* This should have been handled by the CPU specific routines. */
4527 gcc_unreachable ();
4529 case TRUNCATE:
4530 if (arm_arch3m && mode == SImode
4531 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4532 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4533 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4534 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4535 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4536 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4537 return 8;
4538 return 99;
4540 case NEG:
4541 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4542 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4543 /* Fall through */
4544 case NOT:
4545 if (mode == DImode)
4546 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4548 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4550 case IF_THEN_ELSE:
4551 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4552 return 14;
4553 return 2;
4555 case COMPARE:
4556 return 1;
4558 case ABS:
4559 return 4 + (mode == DImode ? 4 : 0);
4561 case SIGN_EXTEND:
4562 if (GET_MODE (XEXP (x, 0)) == QImode)
4563 return (4 + (mode == DImode ? 4 : 0)
4564 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4565 /* Fall through */
4566 case ZERO_EXTEND:
4567 switch (GET_MODE (XEXP (x, 0)))
4569 case QImode:
4570 return (1 + (mode == DImode ? 4 : 0)
4571 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4573 case HImode:
4574 return (4 + (mode == DImode ? 4 : 0)
4575 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4577 case SImode:
4578 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4580 case V8QImode:
4581 case V4HImode:
4582 case V2SImode:
4583 case V4QImode:
4584 case V2HImode:
4585 return 1;
4587 default:
4588 gcc_unreachable ();
4590 gcc_unreachable ();
4592 case CONST_INT:
4593 if (const_ok_for_arm (INTVAL (x)))
4594 return outer == SET ? 2 : -1;
4595 else if (outer == AND
4596 && const_ok_for_arm (~INTVAL (x)))
4597 return -1;
4598 else if ((outer == COMPARE
4599 || outer == PLUS || outer == MINUS)
4600 && const_ok_for_arm (-INTVAL (x)))
4601 return -1;
4602 else
4603 return 5;
4605 case CONST:
4606 case LABEL_REF:
4607 case SYMBOL_REF:
4608 return 6;
4610 case CONST_DOUBLE:
4611 if (arm_const_double_rtx (x))
4612 return outer == SET ? 2 : -1;
4613 else if ((outer == COMPARE || outer == PLUS)
4614 && neg_const_double_rtx_ok_for_fpa (x))
4615 return -1;
4616 return 7;
4618 default:
4619 return 99;
4623 /* RTX costs when optimizing for size. */
4624 static bool
4625 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4627 enum machine_mode mode = GET_MODE (x);
4629 if (TARGET_THUMB)
4631 /* XXX TBD. For now, use the standard costs. */
4632 *total = thumb_rtx_costs (x, code, outer_code);
4633 return true;
4636 switch (code)
4638 case MEM:
4639 /* A memory access costs 1 insn if the mode is small, or the address is
4640 a single register; otherwise it costs one insn per word. */
4641 if (REG_P (XEXP (x, 0)))
4642 *total = COSTS_N_INSNS (1);
4643 else
4644 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4645 return true;
4647 case DIV:
4648 case MOD:
4649 case UDIV:
4650 case UMOD:
4651 /* Needs a libcall, so it costs about this. */
4652 *total = COSTS_N_INSNS (2);
4653 return false;
4655 case ROTATE:
4656 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4658 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4659 return true;
4661 /* Fall through */
4662 case ROTATERT:
4663 case ASHIFT:
4664 case LSHIFTRT:
4665 case ASHIFTRT:
4666 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4668 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4669 return true;
4671 else if (mode == SImode)
4673 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4674 /* Slightly disparage register shifts, but not by much. */
4675 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4676 *total += 1 + rtx_cost (XEXP (x, 1), code);
4677 return true;
4680 /* Needs a libcall. */
4681 *total = COSTS_N_INSNS (2);
4682 return false;
4684 case MINUS:
4685 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4687 *total = COSTS_N_INSNS (1);
4688 return false;
4691 if (mode == SImode)
4693 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4694 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4696 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4697 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4698 || subcode1 == ROTATE || subcode1 == ROTATERT
4699 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4700 || subcode1 == ASHIFTRT)
4702 /* It's just the cost of the two operands. */
4703 *total = 0;
4704 return false;
4707 *total = COSTS_N_INSNS (1);
4708 return false;
4711 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4712 return false;
4714 case PLUS:
4715 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4717 *total = COSTS_N_INSNS (1);
4718 return false;
4721 /* Fall through */
4722 case AND: case XOR: case IOR:
4723 if (mode == SImode)
4725 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4727 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4728 || subcode == LSHIFTRT || subcode == ASHIFTRT
4729 || (code == AND && subcode == NOT))
4731 /* It's just the cost of the two operands. */
4732 *total = 0;
4733 return false;
4737 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4738 return false;
4740 case MULT:
4741 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4742 return false;
4744 case NEG:
4745 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4746 *total = COSTS_N_INSNS (1);
4747 /* Fall through */
4748 case NOT:
4749 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4751 return false;
4753 case IF_THEN_ELSE:
4754 *total = 0;
4755 return false;
4757 case COMPARE:
4758 if (cc_register (XEXP (x, 0), VOIDmode))
4759 *total = 0;
4760 else
4761 *total = COSTS_N_INSNS (1);
4762 return false;
4764 case ABS:
4765 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4766 *total = COSTS_N_INSNS (1);
4767 else
4768 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4769 return false;
4771 case SIGN_EXTEND:
4772 *total = 0;
4773 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4775 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4776 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4778 if (mode == DImode)
4779 *total += COSTS_N_INSNS (1);
4780 return false;
4782 case ZERO_EXTEND:
4783 *total = 0;
4784 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4786 switch (GET_MODE (XEXP (x, 0)))
4788 case QImode:
4789 *total += COSTS_N_INSNS (1);
4790 break;
4792 case HImode:
4793 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4795 case SImode:
4796 break;
4798 default:
4799 *total += COSTS_N_INSNS (2);
4803 if (mode == DImode)
4804 *total += COSTS_N_INSNS (1);
4806 return false;
4808 case CONST_INT:
4809 if (const_ok_for_arm (INTVAL (x)))
4810 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4811 else if (const_ok_for_arm (~INTVAL (x)))
4812 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4813 else if (const_ok_for_arm (-INTVAL (x)))
4815 if (outer_code == COMPARE || outer_code == PLUS
4816 || outer_code == MINUS)
4817 *total = 0;
4818 else
4819 *total = COSTS_N_INSNS (1);
4821 else
4822 *total = COSTS_N_INSNS (2);
4823 return true;
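/* Worked cases (illustrative): 255 is directly encodable, so it
   costs one insn as a SET source and nothing otherwise; 0xffffffff
   is not encodable but its complement 0 is, so under an AND it is
   free (BIC absorbs it); 257 fits none of the patterns and is
   charged two insns.  */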
4825 case CONST:
4826 case LABEL_REF:
4827 case SYMBOL_REF:
4828 *total = COSTS_N_INSNS (2);
4829 return true;
4831 case CONST_DOUBLE:
4832 *total = COSTS_N_INSNS (4);
4833 return true;
4835 default:
4836 if (mode != VOIDmode)
4837 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4838 else
4839 *total = COSTS_N_INSNS (4); /* Who knows? */
4840 return false;
4844 /* RTX costs for cores with a slow MUL implementation. */
4846 static bool
4847 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4849 enum machine_mode mode = GET_MODE (x);
4851 if (TARGET_THUMB)
4853 *total = thumb_rtx_costs (x, code, outer_code);
4854 return true;
4857 switch (code)
4859 case MULT:
4860 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4861 || mode == DImode)
4863 *total = 30;
4864 return true;
4867 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4869 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4870 & (unsigned HOST_WIDE_INT) 0xffffffff);
4871 int cost, const_ok = const_ok_for_arm (i);
4872 int j, booth_unit_size;
4874 /* Tune as appropriate. */
4875 cost = const_ok ? 4 : 8;
4876 booth_unit_size = 2;
4877 for (j = 0; i && j < 32; j += booth_unit_size)
4879 i >>= booth_unit_size;
4880 cost += 2;
4883 *total = cost;
4884 return true;
4887 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4888 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4889 return true;
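/* Worked example for the Booth loop above (illustrative): with
   booth_unit_size = 2, a multiply by 0xff retires two bits per
   step, 0xff -> 0x3f -> 0xf -> 0x3 -> 0, i.e. four steps, so
   cost = 4 (the constant is encodable) + 4 * 2 = 12.  */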
4891 default:
4892 *total = arm_rtx_costs_1 (x, code, outer_code);
4893 return true;
4898 /* RTX cost for cores with a fast multiply unit (M variants). */
4900 static bool
4901 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4903 enum machine_mode mode = GET_MODE (x);
4905 if (TARGET_THUMB)
4907 *total = thumb_rtx_costs (x, code, outer_code);
4908 return true;
4911 switch (code)
4913 case MULT:
4914 /* There is no point basing this on the tuning, since it is always the
4915 fast variant if it exists at all. */
4916 if (mode == DImode
4917 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4918 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4919 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4921 *total = 8;
4922 return true;
4926 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4927 || mode == DImode)
4929 *total = 30;
4930 return true;
4933 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4935 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4936 & (unsigned HOST_WIDE_INT) 0xffffffff);
4937 int cost, const_ok = const_ok_for_arm (i);
4938 int j, booth_unit_size;
4940 /* Tune as appropriate. */
4941 cost = const_ok ? 4 : 8;
4942 booth_unit_size = 8;
4943 for (j = 0; i && j < 32; j += booth_unit_size)
4945 i >>= booth_unit_size;
4946 cost += 2;
4949 *total = cost;
4950 return true;
4953 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4954 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4955 return true;
4957 default:
4958 *total = arm_rtx_costs_1 (x, code, outer_code);
4959 return true;
4964 /* RTX cost for XScale CPUs. */
4966 static bool
4967 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4969 enum machine_mode mode = GET_MODE (x);
4971 if (TARGET_THUMB)
4973 *total = thumb_rtx_costs (x, code, outer_code);
4974 return true;
4977 switch (code)
4979 case MULT:
4980 /* There is no point basing this on the tuning, since it is always the
4981 fast variant if it exists at all. */
4982 if (mode == DImode
4983 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4984 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4985 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4987 *total = 8;
4988 return true;
4992 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4993 || mode == DImode)
4995 *total = 30;
4996 return true;
4999 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5001 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5002 & (unsigned HOST_WIDE_INT) 0xffffffff);
5003 int cost, const_ok = const_ok_for_arm (i);
5004 unsigned HOST_WIDE_INT masked_const;
5006 /* The cost will be related to two insns.
5007 First a load of the constant (MOV or LDR), then a multiply. */
5008 cost = 2;
5009 if (! const_ok)
5010 cost += 1; /* LDR is probably more expensive because
5011 of longer result latency. */
5012 masked_const = i & 0xffff8000;
5013 if (masked_const != 0 && masked_const != 0xffff8000)
5015 masked_const = i & 0xf8000000;
5016 if (masked_const == 0 || masked_const == 0xf8000000)
5017 cost += 1;
5018 else
5019 cost += 2;
5021 *total = cost;
5022 return true;
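/* Example (illustrative only): for a multiply by 0x12345678,
   i & 0xffff8000 = 0x12340000 is neither 0 nor 0xffff8000, and
   i & 0xf8000000 = 0x10000000 is neither 0 nor 0xf8000000, so the
   multiplier cannot terminate early and cost += 2, giving
   *total = 2 + 1 + 2 = 5.  */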
5025 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5026 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5027 return true;
5029 case COMPARE:
5030 /* A COMPARE of a MULT is slow on XScale; the muls instruction
5031 will stall until the multiplication is complete. */
5032 if (GET_CODE (XEXP (x, 0)) == MULT)
5033 *total = 4 + rtx_cost (XEXP (x, 0), code);
5034 else
5035 *total = arm_rtx_costs_1 (x, code, outer_code);
5036 return true;
5038 default:
5039 *total = arm_rtx_costs_1 (x, code, outer_code);
5040 return true;
5045 /* RTX costs for 9e (and later) cores. */
5047 static bool
5048 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
5050 enum machine_mode mode = GET_MODE (x);
5051 int nonreg_cost;
5052 int cost;
5054 if (TARGET_THUMB)
5056 switch (code)
5058 case MULT:
5059 *total = COSTS_N_INSNS (3);
5060 return true;
5062 default:
5063 *total = thumb_rtx_costs (x, code, outer_code);
5064 return true;
5068 switch (code)
5070 case MULT:
5071 /* There is no point basing this on the tuning, since it is always the
5072 fast variant if it exists at all. */
5073 if (mode == DImode
5074 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5075 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5076 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5078 *total = 3;
5079 return true;
5083 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5085 *total = 30;
5086 return true;
5088 if (mode == DImode)
5090 cost = 7;
5091 nonreg_cost = 8;
5093 else
5095 cost = 2;
5096 nonreg_cost = 4;
5100 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
5101 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
5102 return true;
5104 default:
5105 *total = arm_rtx_costs_1 (x, code, outer_code);
5106 return true;
5109 /* All address computations that can be done are free, but rtx cost returns
5110 the same for practically all of them. So we weight the different types
5111 of address here in the order (most pref first):
5112 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
5113 static inline int
5114 arm_arm_address_cost (rtx x)
5116 enum rtx_code c = GET_CODE (x);
5118 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
5119 return 0;
5120 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
5121 return 10;
5123 if (c == PLUS || c == MINUS)
5125 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5126 return 2;
5128 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
5129 return 3;
5131 return 4;
5134 return 6;
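/* Resulting weights (for illustration): (post_inc r3) -> 0,
   (plus r3 (mult r2 (const_int 4))) -> 3, (plus r3 r2) -> 4,
   (plus r3 (const_int 8)) -> 4, a bare register -> 6, and a MEM,
   LABEL_REF or SYMBOL_REF -> 10.  */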
5137 static inline int
5138 arm_thumb_address_cost (rtx x)
5140 enum rtx_code c = GET_CODE (x);
5142 if (c == REG)
5143 return 1;
5144 if (c == PLUS
5145 && GET_CODE (XEXP (x, 0)) == REG
5146 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5147 return 1;
5149 return 2;
5152 static int
5153 arm_address_cost (rtx x)
5155 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
5158 static int
5159 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
5161 rtx i_pat, d_pat;
5163 /* Some true dependencies can have a higher cost depending
5164 on precisely how certain input operands are used. */
5165 if (arm_tune_xscale
5166 && REG_NOTE_KIND (link) == 0
5167 && recog_memoized (insn) >= 0
5168 && recog_memoized (dep) >= 0)
5170 int shift_opnum = get_attr_shift (insn);
5171 enum attr_type attr_type = get_attr_type (dep);
5173 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5174 operand for INSN. If we have a shifted input operand and the
5175 instruction we depend on is another ALU instruction, then we may
5176 have to account for an additional stall. */
5177 if (shift_opnum != 0
5178 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
5180 rtx shifted_operand;
5181 int opno;
5183 /* Get the shifted operand. */
5184 extract_insn (insn);
5185 shifted_operand = recog_data.operand[shift_opnum];
5187 /* Iterate over all the operands in DEP. If we write an operand
5188 that overlaps with SHIFTED_OPERAND, then we have to increase the
5189 cost of this dependency. */
5190 extract_insn (dep);
5191 preprocess_constraints ();
5192 for (opno = 0; opno < recog_data.n_operands; opno++)
5194 /* We can ignore strict inputs. */
5195 if (recog_data.operand_type[opno] == OP_IN)
5196 continue;
5198 if (reg_overlap_mentioned_p (recog_data.operand[opno],
5199 shifted_operand))
5200 return 2;
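/* E.g. (a sketch, not from the original): if INSN is
   "add r0, r1, r2, lsl #4" and DEP is the ALU insn that writes r2,
   the shifted operand overlaps an output of DEP, so the dependency
   cost is raised to 2 to account for the extra XScale stall.  */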
5205 /* XXX This is not strictly true for the FPA. */
5206 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
5207 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
5208 return 0;
5210 /* Call insns don't incur a stall, even if they follow a load. */
5211 if (REG_NOTE_KIND (link) == 0
5212 && GET_CODE (insn) == CALL_INSN)
5213 return 1;
5215 if ((i_pat = single_set (insn)) != NULL
5216 && GET_CODE (SET_SRC (i_pat)) == MEM
5217 && (d_pat = single_set (dep)) != NULL
5218 && GET_CODE (SET_DEST (d_pat)) == MEM)
5220 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
5221 /* This is a load after a store; there is no conflict if the load reads
5222 from a cached area. Assume that loads from the stack and from the
5223 constant pool are cached, and that others will miss. This is a
5224 hack. */
5226 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
5227 || reg_mentioned_p (stack_pointer_rtx, src_mem)
5228 || reg_mentioned_p (frame_pointer_rtx, src_mem)
5229 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
5230 return 1;
5233 return cost;
5236 static int fp_consts_inited = 0;
5238 /* Only zero is valid for VFP. Other values are also valid for FPA. */
5239 static const char * const strings_fp[8] =
5241 "0", "1", "2", "3",
5242 "4", "5", "0.5", "10"
5245 static REAL_VALUE_TYPE values_fp[8];
5247 static void
5248 init_fp_table (void)
5250 int i;
5251 REAL_VALUE_TYPE r;
5253 if (TARGET_VFP)
5254 fp_consts_inited = 1;
5255 else
5256 fp_consts_inited = 8;
5258 for (i = 0; i < fp_consts_inited; i++)
5260 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
5261 values_fp[i] = r;
5265 /* Return TRUE if rtx X is a valid immediate FP constant. */
5266 int
5267 arm_const_double_rtx (rtx x)
5269 REAL_VALUE_TYPE r;
5270 int i;
5272 if (!fp_consts_inited)
5273 init_fp_table ();
5275 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5276 if (REAL_VALUE_MINUS_ZERO (r))
5277 return 0;
5279 for (i = 0; i < fp_consts_inited; i++)
5280 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5281 return 1;
5283 return 0;
5286 /* Return TRUE if rtx X is a valid immediate FPA constant. */
5287 int
5288 neg_const_double_rtx_ok_for_fpa (rtx x)
5290 REAL_VALUE_TYPE r;
5291 int i;
5293 if (!fp_consts_inited)
5294 init_fp_table ();
5296 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5297 r = REAL_VALUE_NEGATE (r);
5298 if (REAL_VALUE_MINUS_ZERO (r))
5299 return 0;
5301 for (i = 0; i < 8; i++)
5302 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5303 return 1;
5305 return 0;
5308 /* Predicates for `match_operand' and `match_operator'. */
5310 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5311 int
5312 cirrus_memory_offset (rtx op)
5314 /* Reject eliminable registers. */
5315 if (! (reload_in_progress || reload_completed)
5316 && ( reg_mentioned_p (frame_pointer_rtx, op)
5317 || reg_mentioned_p (arg_pointer_rtx, op)
5318 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5319 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5320 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5321 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5322 return 0;
5324 if (GET_CODE (op) == MEM)
5326 rtx ind;
5328 ind = XEXP (op, 0);
5330 /* Match: (mem (reg)). */
5331 if (GET_CODE (ind) == REG)
5332 return 1;
5334 /* Match:
5335 (mem (plus (reg)
5336 (const))). */
5337 if (GET_CODE (ind) == PLUS
5338 && GET_CODE (XEXP (ind, 0)) == REG
5339 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5340 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
5341 return 1;
5344 return 0;
5347 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5348 WB is true if writeback address modes are allowed. */
5350 int
5351 arm_coproc_mem_operand (rtx op, bool wb)
5353 rtx ind;
5355 /* Reject eliminable registers. */
5356 if (! (reload_in_progress || reload_completed)
5357 && ( reg_mentioned_p (frame_pointer_rtx, op)
5358 || reg_mentioned_p (arg_pointer_rtx, op)
5359 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5360 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5361 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5362 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5363 return FALSE;
5365 /* Constants are converted into offsets from labels. */
5366 if (GET_CODE (op) != MEM)
5367 return FALSE;
5369 ind = XEXP (op, 0);
5371 if (reload_completed
5372 && (GET_CODE (ind) == LABEL_REF
5373 || (GET_CODE (ind) == CONST
5374 && GET_CODE (XEXP (ind, 0)) == PLUS
5375 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5376 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5377 return TRUE;
5379 /* Match: (mem (reg)). */
5380 if (GET_CODE (ind) == REG)
5381 return arm_address_register_rtx_p (ind, 0);
5383 /* Autoincrement addressing modes. */
5384 if (wb
5385 && (GET_CODE (ind) == PRE_INC
5386 || GET_CODE (ind) == POST_INC
5387 || GET_CODE (ind) == PRE_DEC
5388 || GET_CODE (ind) == POST_DEC))
5389 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5391 if (wb
5392 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5393 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5394 && GET_CODE (XEXP (ind, 1)) == PLUS
5395 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5396 ind = XEXP (ind, 1);
5398 /* Match:
5399 (plus (reg)
5400 (const)). */
5401 if (GET_CODE (ind) == PLUS
5402 && GET_CODE (XEXP (ind, 0)) == REG
5403 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5404 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5405 && INTVAL (XEXP (ind, 1)) > -1024
5406 && INTVAL (XEXP (ind, 1)) < 1024
5407 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5408 return TRUE;
5410 return FALSE;
5413 /* Return true if X is a register that will be eliminated later on. */
5414 int
5415 arm_eliminable_register (rtx x)
5417 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5418 || REGNO (x) == ARG_POINTER_REGNUM
5419 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5420 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5423 /* Return GENERAL_REGS if a scratch register required to reload x to/from
5424 VFP registers. Otherwise return NO_REGS. */
5426 enum reg_class
5427 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5429 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5430 return NO_REGS;
5432 return GENERAL_REGS;
5435 /* Values which must be returned in the most-significant end of the return
5436 register. */
5438 static bool
5439 arm_return_in_msb (tree valtype)
5441 return (TARGET_AAPCS_BASED
5442 && BYTES_BIG_ENDIAN
5443 && (AGGREGATE_TYPE_P (valtype)
5444 || TREE_CODE (valtype) == COMPLEX_TYPE));
5447 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5448 Used by the Cirrus Maverick code, which has to work around
5449 a hardware bug triggered by such instructions. */
5450 static bool
5451 arm_memory_load_p (rtx insn)
5453 rtx body, lhs, rhs;
5455 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5456 return false;
5458 body = PATTERN (insn);
5460 if (GET_CODE (body) != SET)
5461 return false;
5463 lhs = XEXP (body, 0);
5464 rhs = XEXP (body, 1);
5466 lhs = REG_OR_SUBREG_RTX (lhs);
5468 /* If the destination is not a general purpose
5469 register we do not have to worry. */
5470 if (GET_CODE (lhs) != REG
5471 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5472 return false;
5474 /* As well as loads from memory, we also have to react
5475 to loads of invalid constants, which will be turned
5476 into loads from the minipool. */
5477 return (GET_CODE (rhs) == MEM
5478 || GET_CODE (rhs) == SYMBOL_REF
5479 || note_invalid_constants (insn, -1, false));
5482 /* Return TRUE if INSN is a Cirrus instruction. */
5483 static bool
5484 arm_cirrus_insn_p (rtx insn)
5486 enum attr_cirrus attr;
5488 /* get_attr cannot accept USE or CLOBBER. */
5489 if (!insn
5490 || GET_CODE (insn) != INSN
5491 || GET_CODE (PATTERN (insn)) == USE
5492 || GET_CODE (PATTERN (insn)) == CLOBBER)
5493 return 0;
5495 attr = get_attr_cirrus (insn);
5497 return attr != CIRRUS_NOT;
5500 /* Cirrus reorg for invalid instruction combinations. */
5501 static void
5502 cirrus_reorg (rtx first)
5504 enum attr_cirrus attr;
5505 rtx body = PATTERN (first);
5506 rtx t;
5507 int nops;
5509 /* Any branch must be followed by 2 non-Cirrus instructions. */
5510 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5512 nops = 0;
5513 t = next_nonnote_insn (first);
5515 if (arm_cirrus_insn_p (t))
5516 ++ nops;
5518 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5519 ++ nops;
5521 while (nops --)
5522 emit_insn_after (gen_nop (), first);
5524 return;
5527 /* (float (blah)) is in parallel with a clobber. */
5528 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5529 body = XVECEXP (body, 0, 0);
5531 if (GET_CODE (body) == SET)
5533 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5535 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5536 be followed by a non-Cirrus insn. */
5537 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5539 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5540 emit_insn_after (gen_nop (), first);
5542 return;
5544 else if (arm_memory_load_p (first))
5546 unsigned int arm_regno;
5548 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5549 ldr/cfmv64hr combination where the Rd field is the same
5550 in both instructions must be split with a non-Cirrus
5551 insn. Example:
5553 ldr r0, blah
5555 cfmvsr mvf0, r0. */
5557 /* Get Arm register number for ldr insn. */
5558 if (GET_CODE (lhs) == REG)
5559 arm_regno = REGNO (lhs);
5560 else
5562 gcc_assert (GET_CODE (rhs) == REG);
5563 arm_regno = REGNO (rhs);
5566 /* Next insn. */
5567 first = next_nonnote_insn (first);
5569 if (! arm_cirrus_insn_p (first))
5570 return;
5572 body = PATTERN (first);
5574 /* (float (blah)) is in parallel with a clobber. */
5575 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5576 body = XVECEXP (body, 0, 0);
5578 if (GET_CODE (body) == FLOAT)
5579 body = XEXP (body, 0);
5581 if (get_attr_cirrus (first) == CIRRUS_MOVE
5582 && GET_CODE (XEXP (body, 1)) == REG
5583 && arm_regno == REGNO (XEXP (body, 1)))
5584 emit_insn_after (gen_nop (), first);
5586 return;
5590 /* get_attr cannot accept USE or CLOBBER. */
5591 if (!first
5592 || GET_CODE (first) != INSN
5593 || GET_CODE (PATTERN (first)) == USE
5594 || GET_CODE (PATTERN (first)) == CLOBBER)
5595 return;
5597 attr = get_attr_cirrus (first);
5599 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5600 must be followed by a non-coprocessor instruction. */
5601 if (attr == CIRRUS_COMPARE)
5603 nops = 0;
5605 t = next_nonnote_insn (first);
5607 if (arm_cirrus_insn_p (t))
5608 ++ nops;
5610 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5611 ++ nops;
5613 while (nops --)
5614 emit_insn_after (gen_nop (), first);
5616 return;
5620 /* Return TRUE if X references a SYMBOL_REF. */
5621 int
5622 symbol_mentioned_p (rtx x)
5624 const char * fmt;
5625 int i;
5627 if (GET_CODE (x) == SYMBOL_REF)
5628 return 1;
5630 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
5631 are constant offsets, not symbols. */
5632 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5633 return 0;
5635 fmt = GET_RTX_FORMAT (GET_CODE (x));
5637 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5639 if (fmt[i] == 'E')
5641 int j;
5643 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5644 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5645 return 1;
5647 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5648 return 1;
5651 return 0;
5654 /* Return TRUE if X references a LABEL_REF. */
5655 int
5656 label_mentioned_p (rtx x)
5658 const char * fmt;
5659 int i;
5661 if (GET_CODE (x) == LABEL_REF)
5662 return 1;
5664 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
5665 instruction, but they are constant offsets, not symbols. */
5666 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5667 return 0;
5669 fmt = GET_RTX_FORMAT (GET_CODE (x));
5670 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5672 if (fmt[i] == 'E')
5674 int j;
5676 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5677 if (label_mentioned_p (XVECEXP (x, i, j)))
5678 return 1;
5680 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5681 return 1;
5684 return 0;
5687 int
5688 tls_mentioned_p (rtx x)
5690 switch (GET_CODE (x))
5692 case CONST:
5693 return tls_mentioned_p (XEXP (x, 0));
5695 case UNSPEC:
5696 if (XINT (x, 1) == UNSPEC_TLS)
5697 return 1;
5699 default:
5700 return 0;
5704 /* Must not copy a SET whose source operand is PC-relative. */
5706 static bool
5707 arm_cannot_copy_insn_p (rtx insn)
5709 rtx pat = PATTERN (insn);
5711 if (GET_CODE (pat) == PARALLEL
5712 && GET_CODE (XVECEXP (pat, 0, 0)) == SET)
5714 rtx rhs = SET_SRC (XVECEXP (pat, 0, 0));
5716 if (GET_CODE (rhs) == UNSPEC
5717 && XINT (rhs, 1) == UNSPEC_PIC_BASE)
5718 return TRUE;
5720 if (GET_CODE (rhs) == MEM
5721 && GET_CODE (XEXP (rhs, 0)) == UNSPEC
5722 && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
5723 return TRUE;
5726 return FALSE;
5729 enum rtx_code
5730 minmax_code (rtx x)
5732 enum rtx_code code = GET_CODE (x);
5734 switch (code)
5736 case SMAX:
5737 return GE;
5738 case SMIN:
5739 return LE;
5740 case UMIN:
5741 return LEU;
5742 case UMAX:
5743 return GEU;
5744 default:
5745 gcc_unreachable ();
5749 /* Return 1 if memory locations are adjacent. */
5750 int
5751 adjacent_mem_locations (rtx a, rtx b)
5753 /* We don't guarantee to preserve the order of these memory refs. */
5754 if (volatile_refs_p (a) || volatile_refs_p (b))
5755 return 0;
5757 if ((GET_CODE (XEXP (a, 0)) == REG
5758 || (GET_CODE (XEXP (a, 0)) == PLUS
5759 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5760 && (GET_CODE (XEXP (b, 0)) == REG
5761 || (GET_CODE (XEXP (b, 0)) == PLUS
5762 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5764 HOST_WIDE_INT val0 = 0, val1 = 0;
5765 rtx reg0, reg1;
5766 int val_diff;
5768 if (GET_CODE (XEXP (a, 0)) == PLUS)
5770 reg0 = XEXP (XEXP (a, 0), 0);
5771 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5773 else
5774 reg0 = XEXP (a, 0);
5776 if (GET_CODE (XEXP (b, 0)) == PLUS)
5778 reg1 = XEXP (XEXP (b, 0), 0);
5779 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5781 else
5782 reg1 = XEXP (b, 0);
5784 /* Don't accept any offset that will require multiple
5785 instructions to handle, since this would cause the
5786 arith_adjacentmem pattern to output an overlong sequence. */
5787 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5788 return 0;
5790 /* Don't allow an eliminable register: register elimination can make
5791 the offset too large. */
5792 if (arm_eliminable_register (reg0))
5793 return 0;
5795 val_diff = val1 - val0;
5797 if (arm_ld_sched)
5799 /* If the target has load delay slots, then there's no benefit
5800 to using an ldm instruction unless the offset is zero and
5801 we are optimizing for size. */
5802 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5803 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5804 && (val_diff == 4 || val_diff == -4));
5807 return ((REGNO (reg0) == REGNO (reg1))
5808 && (val_diff == 4 || val_diff == -4));
5811 return 0;
5814 int
5815 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5816 HOST_WIDE_INT *load_offset)
5818 int unsorted_regs[4];
5819 HOST_WIDE_INT unsorted_offsets[4];
5820 int order[4];
5821 int base_reg = -1;
5822 int i;
5824 /* Can only handle 2, 3, or 4 insns at present,
5825 though could be easily extended if required. */
5826 gcc_assert (nops >= 2 && nops <= 4);
5828 /* Loop over the operands and check that the memory references are
5829 suitable (i.e. immediate offsets from the same base register). At
5830 the same time, extract the target register, and the memory
5831 offsets. */
5832 for (i = 0; i < nops; i++)
5834 rtx reg;
5835 rtx offset;
5837 /* Convert a subreg of a mem into the mem itself. */
5838 if (GET_CODE (operands[nops + i]) == SUBREG)
5839 operands[nops + i] = alter_subreg (operands + (nops + i));
5841 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5843 /* Don't reorder volatile memory references; it doesn't seem worth
5844 looking for the case where the order is ok anyway. */
5845 if (MEM_VOLATILE_P (operands[nops + i]))
5846 return 0;
5848 offset = const0_rtx;
5850 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5851 || (GET_CODE (reg) == SUBREG
5852 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5853 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5854 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5855 == REG)
5856 || (GET_CODE (reg) == SUBREG
5857 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5858 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5859 == CONST_INT)))
5861 if (i == 0)
5863 base_reg = REGNO (reg);
5864 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5865 ? REGNO (operands[i])
5866 : REGNO (SUBREG_REG (operands[i])));
5867 order[0] = 0;
5869 else
5871 if (base_reg != (int) REGNO (reg))
5872 /* Not addressed from the same base register. */
5873 return 0;
5875 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5876 ? REGNO (operands[i])
5877 : REGNO (SUBREG_REG (operands[i])));
5878 if (unsorted_regs[i] < unsorted_regs[order[0]])
5879 order[0] = i;
5882 /* If it isn't an integer register, or if it overwrites the
5883 base register but isn't the last insn in the list, then
5884 we can't do this. */
5885 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5886 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5887 return 0;
5889 unsorted_offsets[i] = INTVAL (offset);
5891 else
5892 /* Not a suitable memory address. */
5893 return 0;
5896 /* All the useful information has now been extracted from the
5897 operands into unsorted_regs and unsorted_offsets; additionally,
5898 order[0] has been set to the lowest numbered register in the
5899 list. Sort the registers into order, and check that the memory
5900 offsets are ascending and adjacent. */
5902 for (i = 1; i < nops; i++)
5904 int j;
5906 order[i] = order[i - 1];
5907 for (j = 0; j < nops; j++)
5908 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5909 && (order[i] == order[i - 1]
5910 || unsorted_regs[j] < unsorted_regs[order[i]]))
5911 order[i] = j;
5913 /* Have we found a suitable register? If not, one must be used more
5914 than once. */
5915 if (order[i] == order[i - 1])
5916 return 0;
5918 /* Are the memory offsets adjacent and ascending? */
5919 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5920 return 0;
5923 if (base)
5925 *base = base_reg;
5927 for (i = 0; i < nops; i++)
5928 regs[i] = unsorted_regs[order[i]];
5930 *load_offset = unsorted_offsets[order[0]];
5933 if (unsorted_offsets[order[0]] == 0)
5934 return 1; /* ldmia */
5936 if (unsorted_offsets[order[0]] == 4)
5937 return 2; /* ldmib */
5939 if (unsorted_offsets[order[nops - 1]] == 0)
5940 return 3; /* ldmda */
5942 if (unsorted_offsets[order[nops - 1]] == -4)
5943 return 4; /* ldmdb */
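/* Worked example (editorial illustration, not from the original source):
   for three registers r0-r2 loaded relative to r4, the sorted offsets
   map to the return codes above as follows:
     offsets {0, 4, 8}     -> 1  ldmia r4, {r0, r1, r2}
     offsets {4, 8, 12}    -> 2  ldmib r4, {r0, r1, r2}
     offsets {-8, -4, 0}   -> 3  ldmda r4, {r0, r1, r2}
     offsets {-12, -8, -4} -> 4  ldmdb r4, {r0, r1, r2}  */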
5945 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5946 whenever the offset would force a separate address-setup add. The reason 2 ldrs are faster
5947 is because these ARMs are able to do more than one cache access
5948 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5949 whilst the ARM8 has a double bandwidth cache. This means that
5950 these cores can do both an instruction fetch and a data fetch in
5951 a single cycle, so the trick of calculating the address into a
5952 scratch register (one of the result regs) and then doing a load
5953 multiple actually becomes slower (and no smaller in code size).
5954 That is the transformation
5956 ldr rd1, [rbase + offset]
5957 ldr rd2, [rbase + offset + 4]
5959 to
5961 add rd1, rbase, offset
5962 ldmia rd1, {rd1, rd2}
5964 produces worse code -- '3 cycles + any stalls on rd2' instead of
5965 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5966 access per cycle, the first sequence could never complete in less
5967 than 6 cycles, whereas the ldm sequence would only take 5 and
5968 would make better use of sequential accesses if not hitting the
5969 cache.
5971 We cheat here and test 'arm_ld_sched' which we currently know to
5972 only be true for the ARM8, ARM9 and StrongARM. If this ever
5973 changes, then the test below needs to be reworked. */
5974 if (nops == 2 && arm_ld_sched)
5975 return 0;
5977 /* Can't do it without setting up the offset; only do this if it takes
5978 no more than one insn. */
5979 return (const_ok_for_arm (unsorted_offsets[order[0]])
5980 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5983 const char *
5984 emit_ldm_seq (rtx *operands, int nops)
5986 int regs[4];
5987 int base_reg;
5988 HOST_WIDE_INT offset;
5989 char buf[100];
5990 int i;
5992 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5994 case 1:
5995 strcpy (buf, "ldm%?ia\t");
5996 break;
5998 case 2:
5999 strcpy (buf, "ldm%?ib\t");
6000 break;
6002 case 3:
6003 strcpy (buf, "ldm%?da\t");
6004 break;
6006 case 4:
6007 strcpy (buf, "ldm%?db\t");
6008 break;
6010 case 5:
6011 if (offset >= 0)
6012 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6013 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6014 (long) offset);
6015 else
6016 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6017 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6018 (long) -offset);
6019 output_asm_insn (buf, operands);
6020 base_reg = regs[0];
6021 strcpy (buf, "ldm%?ia\t");
6022 break;
6024 default:
6025 gcc_unreachable ();
6028 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6029 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6031 for (i = 1; i < nops; i++)
6032 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6033 reg_names[regs[i]]);
6035 strcat (buf, "}\t%@ phole ldm");
6037 output_asm_insn (buf, operands);
6038 return "";
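/* Example of the output produced above (editorial illustration; assumes
   an empty REGISTER_PREFIX and an unconditional insn, so %? expands to
   nothing and %@ to the assembler comment character):
     case 1:             ldmia r4, {r0, r1, r2}  @ phole ldm
     case 5 (offset 8):  add   r0, r4, #8
                         ldmia r0, {r0, r1, r2}  @ phole ldm  */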
6041 static int
6042 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6043 HOST_WIDE_INT * load_offset)
6045 int unsorted_regs[4];
6046 HOST_WIDE_INT unsorted_offsets[4];
6047 int order[4];
6048 int base_reg = -1;
6049 int i;
6051 /* Can only handle 2, 3, or 4 insns at present, though could be easily
6052 extended if required. */
6053 gcc_assert (nops >= 2 && nops <= 4);
6055 /* Loop over the operands and check that the memory references are
6056 suitable (i.e. immediate offsets from the same base register). At
6057 the same time, extract the target register, and the memory
6058 offsets. */
6059 for (i = 0; i < nops; i++)
6061 rtx reg;
6062 rtx offset;
6064 /* Convert a subreg of a mem into the mem itself. */
6065 if (GET_CODE (operands[nops + i]) == SUBREG)
6066 operands[nops + i] = alter_subreg (operands + (nops + i));
6068 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6070 /* Don't reorder volatile memory references; it doesn't seem worth
6071 looking for the case where the order is ok anyway. */
6072 if (MEM_VOLATILE_P (operands[nops + i]))
6073 return 0;
6075 offset = const0_rtx;
6077 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6078 || (GET_CODE (reg) == SUBREG
6079 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6080 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6081 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6082 == REG)
6083 || (GET_CODE (reg) == SUBREG
6084 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6085 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6086 == CONST_INT)))
6088 if (i == 0)
6090 base_reg = REGNO (reg);
6091 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6092 ? REGNO (operands[i])
6093 : REGNO (SUBREG_REG (operands[i])));
6094 order[0] = 0;
6096 else
6098 if (base_reg != (int) REGNO (reg))
6099 /* Not addressed from the same base register. */
6100 return 0;
6102 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6103 ? REGNO (operands[i])
6104 : REGNO (SUBREG_REG (operands[i])));
6105 if (unsorted_regs[i] < unsorted_regs[order[0]])
6106 order[0] = i;
6109 /* If it isn't an integer register, then we can't do this. */
6110 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
6111 return 0;
6113 unsorted_offsets[i] = INTVAL (offset);
6115 else
6116 /* Not a suitable memory address. */
6117 return 0;
6120 /* All the useful information has now been extracted from the
6121 operands into unsorted_regs and unsorted_offsets; additionally,
6122 order[0] has been set to the lowest numbered register in the
6123 list. Sort the registers into order, and check that the memory
6124 offsets are ascending and adjacent. */
6126 for (i = 1; i < nops; i++)
6128 int j;
6130 order[i] = order[i - 1];
6131 for (j = 0; j < nops; j++)
6132 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6133 && (order[i] == order[i - 1]
6134 || unsorted_regs[j] < unsorted_regs[order[i]]))
6135 order[i] = j;
6137 /* Have we found a suitable register? If not, one must be used more
6138 than once. */
6139 if (order[i] == order[i - 1])
6140 return 0;
6143 /* Are the memory offsets adjacent and ascending? */
6143 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6144 return 0;
6147 if (base)
6149 *base = base_reg;
6151 for (i = 0; i < nops; i++)
6152 regs[i] = unsorted_regs[order[i]];
6154 *load_offset = unsorted_offsets[order[0]];
6157 if (unsorted_offsets[order[0]] == 0)
6158 return 1; /* stmia */
6160 if (unsorted_offsets[order[0]] == 4)
6161 return 2; /* stmib */
6163 if (unsorted_offsets[order[nops - 1]] == 0)
6164 return 3; /* stmda */
6166 if (unsorted_offsets[order[nops - 1]] == -4)
6167 return 4; /* stmdb */
6169 return 0;
6172 const char *
6173 emit_stm_seq (rtx *operands, int nops)
6175 int regs[4];
6176 int base_reg;
6177 HOST_WIDE_INT offset;
6178 char buf[100];
6179 int i;
6181 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6183 case 1:
6184 strcpy (buf, "stm%?ia\t");
6185 break;
6187 case 2:
6188 strcpy (buf, "stm%?ib\t");
6189 break;
6191 case 3:
6192 strcpy (buf, "stm%?da\t");
6193 break;
6195 case 4:
6196 strcpy (buf, "stm%?db\t");
6197 break;
6199 default:
6200 gcc_unreachable ();
6203 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6204 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6206 for (i = 1; i < nops; i++)
6207 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6208 reg_names[regs[i]]);
6210 strcat (buf, "}\t%@ phole stm");
6212 output_asm_insn (buf, operands);
6213 return "";
6216 /* Routines for use in generating RTL. */
6218 rtx
6219 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
6220 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6222 HOST_WIDE_INT offset = *offsetp;
6223 int i = 0, j;
6224 rtx result;
6225 int sign = up ? 1 : -1;
6226 rtx mem, addr;
6228 /* XScale has load-store double instructions, but they have stricter
6229 alignment requirements than load-store multiple, so we cannot
6230 use them.
6232 For XScale ldm requires 2 + NREGS cycles to complete and blocks
6233 the pipeline until completion.
6235 NREGS CYCLES
6236 1 3
6237 2 4
6238 3 5
6239 4 6
6241 An ldr instruction takes 1-3 cycles, but does not block the
6242 pipeline.
6244 NREGS CYCLES
6245 1 1-3
6246 2 2-6
6247 3 3-9
6248 4 4-12
6250 Best case ldr will always win. However, the more ldr instructions
6251 we issue, the less likely we are to be able to schedule them well.
6252 Using ldr instructions also increases code size.
6254 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6255 for counts of 3 or 4 regs. */
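/* A worked instance of the numbers above (editorial illustration):
   for count == 2 an ldm blocks the XScale pipeline for 2 + 2 = 4
   cycles, while two ldrs issue in as little as 2 cycles and leave the
   pipeline free, so discrete loads win unless we are optimizing for
   size; for count == 4 the ldm's 6 blocking cycles buy us 3 fewer
   instructions, so the ldm is kept.  */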
6256 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6258 rtx seq;
6260 start_sequence ();
6262 for (i = 0; i < count; i++)
6264 addr = plus_constant (from, i * 4 * sign);
6265 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6266 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
6267 offset += 4 * sign;
6270 if (write_back)
6272 emit_move_insn (from, plus_constant (from, count * 4 * sign));
6273 *offsetp = offset;
6276 seq = get_insns ();
6277 end_sequence ();
6279 return seq;
6282 result = gen_rtx_PARALLEL (VOIDmode,
6283 rtvec_alloc (count + (write_back ? 1 : 0)));
6284 if (write_back)
6286 XVECEXP (result, 0, 0)
6287 = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
6288 i = 1;
6289 count++;
6292 for (j = 0; i < count; i++, j++)
6294 addr = plus_constant (from, j * 4 * sign);
6295 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6296 XVECEXP (result, 0, i)
6297 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
6298 offset += 4 * sign;
6301 if (write_back)
6302 *offsetp = offset;
6304 return result;
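/* Sketch of the RTL built above for count == 2 with UP and WRITE_BACK
   set (editorial illustration only):
     (parallel
       [(set (reg from) (plus (reg from) (const_int 8)))
        (set (reg:SI base_regno)     (mem:SI (reg from)))
        (set (reg:SI base_regno + 1) (mem:SI (plus (reg from)
                                                   (const_int 4))))])  */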
6307 rtx
6308 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
6309 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6311 HOST_WIDE_INT offset = *offsetp;
6312 int i = 0, j;
6313 rtx result;
6314 int sign = up ? 1 : -1;
6315 rtx mem, addr;
6317 /* See arm_gen_load_multiple for discussion of
6318 the pros/cons of ldm/stm usage for XScale. */
6319 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6321 rtx seq;
6323 start_sequence ();
6325 for (i = 0; i < count; i++)
6327 addr = plus_constant (to, i * 4 * sign);
6328 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6329 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6330 offset += 4 * sign;
6333 if (write_back)
6335 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6336 *offsetp = offset;
6339 seq = get_insns ();
6340 end_sequence ();
6342 return seq;
6345 result = gen_rtx_PARALLEL (VOIDmode,
6346 rtvec_alloc (count + (write_back ? 1 : 0)));
6347 if (write_back)
6349 XVECEXP (result, 0, 0)
6350 = gen_rtx_SET (VOIDmode, to,
6351 plus_constant (to, count * 4 * sign));
6352 i = 1;
6353 count++;
6356 for (j = 0; i < count; i++, j++)
6358 addr = plus_constant (to, j * 4 * sign);
6359 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6360 XVECEXP (result, 0, i)
6361 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6362 offset += 4 * sign;
6365 if (write_back)
6366 *offsetp = offset;
6368 return result;
6371 int
6372 arm_gen_movmemqi (rtx *operands)
6374 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6375 HOST_WIDE_INT srcoffset, dstoffset;
6376 int i;
6377 rtx src, dst, srcbase, dstbase;
6378 rtx part_bytes_reg = NULL;
6379 rtx mem;
6381 if (GET_CODE (operands[2]) != CONST_INT
6382 || GET_CODE (operands[3]) != CONST_INT
6383 || INTVAL (operands[2]) > 64
6384 || INTVAL (operands[3]) & 3)
6385 return 0;
6387 dstbase = operands[0];
6388 srcbase = operands[1];
6390 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
6391 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
6393 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6394 out_words_to_go = INTVAL (operands[2]) / 4;
6395 last_bytes = INTVAL (operands[2]) & 3;
6396 dstoffset = srcoffset = 0;
6398 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6399 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6401 for (i = 0; in_words_to_go >= 2; i+=4)
6403 if (in_words_to_go > 4)
6404 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6405 srcbase, &srcoffset));
6406 else
6407 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6408 FALSE, srcbase, &srcoffset));
6410 if (out_words_to_go)
6412 if (out_words_to_go > 4)
6413 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6414 dstbase, &dstoffset));
6415 else if (out_words_to_go != 1)
6416 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6417 dst, TRUE,
6418 (last_bytes == 0
6419 ? FALSE : TRUE),
6420 dstbase, &dstoffset));
6421 else
6423 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6424 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6425 if (last_bytes != 0)
6427 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6428 dstoffset += 4;
6433 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6434 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6437 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6438 if (out_words_to_go)
6440 rtx sreg;
6442 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6443 sreg = copy_to_reg (mem);
6445 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6446 emit_move_insn (mem, sreg);
6447 in_words_to_go--;
6449 gcc_assert (!in_words_to_go); /* Sanity check */
6452 if (in_words_to_go)
6454 gcc_assert (in_words_to_go > 0);
6456 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6457 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6460 gcc_assert (!last_bytes || part_bytes_reg);
6462 if (BYTES_BIG_ENDIAN && last_bytes)
6464 rtx tmp = gen_reg_rtx (SImode);
6466 /* The bytes we want are in the top end of the word. */
6467 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6468 GEN_INT (8 * (4 - last_bytes))));
6469 part_bytes_reg = tmp;
6471 while (last_bytes)
6473 mem = adjust_automodify_address (dstbase, QImode,
6474 plus_constant (dst, last_bytes - 1),
6475 dstoffset + last_bytes - 1);
6476 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6478 if (--last_bytes)
6480 tmp = gen_reg_rtx (SImode);
6481 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6482 part_bytes_reg = tmp;
6487 else
6489 if (last_bytes > 1)
6491 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6492 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6493 last_bytes -= 2;
6494 if (last_bytes)
6496 rtx tmp = gen_reg_rtx (SImode);
6497 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6498 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6499 part_bytes_reg = tmp;
6500 dstoffset += 2;
6504 if (last_bytes)
6506 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6507 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6511 return 1;
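/* Worked example (editorial illustration, assuming little-endian): a
   10-byte copy gives in_words_to_go = 3, out_words_to_go = 2 and
   last_bytes = 2, so the code above emits roughly
     ldmia r_src, {r0, r1, r2}   @ three words loaded
     stmia r_dst!, {r0, r1}      @ two whole words stored
     strh  r2, [r_dst]           @ two tail bytes from part_bytes_reg  */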
6514 /* Select a dominance comparison mode if possible for a test of the general
6515 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6516 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6517 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6518 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6519 In all cases OP will be either EQ or NE, but we don't need to know which
6520 here. If we are unable to support a dominance comparison we return
6521 CC mode. This will then fail to match for the RTL expressions that
6522 generate this call. */
6523 enum machine_mode
6524 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6526 enum rtx_code cond1, cond2;
6527 int swapped = 0;
6529 /* Currently we will probably get the wrong result if the individual
6530 comparisons are not simple. This also ensures that it is safe to
6531 reverse a comparison if necessary. */
6532 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6533 != CCmode)
6534 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6535 != CCmode))
6536 return CCmode;
6538 /* The if_then_else variant of this tests the second condition if the
6539 first passes, but is true if the first fails. Reverse the first
6540 condition to get a true "inclusive-or" expression. */
6541 if (cond_or == DOM_CC_NX_OR_Y)
6542 cond1 = reverse_condition (cond1);
6544 /* If the comparisons are not equal, and one doesn't dominate the other,
6545 then we can't do this. */
6546 if (cond1 != cond2
6547 && !comparison_dominates_p (cond1, cond2)
6548 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6549 return CCmode;
6551 if (swapped)
6553 enum rtx_code temp = cond1;
6554 cond1 = cond2;
6555 cond2 = temp;
6558 switch (cond1)
6560 case EQ:
6561 if (cond_or == DOM_CC_X_AND_Y)
6562 return CC_DEQmode;
6564 switch (cond2)
6566 case EQ: return CC_DEQmode;
6567 case LE: return CC_DLEmode;
6568 case LEU: return CC_DLEUmode;
6569 case GE: return CC_DGEmode;
6570 case GEU: return CC_DGEUmode;
6571 default: gcc_unreachable ();
6574 case LT:
6575 if (cond_or == DOM_CC_X_AND_Y)
6576 return CC_DLTmode;
6578 switch (cond2)
6580 case LT:
6581 return CC_DLTmode;
6582 case LE:
6583 return CC_DLEmode;
6584 case NE:
6585 return CC_DNEmode;
6586 default:
6587 gcc_unreachable ();
6590 case GT:
6591 if (cond_or == DOM_CC_X_AND_Y)
6592 return CC_DGTmode;
6594 switch (cond2)
6596 case GT:
6597 return CC_DGTmode;
6598 case GE:
6599 return CC_DGEmode;
6600 case NE:
6601 return CC_DNEmode;
6602 default:
6603 gcc_unreachable ();
6606 case LTU:
6607 if (cond_or == DOM_CC_X_AND_Y)
6608 return CC_DLTUmode;
6610 switch (cond2)
6612 case LTU:
6613 return CC_DLTUmode;
6614 case LEU:
6615 return CC_DLEUmode;
6616 case NE:
6617 return CC_DNEmode;
6618 default:
6619 gcc_unreachable ();
6622 case GTU:
6623 if (cond_or == DOM_CC_X_AND_Y)
6624 return CC_DGTUmode;
6626 switch (cond2)
6628 case GTU:
6629 return CC_DGTUmode;
6630 case GEU:
6631 return CC_DGEUmode;
6632 case NE:
6633 return CC_DNEmode;
6634 default:
6635 gcc_unreachable ();
6638 /* The remaining cases only occur when both comparisons are the
6639 same. */
6640 case NE:
6641 gcc_assert (cond1 == cond2);
6642 return CC_DNEmode;
6644 case LE:
6645 gcc_assert (cond1 == cond2);
6646 return CC_DLEmode;
6648 case GE:
6649 gcc_assert (cond1 == cond2);
6650 return CC_DGEmode;
6652 case LEU:
6653 gcc_assert (cond1 == cond2);
6654 return CC_DLEUmode;
6656 case GEU:
6657 gcc_assert (cond1 == cond2);
6658 return CC_DGEUmode;
6660 default:
6661 gcc_unreachable ();
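/* Worked example (editorial illustration): for (a < b || a <= b) we
   get cond1 = LT and cond2 = LE.  The conditions differ but LT
   dominates LE, so no swap is needed and the LT arm of the switch
   above returns CC_DLEmode.  */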
6665 enum machine_mode
6666 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6668 /* All floating point compares return CCFP if it is an equality
6669 comparison, and CCFPE otherwise. */
6670 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6672 switch (op)
6674 case EQ:
6675 case NE:
6676 case UNORDERED:
6677 case ORDERED:
6678 case UNLT:
6679 case UNLE:
6680 case UNGT:
6681 case UNGE:
6682 case UNEQ:
6683 case LTGT:
6684 return CCFPmode;
6686 case LT:
6687 case LE:
6688 case GT:
6689 case GE:
6690 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6691 return CCFPmode;
6692 return CCFPEmode;
6694 default:
6695 gcc_unreachable ();
6699 /* A compare with a shifted operand. Because of canonicalization, the
6700 comparison will have to be swapped when we emit the assembler. */
6701 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6702 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6703 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6704 || GET_CODE (x) == ROTATERT))
6705 return CC_SWPmode;
6707 /* This operation is performed swapped, but since we only rely on the Z
6708 flag we don't need an additional mode. */
6709 if (GET_MODE (y) == SImode && REG_P (y)
6710 && GET_CODE (x) == NEG
6711 && (op == EQ || op == NE))
6712 return CC_Zmode;
6714 /* This is a special case that is used by combine to allow a
6715 comparison of a shifted byte load to be split into a zero-extend
6716 followed by a comparison of the shifted integer (only valid for
6717 equalities and unsigned inequalities). */
6718 if (GET_MODE (x) == SImode
6719 && GET_CODE (x) == ASHIFT
6720 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6721 && GET_CODE (XEXP (x, 0)) == SUBREG
6722 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6723 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6724 && (op == EQ || op == NE
6725 || op == GEU || op == GTU || op == LTU || op == LEU)
6726 && GET_CODE (y) == CONST_INT)
6727 return CC_Zmode;
6729 /* A construct for a conditional compare, if the false arm contains
6730 0, then both conditions must be true, otherwise either condition
6731 must be true. Not all conditions are possible, so CCmode is
6732 returned if it can't be done. */
6733 if (GET_CODE (x) == IF_THEN_ELSE
6734 && (XEXP (x, 2) == const0_rtx
6735 || XEXP (x, 2) == const1_rtx)
6736 && COMPARISON_P (XEXP (x, 0))
6737 && COMPARISON_P (XEXP (x, 1)))
6738 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6739 INTVAL (XEXP (x, 2)));
6741 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6742 if (GET_CODE (x) == AND
6743 && COMPARISON_P (XEXP (x, 0))
6744 && COMPARISON_P (XEXP (x, 1)))
6745 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6746 DOM_CC_X_AND_Y);
6748 if (GET_CODE (x) == IOR
6749 && COMPARISON_P (XEXP (x, 0))
6750 && COMPARISON_P (XEXP (x, 1)))
6751 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6752 DOM_CC_X_OR_Y);
6754 /* An operation (on Thumb) where we want to test for a single bit.
6755 This is done by shifting that bit up into the top bit of a
6756 scratch register; we can then branch on the sign bit. */
6757 if (TARGET_THUMB
6758 && GET_MODE (x) == SImode
6759 && (op == EQ || op == NE)
6760 && GET_CODE (x) == ZERO_EXTRACT
6761 && XEXP (x, 1) == const1_rtx)
6762 return CC_Nmode;
6764 /* An operation that sets the condition codes as a side-effect, the
6765 V flag is not set correctly, so we can only use comparisons where
6766 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6767 instead.) */
6768 if (GET_MODE (x) == SImode
6769 && y == const0_rtx
6770 && (op == EQ || op == NE || op == LT || op == GE)
6771 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6772 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6773 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6774 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6775 || GET_CODE (x) == LSHIFTRT
6776 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6777 || GET_CODE (x) == ROTATERT
6778 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6779 return CC_NOOVmode;
6781 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6782 return CC_Zmode;
6784 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6785 && GET_CODE (x) == PLUS
6786 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6787 return CC_Cmode;
6789 return CCmode;
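/* Example (editorial illustration): the unsigned overflow idiom
   'a + b < a' compares (plus:SI a b) against a with LTU, so the PLUS
   case above returns CC_Cmode and only the carry flag is tested.  */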
6792 /* X and Y are two things to compare using CODE. Emit the compare insn
6793 and return the rtx for register 0 in the proper mode. Floating-point
6794 compares need no special handling here; SELECT_CC_MODE picks a CCFP mode. */
6795 rtx
6796 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6798 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6799 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6801 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
6803 return cc_reg;
6806 /* Generate a sequence of insns that will generate the correct return
6807 address mask depending on the physical architecture that the program
6808 is running on. */
6809 rtx
6810 arm_gen_return_addr_mask (void)
6812 rtx reg = gen_reg_rtx (Pmode);
6814 emit_insn (gen_return_addr_mask (reg));
6815 return reg;
6818 void
6819 arm_reload_in_hi (rtx *operands)
6821 rtx ref = operands[1];
6822 rtx base, scratch;
6823 HOST_WIDE_INT offset = 0;
6825 if (GET_CODE (ref) == SUBREG)
6827 offset = SUBREG_BYTE (ref);
6828 ref = SUBREG_REG (ref);
6831 if (GET_CODE (ref) == REG)
6833 /* We have a pseudo which has been spilt onto the stack; there
6834 are two cases here: the first where there is a simple
6835 stack-slot replacement and a second where the stack-slot is
6836 out of range, or is used as a subreg. */
6837 if (reg_equiv_mem[REGNO (ref)])
6839 ref = reg_equiv_mem[REGNO (ref)];
6840 base = find_replacement (&XEXP (ref, 0));
6842 else
6843 /* The slot is out of range, or was dressed up in a SUBREG. */
6844 base = reg_equiv_address[REGNO (ref)];
6846 else
6847 base = find_replacement (&XEXP (ref, 0));
6849 /* Handle the case where the address is too complex to be offset by 1. */
6850 if (GET_CODE (base) == MINUS
6851 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6853 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6855 emit_set_insn (base_plus, base);
6856 base = base_plus;
6858 else if (GET_CODE (base) == PLUS)
6860 /* The addend must be CONST_INT, or we would have dealt with it above. */
6861 HOST_WIDE_INT hi, lo;
6863 offset += INTVAL (XEXP (base, 1));
6864 base = XEXP (base, 0);
6866 /* Rework the address into a legal sequence of insns. */
6867 /* Valid range for lo is -4095 -> 4095 */
6868 lo = (offset >= 0
6869 ? (offset & 0xfff)
6870 : -((-offset) & 0xfff));
6872 /* Corner case, if lo is the max offset then we would be out of range
6873 once we have added the additional 1 below, so bump the msb into the
6874 pre-loading insn(s). */
6875 if (lo == 4095)
6876 lo &= 0x7ff;
6878 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6879 ^ (HOST_WIDE_INT) 0x80000000)
6880 - (HOST_WIDE_INT) 0x80000000);
6882 gcc_assert (hi + lo == offset);
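/* Worked example (editorial illustration): offset = 0x1234 splits into
   lo = 0x234 and hi = 0x1000.  For the corner case offset = 4095, lo
   is trimmed to 2047 so that lo + 1 (for the second byte load) stays
   within the +/-4095 byte-load range, giving hi = 2048.  */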
6884 if (hi != 0)
6886 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6888 /* Get the base address; addsi3 knows how to handle constants
6889 that require more than one insn. */
6890 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6891 base = base_plus;
6892 offset = lo;
6896 /* Operands[2] may overlap operands[0] (though it won't overlap
6897 operands[1]), that's why we asked for a DImode reg -- so we can
6898 use the bit that does not overlap. */
6899 if (REGNO (operands[2]) == REGNO (operands[0]))
6900 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6901 else
6902 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6904 emit_insn (gen_zero_extendqisi2 (scratch,
6905 gen_rtx_MEM (QImode,
6906 plus_constant (base,
6907 offset))));
6908 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6909 gen_rtx_MEM (QImode,
6910 plus_constant (base,
6911 offset + 1))));
6912 if (!BYTES_BIG_ENDIAN)
6913 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6914 gen_rtx_IOR (SImode,
6915 gen_rtx_ASHIFT
6916 (SImode,
6917 gen_rtx_SUBREG (SImode, operands[0], 0),
6918 GEN_INT (8)),
6919 scratch));
6920 else
6921 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6922 gen_rtx_IOR (SImode,
6923 gen_rtx_ASHIFT (SImode, scratch,
6924 GEN_INT (8)),
6925 gen_rtx_SUBREG (SImode, operands[0], 0)));
6928 /* Handle storing a half-word to memory during reload by synthesizing as two
6929 byte stores. Take care not to clobber the input values until after we
6930 have moved them somewhere safe. This code assumes that if the DImode
6931 scratch in operands[2] overlaps either the input value or output address
6932 in some way, then that value must die in this insn (we absolutely need
6933 two scratch registers for some corner cases). */
6934 void
6935 arm_reload_out_hi (rtx *operands)
6937 rtx ref = operands[0];
6938 rtx outval = operands[1];
6939 rtx base, scratch;
6940 HOST_WIDE_INT offset = 0;
6942 if (GET_CODE (ref) == SUBREG)
6944 offset = SUBREG_BYTE (ref);
6945 ref = SUBREG_REG (ref);
6948 if (GET_CODE (ref) == REG)
6950 /* We have a pseudo which has been spilt onto the stack; there
6951 are two cases here: the first where there is a simple
6952 stack-slot replacement and a second where the stack-slot is
6953 out of range, or is used as a subreg. */
6954 if (reg_equiv_mem[REGNO (ref)])
6956 ref = reg_equiv_mem[REGNO (ref)];
6957 base = find_replacement (&XEXP (ref, 0));
6959 else
6960 /* The slot is out of range, or was dressed up in a SUBREG. */
6961 base = reg_equiv_address[REGNO (ref)];
6963 else
6964 base = find_replacement (&XEXP (ref, 0));
6966 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6968 /* Handle the case where the address is too complex to be offset by 1. */
6969 if (GET_CODE (base) == MINUS
6970 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6972 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6974 /* Be careful not to destroy OUTVAL. */
6975 if (reg_overlap_mentioned_p (base_plus, outval))
6977 /* Updating base_plus might destroy outval, see if we can
6978 swap the scratch and base_plus. */
6979 if (!reg_overlap_mentioned_p (scratch, outval))
6981 rtx tmp = scratch;
6982 scratch = base_plus;
6983 base_plus = tmp;
6985 else
6987 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6989 /* Be conservative and copy OUTVAL into the scratch now,
6990 this should only be necessary if outval is a subreg
6991 of something larger than a word. */
6992 /* XXX Might this clobber base? I can't see how it can,
6993 since scratch is known to overlap with OUTVAL, and
6994 must be wider than a word. */
6995 emit_insn (gen_movhi (scratch_hi, outval));
6996 outval = scratch_hi;
7000 emit_set_insn (base_plus, base);
7001 base = base_plus;
7003 else if (GET_CODE (base) == PLUS)
7005 /* The addend must be CONST_INT, or we would have dealt with it above. */
7006 HOST_WIDE_INT hi, lo;
7008 offset += INTVAL (XEXP (base, 1));
7009 base = XEXP (base, 0);
7011 /* Rework the address into a legal sequence of insns. */
7012 /* Valid range for lo is -4095 -> 4095 */
7013 lo = (offset >= 0
7014 ? (offset & 0xfff)
7015 : -((-offset) & 0xfff));
7017 /* Corner case, if lo is the max offset then we would be out of range
7018 once we have added the additional 1 below, so bump the msb into the
7019 pre-loading insn(s). */
7020 if (lo == 4095)
7021 lo &= 0x7ff;
7023 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7024 ^ (HOST_WIDE_INT) 0x80000000)
7025 - (HOST_WIDE_INT) 0x80000000);
7027 gcc_assert (hi + lo == offset);
7029 if (hi != 0)
7031 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7033 /* Be careful not to destroy OUTVAL. */
7034 if (reg_overlap_mentioned_p (base_plus, outval))
7036 /* Updating base_plus might destroy outval, see if we
7037 can swap the scratch and base_plus. */
7038 if (!reg_overlap_mentioned_p (scratch, outval))
7040 rtx tmp = scratch;
7041 scratch = base_plus;
7042 base_plus = tmp;
7044 else
7046 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7048 /* Be conservative and copy outval into scratch now,
7049 this should only be necessary if outval is a
7050 subreg of something larger than a word. */
7051 /* XXX Might this clobber base? I can't see how it
7052 can, since scratch is known to overlap with
7053 outval. */
7054 emit_insn (gen_movhi (scratch_hi, outval));
7055 outval = scratch_hi;
7059 /* Get the base address; addsi3 knows how to handle constants
7060 that require more than one insn. */
7061 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7062 base = base_plus;
7063 offset = lo;
7067 if (BYTES_BIG_ENDIAN)
7069 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7070 plus_constant (base, offset + 1)),
7071 gen_lowpart (QImode, outval)));
7072 emit_insn (gen_lshrsi3 (scratch,
7073 gen_rtx_SUBREG (SImode, outval, 0),
7074 GEN_INT (8)));
7075 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7076 gen_lowpart (QImode, scratch)));
7078 else
7080 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7081 gen_lowpart (QImode, outval)));
7082 emit_insn (gen_lshrsi3 (scratch,
7083 gen_rtx_SUBREG (SImode, outval, 0),
7084 GEN_INT (8)));
7085 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7086 plus_constant (base, offset + 1)),
7087 gen_lowpart (QImode, scratch)));
7091 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
7092 (padded to the size of a word) should be passed in a register. */
7094 static bool
7095 arm_must_pass_in_stack (enum machine_mode mode, tree type)
7097 if (TARGET_AAPCS_BASED)
7098 return must_pass_in_stack_var_size (mode, type);
7099 else
7100 return must_pass_in_stack_var_size_or_pad (mode, type);
7104 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
7105 Return true if an argument passed on the stack should be padded upwards,
7106 i.e. if the least-significant byte has useful data.
7107 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
7108 aggregate types are placed at the lowest memory address. */
7110 bool
7111 arm_pad_arg_upward (enum machine_mode mode, tree type)
7113 if (!TARGET_AAPCS_BASED)
7114 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
7116 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
7117 return false;
7119 return true;
7123 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
7124 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
7125 byte of the register has useful data, and return the opposite if the
7126 most significant byte does.
7127 For AAPCS, small aggregates and small complex types are always padded
7128 upwards. */
7130 bool
7131 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
7132 tree type, int first ATTRIBUTE_UNUSED)
7134 if (TARGET_AAPCS_BASED
7135 && BYTES_BIG_ENDIAN
7136 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
7137 && int_size_in_bytes (type) <= 4)
7138 return true;
7140 /* Otherwise, use default padding. */
7141 return !BYTES_BIG_ENDIAN;
7145 /* Print a symbolic form of X to the debug file, F. */
7146 static void
7147 arm_print_value (FILE *f, rtx x)
7149 switch (GET_CODE (x))
7151 case CONST_INT:
7152 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
7153 return;
7155 case CONST_DOUBLE:
7156 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
7157 return;
7159 case CONST_VECTOR:
7161 int i;
7163 fprintf (f, "<");
7164 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
7166 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
7167 if (i < (CONST_VECTOR_NUNITS (x) - 1))
7168 fputc (',', f);
7170 fprintf (f, ">");
7172 return;
7174 case CONST_STRING:
7175 fprintf (f, "\"%s\"", XSTR (x, 0));
7176 return;
7178 case SYMBOL_REF:
7179 fprintf (f, "`%s'", XSTR (x, 0));
7180 return;
7182 case LABEL_REF:
7183 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
7184 return;
7186 case CONST:
7187 arm_print_value (f, XEXP (x, 0));
7188 return;
7190 case PLUS:
7191 arm_print_value (f, XEXP (x, 0));
7192 fprintf (f, "+");
7193 arm_print_value (f, XEXP (x, 1));
7194 return;
7196 case PC:
7197 fprintf (f, "pc");
7198 return;
7200 default:
7201 fprintf (f, "????");
7202 return;
7206 /* Routines for manipulation of the constant pool. */
7208 /* Arm instructions cannot load a large constant directly into a
7209 register; they have to come from a pc relative load. The constant
7210 must therefore be placed in the addressable range of the pc
7211 relative load. Depending on the precise pc relative load
7212 instruction the range is somewhere between 256 bytes and 4k. This
7213 means that we often have to dump a constant inside a function, and
7214 generate code to branch around it.
7216 It is important to minimize this, since the branches will slow
7217 things down and make the code larger.
7219 Normally we can hide the table after an existing unconditional
7220 branch so that there is no interruption of the flow, but in the
7221 worst case the code looks like this:
7223 ldr rn, L1
7224 ...
7225 b L2
7226 align
7227 L1: .long value
7228 L2:
7229 ...
7231 ldr rn, L3
7232 ...
7233 b L4
7234 align
7235 L3: .long value
7236 L4:
7237 ...
7239 We fix this by performing a scan after scheduling, which notices
7240 which instructions need to have their operands fetched from the
7241 constant table and builds the table.
7243 The algorithm starts by building a table of all the constants that
7244 need fixing up and all the natural barriers in the function (places
7245 where a constant table can be dropped without breaking the flow).
7246 For each fixup we note how far the pc-relative replacement will be
7247 able to reach and the offset of the instruction into the function.
7249 Having built the table we then group the fixes together to form
7250 tables that are as large as possible (subject to addressing
7251 constraints) and emit each table of constants after the last
7252 barrier that is within range of all the instructions in the group.
7253 If a group does not contain a barrier, then we forcibly create one
7254 by inserting a jump instruction into the flow. Once the table has
7255 been inserted, the insns are then modified to reference the
7256 relevant entry in the pool.
7258 Possible enhancements to the algorithm (not implemented) are:
7260 1) For some processors and object formats, there may be benefit in
7261 aligning the pools to the start of cache lines; this alignment
7262 would need to be taken into account when calculating addressability
7263 of a pool. */
7265 /* These typedefs are located at the start of this file, so that
7266 they can be used in the prototypes there. This comment is to
7267 remind readers of that fact so that the following structures
7268 can be understood more easily.
7270 typedef struct minipool_node Mnode;
7271 typedef struct minipool_fixup Mfix; */
7273 struct minipool_node
7275 /* Doubly linked chain of entries. */
7276 Mnode * next;
7277 Mnode * prev;
7278 /* The maximum offset into the code that this entry can be placed. While
7279 pushing fixes for forward references, all entries are sorted in order
7280 of increasing max_address. */
7281 HOST_WIDE_INT max_address;
7282 /* Similarly for an entry inserted for a backwards ref. */
7283 HOST_WIDE_INT min_address;
7284 /* The number of fixes referencing this entry. This can become zero
7285 if we "unpush" an entry. In this case we ignore the entry when we
7286 come to emit the code. */
7287 int refcount;
7288 /* The offset from the start of the minipool. */
7289 HOST_WIDE_INT offset;
7291 /* The value in the table. */
7291 rtx value;
7292 /* The mode of value. */
7293 enum machine_mode mode;
7294 /* The size of the value. With iWMMXt enabled
7295 sizes > 4 also imply an alignment of 8 bytes. */
7296 int fix_size;
7299 struct minipool_fixup
7301 Mfix * next;
7302 rtx insn;
7303 HOST_WIDE_INT address;
7304 rtx * loc;
7305 enum machine_mode mode;
7306 int fix_size;
7307 rtx value;
7308 Mnode * minipool;
7309 HOST_WIDE_INT forwards;
7310 HOST_WIDE_INT backwards;
7313 /* Fixes less than a word need padding out to a word boundary. */
7314 #define MINIPOOL_FIX_SIZE(mode) \
7315 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
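/* For example (editorial illustration):
     MINIPOOL_FIX_SIZE (QImode) == 4   (1 byte, padded to a word)
     MINIPOOL_FIX_SIZE (SImode) == 4
     MINIPOOL_FIX_SIZE (DImode) == 8  */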
7317 static Mnode * minipool_vector_head;
7318 static Mnode * minipool_vector_tail;
7319 static rtx minipool_vector_label;
7320 static int minipool_pad;
7322 /* The linked list of all minipool fixes required for this function. */
7323 Mfix * minipool_fix_head;
7324 Mfix * minipool_fix_tail;
7325 /* The fix entry for the current minipool, once it has been placed. */
7326 Mfix * minipool_barrier;
7328 /* Determines if INSN is the start of a jump table. Returns the end
7329 of the TABLE or NULL_RTX. */
7330 static rtx
7331 is_jump_table (rtx insn)
7333 rtx table;
7335 if (GET_CODE (insn) == JUMP_INSN
7336 && JUMP_LABEL (insn) != NULL
7337 && ((table = next_real_insn (JUMP_LABEL (insn)))
7338 == next_real_insn (insn))
7339 && table != NULL
7340 && GET_CODE (table) == JUMP_INSN
7341 && (GET_CODE (PATTERN (table)) == ADDR_VEC
7342 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
7343 return table;
7345 return NULL_RTX;
7348 #ifndef JUMP_TABLES_IN_TEXT_SECTION
7349 #define JUMP_TABLES_IN_TEXT_SECTION 0
7350 #endif
7352 static HOST_WIDE_INT
7353 get_jump_table_size (rtx insn)
7355 /* ADDR_VECs only take room if read-only data goes into the text
7356 section. */
7357 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
7359 rtx body = PATTERN (insn);
7360 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7362 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
7365 return 0;
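/* Example (editorial illustration): an ADDR_DIFF_VEC of 10 HImode
   label differences occupies 2 * 10 == 20 bytes, whereas an absolute
   SImode ADDR_VEC with the same 10 entries takes 40 bytes.  */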
7368 /* Move a minipool fix MP from its current location to before MAX_MP.
7369 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7370 constraints may need updating. */
7371 static Mnode *
7372 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7373 HOST_WIDE_INT max_address)
7375 /* The code below assumes these are different. */
7376 gcc_assert (mp != max_mp);
7378 if (max_mp == NULL)
7380 if (max_address < mp->max_address)
7381 mp->max_address = max_address;
7383 else
7385 if (max_address > max_mp->max_address - mp->fix_size)
7386 mp->max_address = max_mp->max_address - mp->fix_size;
7387 else
7388 mp->max_address = max_address;
7390 /* Unlink MP from its current position. Since max_mp is non-null,
7391 mp->prev must be non-null. */
7392 mp->prev->next = mp->next;
7393 if (mp->next != NULL)
7394 mp->next->prev = mp->prev;
7395 else
7396 minipool_vector_tail = mp->prev;
7398 /* Re-insert it before MAX_MP. */
7399 mp->next = max_mp;
7400 mp->prev = max_mp->prev;
7401 max_mp->prev = mp;
7403 if (mp->prev != NULL)
7404 mp->prev->next = mp;
7405 else
7406 minipool_vector_head = mp;
7409 /* Save the new entry. */
7410 max_mp = mp;
7412 /* Scan over the preceding entries and adjust their addresses as
7413 required. */
7414 while (mp->prev != NULL
7415 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7417 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7418 mp = mp->prev;
7421 return max_mp;
7424 /* Add a constant to the minipool for a forward reference. Returns the
7425 node added or NULL if the constant will not fit in this pool. */
7426 static Mnode *
7427 add_minipool_forward_ref (Mfix *fix)
7429 /* If set, max_mp is the first pool_entry that has a lower
7430 constraint than the one we are trying to add. */
7431 Mnode * max_mp = NULL;
7432 HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
7433 Mnode * mp;
7435 /* If this fix's address is greater than the address of the first
7436 entry, then we can't put the fix in this pool. We subtract the
7437 size of the current fix to ensure that if the table is fully
7438 packed we still have enough room to insert this value by shuffling
7439 the other fixes forwards. */
7440 if (minipool_vector_head &&
7441 fix->address >= minipool_vector_head->max_address - fix->fix_size)
7442 return NULL;
7444 /* Scan the pool to see if a constant with the same value has
7445 already been added. While we are doing this, also note the
7446 location where we must insert the constant if it doesn't already
7447 exist. */
7448 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7450 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7451 && fix->mode == mp->mode
7452 && (GET_CODE (fix->value) != CODE_LABEL
7453 || (CODE_LABEL_NUMBER (fix->value)
7454 == CODE_LABEL_NUMBER (mp->value)))
7455 && rtx_equal_p (fix->value, mp->value))
7457 /* More than one fix references this entry. */
7458 mp->refcount++;
7459 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7462 /* Note the insertion point if necessary. */
7463 if (max_mp == NULL
7464 && mp->max_address > max_address)
7465 max_mp = mp;
7467 /* If we are inserting an 8-byte aligned quantity and
7468 we have not already found an insertion point, then
7469 make sure that all such 8-byte aligned quantities are
7470 placed at the start of the pool. */
7471 if (ARM_DOUBLEWORD_ALIGN
7472 && max_mp == NULL
7473 && fix->fix_size == 8
7474 && mp->fix_size != 8)
7476 max_mp = mp;
7477 max_address = mp->max_address;
7481 /* The value is not currently in the minipool, so we need to create
7482 a new entry for it. If MAX_MP is NULL, the entry will be put on
7483 the end of the list since the placement is less constrained than
7484 any existing entry. Otherwise, we insert the new fix before
7485 MAX_MP and, if necessary, adjust the constraints on the other
7486 entries. */
7487 mp = XNEW (Mnode);
7488 mp->fix_size = fix->fix_size;
7489 mp->mode = fix->mode;
7490 mp->value = fix->value;
7491 mp->refcount = 1;
7492 /* Not yet required for a backwards ref. */
7493 mp->min_address = -65536;
7495 if (max_mp == NULL)
7497 mp->max_address = max_address;
7498 mp->next = NULL;
7499 mp->prev = minipool_vector_tail;
7501 if (mp->prev == NULL)
7503 minipool_vector_head = mp;
7504 minipool_vector_label = gen_label_rtx ();
7506 else
7507 mp->prev->next = mp;
7509 minipool_vector_tail = mp;
7511 else
7513 if (max_address > max_mp->max_address - mp->fix_size)
7514 mp->max_address = max_mp->max_address - mp->fix_size;
7515 else
7516 mp->max_address = max_address;
7518 mp->next = max_mp;
7519 mp->prev = max_mp->prev;
7520 max_mp->prev = mp;
7521 if (mp->prev != NULL)
7522 mp->prev->next = mp;
7523 else
7524 minipool_vector_head = mp;
7527 /* Save the new entry. */
7528 max_mp = mp;
7530 /* Scan over the preceding entries and adjust their addresses as
7531 required. */
7532 while (mp->prev != NULL
7533 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7535 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7536 mp = mp->prev;
7539 return max_mp;
7542 static Mnode *
7543 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7544 HOST_WIDE_INT min_address)
7546 HOST_WIDE_INT offset;
7548 /* The code below assumes these are different. */
7549 gcc_assert (mp != min_mp);
7551 if (min_mp == NULL)
7553 if (min_address > mp->min_address)
7554 mp->min_address = min_address;
7556 else
7558 /* We will adjust this below if it is too loose. */
7559 mp->min_address = min_address;
7561 /* Unlink MP from its current position. Since min_mp is non-null,
7562 mp->next must be non-null. */
7563 mp->next->prev = mp->prev;
7564 if (mp->prev != NULL)
7565 mp->prev->next = mp->next;
7566 else
7567 minipool_vector_head = mp->next;
7569 /* Reinsert it after MIN_MP. */
7570 mp->prev = min_mp;
7571 mp->next = min_mp->next;
7572 min_mp->next = mp;
7573 if (mp->next != NULL)
7574 mp->next->prev = mp;
7575 else
7576 minipool_vector_tail = mp;
7579 min_mp = mp;
7581 offset = 0;
7582 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7584 mp->offset = offset;
7585 if (mp->refcount > 0)
7586 offset += mp->fix_size;
7588 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7589 mp->next->min_address = mp->min_address + mp->fix_size;
7592 return min_mp;
7595 /* Add a constant to the minipool for a backward reference. Returns the
7596 node added or NULL if the constant will not fit in this pool.
7598 Note that the code for insertion for a backwards reference can be
7599 somewhat confusing because the calculated offsets for each fix do
7600 not take into account the size of the pool (which is still under
7601 construction). */
7602 static Mnode *
7603 add_minipool_backward_ref (Mfix *fix)
7605 /* If set, min_mp is the last pool_entry that has a lower constraint
7606 than the one we are trying to add. */
7607 Mnode *min_mp = NULL;
7608 /* This can be negative, since it is only a constraint. */
7609 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7610 Mnode *mp;
7612 /* If we can't reach the current pool from this insn, or if we can't
7613 insert this entry at the end of the pool without pushing other
7614 fixes out of range, then we don't try. This ensures that we
7615 can't fail later on. */
7616 if (min_address >= minipool_barrier->address
7617 || (minipool_vector_tail->min_address + fix->fix_size
7618 >= minipool_barrier->address))
7619 return NULL;
7621 /* Scan the pool to see if a constant with the same value has
7622 already been added. While we are doing this, also note the
7623 location where we must insert the constant if it doesn't already
7624 exist. */
7625 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7627 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7628 && fix->mode == mp->mode
7629 && (GET_CODE (fix->value) != CODE_LABEL
7630 || (CODE_LABEL_NUMBER (fix->value)
7631 == CODE_LABEL_NUMBER (mp->value)))
7632 && rtx_equal_p (fix->value, mp->value)
7633 /* Check that there is enough slack to move this entry to the
7634 end of the table (this is conservative). */
7635 && (mp->max_address
7636 > (minipool_barrier->address
7637 + minipool_vector_tail->offset
7638 + minipool_vector_tail->fix_size)))
7640 mp->refcount++;
7641 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7644 if (min_mp != NULL)
7645 mp->min_address += fix->fix_size;
7646 else
7648 /* Note the insertion point if necessary. */
7649 if (mp->min_address < min_address)
7651 /* For now, we do not allow the insertion of nodes requiring 8-byte
7652 alignment anywhere but at the start of the pool. */
7653 if (ARM_DOUBLEWORD_ALIGN
7654 && fix->fix_size == 8 && mp->fix_size != 8)
7655 return NULL;
7656 else
7657 min_mp = mp;
7659 else if (mp->max_address
7660 < minipool_barrier->address + mp->offset + fix->fix_size)
7662 /* Inserting before this entry would push the fix beyond
7663 its maximum address (which can happen if we have
7664 re-located a forwards fix); force the new fix to come
7665 after it. */
7666 min_mp = mp;
7667 min_address = mp->min_address + fix->fix_size;
7669 /* If we are inserting an 8-byte aligned quantity and
7670 we have not already found an insertion point, then
7671 make sure that all such 8-byte aligned quantities are
7672 placed at the start of the pool. */
7673 else if (ARM_DOUBLEWORD_ALIGN
7674 && min_mp == NULL
7675 && fix->fix_size == 8
7676 && mp->fix_size < 8)
7678 min_mp = mp;
7679 min_address = mp->min_address + fix->fix_size;
7684 /* We need to create a new entry. */
7685 mp = XNEW (Mnode);
7686 mp->fix_size = fix->fix_size;
7687 mp->mode = fix->mode;
7688 mp->value = fix->value;
7689 mp->refcount = 1;
7690 mp->max_address = minipool_barrier->address + 65536;
7692 mp->min_address = min_address;
7694 if (min_mp == NULL)
7696 mp->prev = NULL;
7697 mp->next = minipool_vector_head;
7699 if (mp->next == NULL)
7701 minipool_vector_tail = mp;
7702 minipool_vector_label = gen_label_rtx ();
7704 else
7705 mp->next->prev = mp;
7707 minipool_vector_head = mp;
7709 else
7711 mp->next = min_mp->next;
7712 mp->prev = min_mp;
7713 min_mp->next = mp;
7715 if (mp->next != NULL)
7716 mp->next->prev = mp;
7717 else
7718 minipool_vector_tail = mp;
7721 /* Save the new entry. */
7722 min_mp = mp;
7724 if (mp->prev)
7725 mp = mp->prev;
7726 else
7727 mp->offset = 0;
7729 /* Scan over the following entries and adjust their offsets. */
7730 while (mp->next != NULL)
7732 if (mp->next->min_address < mp->min_address + mp->fix_size)
7733 mp->next->min_address = mp->min_address + mp->fix_size;
7735 if (mp->refcount)
7736 mp->next->offset = mp->offset + mp->fix_size;
7737 else
7738 mp->next->offset = mp->offset;
7740 mp = mp->next;
7743 return min_mp;
7746 static void
7747 assign_minipool_offsets (Mfix *barrier)
7749 HOST_WIDE_INT offset = 0;
7750 Mnode *mp;
7752 minipool_barrier = barrier;
7754 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7756 mp->offset = offset;
7758 if (mp->refcount > 0)
7759 offset += mp->fix_size;
7763 /* Output the literal table */
7764 static void
7765 dump_minipool (rtx scan)
7767 Mnode * mp;
7768 Mnode * nmp;
7769 int align64 = 0;
7771 if (ARM_DOUBLEWORD_ALIGN)
7772 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7773 if (mp->refcount > 0 && mp->fix_size == 8)
7775 align64 = 1;
7776 break;
7779 if (dump_file)
7780 fprintf (dump_file,
7781 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7782 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7784 scan = emit_label_after (gen_label_rtx (), scan);
7785 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7786 scan = emit_label_after (minipool_vector_label, scan);
7788 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7790 if (mp->refcount > 0)
7792 if (dump_file)
7794 fprintf (dump_file,
7795 ";; Offset %u, min %ld, max %ld ",
7796 (unsigned) mp->offset, (unsigned long) mp->min_address,
7797 (unsigned long) mp->max_address);
7798 arm_print_value (dump_file, mp->value);
7799 fputc ('\n', dump_file);
7802 switch (mp->fix_size)
7804 #ifdef HAVE_consttable_1
7805 case 1:
7806 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7807 break;
7809 #endif
7810 #ifdef HAVE_consttable_2
7811 case 2:
7812 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7813 break;
7815 #endif
7816 #ifdef HAVE_consttable_4
7817 case 4:
7818 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7819 break;
7821 #endif
7822 #ifdef HAVE_consttable_8
7823 case 8:
7824 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7825 break;
7827 #endif
7828 default:
7829 gcc_unreachable ();
7833 nmp = mp->next;
7834 free (mp);
7837 minipool_vector_head = minipool_vector_tail = NULL;
7838 scan = emit_insn_after (gen_consttable_end (), scan);
7839 scan = emit_barrier_after (scan);
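/* For illustration (editorial sketch; the exact directives depend on
   the consttable_* patterns in arm.md): a pool holding the single
   SImode constant 0x12345678 is dumped roughly as
     .LCP:               @ minipool_vector_label
       .align  2
       .word   0x12345678
   followed by the consttable_end marker and a barrier.  */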
7842 /* Return the cost of forcibly inserting a barrier after INSN. */
7843 static int
7844 arm_barrier_cost (rtx insn)
7846 /* Basing the location of the pool on the loop depth is preferable,
7847 but at the moment, the basic block information seems to be
7848 corrupted by this stage of the compilation. */
7849 int base_cost = 50;
7850 rtx next = next_nonnote_insn (insn);
7852 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7853 base_cost -= 20;
7855 switch (GET_CODE (insn))
7857 case CODE_LABEL:
7858 /* It will always be better to place the table before the label, rather
7859 than after it. */
7860 return 50;
7862 case INSN:
7863 case CALL_INSN:
7864 return base_cost;
7866 case JUMP_INSN:
7867 return base_cost - 10;
7869 default:
7870 return base_cost + 10;
7874 /* Find the best place in the insn stream in the range
7875 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7876 Create the barrier by inserting a jump and add a new fix entry for
7877 it. */
7878 static Mfix *
7879 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7881 HOST_WIDE_INT count = 0;
7882 rtx barrier;
7883 rtx from = fix->insn;
7884 rtx selected = from;
7885 int selected_cost;
7886 HOST_WIDE_INT selected_address;
7887 Mfix * new_fix;
7888 HOST_WIDE_INT max_count = max_address - fix->address;
7889 rtx label = gen_label_rtx ();
7891 selected_cost = arm_barrier_cost (from);
7892 selected_address = fix->address;
7894 while (from && count < max_count)
7896 rtx tmp;
7897 int new_cost;
7899 /* This code shouldn't have been called if there was a natural barrier
7900 within range. */
7901 gcc_assert (GET_CODE (from) != BARRIER);
7903 /* Count the length of this insn. */
7904 count += get_attr_length (from);
7906 /* If there is a jump table, add its length. */
7907 tmp = is_jump_table (from);
7908 if (tmp != NULL)
7910 count += get_jump_table_size (tmp);
7912 /* Jump tables aren't in a basic block, so base the cost on
7913 the dispatch insn. If we select this location, we will
7914 still put the pool after the table. */
7915 new_cost = arm_barrier_cost (from);
7917 if (count < max_count && new_cost <= selected_cost)
7919 selected = tmp;
7920 selected_cost = new_cost;
7921 selected_address = fix->address + count;
7924 /* Continue after the dispatch table. */
7925 from = NEXT_INSN (tmp);
7926 continue;
7929 new_cost = arm_barrier_cost (from);
7931 if (count < max_count && new_cost <= selected_cost)
7933 selected = from;
7934 selected_cost = new_cost;
7935 selected_address = fix->address + count;
7938 from = NEXT_INSN (from);
7941 /* Create a new JUMP_INSN that branches around a barrier. */
7942 from = emit_jump_insn_after (gen_jump (label), selected);
7943 JUMP_LABEL (from) = label;
7944 barrier = emit_barrier_after (from);
7945 emit_label_after (label, barrier);
7947 /* Create a minipool barrier entry for the new barrier. */
7948 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7949 new_fix->insn = barrier;
7950 new_fix->address = selected_address;
7951 new_fix->next = fix->next;
7952 fix->next = new_fix;
7954 return new_fix;
7957 /* Record that there is a natural barrier in the insn stream at
7958 ADDRESS. */
7959 static void
7960 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7962 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7964 fix->insn = insn;
7965 fix->address = address;
7967 fix->next = NULL;
7968 if (minipool_fix_head != NULL)
7969 minipool_fix_tail->next = fix;
7970 else
7971 minipool_fix_head = fix;
7973 minipool_fix_tail = fix;
/* Record INSN, which will need fixing up to load a value from the
   minipool.  ADDRESS is the offset of the insn since the start of the
   function; LOC is a pointer to the part of the insn which requires
   fixing; VALUE is the constant that must be loaded, which is of type
   MODE.  */
static void
push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
		   enum machine_mode mode, rtx value)
{
  Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));

#ifdef AOF_ASSEMBLER
  /* PIC symbol references need to be converted into offsets into the
     based area.  */
  /* XXX This shouldn't be done here.  */
  if (flag_pic && GET_CODE (value) == SYMBOL_REF)
    value = aof_pic_entry (value);
#endif /* AOF_ASSEMBLER */

  fix->insn = insn;
  fix->address = address;
  fix->loc = loc;
  fix->mode = mode;
  fix->fix_size = MINIPOOL_FIX_SIZE (mode);
  fix->value = value;
  fix->forwards = get_attr_pool_range (insn);
  fix->backwards = get_attr_neg_pool_range (insn);
  fix->minipool = NULL;

  /* If an insn doesn't have a range defined for it, then it isn't
     expecting to be reworked by this code.  Better to stop now than
     to generate duff assembly code.  */
  gcc_assert (fix->forwards || fix->backwards);

  /* If an entry requires 8-byte alignment then assume all constant pools
     require 4 bytes of padding.  Trying to do this later on a per-pool
     basis is awkward because existing pool entries have to be modified.  */
  if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
    minipool_pad = 4;

  if (dump_file)
    {
      fprintf (dump_file,
	       ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
	       GET_MODE_NAME (mode),
	       INSN_UID (insn), (unsigned long) address,
	       -1 * (long)fix->backwards, (long)fix->forwards);
      arm_print_value (dump_file, fix->value);
      fprintf (dump_file, "\n");
    }

  /* Add it to the chain of fixes.  */
  fix->next = NULL;

  if (minipool_fix_head != NULL)
    minipool_fix_tail->next = fix;
  else
    minipool_fix_head = fix;

  minipool_fix_tail = fix;
}
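
/* A worked example (illustrative only; the exact numbers come from the
   pool_range/neg_pool_range insn attributes in arm.md, so treat these
   values as an assumption): an ARM-mode SImode load such as
   "ldr r0, .LCP0" typically carries pool_range 4096 and neg_pool_range
   4084, so the fix recorded above can be satisfied by a minipool dumped
   anywhere from ~4084 bytes before to ~4096 bytes after the insn.  */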
/* Return the cost of synthesizing a 64-bit constant VAL inline.
   Returns the number of insns needed, or 99 if we don't know how to
   do it.  */
int
arm_const_double_inline_cost (rtx val)
{
  rtx lowpart, highpart;
  enum machine_mode mode;

  mode = GET_MODE (val);

  if (mode == VOIDmode)
    mode = DImode;

  gcc_assert (GET_MODE_SIZE (mode) == 8);

  lowpart = gen_lowpart (SImode, val);
  highpart = gen_highpart_mode (SImode, mode, val);

  gcc_assert (GET_CODE (lowpart) == CONST_INT);
  gcc_assert (GET_CODE (highpart) == CONST_INT);

  return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
			    NULL_RTX, NULL_RTX, 0, 0)
	  + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
			      NULL_RTX, NULL_RTX, 0, 0));
}
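
/* For example (a sketch, not compiler output): the DImode constant
   0x0000000500000001 splits into lowpart 1 and highpart 5.  Both halves
   are valid ARM immediates, so each costs a single mov and the function
   returns 2; something like 0x12345678deadbeef needs several mov/orr
   steps per half and costs correspondingly more.  */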
/* Return true if it is worthwhile to split a 64-bit constant into two
   32-bit operations.  This is the case if optimizing for size, or
   if we have load delay slots, or if one 32-bit part can be done with
   a single data operation.  */
bool
arm_const_double_by_parts (rtx val)
{
  enum machine_mode mode = GET_MODE (val);
  rtx part;

  if (optimize_size || arm_ld_sched)
    return true;

  if (mode == VOIDmode)
    mode = DImode;

  part = gen_highpart_mode (SImode, mode, val);

  gcc_assert (GET_CODE (part) == CONST_INT);

  if (const_ok_for_arm (INTVAL (part))
      || const_ok_for_arm (~INTVAL (part)))
    return true;

  part = gen_lowpart (SImode, val);

  gcc_assert (GET_CODE (part) == CONST_INT);

  if (const_ok_for_arm (INTVAL (part))
      || const_ok_for_arm (~INTVAL (part)))
    return true;

  return false;
}
/* Scan INSN and note any of its operands that need fixing.
   If DO_PUSHES is false we do not actually push any of the fixups
   needed.  The function returns TRUE if any fixups were needed/pushed.
   This is used by arm_memory_load_p() which needs to know about loads
   of constants that will be converted into minipool loads.  */
static bool
note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
{
  bool result = false;
  int opno;

  extract_insn (insn);

  if (!constrain_operands (1))
    fatal_insn_not_found (insn);

  if (recog_data.n_alternatives == 0)
    return false;

  /* Fill in recog_op_alt with information about the constraints of
     this insn.  */
  preprocess_constraints ();

  for (opno = 0; opno < recog_data.n_operands; opno++)
    {
      /* Things we need to fix can only occur in inputs.  */
      if (recog_data.operand_type[opno] != OP_IN)
	continue;

      /* If this alternative is a memory reference, then any mention
	 of constants in this alternative is really to fool reload
	 into allowing us to accept one there.  We need to fix them up
	 now so that we output the right code.  */
      if (recog_op_alt[opno][which_alternative].memory_ok)
	{
	  rtx op = recog_data.operand[opno];

	  if (CONSTANT_P (op))
	    {
	      if (do_pushes)
		push_minipool_fix (insn, address, recog_data.operand_loc[opno],
				   recog_data.operand_mode[opno], op);
	      result = true;
	    }
	  else if (GET_CODE (op) == MEM
		   && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
		   && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
	    {
	      if (do_pushes)
		{
		  rtx cop = avoid_constant_pool_reference (op);

		  /* Casting the address of something to a mode narrower
		     than a word can cause avoid_constant_pool_reference()
		     to return the pool reference itself.  That's no good to
		     us here.  Lets just hope that we can use the
		     constant pool value directly.  */
		  if (op == cop)
		    cop = get_pool_constant (XEXP (op, 0));

		  push_minipool_fix (insn, address,
				     recog_data.operand_loc[opno],
				     recog_data.operand_mode[opno], cop);
		}

	      result = true;
	    }
	}
    }

  return result;
}
/* GCC puts the pool in the wrong place for ARM, since we can only
   load addresses a limited distance around the pc.  We do some
   special munging to move the constant pool values to the correct
   point in the code.  */
static void
arm_reorg (void)
{
  rtx insn;
  HOST_WIDE_INT address = 0;
  Mfix * fix;

  minipool_fix_head = minipool_fix_tail = NULL;

  /* The first insn must always be a note, or the code below won't
     scan it properly.  */
  insn = get_insns ();
  gcc_assert (GET_CODE (insn) == NOTE);
  minipool_pad = 0;

  /* Scan all the insns and record the operands that will need fixing.  */
  for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
    {
      if (TARGET_CIRRUS_FIX_INVALID_INSNS
	  && (arm_cirrus_insn_p (insn)
	      || GET_CODE (insn) == JUMP_INSN
	      || arm_memory_load_p (insn)))
	cirrus_reorg (insn);

      if (GET_CODE (insn) == BARRIER)
	push_minipool_barrier (insn, address);
      else if (INSN_P (insn))
	{
	  rtx table;

	  note_invalid_constants (insn, address, true);
	  address += get_attr_length (insn);

	  /* If the insn is a vector jump, add the size of the table
	     and skip the table.  */
	  if ((table = is_jump_table (insn)) != NULL)
	    {
	      address += get_jump_table_size (table);
	      insn = table;
	    }
	}
    }

  fix = minipool_fix_head;

  /* Now scan the fixups and perform the required changes.  */
  while (fix)
    {
      Mfix * ftmp;
      Mfix * fdel;
      Mfix * last_added_fix;
      Mfix * last_barrier = NULL;
      Mfix * this_fix;

      /* Skip any further barriers before the next fix.  */
      while (fix && GET_CODE (fix->insn) == BARRIER)
	fix = fix->next;

      /* No more fixes.  */
      if (fix == NULL)
	break;

      last_added_fix = NULL;

      for (ftmp = fix; ftmp; ftmp = ftmp->next)
	{
	  if (GET_CODE (ftmp->insn) == BARRIER)
	    {
	      if (ftmp->address >= minipool_vector_head->max_address)
		break;

	      last_barrier = ftmp;
	    }
	  else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
	    break;

	  last_added_fix = ftmp;  /* Keep track of the last fix added.  */
	}

      /* If we found a barrier, drop back to that; any fixes that we
	 could have reached but come after the barrier will now go in
	 the next mini-pool.  */
      if (last_barrier != NULL)
	{
	  /* Reduce the refcount for those fixes that won't go into this
	     pool after all.  */
	  for (fdel = last_barrier->next;
	       fdel && fdel != ftmp;
	       fdel = fdel->next)
	    {
	      fdel->minipool->refcount--;
	      fdel->minipool = NULL;
	    }

	  ftmp = last_barrier;
	}
      else
	{
	  /* ftmp is the first fix that we can't fit into this pool and
	     there are no natural barriers that we could use.  Insert a
	     new barrier in the code somewhere between the previous
	     fix and this one, and arrange to jump around it.  */
	  HOST_WIDE_INT max_address;

	  /* The last item on the list of fixes must be a barrier, so
	     we can never run off the end of the list of fixes without
	     last_barrier being set.  */
	  gcc_assert (ftmp);

	  max_address = minipool_vector_head->max_address;
	  /* Check that there isn't another fix that is in range that
	     we couldn't fit into this pool because the pool was
	     already too large: we need to put the pool before such an
	     instruction.  */
	  if (ftmp->address < max_address)
	    max_address = ftmp->address;

	  last_barrier = create_fix_barrier (last_added_fix, max_address);
	}

      assign_minipool_offsets (last_barrier);

      while (ftmp)
	{
	  if (GET_CODE (ftmp->insn) != BARRIER
	      && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
		  == NULL))
	    break;

	  ftmp = ftmp->next;
	}

      /* Scan over the fixes we have identified for this pool, fixing them
	 up and adding the constants to the pool itself.  */
      for (this_fix = fix; this_fix && ftmp != this_fix;
	   this_fix = this_fix->next)
	if (GET_CODE (this_fix->insn) != BARRIER)
	  {
	    rtx addr
	      = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
						  minipool_vector_label),
			       this_fix->minipool->offset);
	    *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
	  }

      dump_minipool (last_barrier->insn);
      fix = ftmp;
    }

  /* From now on we must synthesize any constants that we can't handle
     directly.  This can happen if the RTL gets split during final
     instruction generation.  */
  after_arm_reorg = 1;

  /* Free the minipool memory.  */
  obstack_free (&minipool_obstack, minipool_startobj);
}
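
/* A sketch of the resulting code layout (labels invented for the
   illustration): a fixed-up load and a pool dumped at a barrier created
   by create_fix_barrier look roughly like

	ldr	r0, .LCP0	@ *fix->loc rewritten to a pool reference
	...
	b	.LAFTER		@ jump around the forced barrier
   .LCP0:
	.word	305419896	@ the offending constant (0x12345678)
   .LAFTER:
	...							  */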
/* Routines to output assembly language.  */

/* If the rtx is the correct value then return the string of the number.
   In this way we can ensure that valid double constants are generated even
   when cross compiling.  */
const char *
fp_immediate_constant (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  if (!fp_consts_inited)
    init_fp_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);
  for (i = 0; i < 8; i++)
    if (REAL_VALUES_EQUAL (r, values_fp[i]))
      return strings_fp[i];

  gcc_unreachable ();
}
/* As for fp_immediate_constant, but value is passed directly, not in rtx.  */
static const char *
fp_const_from_val (REAL_VALUE_TYPE *r)
{
  int i;

  if (!fp_consts_inited)
    init_fp_table ();

  for (i = 0; i < 8; i++)
    if (REAL_VALUES_EQUAL (*r, values_fp[i]))
      return strings_fp[i];

  gcc_unreachable ();
}
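
/* The eight table entries searched above are the FPA's immediate
   constants (0, 1, 2, 3, 4, 5, 0.5 and 10, as set up by init_fp_table
   earlier in this file); any other floating-point constant has to be
   loaded from memory or synthesized.  */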
/* Output the operands of a LDM/STM instruction to STREAM.
   MASK is the ARM register set mask of which only bits 0-15 are important.
   REG is the base register, either the frame pointer or the stack pointer,
   INSTR is the possibly suffixed load or store instruction.  */

static void
print_multi_reg (FILE *stream, const char *instr, unsigned reg,
		 unsigned long mask)
{
  unsigned i;
  bool not_first = FALSE;

  fputc ('\t', stream);
  asm_fprintf (stream, instr, reg);
  fputs (", {", stream);

  for (i = 0; i <= LAST_ARM_REGNUM; i++)
    if (mask & (1 << i))
      {
	if (not_first)
	  fprintf (stream, ", ");

	asm_fprintf (stream, "%r", i);
	not_first = TRUE;
      }

  fprintf (stream, "}\n");
}
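
/* For instance (illustrative values), INSTR "ldmfd\t%r!", REG
   SP_REGNUM and a MASK with bits 4, 5 and 14 set prints

	ldmfd	sp!, {r4, r5, lr}				  */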
/* Output a FLDMX instruction to STREAM.
   BASE is the register containing the address.
   REG and COUNT specify the register range.
   Extra registers may be added to avoid hardware bugs.  */

static void
arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
{
  int i;

  /* Workaround ARM10 VFPr1 bug.  */
  if (count == 2 && !arm_arch6)
    {
      if (reg == 15)
	reg--;
      count++;
    }

  fputc ('\t', stream);
  asm_fprintf (stream, "fldmfdx\t%r!, {", base);

  for (i = reg; i < reg + count; i++)
    {
      if (i > reg)
	fputs (", ", stream);
      asm_fprintf (stream, "d%d", i);
    }

  fputs ("}\n", stream);
}
/* Output the assembly for a store multiple.  */

const char *
vfp_output_fstmx (rtx * operands)
{
  char pattern[100];
  int p;
  int base;
  int i;

  strcpy (pattern, "fstmfdx\t%m0!, {%P1");
  p = strlen (pattern);

  gcc_assert (GET_CODE (operands[1]) == REG);

  base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
  for (i = 1; i < XVECLEN (operands[2], 0); i++)
    {
      p += sprintf (&pattern[p], ", d%d", base + i);
    }
  strcpy (&pattern[p], "}");

  output_asm_insn (pattern, operands);
  return "";
}
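
/* As a sketch (register numbers assumed): a push of d8-d10, i.e.
   operands[1] = d8 and a three-element PARALLEL in operands[2],
   produces

	fstmfdx	sp!, {d8, d9, d10}				  */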
/* Emit RTL to save block of VFP register pairs to the stack.  Returns the
   number of bytes pushed.  */

static int
vfp_emit_fstmx (int base_reg, int count)
{
  rtx par;
  rtx dwarf;
  rtx tmp, reg;
  int i;

  /* Workaround ARM10 VFPr1 bug.  Data corruption can occur when exactly two
     register pairs are stored by a store multiple insn.  We avoid this
     by pushing an extra pair.  */
  if (count == 2 && !arm_arch6)
    {
      if (base_reg == LAST_VFP_REGNUM - 3)
	base_reg -= 2;
      count++;
    }

  /* ??? The frame layout is implementation defined.  We describe
     standard format 1 (equivalent to a FSTMD insn and unused pad word).
     We really need some way of representing the whole block so that the
     unwinder can figure it out at runtime.  */
  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));

  reg = gen_rtx_REG (DFmode, base_reg);
  base_reg += 2;

  XVECEXP (par, 0, 0)
    = gen_rtx_SET (VOIDmode,
		   gen_frame_mem (BLKmode,
				  gen_rtx_PRE_DEC (BLKmode,
						   stack_pointer_rtx)),
		   gen_rtx_UNSPEC (BLKmode,
				   gen_rtvec (1, reg),
				   UNSPEC_PUSH_MULT));

  tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
		     plus_constant (stack_pointer_rtx, -(count * 8 + 4)));
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 0) = tmp;

  tmp = gen_rtx_SET (VOIDmode,
		     gen_frame_mem (DFmode, stack_pointer_rtx),
		     reg);
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 1) = tmp;

  for (i = 1; i < count; i++)
    {
      reg = gen_rtx_REG (DFmode, base_reg);
      base_reg += 2;
      XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);

      tmp = gen_rtx_SET (VOIDmode,
			 gen_frame_mem (DFmode,
					plus_constant (stack_pointer_rtx,
						       i * 8)),
			 reg);
      RTX_FRAME_RELATED_P (tmp) = 1;
      XVECEXP (dwarf, 0, i + 1) = tmp;
    }

  par = emit_insn (par);
  REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
				       REG_NOTES (par));
  RTX_FRAME_RELATED_P (par) = 1;

  return count * 8 + 4;
}
/* Output a 'call' insn.  */
const char *
output_call (rtx *operands)
{
  gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly.  */

  /* Handle calls to lr using ip (which may be clobbered in subr anyway).  */
  if (REGNO (operands[0]) == LR_REGNUM)
    {
      operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
      output_asm_insn ("mov%?\t%0, %|lr", operands);
    }

  output_asm_insn ("mov%?\t%|lr, %|pc", operands);

  if (TARGET_INTERWORK || arm_arch4t)
    output_asm_insn ("bx%?\t%0", operands);
  else
    output_asm_insn ("mov%?\t%|pc, %0", operands);

  return "";
}
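
/* For example (illustrative; register chosen arbitrarily), a pre-v4t
   call through r2 comes out as

	mov	lr, pc
	mov	pc, r2

   The first mov captures the return address: pc reads as the current
   instruction plus 8, i.e. the insn just after the branch.  */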
/* Output a 'call' insn that is a reference in memory.  */
const char *
output_call_mem (rtx *operands)
{
  if (TARGET_INTERWORK && !arm_arch5)
    {
      output_asm_insn ("ldr%?\t%|ip, %0", operands);
      output_asm_insn ("mov%?\t%|lr, %|pc", operands);
      output_asm_insn ("bx%?\t%|ip", operands);
    }
  else if (regno_use_in (LR_REGNUM, operands[0]))
    {
      /* LR is used in the memory address.  We load the address in the
	 first instruction.  It's safe to use IP as the target of the
	 load since the call will kill it anyway.  */
      output_asm_insn ("ldr%?\t%|ip, %0", operands);
      if (arm_arch5)
	output_asm_insn ("blx%?\t%|ip", operands);
      else
	{
	  output_asm_insn ("mov%?\t%|lr, %|pc", operands);
	  if (arm_arch4t)
	    output_asm_insn ("bx%?\t%|ip", operands);
	  else
	    output_asm_insn ("mov%?\t%|pc, %|ip", operands);
	}
    }
  else
    {
      output_asm_insn ("mov%?\t%|lr, %|pc", operands);
      output_asm_insn ("ldr%?\t%|pc, %0", operands);
    }

  return "";
}
/* Output a move from arm registers to an fpa register.
   OPERANDS[0] is an fpa register.
   OPERANDS[1] is the first register of an arm register group.  */
const char *
output_mov_long_double_fpa_from_arm (rtx *operands)
{
  int arm_reg0 = REGNO (operands[1]);
  rtx ops[3];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);

  output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
  output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);

  return "";
}
/* Output a move from an fpa register to arm registers.
   OPERANDS[0] is the first register of an arm register group.
   OPERANDS[1] is an fpa register.  */
const char *
output_mov_long_double_arm_from_fpa (rtx *operands)
{
  int arm_reg0 = REGNO (operands[0]);
  rtx ops[3];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);

  output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
  output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
  return "";
}
/* Output a move of a long double between arm registers.
   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.  */
const char *
output_mov_long_double_arm_from_arm (rtx *operands)
{
  /* We have to be careful here because the two might overlap.  */
  int dest_start = REGNO (operands[0]);
  int src_start = REGNO (operands[1]);
  rtx ops[2];
  int i;

  if (dest_start < src_start)
    {
      for (i = 0; i < 3; i++)
	{
	  ops[0] = gen_rtx_REG (SImode, dest_start + i);
	  ops[1] = gen_rtx_REG (SImode, src_start + i);
	  output_asm_insn ("mov%?\t%0, %1", ops);
	}
    }
  else
    {
      for (i = 2; i >= 0; i--)
	{
	  ops[0] = gen_rtx_REG (SImode, dest_start + i);
	  ops[1] = gen_rtx_REG (SImode, src_start + i);
	  output_asm_insn ("mov%?\t%0, %1", ops);
	}
    }

  return "";
}
/* Output a move from arm registers to an fpa register.
   OPERANDS[0] is an fpa register.
   OPERANDS[1] is the first register of an arm register pair.  */
const char *
output_mov_double_fpa_from_arm (rtx *operands)
{
  int arm_reg0 = REGNO (operands[1]);
  rtx ops[2];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
  output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
  return "";
}
/* Output a move from an fpa register to arm registers.
   OPERANDS[0] is the first register of an arm register pair.
   OPERANDS[1] is an fpa register.  */
const char *
output_mov_double_arm_from_fpa (rtx *operands)
{
  int arm_reg0 = REGNO (operands[0]);
  rtx ops[2];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
  output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
  return "";
}
/* Output a move between double words.
   It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
   or MEM<-REG and all MEMs must be offsettable addresses.  */
const char *
output_move_double (rtx *operands)
{
  enum rtx_code code0 = GET_CODE (operands[0]);
  enum rtx_code code1 = GET_CODE (operands[1]);
  rtx otherops[3];

  if (code0 == REG)
    {
      int reg0 = REGNO (operands[0]);

      otherops[0] = gen_rtx_REG (SImode, 1 + reg0);

      gcc_assert (code1 == MEM);  /* Constraints should ensure this.  */

      switch (GET_CODE (XEXP (operands[1], 0)))
	{
	case REG:
	  output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
	  break;

	case PRE_INC:
	  gcc_assert (TARGET_LDRD);
	  output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
	  break;

	case PRE_DEC:
	  output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
	  break;

	case POST_INC:
	  output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
	  break;

	case POST_DEC:
	  gcc_assert (TARGET_LDRD);
	  output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
	  break;

	case PRE_MODIFY:
	case POST_MODIFY:
	  otherops[0] = operands[0];
	  otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
	  otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);

	  if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
	    {
	      if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
		{
		  /* Registers overlap so split out the increment.  */
		  output_asm_insn ("add%?\t%1, %1, %2", otherops);
		  output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
		}
	      else
		output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
	    }
	  else
	    {
	      /* We only allow constant increments, so this is safe.  */
	      output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
	    }
	  break;

	case LABEL_REF:
	case CONST:
	  output_asm_insn ("adr%?\t%0, %1", operands);
	  output_asm_insn ("ldm%?ia\t%0, %M0", operands);
	  break;

	default:
	  if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
			       GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
	    {
	      otherops[0] = operands[0];
	      otherops[1] = XEXP (XEXP (operands[1], 0), 0);
	      otherops[2] = XEXP (XEXP (operands[1], 0), 1);

	      if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
		{
		  if (GET_CODE (otherops[2]) == CONST_INT)
		    {
		      switch ((int) INTVAL (otherops[2]))
			{
			case -8:
			  output_asm_insn ("ldm%?db\t%1, %M0", otherops);
			  return "";
			case -4:
			  output_asm_insn ("ldm%?da\t%1, %M0", otherops);
			  return "";
			case 4:
			  output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
			  return "";
			}
		    }
		  if (TARGET_LDRD
		      && (GET_CODE (otherops[2]) == REG
			  || (GET_CODE (otherops[2]) == CONST_INT
			      && INTVAL (otherops[2]) > -256
			      && INTVAL (otherops[2]) < 256)))
		    {
		      if (reg_overlap_mentioned_p (otherops[0],
						   otherops[2]))
			{
			  /* Swap base and index registers over to
			     avoid a conflict.  */
			  otherops[1] = XEXP (XEXP (operands[1], 0), 1);
			  otherops[2] = XEXP (XEXP (operands[1], 0), 0);
			}
		      /* If both registers conflict, it will usually
			 have been fixed by a splitter.  */
		      if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
			{
			  output_asm_insn ("add%?\t%1, %1, %2", otherops);
			  output_asm_insn ("ldr%?d\t%0, [%1]",
					   otherops);
			}
		      else
			output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
		      return "";
		    }

		  if (GET_CODE (otherops[2]) == CONST_INT)
		    {
		      if (!(const_ok_for_arm (INTVAL (otherops[2]))))
			output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
		      else
			output_asm_insn ("add%?\t%0, %1, %2", otherops);
		    }
		  else
		    output_asm_insn ("add%?\t%0, %1, %2", otherops);
		}
	      else
		output_asm_insn ("sub%?\t%0, %1, %2", otherops);

	      return "ldm%?ia\t%0, %M0";
	    }
	  else
	    {
	      otherops[1] = adjust_address (operands[1], SImode, 4);
	      /* Take care of overlapping base/data reg.  */
	      if (reg_mentioned_p (operands[0], operands[1]))
		{
		  output_asm_insn ("ldr%?\t%0, %1", otherops);
		  output_asm_insn ("ldr%?\t%0, %1", operands);
		}
	      else
		{
		  output_asm_insn ("ldr%?\t%0, %1", operands);
		  output_asm_insn ("ldr%?\t%0, %1", otherops);
		}
	    }
	}
    }
  else
    {
      /* Constraints should ensure this.  */
      gcc_assert (code0 == MEM && code1 == REG);
      gcc_assert (REGNO (operands[1]) != IP_REGNUM);

      switch (GET_CODE (XEXP (operands[0], 0)))
	{
	case REG:
	  output_asm_insn ("stm%?ia\t%m0, %M1", operands);
	  break;

	case PRE_INC:
	  gcc_assert (TARGET_LDRD);
	  output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
	  break;

	case PRE_DEC:
	  output_asm_insn ("stm%?db\t%m0!, %M1", operands);
	  break;

	case POST_INC:
	  output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
	  break;

	case POST_DEC:
	  gcc_assert (TARGET_LDRD);
	  output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
	  break;

	case PRE_MODIFY:
	case POST_MODIFY:
	  otherops[0] = operands[1];
	  otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
	  otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);

	  if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
	    output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
	  else
	    output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
	  break;

	case PLUS:
	  otherops[2] = XEXP (XEXP (operands[0], 0), 1);
	  if (GET_CODE (otherops[2]) == CONST_INT)
	    {
	      switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
		{
		case -8:
		  output_asm_insn ("stm%?db\t%m0, %M1", operands);
		  return "";

		case -4:
		  output_asm_insn ("stm%?da\t%m0, %M1", operands);
		  return "";

		case 4:
		  output_asm_insn ("stm%?ib\t%m0, %M1", operands);
		  return "";
		}
	    }
	  if (TARGET_LDRD
	      && (GET_CODE (otherops[2]) == REG
		  || (GET_CODE (otherops[2]) == CONST_INT
		      && INTVAL (otherops[2]) > -256
		      && INTVAL (otherops[2]) < 256)))
	    {
	      otherops[0] = operands[1];
	      otherops[1] = XEXP (XEXP (operands[0], 0), 0);
	      output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
	      return "";
	    }
	  /* Fall through */

	default:
	  otherops[0] = adjust_address (operands[0], SImode, 4);
	  otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
	  output_asm_insn ("str%?\t%1, %0", operands);
	  output_asm_insn ("str%?\t%1, %0", otherops);
	}
    }

  return "";
}
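
/* Two illustrative cases (register numbers invented): loading r0/r1
   from [r2, #4] uses the base-adjusted multiple form

	ldmib	r2, {r0, r1}

   while storing r4/r5 with a post-decrement of r6 goes through strd on
   TARGET_LDRD cores:

	strd	r4, [r6], #-8					  */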
/* Output an ADD r, s, #n where n may be too big for one instruction.
   If adding zero to one register, output nothing.  */
const char *
output_add_immediate (rtx *operands)
{
  HOST_WIDE_INT n = INTVAL (operands[2]);

  if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
    {
      if (n < 0)
	output_multi_immediate (operands,
				"sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
				-n);
      else
	output_multi_immediate (operands,
				"add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
				n);
    }

  return "";
}
/* Output a multiple immediate operation.
   OPERANDS is the vector of operands referred to in the output patterns.
   INSTR1 is the output pattern to use for the first constant.
   INSTR2 is the output pattern to use for subsequent constants.
   IMMED_OP is the index of the constant slot in OPERANDS.
   N is the constant value.  */
static const char *
output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
			int immed_op, HOST_WIDE_INT n)
{
#if HOST_BITS_PER_WIDE_INT > 32
  n &= 0xffffffff;
#endif

  if (n == 0)
    {
      /* Quick and easy output.  */
      operands[immed_op] = const0_rtx;
      output_asm_insn (instr1, operands);
    }
  else
    {
      int i;
      const char * instr = instr1;

      /* Note that n is never zero here (which would give no output).  */
      for (i = 0; i < 32; i += 2)
	{
	  if (n & (3 << i))
	    {
	      operands[immed_op] = GEN_INT (n & (255 << i));
	      output_asm_insn (instr, operands);
	      instr = instr2;
	      i += 6;
	    }
	}
    }

  return "";
}
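
/* Worked example (register names assumed): output_add_immediate with
   n = 0x10004 cannot use a single add, so the loop above peels off one
   8-bit-rotatable chunk at a time and emits

	add	r0, r1, #4
	add	r0, r0, #65536					  */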
/* Return the appropriate ARM instruction for the operation code.
   The returned result should not be overwritten.  OP is the rtx of the
   operation.  SHIFT_FIRST_ARG is TRUE if the first argument of the operator
   was shifted.  */
const char *
arithmetic_instr (rtx op, int shift_first_arg)
{
  switch (GET_CODE (op))
    {
    case PLUS:
      return "add";

    case MINUS:
      return shift_first_arg ? "rsb" : "sub";

    case IOR:
      return "orr";

    case XOR:
      return "eor";

    case AND:
      return "and";

    default:
      gcc_unreachable ();
    }
}
/* Ensure valid constant shifts and return the appropriate shift mnemonic
   for the operation code.  The returned result should not be overwritten.
   OP is the rtx code of the shift.
   On exit, *AMOUNTP will be -1 if the shift is by a register, or the
   constant shift amount otherwise.  */
static const char *
shift_op (rtx op, HOST_WIDE_INT *amountp)
{
  const char * mnem;
  enum rtx_code code = GET_CODE (op);

  switch (GET_CODE (XEXP (op, 1)))
    {
    case REG:
    case SUBREG:
      *amountp = -1;
      break;

    case CONST_INT:
      *amountp = INTVAL (XEXP (op, 1));
      break;

    default:
      gcc_unreachable ();
    }

  switch (code)
    {
    case ASHIFT:
      mnem = "asl";
      break;

    case ASHIFTRT:
      mnem = "asr";
      break;

    case LSHIFTRT:
      mnem = "lsr";
      break;

    case ROTATE:
      gcc_assert (*amountp != -1);
      *amountp = 32 - *amountp;

      /* Fall through.  */

    case ROTATERT:
      mnem = "ror";
      break;

    case MULT:
      /* We never have to worry about the amount being other than a
	 power of 2, since this case can never be reloaded from a reg.  */
      gcc_assert (*amountp != -1);
      *amountp = int_log2 (*amountp);
      return "asl";

    default:
      gcc_unreachable ();
    }

  if (*amountp != -1)
    {
      /* This is not 100% correct, but follows from the desire to merge
	 multiplication by a power of 2 with the recognizer for a
	 shift.  >=32 is not a valid shift for "asl", so we must try and
	 output a shift that produces the correct arithmetical result.
	 Using lsr #32 is identical except for the fact that the carry bit
	 is not set correctly if we set the flags; but we never use the
	 carry bit from such an operation, so we can ignore that.  */
      if (code == ROTATERT)
	/* Rotate is just modulo 32.  */
	*amountp &= 31;
      else if (*amountp != (*amountp & 31))
	{
	  if (code == ASHIFT)
	    mnem = "lsr";
	  *amountp = 32;
	}

      /* Shifts of 0 are no-ops.  */
      if (*amountp == 0)
	return NULL;
    }

  return mnem;
}
/* Obtain the shift from the POWER of two.  */

static HOST_WIDE_INT
int_log2 (HOST_WIDE_INT power)
{
  HOST_WIDE_INT shift = 0;

  while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
    {
      gcc_assert (shift <= 31);
      shift++;
    }

  return shift;
}
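
/* For example, a multiply by 8 reaching shift_op is printed as the
   shifter operand "asl #3" (int_log2 (8) == 3), while an out-of-range
   count such as "x << 37" degrades to "lsr #32", which gives the same
   all-zero arithmetical result.  */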
/* Output a .ascii pseudo-op, keeping track of lengths.  This is
   because /bin/as is horribly restrictive.  The judgement about
   whether or not each character is 'printable' (and can be output as
   is) or not (and must be printed with an octal escape) must be made
   with reference to the *host* character set -- the situation is
   similar to that discussed in the comments above pp_c_char in
   c-pretty-print.c.  */

#define MAX_ASCII_LEN 51

void
output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
{
  int i;
  int len_so_far = 0;

  fputs ("\t.ascii\t\"", stream);

  for (i = 0; i < len; i++)
    {
      int c = p[i];

      if (len_so_far >= MAX_ASCII_LEN)
	{
	  fputs ("\"\n\t.ascii\t\"", stream);
	  len_so_far = 0;
	}

      if (ISPRINT (c))
	{
	  if (c == '\\' || c == '\"')
	    {
	      putc ('\\', stream);
	      len_so_far++;
	    }
	  putc (c, stream);
	  len_so_far++;
	}
      else
	{
	  fprintf (stream, "\\%03o", c);
	  len_so_far += 4;
	}
    }

  fputs ("\"\n", stream);
}
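
/* As an illustration, the 4-character string a"b<newline> is emitted as

	.ascii	"a\"b\012"

   The quote is backslash-escaped and the non-printable newline becomes
   a three-digit octal escape.  */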
/* Compute the register save mask for registers 0 through 12
   inclusive.  This code is used by arm_compute_save_reg_mask.  */

static unsigned long
arm_compute_save_reg0_reg12_mask (void)
{
  unsigned long func_type = arm_current_func_type ();
  unsigned long save_reg_mask = 0;
  unsigned int reg;

  if (IS_INTERRUPT (func_type))
    {
      unsigned int max_reg;
      /* Interrupt functions must not corrupt any registers,
	 even call clobbered ones.  If this is a leaf function
	 we can just examine the registers used by the RTL, but
	 otherwise we have to assume that whatever function is
	 called might clobber anything, and so we have to save
	 all the call-clobbered registers as well.  */
      if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
	/* FIQ handlers have registers r8 - r12 banked, so
	   we only need to check r0 - r7.  Normal ISRs only
	   bank r14 and r15, so we must check up to r12.
	   r13 is the stack pointer which is always preserved,
	   so we do not need to consider it here.  */
	max_reg = 7;
      else
	max_reg = 12;

      for (reg = 0; reg <= max_reg; reg++)
	if (regs_ever_live[reg]
	    || (! current_function_is_leaf && call_used_regs [reg]))
	  save_reg_mask |= (1 << reg);

      /* Also save the pic base register if necessary.  */
      if (flag_pic
	  && !TARGET_SINGLE_PIC_BASE
	  && arm_pic_register != INVALID_REGNUM
	  && current_function_uses_pic_offset_table)
	save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
    }
  else
    {
      /* In the normal case we only need to save those registers
	 which are call saved and which are used by this function.  */
      for (reg = 0; reg <= 10; reg++)
	if (regs_ever_live[reg] && ! call_used_regs [reg])
	  save_reg_mask |= (1 << reg);

      /* Handle the frame pointer as a special case.  */
      if (! TARGET_APCS_FRAME
	  && ! frame_pointer_needed
	  && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
	  && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
	save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;

      /* If we aren't loading the PIC register,
	 don't stack it even though it may be live.  */
      if (flag_pic
	  && !TARGET_SINGLE_PIC_BASE
	  && arm_pic_register != INVALID_REGNUM
	  && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
	      || current_function_uses_pic_offset_table))
	save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
    }

  /* Save registers so the exception handler can modify them.  */
  if (current_function_calls_eh_return)
    {
      unsigned int i;

      for (i = 0; ; i++)
	{
	  reg = EH_RETURN_DATA_REGNO (i);
	  if (reg == INVALID_REGNUM)
	    break;
	  save_reg_mask |= 1 << reg;
	}
    }

  return save_reg_mask;
}
/* Compute a bit mask of which registers need to be
   saved on the stack for the current function.  */

static unsigned long
arm_compute_save_reg_mask (void)
{
  unsigned int save_reg_mask = 0;
  unsigned long func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))
    /* This should never really happen.  */
    return 0;

  /* If we are creating a stack frame, then we must save the frame pointer,
     IP (which will hold the old stack pointer), LR and the PC.  */
  if (frame_pointer_needed)
    save_reg_mask |=
      (1 << ARM_HARD_FRAME_POINTER_REGNUM)
      | (1 << IP_REGNUM)
      | (1 << LR_REGNUM)
      | (1 << PC_REGNUM);

  /* Volatile functions do not return, so there
     is no need to save any other registers.  */
  if (IS_VOLATILE (func_type))
    return save_reg_mask;

  save_reg_mask |= arm_compute_save_reg0_reg12_mask ();

  /* Decide if we need to save the link register.
     Interrupt routines have their own banked link register,
     so they never need to save it.
     Otherwise if we do not use the link register we do not need to save
     it.  If we are pushing other registers onto the stack however, we
     can save an instruction in the epilogue by pushing the link register
     now and then popping it back into the PC.  This incurs extra memory
     accesses though, so we only do it when optimizing for size, and only
     if we know that we will not need a fancy return sequence.  */
  if (regs_ever_live [LR_REGNUM]
      || (save_reg_mask
	  && optimize_size
	  && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
	  && !current_function_calls_eh_return))
    save_reg_mask |= 1 << LR_REGNUM;

  if (cfun->machine->lr_save_eliminated)
    save_reg_mask &= ~ (1 << LR_REGNUM);

  if (TARGET_REALLY_IWMMXT
      && ((bit_count (save_reg_mask)
	   + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
    {
      unsigned int reg;

      /* The total number of registers that are going to be pushed
	 onto the stack is odd.  We need to ensure that the stack
	 is 64-bit aligned before we start to save iWMMXt registers,
	 and also before we start to create locals.  (A local variable
	 might be a double or long long which we will load/store using
	 an iWMMXt instruction).  Therefore we need to push another
	 ARM register, so that the stack will be 64-bit aligned.  We
	 try to avoid using the arg registers (r0 -r3) as they might be
	 used to pass values in a tail call.  */
      for (reg = 4; reg <= 12; reg++)
	if ((save_reg_mask & (1 << reg)) == 0)
	  break;

      if (reg <= 12)
	save_reg_mask |= (1 << reg);
      else
	{
	  cfun->machine->sibcall_blocked = 1;
	  save_reg_mask |= (1 << 3);
	}
    }

  return save_reg_mask;
}
/* Compute a bit mask of which registers need to be
   saved on the stack for the current function.  */
static unsigned long
thumb_compute_save_reg_mask (void)
{
  unsigned long mask;
  unsigned reg;

  mask = 0;
  for (reg = 0; reg < 12; reg ++)
    if (regs_ever_live[reg] && !call_used_regs[reg])
      mask |= 1 << reg;

  if (flag_pic
      && !TARGET_SINGLE_PIC_BASE
      && arm_pic_register != INVALID_REGNUM
      && current_function_uses_pic_offset_table)
    mask |= 1 << PIC_OFFSET_TABLE_REGNUM;

  /* See if we might need r11 for calls to _interwork_r11_call_via_rN().  */
  if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
    mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;

  /* LR will also be pushed if any lo regs are pushed.  */
  if (mask & 0xff || thumb_force_lr_save ())
    mask |= (1 << LR_REGNUM);

  /* Make sure we have a low work register if we need one.
     We will need one if we are going to push a high register,
     but we are not currently intending to push a low register.  */
  if ((mask & 0xff) == 0
      && ((mask & 0x0f00) || TARGET_BACKTRACE))
    {
      /* Use thumb_find_work_register to choose which register
	 we will use.  If the register is live then we will
	 have to push it.  Use LAST_LO_REGNUM as our fallback
	 choice for the register to select.  */
      reg = thumb_find_work_register (1 << LAST_LO_REGNUM);

      if (! call_used_regs[reg])
	mask |= 1 << reg;
    }

  return mask;
}
/* Return the number of bytes required to save VFP registers.  */
static int
arm_get_vfp_saved_size (void)
{
  unsigned int regno;
  int count;
  int saved;

  saved = 0;
  /* Space for saved VFP registers.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    {
      count = 0;
      for (regno = FIRST_VFP_REGNUM;
	   regno < LAST_VFP_REGNUM;
	   regno += 2)
	{
	  if ((!regs_ever_live[regno] || call_used_regs[regno])
	      && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
	    {
	      if (count > 0)
		{
		  /* Workaround ARM10 VFPr1 bug.  */
		  if (count == 2 && !arm_arch6)
		    count++;
		  saved += count * 8 + 4;
		}
	      count = 0;
	    }
	  else
	    count++;
	}
      if (count > 0)
	{
	  if (count == 2 && !arm_arch6)
	    count++;
	  saved += count * 8 + 4;
	}
    }
  return saved;
}
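
/* Illustrative sizing (an assumed register set): if d8-d11 are the
   only call-saved VFP registers live, the loop finds one contiguous
   block with count == 4 and returns 4 * 8 + 4 == 36 bytes, the
   trailing 4 being the FSTMX format word.  On a pre-v6 core a block of
   exactly two registers is first padded to three (the ARM10 VFPr1
   workaround above), giving 28 bytes rather than 20.  */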
/* Generate a function exit sequence.  If REALLY_RETURN is false, then do
   everything bar the final return instruction.  */
const char *
output_return_instruction (rtx operand, int really_return, int reverse)
{
  char conditional[10];
  char instr[100];
  unsigned reg;
  unsigned long live_regs_mask;
  unsigned long func_type;
  arm_stack_offsets *offsets;

  func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))
    return "";

  if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
    {
      /* If this function was declared non-returning, and we have
	 found a tail call, then we have to trust that the called
	 function won't return.  */
      if (really_return)
	{
	  rtx ops[2];

	  /* Otherwise, trap an attempted return by aborting.  */
	  ops[0] = operand;
	  ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
				       : "abort");
	  assemble_external_libcall (ops[1]);
	  output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
	}

      return "";
    }

  gcc_assert (!current_function_calls_alloca || really_return);

  sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');

  return_used_this_function = 1;

  live_regs_mask = arm_compute_save_reg_mask ();

  if (live_regs_mask)
    {
      const char * return_reg;

      /* If we do not have any special requirements for function exit
	 (e.g. interworking, or ISR) then we can load the return address
	 directly into the PC.  Otherwise we must load it into LR.  */
      if (really_return
	  && ! TARGET_INTERWORK)
	return_reg = reg_names[PC_REGNUM];
      else
	return_reg = reg_names[LR_REGNUM];

      if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
	{
	  /* There are three possible reasons for the IP register
	     being saved.  1) a stack frame was created, in which case
	     IP contains the old stack pointer, or 2) an ISR routine
	     corrupted it, or 3) it was saved to align the stack on
	     iWMMXt.  In case 1, restore IP into SP, otherwise just
	     restore IP.  */
	  if (frame_pointer_needed)
	    {
	      live_regs_mask &= ~ (1 << IP_REGNUM);
	      live_regs_mask |= (1 << SP_REGNUM);
	    }
	  else
	    gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
	}

      /* On some ARM architectures it is faster to use LDR rather than
	 LDM to load a single register.  On other architectures, the
	 cost is the same.  In 26 bit mode, or for exception handlers,
	 we have to use LDM to load the PC so that the CPSR is also
	 restored.  */
      for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
	if (live_regs_mask == (1U << reg))
	  break;

      if (reg <= LAST_ARM_REGNUM
	  && (reg != LR_REGNUM
	      || ! really_return
	      || ! IS_INTERRUPT (func_type)))
	{
	  sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
		   (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
	}
      else
	{
	  char *p;
	  int first = 1;

	  /* Generate the load multiple instruction to restore the
	     registers.  Note we can get here, even if
	     frame_pointer_needed is true, but only if sp already
	     points to the base of the saved core registers.  */
	  if (live_regs_mask & (1 << SP_REGNUM))
	    {
	      unsigned HOST_WIDE_INT stack_adjust;

	      offsets = arm_get_frame_offsets ();
	      stack_adjust = offsets->outgoing_args - offsets->saved_regs;
	      gcc_assert (stack_adjust == 0 || stack_adjust == 4);

	      if (stack_adjust && arm_arch5)
		sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
	      else
		{
		  /* If we can't use ldmib (SA110 bug),
		     then try to pop r3 instead.  */
		  if (stack_adjust)
		    live_regs_mask |= 1 << 3;
		  sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
		}
	    }
	  else
	    sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);

	  p = instr + strlen (instr);

	  for (reg = 0; reg <= SP_REGNUM; reg++)
	    if (live_regs_mask & (1 << reg))
	      {
		int l = strlen (reg_names[reg]);

		if (first)
		  first = 0;
		else
		  {
		    memcpy (p, ", ", 2);
		    p += 2;
		  }

		memcpy (p, "%|", 2);
		memcpy (p + 2, reg_names[reg], l);
		p += l + 2;
	      }

	  if (live_regs_mask & (1 << LR_REGNUM))
	    {
	      sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
	      /* If returning from an interrupt, restore the CPSR.  */
	      if (IS_INTERRUPT (func_type))
		strcat (p, "^");
	    }
	  else
	    strcpy (p, "}");
	}

      output_asm_insn (instr, & operand);

      /* See if we need to generate an extra instruction to
	 perform the actual function return.  */
      if (really_return
	  && func_type != ARM_FT_INTERWORKED
	  && (live_regs_mask & (1 << LR_REGNUM)) != 0)
	{
	  /* The return has already been handled
	     by loading the LR into the PC.  */
	  really_return = 0;
	}
    }

  if (really_return)
    {
      switch ((int) ARM_FUNC_TYPE (func_type))
	{
	case ARM_FT_ISR:
	case ARM_FT_FIQ:
	  sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
	  break;

	case ARM_FT_INTERWORKED:
	  sprintf (instr, "bx%s\t%%|lr", conditional);
	  break;

	case ARM_FT_EXCEPTION:
	  sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
	  break;

	default:
	  /* Use bx if it's available.  */
	  if (arm_arch5 || arm_arch4t)
	    sprintf (instr, "bx%s\t%%|lr", conditional);
	  else
	    sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
	  break;
	}

      output_asm_insn (instr, & operand);
    }

  return "";
}
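
/* By way of example (masks invented for the illustration): for a
   normal ARM-mode function whose save mask is {r4, lr}, a real return
   folds everything into the single insn

	ldmfd	sp!, {r4, pc}

   whereas with interworking the LR is reloaded instead and the switch
   above then emits a trailing "bx lr".  */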
/* Write the function name into the code section, directly preceding
   the function prologue.

   Code will be output similar to this:
     t0
	 .ascii "arm_poke_function_name", 0
	 .align
     t1
	 .word 0xff000000 + (t1 - t0)
	 arm_poke_function_name
	 mov     ip, sp
	 stmfd   sp!, {fp, ip, lr, pc}
	 sub     fp, ip, #4

   When performing a stack backtrace, code can inspect the value
   of 'pc' stored at 'fp' + 0.  If the trace function then looks
   at location pc - 12 and the top 8 bits are set, then we know
   that there is a function name embedded immediately preceding this
   location and that it has length ((pc[-3]) & ~0xff000000).

   We assume that pc is declared as a pointer to an unsigned long.

   It is of no benefit to output the function name if we are assembling
   a leaf function.  These function types will not contain a stack
   backtrace structure, therefore it is not possible to determine the
   function name.  */
void
arm_poke_function_name (FILE *stream, const char *name)
{
  unsigned long alignlength;
  unsigned long length;
  rtx x;

  length = strlen (name) + 1;
  alignlength = ROUND_UP_WORD (length);

  ASM_OUTPUT_ASCII (stream, name, length);
  ASM_OUTPUT_ALIGN (stream, 2);
  x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
  assemble_aligned_integer (UNITS_PER_WORD, x);
}
/* Place some comments into the assembler stream
   describing the current function.  */
static void
arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
{
  unsigned long func_type;

  if (!TARGET_ARM)
    {
      thumb_output_function_prologue (f, frame_size);
      return;
    }

  /* Sanity check.  */
  gcc_assert (!arm_ccfsm_state && !arm_target_insn);

  func_type = arm_current_func_type ();

  switch ((int) ARM_FUNC_TYPE (func_type))
    {
    default:
    case ARM_FT_NORMAL:
      break;
    case ARM_FT_INTERWORKED:
      asm_fprintf (f, "\t%@ Function supports interworking.\n");
      break;
    case ARM_FT_ISR:
      asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
      break;
    case ARM_FT_FIQ:
      asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
      break;
    case ARM_FT_EXCEPTION:
      asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
      break;
    }

  if (IS_NAKED (func_type))
    asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");

  if (IS_VOLATILE (func_type))
    asm_fprintf (f, "\t%@ Volatile: function does not return.\n");

  if (IS_NESTED (func_type))
    asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");

  asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
	       current_function_args_size,
	       current_function_pretend_args_size, frame_size);

  asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
	       frame_pointer_needed,
	       cfun->machine->uses_anonymous_args);

  if (cfun->machine->lr_save_eliminated)
    asm_fprintf (f, "\t%@ link register save eliminated.\n");

  if (current_function_calls_eh_return)
    asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");

#ifdef AOF_ASSEMBLER
  if (flag_pic)
    asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
#endif

  return_used_this_function = 0;
}
const char *
arm_output_epilogue (rtx sibling)
{
  int reg;
  unsigned long saved_regs_mask;
  unsigned long func_type;
  /* Floats_offset is the offset from the "virtual" frame.  In an APCS
     frame that is $fp + 4 for a non-variadic function.  */
  int floats_offset = 0;
  rtx operands[3];
  FILE * f = asm_out_file;
  unsigned int lrm_count = 0;
  int really_return = (sibling == NULL);
  int start_reg;
  arm_stack_offsets *offsets;

  /* If we have already generated the return instruction
     then it is futile to generate anything else.  */
  if (use_return_insn (FALSE, sibling) && return_used_this_function)
    return "";

  func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))
    /* Naked functions don't have epilogues.  */
    return "";

  if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
    {
      rtx op;

      /* A volatile function should never return.  Call abort.  */
      op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
      assemble_external_libcall (op);
      output_asm_insn ("bl\t%a0", &op);

      return "";
    }

  /* If we are throwing an exception, then we really must be doing a
     return, so we can't tail-call.  */
  gcc_assert (!current_function_calls_eh_return || really_return);

  offsets = arm_get_frame_offsets ();
  saved_regs_mask = arm_compute_save_reg_mask ();

  if (TARGET_IWMMXT)
    lrm_count = bit_count (saved_regs_mask);

  floats_offset = offsets->saved_args;
  /* Compute how far away the floats will be.  */
  for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
    if (saved_regs_mask & (1 << reg))
      floats_offset += 4;

  if (frame_pointer_needed)
    {
      /* This variable is for the Virtual Frame Pointer, not VFP regs.  */
      int vfp_offset = offsets->frame;

      if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
	{
	  for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
	    if (regs_ever_live[reg] && !call_used_regs[reg])
	      {
		floats_offset += 12;
		asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
			     reg, FP_REGNUM, floats_offset - vfp_offset);
	      }
	}
      else
	{
	  start_reg = LAST_FPA_REGNUM;

	  for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
	    {
	      if (regs_ever_live[reg] && !call_used_regs[reg])
		{
		  floats_offset += 12;

		  /* We can't unstack more than four registers at once.  */
		  if (start_reg - reg == 3)
		    {
		      asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
				   reg, FP_REGNUM, floats_offset - vfp_offset);
		      start_reg = reg - 1;
		    }
		}
	      else
		{
		  if (reg != start_reg)
		    asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
				 reg + 1, start_reg - reg,
				 FP_REGNUM, floats_offset - vfp_offset);
		  start_reg = reg - 1;
		}
	    }

	  /* Just in case the last register checked also needs unstacking.  */
	  if (reg != start_reg)
	    asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
			 reg + 1, start_reg - reg,
			 FP_REGNUM, floats_offset - vfp_offset);
	}

      if (TARGET_HARD_FLOAT && TARGET_VFP)
	{
	  int saved_size;

	  /* The fldmx insn does not have base+offset addressing modes,
	     so we use IP to hold the address.  */
	  saved_size = arm_get_vfp_saved_size ();

	  if (saved_size > 0)
	    {
	      floats_offset += saved_size;
	      asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
			   FP_REGNUM, floats_offset - vfp_offset);
	    }

	  start_reg = FIRST_VFP_REGNUM;
	  for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
	    {
	      if ((!regs_ever_live[reg] || call_used_regs[reg])
		  && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
		{
		  if (start_reg != reg)
		    arm_output_fldmx (f, IP_REGNUM,
				      (start_reg - FIRST_VFP_REGNUM) / 2,
				      (reg - start_reg) / 2);
		  start_reg = reg + 2;
		}
	    }
	  if (start_reg != reg)
	    arm_output_fldmx (f, IP_REGNUM,
			      (start_reg - FIRST_VFP_REGNUM) / 2,
			      (reg - start_reg) / 2);
	}

      if (TARGET_IWMMXT)
	{
	  /* The frame pointer is guaranteed to be non-double-word aligned.
	     This is because it is set to (old_stack_pointer - 4) and the
	     old_stack_pointer was double word aligned.  Thus the offset to
	     the iWMMXt registers to be loaded must also be non-double-word
	     sized, so that the resultant address *is* double-word aligned.
	     We can ignore floats_offset since that was already included in
	     the live_regs_mask.  */
	  lrm_count += (lrm_count % 2 ? 2 : 1);

	  for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
	    if (regs_ever_live[reg] && !call_used_regs[reg])
	      {
		asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
			     reg, FP_REGNUM, lrm_count * 4);
		lrm_count += 2;
	      }
	}

      /* saved_regs_mask should contain the IP, which at the time of stack
	 frame generation actually contains the old stack pointer.  So a
	 quick way to unwind the stack is just pop the IP register directly
	 into the stack pointer.  */
      gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
      saved_regs_mask &= ~ (1 << IP_REGNUM);
      saved_regs_mask |= (1 << SP_REGNUM);

      /* There are two registers left in saved_regs_mask - LR and PC.  We
	 only need to restore the LR register (the return address), but to
	 save time we can load it directly into the PC, unless we need a
	 special function exit sequence, or we are not really returning.  */
      if (really_return
	  && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
	  && !current_function_calls_eh_return)
	/* Delete the LR from the register mask, so that the LR on
	   the stack is loaded into the PC in the register mask.  */
	saved_regs_mask &= ~ (1 << LR_REGNUM);
      else
	saved_regs_mask &= ~ (1 << PC_REGNUM);

      /* We must use SP as the base register, because SP is one of the
	 registers being restored.  If an interrupt or page fault
	 happens in the ldm instruction, the SP might or might not
	 have been restored.  That would be bad, as then SP will no
	 longer indicate the safe area of stack, and we can get stack
	 corruption.  Using SP as the base register means that it will
	 be reset correctly to the original value, should an interrupt
	 occur.  If the stack pointer already points at the right
	 place, then omit the subtraction.  */
      if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
	  || current_function_calls_alloca)
	asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
		     4 * bit_count (saved_regs_mask));
      print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);

      if (IS_INTERRUPT (func_type))
	/* Interrupt handlers will have pushed the
	   IP onto the stack, so restore it now.  */
	print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
    }
  else
    {
      /* Restore stack pointer if necessary.  */
      if (offsets->outgoing_args != offsets->saved_regs)
	{
	  operands[0] = operands[1] = stack_pointer_rtx;
	  operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
	  output_add_immediate (operands);
	}

      if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
	{
	  for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
	    if (regs_ever_live[reg] && !call_used_regs[reg])
	      asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
			   reg, SP_REGNUM);
	}
      else
	{
	  start_reg = FIRST_FPA_REGNUM;

	  for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
	    {
	      if (regs_ever_live[reg] && !call_used_regs[reg])
		{
		  if (reg - start_reg == 3)
		    {
		      asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
				   start_reg, SP_REGNUM);
		      start_reg = reg + 1;
		    }
		}
	      else
		{
		  if (reg != start_reg)
		    asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
				 start_reg, reg - start_reg,
				 SP_REGNUM);

		  start_reg = reg + 1;
		}
	    }

	  /* Just in case the last register checked also needs unstacking.  */
	  if (reg != start_reg)
	    asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
			 start_reg, reg - start_reg, SP_REGNUM);
	}

      if (TARGET_HARD_FLOAT && TARGET_VFP)
	{
	  start_reg = FIRST_VFP_REGNUM;
	  for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
	    {
	      if ((!regs_ever_live[reg] || call_used_regs[reg])
		  && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
		{
		  if (start_reg != reg)
		    arm_output_fldmx (f, SP_REGNUM,
				      (start_reg - FIRST_VFP_REGNUM) / 2,
				      (reg - start_reg) / 2);
		  start_reg = reg + 2;
		}
	    }
	  if (start_reg != reg)
	    arm_output_fldmx (f, SP_REGNUM,
			      (start_reg - FIRST_VFP_REGNUM) / 2,
			      (reg - start_reg) / 2);
	}
      if (TARGET_IWMMXT)
	for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
	  if (regs_ever_live[reg] && !call_used_regs[reg])
	    asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);

      /* If we can, restore the LR into the PC.  */
      if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
	  && really_return
	  && current_function_pretend_args_size == 0
	  && saved_regs_mask & (1 << LR_REGNUM)
	  && !current_function_calls_eh_return)
	{
	  saved_regs_mask &= ~ (1 << LR_REGNUM);
	  saved_regs_mask |= (1 << PC_REGNUM);
	}

      /* Load the registers off the stack.  If we only have one register
	 to load use the LDR instruction - it is faster.  */
      if (saved_regs_mask == (1 << LR_REGNUM))
	{
	  asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
	}
      else if (saved_regs_mask)
	{
	  if (saved_regs_mask & (1 << SP_REGNUM))
	    /* Note - write back to the stack register is not enabled
	       (i.e. "ldmfd sp!...").  We know that the stack pointer is
	       in the list of registers and if we add writeback the
	       instruction becomes UNPREDICTABLE.  */
	    print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
	  else
	    print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
	}

      if (current_function_pretend_args_size)
	{
	  /* Unwind the pre-pushed regs.  */
	  operands[0] = operands[1] = stack_pointer_rtx;
	  operands[2] = GEN_INT (current_function_pretend_args_size);
	  output_add_immediate (operands);
	}
    }

  /* We may have already restored PC directly from the stack.  */
  if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
    return "";

  /* Stack adjustment for exception handler.  */
  if (current_function_calls_eh_return)
    asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
		 ARM_EH_STACKADJ_REGNUM);

  /* Generate the return instruction.  */
  switch ((int) ARM_FUNC_TYPE (func_type))
    {
    case ARM_FT_ISR:
    case ARM_FT_FIQ:
      asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
      break;

    case ARM_FT_EXCEPTION:
      asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
      break;

    case ARM_FT_INTERWORKED:
      asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
      break;

    default:
      if (arm_arch5 || arm_arch4t)
	asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
      else
	asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
      break;
    }

  return "";
}
10105 static void
10106 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10107 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
10109 arm_stack_offsets *offsets;
10111 if (TARGET_THUMB)
10113 int regno;
10115 /* Emit any call-via-reg trampolines that are needed for v4t support
10116 of call_reg and call_value_reg type insns. */
10117 for (regno = 0; regno < LR_REGNUM; regno++)
10119 rtx label = cfun->machine->call_via[regno];
10121 if (label != NULL)
10123 switch_to_section (function_section (current_function_decl));
10124 targetm.asm_out.internal_label (asm_out_file, "L",
10125 CODE_LABEL_NUMBER (label));
10126 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
10130 /* ??? Probably not safe to set this here, since it assumes that a
10131 function will be emitted as assembly immediately after we generate
10132 RTL for it. This does not happen for inline functions. */
10133 return_used_this_function = 0;
10135 else
10137 /* We need to take into account any stack-frame rounding. */
10138 offsets = arm_get_frame_offsets ();
10140 gcc_assert (!use_return_insn (FALSE, NULL)
10141 || !return_used_this_function
10142 || offsets->saved_regs == offsets->outgoing_args
10143 || frame_pointer_needed);
10145 /* Reset the ARM-specific per-function variables. */
10146 after_arm_reorg = 0;
10150 /* Generate and emit an insn that we will recognize as a push_multi.
10151 Unfortunately, since this insn does not reflect very well the actual
10152 semantics of the operation, we need to annotate the insn for the benefit
10153 of DWARF2 frame unwind information. */
10154 static rtx
10155 emit_multi_reg_push (unsigned long mask)
10157 int num_regs = 0;
10158 int num_dwarf_regs;
10159 int i, j;
10160 rtx par;
10161 rtx dwarf;
10162 int dwarf_par_index;
10163 rtx tmp, reg;
10165 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10166 if (mask & (1 << i))
10167 num_regs++;
10169 gcc_assert (num_regs && num_regs <= 16);
10171 /* We don't record the PC in the dwarf frame information. */
10172 num_dwarf_regs = num_regs;
10173 if (mask & (1 << PC_REGNUM))
10174 num_dwarf_regs--;
10176 /* For the body of the insn we are going to generate an UNSPEC in
10177 parallel with several USEs. This allows the insn to be recognized
10178 by the push_multi pattern in the arm.md file. The insn looks
10179 something like this:
10181 (parallel [
10182 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
10183 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
10184 (use (reg:SI 11 fp))
10185 (use (reg:SI 12 ip))
10186 (use (reg:SI 14 lr))
10187 (use (reg:SI 15 pc))
10190 For the frame note however, we try to be more explicit and actually
10191 show each register being stored into the stack frame, plus a (single)
10192 decrement of the stack pointer. We do it this way in order to be
10193 friendly to the stack unwinding code, which only wants to see a single
10194 stack decrement per instruction. The RTL we generate for the note looks
10195 something like this:
10197 (sequence [
10198 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
10199 (set (mem:SI (reg:SI sp)) (reg:SI r4))
10200 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
10201 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
10202 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
10205 This sequence is used both by the code to support stack unwinding for
10206 exception handlers and the code to generate dwarf2 frame debugging. */
10208 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10209 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10210 dwarf_par_index = 1;
10212 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10214 if (mask & (1 << i))
10216 reg = gen_rtx_REG (SImode, i);
10218 XVECEXP (par, 0, 0)
10219 = gen_rtx_SET (VOIDmode,
10220 gen_frame_mem (BLKmode,
10221 gen_rtx_PRE_DEC (BLKmode,
10222 stack_pointer_rtx)),
10223 gen_rtx_UNSPEC (BLKmode,
10224 gen_rtvec (1, reg),
10225 UNSPEC_PUSH_MULT));
10227 if (i != PC_REGNUM)
10229 tmp = gen_rtx_SET (VOIDmode,
10230 gen_frame_mem (SImode, stack_pointer_rtx),
10231 reg);
10232 RTX_FRAME_RELATED_P (tmp) = 1;
10233 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10234 dwarf_par_index++;
10237 break;
10241 for (j = 1, i++; j < num_regs; i++)
10243 if (mask & (1 << i))
10245 reg = gen_rtx_REG (SImode, i);
10247 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10249 if (i != PC_REGNUM)
10251 tmp
10252 = gen_rtx_SET (VOIDmode,
10253 gen_frame_mem (SImode,
10254 plus_constant (stack_pointer_rtx,
10255 4 * j)),
10256 reg);
10257 RTX_FRAME_RELATED_P (tmp) = 1;
10258 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10261 j++;
10265 par = emit_insn (par);
10267 tmp = gen_rtx_SET (VOIDmode,
10268 stack_pointer_rtx,
10269 plus_constant (stack_pointer_rtx, -4 * num_regs));
10270 RTX_FRAME_RELATED_P (tmp) = 1;
10271 XVECEXP (dwarf, 0, 0) = tmp;
10273 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10274 REG_NOTES (par));
10275 return par;
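/* A minimal standalone sketch (illustrative, not part of GCC) of the
   bookkeeping above: the single DWARF stack decrement is 4 bytes per
   saved register, and the J-th register pushed (counting from the
   lowest set bit of MASK) ends up at SP + 4 * J.  Kept under #if 0 so
   it is never compiled.  */
#if 0
#include <stdio.h>

static int
sketch_bit_count (unsigned long mask)
{
  int n = 0;
  for (; mask; mask >>= 1)
    n += mask & 1;
  return n;
}

int
main (void)
{
  unsigned long mask = (1ul << 4) | (1ul << 11) | (1ul << 14); /* r4, fp, lr */
  int j = 0, i;

  printf ("sp decrement: %d bytes\n", 4 * sketch_bit_count (mask));
  for (i = 0; i < 16; i++)
    if (mask & (1ul << i))
      printf ("r%d stored at [sp, #%d]\n", i, 4 * j++);
  return 0;
}
#endif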
10278 /* Calculate the size of the return value that is passed in registers. */
10279 static int
10280 arm_size_return_regs (void)
10282 enum machine_mode mode;
10284 if (current_function_return_rtx != 0)
10285 mode = GET_MODE (current_function_return_rtx);
10286 else
10287 mode = DECL_MODE (DECL_RESULT (current_function_decl));
10289 return GET_MODE_SIZE (mode);
10292 static rtx
10293 emit_sfm (int base_reg, int count)
10295 rtx par;
10296 rtx dwarf;
10297 rtx tmp, reg;
10298 int i;
10300 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10301 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10303 reg = gen_rtx_REG (XFmode, base_reg++);
10305 XVECEXP (par, 0, 0)
10306 = gen_rtx_SET (VOIDmode,
10307 gen_frame_mem (BLKmode,
10308 gen_rtx_PRE_DEC (BLKmode,
10309 stack_pointer_rtx)),
10310 gen_rtx_UNSPEC (BLKmode,
10311 gen_rtvec (1, reg),
10312 UNSPEC_PUSH_MULT));
10313 tmp = gen_rtx_SET (VOIDmode,
10314 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
10315 RTX_FRAME_RELATED_P (tmp) = 1;
10316 XVECEXP (dwarf, 0, 1) = tmp;
10318 for (i = 1; i < count; i++)
10320 reg = gen_rtx_REG (XFmode, base_reg++);
10321 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10323 tmp = gen_rtx_SET (VOIDmode,
10324 gen_frame_mem (XFmode,
10325 plus_constant (stack_pointer_rtx,
10326 i * 12)),
10327 reg);
10328 RTX_FRAME_RELATED_P (tmp) = 1;
10329 XVECEXP (dwarf, 0, i + 1) = tmp;
10332 tmp = gen_rtx_SET (VOIDmode,
10333 stack_pointer_rtx,
10334 plus_constant (stack_pointer_rtx, -12 * count));
10336 RTX_FRAME_RELATED_P (tmp) = 1;
10337 XVECEXP (dwarf, 0, 0) = tmp;
10339 par = emit_insn (par);
10340 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10341 REG_NOTES (par));
10342 return par;
10346 /* Return true if the current function needs to save/restore LR. */
10348 static bool
10349 thumb_force_lr_save (void)
10351 return !cfun->machine->lr_save_eliminated
10352 && (!leaf_function_p ()
10353 || thumb_far_jump_used_p ()
10354 || regs_ever_live [LR_REGNUM]);
10358 /* Compute the distance from register FROM to register TO.
10359 These can be the arg pointer (26), the soft frame pointer (25),
10360 the stack pointer (13) or the hard frame pointer (11).
10361 In thumb mode r7 is used as the soft frame pointer, if needed.
10362 Typical stack layout looks like this:
10364 old stack pointer -> | |
10365 ----
10366 | | \
10367 | | saved arguments for
10368 | | vararg functions
10369 | | /
10371 hard FP & arg pointer -> | | \
10372 | | stack
10373 | | frame
10374 | | /
10376 | | \
10377 | | call saved
10378 | | registers
10379 soft frame pointer -> | | /
10381 | | \
10382 | | local
10383 | | variables
10384 locals base pointer -> | | /
10386 | | \
10387 | | outgoing
10388 | | arguments
10389 current stack pointer -> | | /
10392 For a given function some or all of these stack components
10393 may not be needed, giving rise to the possibility of
10394 eliminating some of the registers.
10396 The values returned by this function must reflect the behavior
10397 of arm_expand_prologue() and arm_compute_save_reg_mask().
10399 The sign of the number returned reflects the direction of stack
10400 growth, so the values are positive for all eliminations except
10401 from the soft frame pointer to the hard frame pointer.
10403 SFP may point just inside the local variables block to ensure correct
10404 alignment. */
10407 /* Calculate stack offsets. These are used to calculate register elimination
10408 offsets and in prologue/epilogue code. */
10410 static arm_stack_offsets *
10411 arm_get_frame_offsets (void)
10413 struct arm_stack_offsets *offsets;
10414 unsigned long func_type;
10415 int leaf;
10416 int saved;
10417 HOST_WIDE_INT frame_size;
10419 offsets = &cfun->machine->stack_offsets;
10421 /* We need to know if we are a leaf function. Unfortunately, it
10422 is possible to be called after start_sequence has been called,
10423 which causes get_insns to return the insns for the sequence,
10424 not the function, which will cause leaf_function_p to return
10425 the incorrect result.
10427 However, we only need to know about leaf functions once reload has completed, and the
10428 frame size cannot be changed after that time, so we can safely
10429 use the cached value. */
10431 if (reload_completed)
10432 return offsets;
10434 /* Initially this is the size of the local variables. It will be translated
10435 into an offset once we have determined the size of preceding data. */
10436 frame_size = ROUND_UP_WORD (get_frame_size ());
10438 leaf = leaf_function_p ();
10440 /* Space for variadic functions. */
10441 offsets->saved_args = current_function_pretend_args_size;
10443 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10445 if (TARGET_ARM)
10447 unsigned int regno;
10449 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10451 /* We know that SP will be doubleword aligned on entry, and we must
10452 preserve that condition at any subroutine call. We also require the
10453 soft frame pointer to be doubleword aligned. */
10455 if (TARGET_REALLY_IWMMXT)
10457 /* Check for the call-saved iWMMXt registers. */
10458 for (regno = FIRST_IWMMXT_REGNUM;
10459 regno <= LAST_IWMMXT_REGNUM;
10460 regno++)
10461 if (regs_ever_live [regno] && ! call_used_regs [regno])
10462 saved += 8;
10465 func_type = arm_current_func_type ();
10466 if (! IS_VOLATILE (func_type))
10468 /* Space for saved FPA registers. */
10469 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10470 if (regs_ever_live[regno] && ! call_used_regs[regno])
10471 saved += 12;
10473 /* Space for saved VFP registers. */
10474 if (TARGET_HARD_FLOAT && TARGET_VFP)
10475 saved += arm_get_vfp_saved_size ();
10478 else /* TARGET_THUMB */
10480 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10481 if (TARGET_BACKTRACE)
10482 saved += 16;
10485 /* Saved registers include the stack frame. */
10486 offsets->saved_regs = offsets->saved_args + saved;
10487 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10488 /* A leaf function does not need any stack alignment if it has nothing
10489 on the stack. */
10490 if (leaf && frame_size == 0)
10492 offsets->outgoing_args = offsets->soft_frame;
10493 return offsets;
10496 /* Ensure SFP has the correct alignment. */
10497 if (ARM_DOUBLEWORD_ALIGN
10498 && (offsets->soft_frame & 7))
10499 offsets->soft_frame += 4;
10501 offsets->locals_base = offsets->soft_frame + frame_size;
10502 offsets->outgoing_args = (offsets->locals_base
10503 + current_function_outgoing_args_size);
10505 if (ARM_DOUBLEWORD_ALIGN)
10507 /* Ensure SP remains doubleword aligned. */
10508 if (offsets->outgoing_args & 7)
10509 offsets->outgoing_args += 4;
10510 gcc_assert (!(offsets->outgoing_args & 7));
10513 return offsets;
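/* A worked example of the layout computed above, under assumed numbers
   (not a statement about any particular target): saved_args = 0, 12
   bytes of core registers saved, 20 bytes of locals, 8 bytes of
   outgoing arguments, ARM_DOUBLEWORD_ALIGN in effect and no
   interworking slot.  Kept under #if 0 so it is never compiled.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int saved_args = 0;
  int saved_regs = saved_args + 12;	/* e.g. r4, fp, lr */
  int soft_frame = saved_regs;		/* interworking slot assumed 0 */
  int locals_base, outgoing_args;

  if (soft_frame & 7)			/* mirror the alignment fix-ups */
    soft_frame += 4;
  locals_base = soft_frame + 20;
  outgoing_args = locals_base + 8;
  if (outgoing_args & 7)
    outgoing_args += 4;

  /* Prints: saved_regs=12 soft_frame=16 locals_base=36 outgoing_args=48 */
  printf ("saved_regs=%d soft_frame=%d locals_base=%d outgoing_args=%d\n",
	  saved_regs, soft_frame, locals_base, outgoing_args);
  return 0;
}
#endif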
10517 /* Calculate the relative offsets for the different stack pointers. Positive
10518 offsets are in the direction of stack growth. */
10520 HOST_WIDE_INT
10521 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10523 arm_stack_offsets *offsets;
10525 offsets = arm_get_frame_offsets ();
10527 /* OK, now we have enough information to compute the distances.
10528 There must be an entry in these switch tables for each pair
10529 of registers in ELIMINABLE_REGS, even if some of the entries
10530 seem to be redundant or useless. */
10531 switch (from)
10533 case ARG_POINTER_REGNUM:
10534 switch (to)
10536 case THUMB_HARD_FRAME_POINTER_REGNUM:
10537 return 0;
10539 case FRAME_POINTER_REGNUM:
10540 /* This is the reverse of the soft frame pointer
10541 to hard frame pointer elimination below. */
10542 return offsets->soft_frame - offsets->saved_args;
10544 case ARM_HARD_FRAME_POINTER_REGNUM:
10545 /* If there is no stack frame then the hard
10546 frame pointer and the arg pointer coincide. */
10547 if (offsets->frame == offsets->saved_regs)
10548 return 0;
10549 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10550 return (frame_pointer_needed
10551 && cfun->static_chain_decl != NULL
10552 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10554 case STACK_POINTER_REGNUM:
10555 /* If nothing has been pushed on the stack at all
10556 then this will return -4. This *is* correct! */
10557 return offsets->outgoing_args - (offsets->saved_args + 4);
10559 default:
10560 gcc_unreachable ();
10562 gcc_unreachable ();
10564 case FRAME_POINTER_REGNUM:
10565 switch (to)
10567 case THUMB_HARD_FRAME_POINTER_REGNUM:
10568 return 0;
10570 case ARM_HARD_FRAME_POINTER_REGNUM:
10571 /* The hard frame pointer points to the top entry in the
10572 stack frame. The soft frame pointer to the bottom entry
10573 in the stack frame. If there is no stack frame at all,
10574 then they are identical. */
10576 return offsets->frame - offsets->soft_frame;
10578 case STACK_POINTER_REGNUM:
10579 return offsets->outgoing_args - offsets->soft_frame;
10581 default:
10582 gcc_unreachable ();
10584 gcc_unreachable ();
10586 default:
10587 /* You cannot eliminate from the stack pointer.
10588 In theory you could eliminate from the hard frame
10589 pointer to the stack pointer, but this will never
10590 happen, since if a stack frame is not needed the
10591 hard frame pointer will never be used. */
10592 gcc_unreachable ();
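/* Continuing the worked example above (saved_args = 0, soft_frame = 16,
   outgoing_args = 48, all assumed numbers): ARG_POINTER to
   FRAME_POINTER eliminates to 16 - 0 = 16, FRAME_POINTER to
   STACK_POINTER to 48 - 16 = 32, and ARG_POINTER to STACK_POINTER to
   48 - (0 + 4) = 44.  */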
10597 /* Generate the prologue instructions for entry into an ARM function. */
10598 void
10599 arm_expand_prologue (void)
10601 int reg;
10602 rtx amount;
10603 rtx insn;
10604 rtx ip_rtx;
10605 unsigned long live_regs_mask;
10606 unsigned long func_type;
10607 int fp_offset = 0;
10608 int saved_pretend_args = 0;
10609 int saved_regs = 0;
10610 unsigned HOST_WIDE_INT args_to_push;
10611 arm_stack_offsets *offsets;
10613 func_type = arm_current_func_type ();
10615 /* Naked functions don't have prologues. */
10616 if (IS_NAKED (func_type))
10617 return;
10619 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10620 args_to_push = current_function_pretend_args_size;
10622 /* Compute which registers we will have to save onto the stack. */
10623 live_regs_mask = arm_compute_save_reg_mask ();
10625 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10627 if (frame_pointer_needed)
10629 if (IS_INTERRUPT (func_type))
10631 /* Interrupt functions must not corrupt any registers.
10632 Creating a frame pointer, however, corrupts the IP
10633 register, so we must push it first. */
10634 insn = emit_multi_reg_push (1 << IP_REGNUM);
10636 /* Do not set RTX_FRAME_RELATED_P on this insn.
10637 The dwarf stack unwinding code only wants to see one
10638 stack decrement per function, and this is not it. If
10639 this instruction is labeled as being part of the frame
10640 creation sequence then dwarf2out_frame_debug_expr will
10641 die when it encounters the assignment of IP to FP
10642 later on, since the use of SP here establishes SP as
10643 the CFA register and not IP.
10645 Anyway this instruction is not really part of the stack
10646 frame creation although it is part of the prologue. */
10648 else if (IS_NESTED (func_type))
10650 /* The static chain register is the same as the IP register, which is
10651 used as a scratch register during stack frame creation.
10652 To get around this we need to find somewhere to store IP
10653 whilst the frame is being created. We try the following
10654 places in order:
10656 1. The last argument register.
10657 2. A slot on the stack above the frame. (This only
10658 works if the function is not a varargs function).
10659 3. Register r3, after pushing the argument registers
10660 onto the stack.
10662 Note - we only need to tell the dwarf2 backend about the SP
10663 adjustment in the second variant; the static chain register
10664 doesn't need to be unwound, as it doesn't contain a value
10665 inherited from the caller. */
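/* Illustrative asm for the three cases (a sketch, not verbatim output):

	1:  mov	r3, ip			@ r3 is known to be free
	2:  str	ip, [sp, #-4]!		@ no varargs; slot above the frame
	3:  stmfd	sp!, {r0-r3}		@ push the argument registers,
	    mov	r3, ip			@ then reuse r3 to hold IP  */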
10667 if (regs_ever_live[3] == 0)
10668 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10669 else if (args_to_push == 0)
10671 rtx dwarf;
10673 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10674 insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
10675 fp_offset = 4;
10677 /* Just tell the dwarf backend that we adjusted SP. */
10678 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10679 plus_constant (stack_pointer_rtx,
10680 -fp_offset));
10681 RTX_FRAME_RELATED_P (insn) = 1;
10682 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10683 dwarf, REG_NOTES (insn));
10685 else
10687 /* Store the args on the stack. */
10688 if (cfun->machine->uses_anonymous_args)
10689 insn = emit_multi_reg_push
10690 ((0xf0 >> (args_to_push / 4)) & 0xf);
10691 else
10692 insn = emit_insn
10693 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10694 GEN_INT (- args_to_push)));
10696 RTX_FRAME_RELATED_P (insn) = 1;
10698 saved_pretend_args = 1;
10699 fp_offset = args_to_push;
10700 args_to_push = 0;
10702 /* Now reuse r3 to preserve IP. */
10703 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10707 insn = emit_set_insn (ip_rtx,
10708 plus_constant (stack_pointer_rtx, fp_offset));
10709 RTX_FRAME_RELATED_P (insn) = 1;
10712 if (args_to_push)
10714 /* Push the argument registers, or reserve space for them. */
10715 if (cfun->machine->uses_anonymous_args)
10716 insn = emit_multi_reg_push
10717 ((0xf0 >> (args_to_push / 4)) & 0xf);
10718 else
10719 insn = emit_insn
10720 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10721 GEN_INT (- args_to_push)));
10722 RTX_FRAME_RELATED_P (insn) = 1;
10725 /* If this is an interrupt service routine, and the link register
10726 is going to be pushed, and we are not creating a stack frame,
10727 (which would involve an extra push of IP and a pop in the epilogue)
10728 subtracting four from LR now will mean that the function return
10729 can be done with a single instruction. */
10730 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10731 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10732 && ! frame_pointer_needed)
10734 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
10736 emit_set_insn (lr, plus_constant (lr, -4));
10739 if (live_regs_mask)
10741 insn = emit_multi_reg_push (live_regs_mask);
10742 saved_regs += bit_count (live_regs_mask) * 4;
10743 RTX_FRAME_RELATED_P (insn) = 1;
10746 if (TARGET_IWMMXT)
10747 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10748 if (regs_ever_live[reg] && ! call_used_regs [reg])
10750 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10751 insn = gen_frame_mem (V2SImode, insn);
10752 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
10753 RTX_FRAME_RELATED_P (insn) = 1;
10754 saved_regs += 8;
10757 if (! IS_VOLATILE (func_type))
10759 int start_reg;
10761 /* Save any floating point call-saved registers used by this
10762 function. */
10763 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10765 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10766 if (regs_ever_live[reg] && !call_used_regs[reg])
10768 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10769 insn = gen_frame_mem (XFmode, insn);
10770 insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
10771 RTX_FRAME_RELATED_P (insn) = 1;
10772 saved_regs += 12;
10775 else
10777 start_reg = LAST_FPA_REGNUM;
10779 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10781 if (regs_ever_live[reg] && !call_used_regs[reg])
10783 if (start_reg - reg == 3)
10785 insn = emit_sfm (reg, 4);
10786 RTX_FRAME_RELATED_P (insn) = 1;
10787 saved_regs += 48;
10788 start_reg = reg - 1;
10791 else
10793 if (start_reg != reg)
10795 insn = emit_sfm (reg + 1, start_reg - reg);
10796 RTX_FRAME_RELATED_P (insn) = 1;
10797 saved_regs += (start_reg - reg) * 12;
10799 start_reg = reg - 1;
10803 if (start_reg != reg)
10805 insn = emit_sfm (reg + 1, start_reg - reg);
10806 saved_regs += (start_reg - reg) * 12;
10807 RTX_FRAME_RELATED_P (insn) = 1;
10810 if (TARGET_HARD_FLOAT && TARGET_VFP)
10812 start_reg = FIRST_VFP_REGNUM;
10814 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10816 if ((!regs_ever_live[reg] || call_used_regs[reg])
10817 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10819 if (start_reg != reg)
10820 saved_regs += vfp_emit_fstmx (start_reg,
10821 (reg - start_reg) / 2);
10822 start_reg = reg + 2;
10825 if (start_reg != reg)
10826 saved_regs += vfp_emit_fstmx (start_reg,
10827 (reg - start_reg) / 2);
10831 if (frame_pointer_needed)
10833 /* Create the new frame pointer. */
10834 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10835 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10836 RTX_FRAME_RELATED_P (insn) = 1;
10838 if (IS_NESTED (func_type))
10840 /* Recover the static chain register. */
10841 if (regs_ever_live [3] == 0
10842 || saved_pretend_args)
10843 insn = gen_rtx_REG (SImode, 3);
10844 else /* if (current_function_pretend_args_size == 0) */
10846 insn = plus_constant (hard_frame_pointer_rtx, 4);
10847 insn = gen_frame_mem (SImode, insn);
10850 emit_set_insn (ip_rtx, insn);
10851 /* Add a USE to stop propagate_one_insn() from barfing. */
10852 emit_insn (gen_prologue_use (ip_rtx));
10856 offsets = arm_get_frame_offsets ();
10857 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10859 /* This add can produce multiple insns for a large constant, so we
10860 need to get tricky. */
10861 rtx last = get_last_insn ();
10863 amount = GEN_INT (offsets->saved_args + saved_regs
10864 - offsets->outgoing_args);
10866 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10867 amount));
10868 do
10870 last = last ? NEXT_INSN (last) : get_insns ();
10871 RTX_FRAME_RELATED_P (last) = 1;
10873 while (last != insn);
10875 /* If the frame pointer is needed, emit a special barrier that
10876 will prevent the scheduler from moving stores to the frame
10877 before the stack adjustment. */
10878 if (frame_pointer_needed)
10879 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10880 hard_frame_pointer_rtx));
10884 if (flag_pic && arm_pic_register != INVALID_REGNUM)
10885 arm_load_pic_register (0UL);
10887 /* If we are profiling, make sure no instructions are scheduled before
10888 the call to mcount. Similarly if the user has requested no
10889 scheduling in the prologue. Similarly if we want non-call exceptions
10890 using the EABI unwinder, to prevent faulting instructions from being
10891 swapped with a stack adjustment. */
10892 if (current_function_profile || !TARGET_SCHED_PROLOG
10893 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
10894 emit_insn (gen_blockage ());
10896 /* If the link register is being kept alive, with the return address in it,
10897 then make sure that it does not get reused by the ce2 pass. */
10898 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10900 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10901 cfun->machine->lr_save_eliminated = 1;
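/* For reference, a typical frame-pointer prologue built by the code above
   looks something like this (a sketch for a non-nested function with no
   pretend args):

	mov	ip, sp
	stmfd	sp!, {r4, fp, ip, lr, pc}
	sub	fp, ip, #4
	sub	sp, sp, #<locals + outgoing args>  */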
10905 /* If CODE is 'd', then X is a condition operand and the instruction
10906 should only be executed if the condition is true.
10907 If CODE is 'D', then X is a condition operand and the instruction
10908 should only be executed if the condition is false: however, if the mode
10909 of the comparison is CCFPEmode, then always execute the instruction -- we
10910 do this because in these circumstances !GE does not necessarily imply LT;
10911 in these cases the instruction pattern will take care to make sure that
10912 an instruction containing %d will follow, thereby undoing the effects of
10913 doing this instruction unconditionally.
10914 If CODE is 'N' then X is a floating point operand that must be negated
10915 before output.
10916 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10917 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10918 void
10919 arm_print_operand (FILE *stream, rtx x, int code)
10921 switch (code)
10923 case '@':
10924 fputs (ASM_COMMENT_START, stream);
10925 return;
10927 case '_':
10928 fputs (user_label_prefix, stream);
10929 return;
10931 case '|':
10932 fputs (REGISTER_PREFIX, stream);
10933 return;
10935 case '?':
10936 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10938 if (TARGET_THUMB)
10940 output_operand_lossage ("predicated Thumb instruction");
10941 break;
10943 if (current_insn_predicate != NULL)
10945 output_operand_lossage
10946 ("predicated instruction in conditional sequence");
10947 break;
10950 fputs (arm_condition_codes[arm_current_cc], stream);
10952 else if (current_insn_predicate)
10954 enum arm_cond_code code;
10956 if (TARGET_THUMB)
10958 output_operand_lossage ("predicated Thumb instruction");
10959 break;
10962 code = get_arm_condition_code (current_insn_predicate);
10963 fputs (arm_condition_codes[code], stream);
10965 return;
10967 case 'N':
10969 REAL_VALUE_TYPE r;
10970 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10971 r = REAL_VALUE_NEGATE (r);
10972 fprintf (stream, "%s", fp_const_from_val (&r));
10974 return;
10976 case 'B':
10977 if (GET_CODE (x) == CONST_INT)
10979 HOST_WIDE_INT val;
10980 val = ARM_SIGN_EXTEND (~INTVAL (x));
10981 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10983 else
10985 putc ('~', stream);
10986 output_addr_const (stream, x);
10988 return;
10990 case 'i':
10991 fprintf (stream, "%s", arithmetic_instr (x, 1));
10992 return;
10994 /* Truncate Cirrus shift counts. */
10995 case 's':
10996 if (GET_CODE (x) == CONST_INT)
10998 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10999 return;
11001 arm_print_operand (stream, x, 0);
11002 return;
11004 case 'I':
11005 fprintf (stream, "%s", arithmetic_instr (x, 0));
11006 return;
11008 case 'S':
11010 HOST_WIDE_INT val;
11011 const char * shift = shift_op (x, &val);
11013 if (shift)
11015 fprintf (stream, ", %s ", shift_op (x, &val));
11016 if (val == -1)
11017 arm_print_operand (stream, XEXP (x, 1), 0);
11018 else
11019 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
11022 return;
11024 /* An explanation of the 'Q', 'R' and 'H' register operands:
11026 In a pair of registers containing a DI or DF value the 'Q'
11027 operand returns the register number of the register containing
11028 the least significant part of the value. The 'R' operand returns
11029 the register number of the register containing the most
11030 significant part of the value.
11032 The 'H' operand returns the higher of the two register numbers.
11033 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
11034 same as the 'Q' operand, since the most significant part of the
11035 value is held in the lower-numbered register. The reverse is true
11036 on systems where WORDS_BIG_ENDIAN is false.
11038 The purpose of these operands is to distinguish between cases
11039 where the endian-ness of the values is important (for example
11040 when they are added together), and cases where the endian-ness
11041 is irrelevant, but the order of register operations is important.
11042 For example when loading a value from memory into a register
11043 pair, the endian-ness does not matter. Provided that the value
11044 from the lower memory address is put into the lower numbered
11045 register, and the value from the higher address is put into the
11046 higher numbered register, the load will work regardless of whether
11047 the value being loaded is big-wordian or little-wordian. The
11048 order of the two register loads can matter however, if the address
11049 of the memory location is actually held in one of the registers
11050 being overwritten by the load. */
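/* For example (assuming WORDS_BIG_ENDIAN is false), for a DImode value
   in {r0, r1}: %Q prints r0 (the least significant word), %R prints r1
   (the most significant word), and %H prints r1 on any target, since it
   is simply the higher of the two register numbers.  */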
11051 case 'Q':
11052 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11054 output_operand_lossage ("invalid operand for code '%c'", code);
11055 return;
11058 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
11059 return;
11061 case 'R':
11062 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11064 output_operand_lossage ("invalid operand for code '%c'", code);
11065 return;
11068 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
11069 return;
11071 case 'H':
11072 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11074 output_operand_lossage ("invalid operand for code '%c'", code);
11075 return;
11078 asm_fprintf (stream, "%r", REGNO (x) + 1);
11079 return;
11081 case 'm':
11082 asm_fprintf (stream, "%r",
11083 GET_CODE (XEXP (x, 0)) == REG
11084 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
11085 return;
11087 case 'M':
11088 asm_fprintf (stream, "{%r-%r}",
11089 REGNO (x),
11090 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
11091 return;
11093 case 'd':
11094 /* CONST_TRUE_RTX means always -- that's the default. */
11095 if (x == const_true_rtx)
11096 return;
11098 if (!COMPARISON_P (x))
11100 output_operand_lossage ("invalid operand for code '%c'", code);
11101 return;
11104 fputs (arm_condition_codes[get_arm_condition_code (x)],
11105 stream);
11106 return;
11108 case 'D':
11109 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11110 want to do that. */
11111 if (x == const_true_rtx)
11113 output_operand_lossage ("instruction never executed");
11114 return;
11116 if (!COMPARISON_P (x))
11118 output_operand_lossage ("invalid operand for code '%c'", code);
11119 return;
11122 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
11123 (get_arm_condition_code (x))],
11124 stream);
11125 return;
11127 /* Cirrus registers can be accessed in a variety of ways:
11128 single floating point (f)
11129 double floating point (d)
11130 32bit integer (fx)
11131 64bit integer (dx). */
11132 case 'W': /* Cirrus register in F mode. */
11133 case 'X': /* Cirrus register in D mode. */
11134 case 'Y': /* Cirrus register in FX mode. */
11135 case 'Z': /* Cirrus register in DX mode. */
11136 gcc_assert (GET_CODE (x) == REG
11137 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
11139 fprintf (stream, "mv%s%s",
11140 code == 'W' ? "f"
11141 : code == 'X' ? "d"
11142 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
11144 return;
11146 /* Print a Cirrus register in the mode specified by the operand's mode. */
11147 case 'V':
11149 int mode = GET_MODE (x);
11151 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
11153 output_operand_lossage ("invalid operand for code '%c'", code);
11154 return;
11157 fprintf (stream, "mv%s%s",
11158 mode == DFmode ? "d"
11159 : mode == SImode ? "fx"
11160 : mode == DImode ? "dx"
11161 : "f", reg_names[REGNO (x)] + 2);
11163 return;
11166 case 'U':
11167 if (GET_CODE (x) != REG
11168 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
11169 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
11170 /* Bad value for wCG register number. */
11172 output_operand_lossage ("invalid operand for code '%c'", code);
11173 return;
11176 else
11177 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
11178 return;
11180 /* Print an iWMMXt control register name. */
11181 case 'w':
11182 if (GET_CODE (x) != CONST_INT
11183 || INTVAL (x) < 0
11184 || INTVAL (x) >= 16)
11185 /* Bad value for wC register number. */
11187 output_operand_lossage ("invalid operand for code '%c'", code);
11188 return;
11191 else
11193 static const char * wc_reg_names [16] =
11195 "wCID", "wCon", "wCSSF", "wCASF",
11196 "wC4", "wC5", "wC6", "wC7",
11197 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
11198 "wC12", "wC13", "wC14", "wC15"
11201 fputs (wc_reg_names [INTVAL (x)], stream);
11203 return;
11205 /* Print a VFP double precision register name. */
11206 case 'P':
11208 int mode = GET_MODE (x);
11209 int num;
11211 if (mode != DImode && mode != DFmode)
11213 output_operand_lossage ("invalid operand for code '%c'", code);
11214 return;
11217 if (GET_CODE (x) != REG
11218 || !IS_VFP_REGNUM (REGNO (x)))
11220 output_operand_lossage ("invalid operand for code '%c'", code);
11221 return;
11224 num = REGNO (x) - FIRST_VFP_REGNUM;
11225 if (num & 1)
11227 output_operand_lossage ("invalid operand for code '%c'", code);
11228 return;
11231 fprintf (stream, "d%d", num >> 1);
11233 return;
11235 default:
11236 if (x == 0)
11238 output_operand_lossage ("missing operand");
11239 return;
11242 switch (GET_CODE (x))
11244 case REG:
11245 asm_fprintf (stream, "%r", REGNO (x));
11246 break;
11248 case MEM:
11249 output_memory_reference_mode = GET_MODE (x);
11250 output_address (XEXP (x, 0));
11251 break;
11253 case CONST_DOUBLE:
11254 fprintf (stream, "#%s", fp_immediate_constant (x));
11255 break;
11257 default:
11258 gcc_assert (GET_CODE (x) != NEG);
11259 fputc ('#', stream);
11260 output_addr_const (stream, x);
11261 break;
11266 #ifndef AOF_ASSEMBLER
11267 /* Target hook for assembling integer objects. The ARM version needs to
11268 handle word-sized values specially. */
11269 static bool
11270 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
11272 if (size == UNITS_PER_WORD && aligned_p)
11274 fputs ("\t.word\t", asm_out_file);
11275 output_addr_const (asm_out_file, x);
11277 /* Mark symbols as position independent. We only do this in the
11278 .text segment, not in the .data segment. */
11279 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
11280 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
11282 if (GET_CODE (x) == SYMBOL_REF
11283 && (CONSTANT_POOL_ADDRESS_P (x)
11284 || SYMBOL_REF_LOCAL_P (x)))
11285 fputs ("(GOTOFF)", asm_out_file);
11286 else if (GET_CODE (x) == LABEL_REF)
11287 fputs ("(GOTOFF)", asm_out_file);
11288 else
11289 fputs ("(GOT)", asm_out_file);
11291 fputc ('\n', asm_out_file);
11292 return true;
11295 if (arm_vector_mode_supported_p (GET_MODE (x)))
11297 int i, units;
11299 gcc_assert (GET_CODE (x) == CONST_VECTOR);
11301 units = CONST_VECTOR_NUNITS (x);
11303 switch (GET_MODE (x))
11305 case V2SImode: size = 4; break;
11306 case V4HImode: size = 2; break;
11307 case V8QImode: size = 1; break;
11308 default:
11309 gcc_unreachable ();
11312 for (i = 0; i < units; i++)
11314 rtx elt;
11316 elt = CONST_VECTOR_ELT (x, i);
11317 assemble_integer
11318 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
11321 return true;
11324 return default_assemble_integer (x, size, aligned_p);
11328 /* Add a function to the list of static constructors. */
11330 static void
11331 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
11333 if (!TARGET_AAPCS_BASED)
11335 default_named_section_asm_out_constructor (symbol, priority);
11336 return;
11339 /* Put these in the .init_array section, using a special relocation. */
11340 switch_to_section (ctors_section);
11341 assemble_align (POINTER_SIZE);
11342 fputs ("\t.word\t", asm_out_file);
11343 output_addr_const (asm_out_file, symbol);
11344 fputs ("(target1)\n", asm_out_file);
11346 #endif
11348 /* A finite state machine takes care of noticing whether or not instructions
11349 can be conditionally executed, and thus decrease execution time and code
11350 size by deleting branch instructions. The fsm is controlled by
11351 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
11353 /* The states of the fsm controlling condition codes are:
11354 0: normal, do nothing special
11355 1: make ASM_OUTPUT_OPCODE not output this instruction
11356 2: make ASM_OUTPUT_OPCODE not output this instruction
11357 3: make instructions conditional
11358 4: make instructions conditional
11360 State transitions (state->state by whom under condition):
11361 0 -> 1 final_prescan_insn if the `target' is a label
11362 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
11363 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
11364 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
11365 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
11366 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
11367 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
11368 (the target insn is arm_target_insn).
11370 If the jump clobbers the conditions then we use states 2 and 4.
11372 A similar thing can be done with conditional return insns.
11374 XXX In case the `target' is an unconditional branch, this conditionalising
11375 of the instructions always reduces code size, but not always execution
11376 time. But then, I want to reduce the code size to somewhere near what
11377 /bin/cc produces. */
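/* For example (illustrative only), the fsm turns

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   into

	cmp	r0, #0
	addne	r1, r1, #1

   eliminating both the branch and the label.  */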
11379 /* Returns the index of the ARM condition code string in
11380 `arm_condition_codes'. COMPARISON should be an rtx like
11381 `(eq (...) (...))'. */
11382 static enum arm_cond_code
11383 get_arm_condition_code (rtx comparison)
11385 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11386 int code;
11387 enum rtx_code comp_code = GET_CODE (comparison);
11389 if (GET_MODE_CLASS (mode) != MODE_CC)
11390 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11391 XEXP (comparison, 1));
11393 switch (mode)
11395 case CC_DNEmode: code = ARM_NE; goto dominance;
11396 case CC_DEQmode: code = ARM_EQ; goto dominance;
11397 case CC_DGEmode: code = ARM_GE; goto dominance;
11398 case CC_DGTmode: code = ARM_GT; goto dominance;
11399 case CC_DLEmode: code = ARM_LE; goto dominance;
11400 case CC_DLTmode: code = ARM_LT; goto dominance;
11401 case CC_DGEUmode: code = ARM_CS; goto dominance;
11402 case CC_DGTUmode: code = ARM_HI; goto dominance;
11403 case CC_DLEUmode: code = ARM_LS; goto dominance;
11404 case CC_DLTUmode: code = ARM_CC;
11406 dominance:
11407 gcc_assert (comp_code == EQ || comp_code == NE);
11409 if (comp_code == EQ)
11410 return ARM_INVERSE_CONDITION_CODE (code);
11411 return code;
11413 case CC_NOOVmode:
11414 switch (comp_code)
11416 case NE: return ARM_NE;
11417 case EQ: return ARM_EQ;
11418 case GE: return ARM_PL;
11419 case LT: return ARM_MI;
11420 default: gcc_unreachable ();
11423 case CC_Zmode:
11424 switch (comp_code)
11426 case NE: return ARM_NE;
11427 case EQ: return ARM_EQ;
11428 default: gcc_unreachable ();
11431 case CC_Nmode:
11432 switch (comp_code)
11434 case NE: return ARM_MI;
11435 case EQ: return ARM_PL;
11436 default: gcc_unreachable ();
11439 case CCFPEmode:
11440 case CCFPmode:
11441 /* These encodings assume that AC=1 in the FPA system control
11442 byte. This allows us to handle all cases except UNEQ and
11443 LTGT. */
11444 switch (comp_code)
11446 case GE: return ARM_GE;
11447 case GT: return ARM_GT;
11448 case LE: return ARM_LS;
11449 case LT: return ARM_MI;
11450 case NE: return ARM_NE;
11451 case EQ: return ARM_EQ;
11452 case ORDERED: return ARM_VC;
11453 case UNORDERED: return ARM_VS;
11454 case UNLT: return ARM_LT;
11455 case UNLE: return ARM_LE;
11456 case UNGT: return ARM_HI;
11457 case UNGE: return ARM_PL;
11458 /* UNEQ and LTGT do not have a representation. */
11459 case UNEQ: /* Fall through. */
11460 case LTGT: /* Fall through. */
11461 default: gcc_unreachable ();
11464 case CC_SWPmode:
11465 switch (comp_code)
11467 case NE: return ARM_NE;
11468 case EQ: return ARM_EQ;
11469 case GE: return ARM_LE;
11470 case GT: return ARM_LT;
11471 case LE: return ARM_GE;
11472 case LT: return ARM_GT;
11473 case GEU: return ARM_LS;
11474 case GTU: return ARM_CC;
11475 case LEU: return ARM_CS;
11476 case LTU: return ARM_HI;
11477 default: gcc_unreachable ();
11480 case CC_Cmode:
11481 switch (comp_code)
11483 case LTU: return ARM_CS;
11484 case GEU: return ARM_CC;
11485 default: gcc_unreachable ();
11488 case CCmode:
11489 switch (comp_code)
11491 case NE: return ARM_NE;
11492 case EQ: return ARM_EQ;
11493 case GE: return ARM_GE;
11494 case GT: return ARM_GT;
11495 case LE: return ARM_LE;
11496 case LT: return ARM_LT;
11497 case GEU: return ARM_CS;
11498 case GTU: return ARM_HI;
11499 case LEU: return ARM_LS;
11500 case LTU: return ARM_CC;
11501 default: gcc_unreachable ();
11504 default: gcc_unreachable ();
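/* As an example of the table above: in CC_SWPmode the comparison operands
   were swapped when the flags were set, so a GT test on the original
   operands is emitted as LT, and an unsigned GEU as LS.  */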
11508 void
11509 arm_final_prescan_insn (rtx insn)
11511 /* BODY will hold the body of INSN. */
11512 rtx body = PATTERN (insn);
11514 /* This will be 1 if trying to repeat the trick, and things need to be
11515 reversed if it appears to fail. */
11516 int reverse = 0;
11518 /* JUMP_CLOBBERS being one implies that the condition codes are clobbered
11519 if the branch is taken, even if the rtl suggests otherwise. It also
11520 means that we have to grub around within the jump expression to find
11521 out what the conditions are when the jump isn't taken. */
11522 int jump_clobbers = 0;
11524 /* If we start with a return insn, we only succeed if we find another one. */
11525 int seeking_return = 0;
11527 /* START_INSN will hold the insn from where we start looking. This is the
11528 first insn after the following code_label if REVERSE is true. */
11529 rtx start_insn = insn;
11531 /* If in state 4, check if the target branch is reached, in order to
11532 change back to state 0. */
11533 if (arm_ccfsm_state == 4)
11535 if (insn == arm_target_insn)
11537 arm_target_insn = NULL;
11538 arm_ccfsm_state = 0;
11540 return;
11543 /* If in state 3, it is possible to repeat the trick, if this insn is an
11544 unconditional branch to a label, and immediately following this branch
11545 is the previous target label which is only used once, and the label this
11546 branch jumps to is not too far off. */
11547 if (arm_ccfsm_state == 3)
11549 if (simplejump_p (insn))
11551 start_insn = next_nonnote_insn (start_insn);
11552 if (GET_CODE (start_insn) == BARRIER)
11554 /* XXX Isn't this always a barrier? */
11555 start_insn = next_nonnote_insn (start_insn);
11557 if (GET_CODE (start_insn) == CODE_LABEL
11558 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11559 && LABEL_NUSES (start_insn) == 1)
11560 reverse = TRUE;
11561 else
11562 return;
11564 else if (GET_CODE (body) == RETURN)
11566 start_insn = next_nonnote_insn (start_insn);
11567 if (GET_CODE (start_insn) == BARRIER)
11568 start_insn = next_nonnote_insn (start_insn);
11569 if (GET_CODE (start_insn) == CODE_LABEL
11570 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11571 && LABEL_NUSES (start_insn) == 1)
11573 reverse = TRUE;
11574 seeking_return = 1;
11576 else
11577 return;
11579 else
11580 return;
11583 gcc_assert (!arm_ccfsm_state || reverse);
11584 if (GET_CODE (insn) != JUMP_INSN)
11585 return;
11587 /* This jump might be paralleled with a clobber of the condition codes;
11588 the jump should always come first. */
11589 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11590 body = XVECEXP (body, 0, 0);
11592 if (reverse
11593 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11594 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11596 int insns_skipped;
11597 int fail = FALSE, succeed = FALSE;
11598 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11599 int then_not_else = TRUE;
11600 rtx this_insn = start_insn, label = 0;
11602 /* If the jump cannot be done with one instruction, we cannot
11603 conditionally execute the instruction in the inverse case. */
11604 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11606 jump_clobbers = 1;
11607 return;
11610 /* Register the insn jumped to. */
11611 if (reverse)
11613 if (!seeking_return)
11614 label = XEXP (SET_SRC (body), 0);
11616 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11617 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11618 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11620 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11621 then_not_else = FALSE;
11623 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11624 seeking_return = 1;
11625 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11627 seeking_return = 1;
11628 then_not_else = FALSE;
11630 else
11631 gcc_unreachable ();
11633 /* See how many insns this branch skips, and what kind of insns. If all
11634 insns are okay, and the label or unconditional branch to the same
11635 label is not too far away, succeed. */
11636 for (insns_skipped = 0;
11637 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11639 rtx scanbody;
11641 this_insn = next_nonnote_insn (this_insn);
11642 if (!this_insn)
11643 break;
11645 switch (GET_CODE (this_insn))
11647 case CODE_LABEL:
11648 /* Succeed if it is the target label, otherwise fail since
11649 control falls in from somewhere else. */
11650 if (this_insn == label)
11652 if (jump_clobbers)
11654 arm_ccfsm_state = 2;
11655 this_insn = next_nonnote_insn (this_insn);
11657 else
11658 arm_ccfsm_state = 1;
11659 succeed = TRUE;
11661 else
11662 fail = TRUE;
11663 break;
11665 case BARRIER:
11666 /* Succeed if the following insn is the target label.
11667 Otherwise fail.
11668 If return insns are used then the last insn in a function
11669 will be a barrier. */
11670 this_insn = next_nonnote_insn (this_insn);
11671 if (this_insn && this_insn == label)
11673 if (jump_clobbers)
11675 arm_ccfsm_state = 2;
11676 this_insn = next_nonnote_insn (this_insn);
11678 else
11679 arm_ccfsm_state = 1;
11680 succeed = TRUE;
11682 else
11683 fail = TRUE;
11684 break;
11686 case CALL_INSN:
11687 /* The AAPCS says that conditional calls should not be
11688 used since they make interworking inefficient (the
11689 linker can't transform BL<cond> into BLX). That's
11690 only a problem if the machine has BLX. */
11691 if (arm_arch5)
11693 fail = TRUE;
11694 break;
11697 /* Succeed if the following insn is the target label, or
11698 if the following two insns are a barrier and the
11699 target label. */
11700 this_insn = next_nonnote_insn (this_insn);
11701 if (this_insn && GET_CODE (this_insn) == BARRIER)
11702 this_insn = next_nonnote_insn (this_insn);
11704 if (this_insn && this_insn == label
11705 && insns_skipped < max_insns_skipped)
11707 if (jump_clobbers)
11709 arm_ccfsm_state = 2;
11710 this_insn = next_nonnote_insn (this_insn);
11712 else
11713 arm_ccfsm_state = 1;
11714 succeed = TRUE;
11716 else
11717 fail = TRUE;
11718 break;
11720 case JUMP_INSN:
11721 /* If this is an unconditional branch to the same label, succeed.
11722 If it is to another label, do nothing. If it is conditional,
11723 fail. */
11724 /* XXX Probably, the tests for SET and the PC are
11725 unnecessary. */
11727 scanbody = PATTERN (this_insn);
11728 if (GET_CODE (scanbody) == SET
11729 && GET_CODE (SET_DEST (scanbody)) == PC)
11731 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11732 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11734 arm_ccfsm_state = 2;
11735 succeed = TRUE;
11737 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11738 fail = TRUE;
11740 /* Fail if a conditional return is undesirable (e.g. on a
11741 StrongARM), but still allow this if optimizing for size. */
11742 else if (GET_CODE (scanbody) == RETURN
11743 && !use_return_insn (TRUE, NULL)
11744 && !optimize_size)
11745 fail = TRUE;
11746 else if (GET_CODE (scanbody) == RETURN
11747 && seeking_return)
11749 arm_ccfsm_state = 2;
11750 succeed = TRUE;
11752 else if (GET_CODE (scanbody) == PARALLEL)
11754 switch (get_attr_conds (this_insn))
11756 case CONDS_NOCOND:
11757 break;
11758 default:
11759 fail = TRUE;
11760 break;
11763 else
11764 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11766 break;
11768 case INSN:
11769 /* Instructions using or affecting the condition codes make it
11770 fail. */
11771 scanbody = PATTERN (this_insn);
11772 if (!(GET_CODE (scanbody) == SET
11773 || GET_CODE (scanbody) == PARALLEL)
11774 || get_attr_conds (this_insn) != CONDS_NOCOND)
11775 fail = TRUE;
11777 /* A conditional Cirrus instruction must be followed by
11778 a non-Cirrus instruction. However, since we
11779 conditionalize instructions in this function, and since
11780 by the time we get here we can no longer add instructions
11781 (nops), because shorten_branches() has already been
11782 called, we simply disable conditionalizing Cirrus
11783 instructions to be safe. */
11784 if (GET_CODE (scanbody) != USE
11785 && GET_CODE (scanbody) != CLOBBER
11786 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11787 fail = TRUE;
11788 break;
11790 default:
11791 break;
11794 if (succeed)
11796 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11797 arm_target_label = CODE_LABEL_NUMBER (label);
11798 else
11800 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11802 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11804 this_insn = next_nonnote_insn (this_insn);
11805 gcc_assert (!this_insn
11806 || (GET_CODE (this_insn) != BARRIER
11807 && GET_CODE (this_insn) != CODE_LABEL));
11809 if (!this_insn)
11811 /* Oh dear! We ran off the end... give up. */
11812 recog (PATTERN (insn), insn, NULL);
11813 arm_ccfsm_state = 0;
11814 arm_target_insn = NULL;
11815 return;
11817 arm_target_insn = this_insn;
11819 if (jump_clobbers)
11821 gcc_assert (!reverse);
11822 arm_current_cc =
11823 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11824 0), 0), 1));
11825 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11826 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11827 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11828 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11830 else
11832 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11833 what it was. */
11834 if (!reverse)
11835 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11836 0));
11839 if (reverse || then_not_else)
11840 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11843 /* Restore recog_data (getting the attributes of other insns can
11844 destroy this array, but final.c assumes that it remains intact
11845 across this call; since the insn has been recognized already we
11846 call recog directly). */
11847 recog (PATTERN (insn), insn, NULL);
11851 /* Returns true if REGNO is a valid register
11852 for holding a quantity of type MODE. */
11853 int
11854 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11856 if (GET_MODE_CLASS (mode) == MODE_CC)
11857 return (regno == CC_REGNUM
11858 || (TARGET_HARD_FLOAT && TARGET_VFP
11859 && regno == VFPCC_REGNUM));
11861 if (TARGET_THUMB)
11862 /* For the Thumb we only allow values bigger than SImode in
11863 registers 0 - 6, so that there is always a second low
11864 register available to hold the upper part of the value.
11865 We probably ought to ensure that the register is the
11866 start of an even numbered register pair. */
11867 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11869 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
11870 && IS_CIRRUS_REGNUM (regno))
11871 /* We have outlawed SI values in Cirrus registers because they
11872 reside in the lower 32 bits, but SF values reside in the
11873 upper 32 bits. This causes gcc all sorts of grief. We can't
11874 even split the registers into pairs because Cirrus SI values
11875 get sign extended to 64bits-- aldyh. */
11876 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11878 if (TARGET_HARD_FLOAT && TARGET_VFP
11879 && IS_VFP_REGNUM (regno))
11881 if (mode == SFmode || mode == SImode)
11882 return TRUE;
11884 /* DFmode values are only valid in even register pairs. */
11885 if (mode == DFmode)
11886 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11887 return FALSE;
11890 if (TARGET_REALLY_IWMMXT)
11892 if (IS_IWMMXT_GR_REGNUM (regno))
11893 return mode == SImode;
11895 if (IS_IWMMXT_REGNUM (regno))
11896 return VALID_IWMMXT_REG_MODE (mode);
11899 /* We allow any value to be stored in the general registers.
11900 Restrict doubleword quantities to even register pairs so that we can
11901 use ldrd. */
11902 if (regno <= LAST_ARM_REGNUM)
11903 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11905 if (regno == FRAME_POINTER_REGNUM
11906 || regno == ARG_POINTER_REGNUM)
11907 /* We only allow integers in the fake hard registers. */
11908 return GET_MODE_CLASS (mode) == MODE_INT;
11910 /* The only registers left are the FPA registers
11911 which we only allow to hold FP values. */
11912 return (TARGET_HARD_FLOAT && TARGET_FPA
11913 && GET_MODE_CLASS (mode) == MODE_FLOAT
11914 && regno >= FIRST_FPA_REGNUM
11915 && regno <= LAST_FPA_REGNUM);
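/* A couple of concrete consequences of the rules above: DImode is fine
   in {r0, r1}, but on targets where TARGET_LDRD is set it is rejected
   in {r1, r2} because the base register is odd; likewise DFmode is
   accepted at an even VFP register offset (d0 = s0/s1) and rejected at
   an odd one.  */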
11918 int
11919 arm_regno_class (int regno)
11921 if (TARGET_THUMB)
11923 if (regno == STACK_POINTER_REGNUM)
11924 return STACK_REG;
11925 if (regno == CC_REGNUM)
11926 return CC_REG;
11927 if (regno < 8)
11928 return LO_REGS;
11929 return HI_REGS;
11932 if ( regno <= LAST_ARM_REGNUM
11933 || regno == FRAME_POINTER_REGNUM
11934 || regno == ARG_POINTER_REGNUM)
11935 return GENERAL_REGS;
11937 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11938 return NO_REGS;
11940 if (IS_CIRRUS_REGNUM (regno))
11941 return CIRRUS_REGS;
11943 if (IS_VFP_REGNUM (regno))
11944 return VFP_REGS;
11946 if (IS_IWMMXT_REGNUM (regno))
11947 return IWMMXT_REGS;
11949 if (IS_IWMMXT_GR_REGNUM (regno))
11950 return IWMMXT_GR_REGS;
11952 return FPA_REGS;
11955 /* Handle a special case when computing the offset
11956 of an argument from the frame pointer. */
11957 int
11958 arm_debugger_arg_offset (int value, rtx addr)
11960 rtx insn;
11962 /* We are only interested if dbxout_parms() failed to compute the offset. */
11963 if (value != 0)
11964 return 0;
11966 /* We can only cope with the case where the address is held in a register. */
11967 if (GET_CODE (addr) != REG)
11968 return 0;
11970 /* If we are using the frame pointer to point at the argument, then
11971 an offset of 0 is correct. */
11972 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11973 return 0;
11975 /* If we are using the stack pointer to point at the
11976 argument, then an offset of 0 is correct. */
11977 if ((TARGET_THUMB || !frame_pointer_needed)
11978 && REGNO (addr) == SP_REGNUM)
11979 return 0;
11981 /* Oh dear. The argument is pointed to by a register rather
11982 than being held in a register, or being stored at a known
11983 offset from the frame pointer. Since GDB only understands
11984 those two kinds of argument we must translate the address
11985 held in the register into an offset from the frame pointer.
11986 We do this by searching through the insns for the function
11987 looking to see where this register gets its value. If the
11988 register is initialized from the frame pointer plus an offset
11989 then we are in luck and we can continue, otherwise we give up.
11991 This code is exercised by producing debugging information
11992 for a function with arguments like this:
11994 double func (double a, double b, int c, double d) {return d;}
11996 Without this code the stab for parameter 'd' will be set to
11997 an offset of 0 from the frame pointer, rather than 8. */
11999 /* The if() statement says:
12001 If the insn is a normal instruction
12002 and if the insn is setting the value in a register
12003 and if the register being set is the register holding the address of the argument
12004 and if the address is computed by an addition
12005 that involves adding to a register
12006 which is the frame pointer
12007 a constant integer
12009 then... */
12011 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12013 if ( GET_CODE (insn) == INSN
12014 && GET_CODE (PATTERN (insn)) == SET
12015 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
12016 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
12017 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
12018 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
12019 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
12022 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
12024 break;
12028 if (value == 0)
12030 debug_rtx (addr);
12031 warning (0, "unable to compute real location of stacked parameter");
12032 value = 8; /* XXX magic hack */
12035 return value;
12038 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
12039 do \
12041 if ((MASK) & insn_flags) \
12042 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
12043 BUILT_IN_MD, NULL, NULL_TREE); \
12045 while (0)
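/* For example (a sketch of how the macro expands):

     def_mbuiltin (FL_IWMMXT, "__builtin_arm_waddb",
                   v8qi_ftype_v8qi_v8qi, ARM_BUILTIN_WADDB);

   registers the builtin with the front end only when the target's
   insn_flags include FL_IWMMXT; otherwise it is a no-op.  */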
12047 struct builtin_description
12049 const unsigned int mask;
12050 const enum insn_code icode;
12051 const char * const name;
12052 const enum arm_builtins code;
12053 const enum rtx_code comparison;
12054 const unsigned int flag;
12057 static const struct builtin_description bdesc_2arg[] =
12059 #define IWMMXT_BUILTIN(code, string, builtin) \
12060 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
12061 ARM_BUILTIN_##builtin, 0, 0 },
12063 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
12064 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
12065 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
12066 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
12067 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
12068 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
12069 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
12070 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
12071 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
12072 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
12073 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
12074 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
12075 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
12076 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
12077 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
12078 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
12079 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
12080 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
12081 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
12082 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
12083 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
12084 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
12085 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
12086 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
12087 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
12088 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
12089 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
12090 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
12091 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
12092 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
12093 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
12094 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
12095 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
12096 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
12097 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
12098 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
12099 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
12100 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
12101 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
12102 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
12103 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
12104 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
12105 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
12106 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
12107 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
12108 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
12109 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
12110 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
12111 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
12112 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
12113 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
12114 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
12115 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
12116 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
12117 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
12118 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
12119 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
12120 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
12122 #define IWMMXT_BUILTIN2(code, builtin) \
12123 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
12125 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
12126 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
12127 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
12128 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
12129 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
12130 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
12131 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
12132 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
12133 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
12134 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
12135 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
12136 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
12137 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
12138 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
12139 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
12140 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
12141 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
12142 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
12143 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
12144 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
12145 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
12146 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
12147 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
12148 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
12149 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
12150 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
12151 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
12152 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
12153 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
12154 IWMMXT_BUILTIN2 (rordi3, WRORDI)
12155 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
12156 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
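/* The IWMMXT_BUILTIN2 entries above have a NULL name, so the generic
   registration loop in arm_init_iwmmxt_builtins skips them; the shift,
   rotate and multiply-accumulate builtins they describe are registered
   by hand further down.  The entries remain in this table so that
   arm_expand_builtin can still map their function codes to insn codes.  */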
12159 static const struct builtin_description bdesc_1arg[] =
12161 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
12162 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
12163 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
12164 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
12165 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
12166 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
12167 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
12168 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
12169 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
12170 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
12171 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
12172 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
12173 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
12174 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
12175 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
12176 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
12177 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
12178 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
12181 /* Set up all the iWMMXt builtins. This is
12182 not called if TARGET_IWMMXT is zero. */
12184 static void
12185 arm_init_iwmmxt_builtins (void)
12187 const struct builtin_description * d;
12188 size_t i;
12189 tree endlink = void_list_node;
12191 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
12192 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
12193 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
12195 tree int_ftype_int
12196 = build_function_type (integer_type_node,
12197 tree_cons (NULL_TREE, integer_type_node, endlink));
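/* Each signature here is built the same way: the first argument of
   build_function_type is the return type and the second is a TREE_LIST
   of argument types chained with tree_cons and terminated by
   void_list_node (endlink), which marks the signature as non-varargs.  */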
12198 tree v8qi_ftype_v8qi_v8qi_int
12199 = build_function_type (V8QI_type_node,
12200 tree_cons (NULL_TREE, V8QI_type_node,
12201 tree_cons (NULL_TREE, V8QI_type_node,
12202 tree_cons (NULL_TREE,
12203 integer_type_node,
12204 endlink))));
12205 tree v4hi_ftype_v4hi_int
12206 = build_function_type (V4HI_type_node,
12207 tree_cons (NULL_TREE, V4HI_type_node,
12208 tree_cons (NULL_TREE, integer_type_node,
12209 endlink)));
12210 tree v2si_ftype_v2si_int
12211 = build_function_type (V2SI_type_node,
12212 tree_cons (NULL_TREE, V2SI_type_node,
12213 tree_cons (NULL_TREE, integer_type_node,
12214 endlink)));
12215 tree v2si_ftype_di_di
12216 = build_function_type (V2SI_type_node,
12217 tree_cons (NULL_TREE, long_long_integer_type_node,
12218 tree_cons (NULL_TREE, long_long_integer_type_node,
12219 endlink)));
12220 tree di_ftype_di_int
12221 = build_function_type (long_long_integer_type_node,
12222 tree_cons (NULL_TREE, long_long_integer_type_node,
12223 tree_cons (NULL_TREE, integer_type_node,
12224 endlink)));
12225 tree di_ftype_di_int_int
12226 = build_function_type (long_long_integer_type_node,
12227 tree_cons (NULL_TREE, long_long_integer_type_node,
12228 tree_cons (NULL_TREE, integer_type_node,
12229 tree_cons (NULL_TREE,
12230 integer_type_node,
12231 endlink))));
12232 tree int_ftype_v8qi
12233 = build_function_type (integer_type_node,
12234 tree_cons (NULL_TREE, V8QI_type_node,
12235 endlink));
12236 tree int_ftype_v4hi
12237 = build_function_type (integer_type_node,
12238 tree_cons (NULL_TREE, V4HI_type_node,
12239 endlink));
12240 tree int_ftype_v2si
12241 = build_function_type (integer_type_node,
12242 tree_cons (NULL_TREE, V2SI_type_node,
12243 endlink));
12244 tree int_ftype_v8qi_int
12245 = build_function_type (integer_type_node,
12246 tree_cons (NULL_TREE, V8QI_type_node,
12247 tree_cons (NULL_TREE, integer_type_node,
12248 endlink)));
12249 tree int_ftype_v4hi_int
12250 = build_function_type (integer_type_node,
12251 tree_cons (NULL_TREE, V4HI_type_node,
12252 tree_cons (NULL_TREE, integer_type_node,
12253 endlink)));
12254 tree int_ftype_v2si_int
12255 = build_function_type (integer_type_node,
12256 tree_cons (NULL_TREE, V2SI_type_node,
12257 tree_cons (NULL_TREE, integer_type_node,
12258 endlink)));
12259 tree v8qi_ftype_v8qi_int_int
12260 = build_function_type (V8QI_type_node,
12261 tree_cons (NULL_TREE, V8QI_type_node,
12262 tree_cons (NULL_TREE, integer_type_node,
12263 tree_cons (NULL_TREE,
12264 integer_type_node,
12265 endlink))));
12266 tree v4hi_ftype_v4hi_int_int
12267 = build_function_type (V4HI_type_node,
12268 tree_cons (NULL_TREE, V4HI_type_node,
12269 tree_cons (NULL_TREE, integer_type_node,
12270 tree_cons (NULL_TREE,
12271 integer_type_node,
12272 endlink))));
12273 tree v2si_ftype_v2si_int_int
12274 = build_function_type (V2SI_type_node,
12275 tree_cons (NULL_TREE, V2SI_type_node,
12276 tree_cons (NULL_TREE, integer_type_node,
12277 tree_cons (NULL_TREE,
12278 integer_type_node,
12279 endlink))));
12280 /* Miscellaneous. */
12281 tree v8qi_ftype_v4hi_v4hi
12282 = build_function_type (V8QI_type_node,
12283 tree_cons (NULL_TREE, V4HI_type_node,
12284 tree_cons (NULL_TREE, V4HI_type_node,
12285 endlink)));
12286 tree v4hi_ftype_v2si_v2si
12287 = build_function_type (V4HI_type_node,
12288 tree_cons (NULL_TREE, V2SI_type_node,
12289 tree_cons (NULL_TREE, V2SI_type_node,
12290 endlink)));
12291 tree v2si_ftype_v4hi_v4hi
12292 = build_function_type (V2SI_type_node,
12293 tree_cons (NULL_TREE, V4HI_type_node,
12294 tree_cons (NULL_TREE, V4HI_type_node,
12295 endlink)));
12296 tree v2si_ftype_v8qi_v8qi
12297 = build_function_type (V2SI_type_node,
12298 tree_cons (NULL_TREE, V8QI_type_node,
12299 tree_cons (NULL_TREE, V8QI_type_node,
12300 endlink)));
12301 tree v4hi_ftype_v4hi_di
12302 = build_function_type (V4HI_type_node,
12303 tree_cons (NULL_TREE, V4HI_type_node,
12304 tree_cons (NULL_TREE,
12305 long_long_integer_type_node,
12306 endlink)));
12307 tree v2si_ftype_v2si_di
12308 = build_function_type (V2SI_type_node,
12309 tree_cons (NULL_TREE, V2SI_type_node,
12310 tree_cons (NULL_TREE,
12311 long_long_integer_type_node,
12312 endlink)));
12313 tree void_ftype_int_int
12314 = build_function_type (void_type_node,
12315 tree_cons (NULL_TREE, integer_type_node,
12316 tree_cons (NULL_TREE, integer_type_node,
12317 endlink)));
12318 tree di_ftype_void
12319 = build_function_type (long_long_unsigned_type_node, endlink);
12320 tree di_ftype_v8qi
12321 = build_function_type (long_long_integer_type_node,
12322 tree_cons (NULL_TREE, V8QI_type_node,
12323 endlink));
12324 tree di_ftype_v4hi
12325 = build_function_type (long_long_integer_type_node,
12326 tree_cons (NULL_TREE, V4HI_type_node,
12327 endlink));
12328 tree di_ftype_v2si
12329 = build_function_type (long_long_integer_type_node,
12330 tree_cons (NULL_TREE, V2SI_type_node,
12331 endlink));
12332 tree v2si_ftype_v4hi
12333 = build_function_type (V2SI_type_node,
12334 tree_cons (NULL_TREE, V4HI_type_node,
12335 endlink));
12336 tree v4hi_ftype_v8qi
12337 = build_function_type (V4HI_type_node,
12338 tree_cons (NULL_TREE, V8QI_type_node,
12339 endlink));
12341 tree di_ftype_di_v4hi_v4hi
12342 = build_function_type (long_long_unsigned_type_node,
12343 tree_cons (NULL_TREE,
12344 long_long_unsigned_type_node,
12345 tree_cons (NULL_TREE, V4HI_type_node,
12346 tree_cons (NULL_TREE,
12347 V4HI_type_node,
12348 endlink))));
12350 tree di_ftype_v4hi_v4hi
12351 = build_function_type (long_long_unsigned_type_node,
12352 tree_cons (NULL_TREE, V4HI_type_node,
12353 tree_cons (NULL_TREE, V4HI_type_node,
12354 endlink)));
12356 /* Normal vector binops. */
12357 tree v8qi_ftype_v8qi_v8qi
12358 = build_function_type (V8QI_type_node,
12359 tree_cons (NULL_TREE, V8QI_type_node,
12360 tree_cons (NULL_TREE, V8QI_type_node,
12361 endlink)));
12362 tree v4hi_ftype_v4hi_v4hi
12363 = build_function_type (V4HI_type_node,
12364 tree_cons (NULL_TREE, V4HI_type_node,
12365 tree_cons (NULL_TREE, V4HI_type_node,
12366 endlink)));
12367 tree v2si_ftype_v2si_v2si
12368 = build_function_type (V2SI_type_node,
12369 tree_cons (NULL_TREE, V2SI_type_node,
12370 tree_cons (NULL_TREE, V2SI_type_node,
12371 endlink)));
12372 tree di_ftype_di_di
12373 = build_function_type (long_long_unsigned_type_node,
12374 tree_cons (NULL_TREE, long_long_unsigned_type_node,
12375 tree_cons (NULL_TREE,
12376 long_long_unsigned_type_node,
12377 endlink)));
12379 /* Add all builtins that are more or less simple operations on two
12380 operands. */
12381 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12383 /* Use one of the operands; the target can have a different mode for
12384 mask-generating compares. */
12385 enum machine_mode mode;
12386 tree type;
12388 if (d->name == 0)
12389 continue;
12391 mode = insn_data[d->icode].operand[1].mode;
12393 switch (mode)
12395 case V8QImode:
12396 type = v8qi_ftype_v8qi_v8qi;
12397 break;
12398 case V4HImode:
12399 type = v4hi_ftype_v4hi_v4hi;
12400 break;
12401 case V2SImode:
12402 type = v2si_ftype_v2si_v2si;
12403 break;
12404 case DImode:
12405 type = di_ftype_di_di;
12406 break;
12408 default:
12409 gcc_unreachable ();
12412 def_mbuiltin (d->mask, d->name, type, d->code);
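/* For instance, CODE_FOR_addv8qi3 has V8QImode operands, so the
   "__builtin_arm_waddb" entry above is registered with type
   v8qi_ftype_v8qi_v8qi.  */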
12415 /* Add the remaining iWMMXt insns with somewhat more complicated types. */
12416 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12417 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12418 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12420 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12421 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12422 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12423 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12424 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12425 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12427 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12428 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12429 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12430 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12431 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12432 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12434 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12435 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12436 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12437 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12438 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12439 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12441 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12442 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12443 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12444 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12445 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12446 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12448 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12450 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12451 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12452 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12453 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12455 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12456 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12457 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12458 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12459 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12460 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12461 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12462 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12463 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12465 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12466 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12467 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12469 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12470 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12471 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12473 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12474 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12475 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12476 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12477 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12478 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12480 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12481 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12482 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12483 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12484 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12485 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12486 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12487 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12488 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12489 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12490 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12491 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12493 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12494 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12495 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12496 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12498 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12499 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12500 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12501 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12502 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12503 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12504 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
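#if 0
/* Illustrative only -- a minimal user-level sketch of the builtins
   registered above; not part of the compiler.  The vector typedef
   mirrors the V4HI type built above.  */
typedef short example_v4hi __attribute__ ((vector_size (8)));

static example_v4hi
example_waddh (example_v4hi a, example_v4hi b)
{
  /* Expands via CODE_FOR_addv4hi3 to a single waddh instruction.  */
  return __builtin_arm_waddh (a, b);
}
#endif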
12507 static void
12508 arm_init_tls_builtins (void)
12510 tree ftype;
12511 tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
12512 tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
12514 ftype = build_function_type (ptr_type_node, void_list_node);
12515 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
12516 ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
12517 NULL, const_nothrow);
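#if 0
/* Illustrative only: a user-level sketch of the TLS builtin registered
   above.  It is marked const and nothrow, so repeated calls can be
   combined.  */
static void *
example_thread_pointer (void)
{
  return __builtin_thread_pointer ();
}
#endif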
12520 static void
12521 arm_init_builtins (void)
12523 arm_init_tls_builtins ();
12525 if (TARGET_REALLY_IWMMXT)
12526 arm_init_iwmmxt_builtins ();
12529 /* Errors in the source file can cause expand_expr to return const0_rtx
12530 where we expect a vector. To avoid crashing, use one of the vector
12531 clear instructions. */
12533 static rtx
12534 safe_vector_operand (rtx x, enum machine_mode mode)
12536 if (x != const0_rtx)
12537 return x;
12538 x = gen_reg_rtx (mode);
12540 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12541 : gen_rtx_SUBREG (DImode, x, 0)));
12542 return x;
12545 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12547 static rtx
12548 arm_expand_binop_builtin (enum insn_code icode,
12549 tree arglist, rtx target)
12551 rtx pat;
12552 tree arg0 = TREE_VALUE (arglist);
12553 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12554 rtx op0 = expand_normal (arg0);
12555 rtx op1 = expand_normal (arg1);
12556 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12557 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12558 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12560 if (VECTOR_MODE_P (mode0))
12561 op0 = safe_vector_operand (op0, mode0);
12562 if (VECTOR_MODE_P (mode1))
12563 op1 = safe_vector_operand (op1, mode1);
12565 if (! target
12566 || GET_MODE (target) != tmode
12567 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12568 target = gen_reg_rtx (tmode);
12570 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12572 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12573 op0 = copy_to_mode_reg (mode0, op0);
12574 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12575 op1 = copy_to_mode_reg (mode1, op1);
12577 pat = GEN_FCN (icode) (target, op0, op1);
12578 if (! pat)
12579 return 0;
12580 emit_insn (pat);
12581 return target;
12584 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12586 static rtx
12587 arm_expand_unop_builtin (enum insn_code icode,
12588 tree arglist, rtx target, int do_load)
12590 rtx pat;
12591 tree arg0 = TREE_VALUE (arglist);
12592 rtx op0 = expand_normal (arg0);
12593 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12594 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12596 if (! target
12597 || GET_MODE (target) != tmode
12598 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12599 target = gen_reg_rtx (tmode);
12600 if (do_load)
12601 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12602 else
12604 if (VECTOR_MODE_P (mode0))
12605 op0 = safe_vector_operand (op0, mode0);
12607 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12608 op0 = copy_to_mode_reg (mode0, op0);
12611 pat = GEN_FCN (icode) (target, op0);
12612 if (! pat)
12613 return 0;
12614 emit_insn (pat);
12615 return target;
12618 /* Expand an expression EXP that calls a built-in function,
12619 with result going to TARGET if that's convenient
12620 (and in mode MODE if that's convenient).
12621 SUBTARGET may be used as the target for computing one of EXP's operands.
12622 IGNORE is nonzero if the value is to be ignored. */
12624 static rtx
12625 arm_expand_builtin (tree exp,
12626 rtx target,
12627 rtx subtarget ATTRIBUTE_UNUSED,
12628 enum machine_mode mode ATTRIBUTE_UNUSED,
12629 int ignore ATTRIBUTE_UNUSED)
12631 const struct builtin_description * d;
12632 enum insn_code icode;
12633 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12634 tree arglist = TREE_OPERAND (exp, 1);
12635 tree arg0;
12636 tree arg1;
12637 tree arg2;
12638 rtx op0;
12639 rtx op1;
12640 rtx op2;
12641 rtx pat;
12642 int fcode = DECL_FUNCTION_CODE (fndecl);
12643 size_t i;
12644 enum machine_mode tmode;
12645 enum machine_mode mode0;
12646 enum machine_mode mode1;
12647 enum machine_mode mode2;
12649 switch (fcode)
12651 case ARM_BUILTIN_TEXTRMSB:
12652 case ARM_BUILTIN_TEXTRMUB:
12653 case ARM_BUILTIN_TEXTRMSH:
12654 case ARM_BUILTIN_TEXTRMUH:
12655 case ARM_BUILTIN_TEXTRMSW:
12656 case ARM_BUILTIN_TEXTRMUW:
12657 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12658 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12659 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12660 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12661 : CODE_FOR_iwmmxt_textrmw);
12663 arg0 = TREE_VALUE (arglist);
12664 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12665 op0 = expand_normal (arg0);
12666 op1 = expand_normal (arg1);
12667 tmode = insn_data[icode].operand[0].mode;
12668 mode0 = insn_data[icode].operand[1].mode;
12669 mode1 = insn_data[icode].operand[2].mode;
12671 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12672 op0 = copy_to_mode_reg (mode0, op0);
12673 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12675 /* @@@ better error message */
12676 error ("selector must be an immediate");
12677 return gen_reg_rtx (tmode);
12679 if (target == 0
12680 || GET_MODE (target) != tmode
12681 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12682 target = gen_reg_rtx (tmode);
12683 pat = GEN_FCN (icode) (target, op0, op1);
12684 if (! pat)
12685 return 0;
12686 emit_insn (pat);
12687 return target;
12689 case ARM_BUILTIN_TINSRB:
12690 case ARM_BUILTIN_TINSRH:
12691 case ARM_BUILTIN_TINSRW:
12692 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12693 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12694 : CODE_FOR_iwmmxt_tinsrw);
12695 arg0 = TREE_VALUE (arglist);
12696 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12697 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12698 op0 = expand_normal (arg0);
12699 op1 = expand_normal (arg1);
12700 op2 = expand_normal (arg2);
12701 tmode = insn_data[icode].operand[0].mode;
12702 mode0 = insn_data[icode].operand[1].mode;
12703 mode1 = insn_data[icode].operand[2].mode;
12704 mode2 = insn_data[icode].operand[3].mode;
12706 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12707 op0 = copy_to_mode_reg (mode0, op0);
12708 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12709 op1 = copy_to_mode_reg (mode1, op1);
12710 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12712 /* @@@ better error message */
12713 error ("selector must be an immediate");
12714 return const0_rtx;
12716 if (target == 0
12717 || GET_MODE (target) != tmode
12718 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12719 target = gen_reg_rtx (tmode);
12720 pat = GEN_FCN (icode) (target, op0, op1, op2);
12721 if (! pat)
12722 return 0;
12723 emit_insn (pat);
12724 return target;
12726 case ARM_BUILTIN_SETWCX:
12727 arg0 = TREE_VALUE (arglist);
12728 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12729 op0 = force_reg (SImode, expand_normal (arg0));
12730 op1 = expand_normal (arg1);
12731 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12732 return 0;
12734 case ARM_BUILTIN_GETWCX:
12735 arg0 = TREE_VALUE (arglist);
12736 op0 = expand_normal (arg0);
12737 target = gen_reg_rtx (SImode);
12738 emit_insn (gen_iwmmxt_tmrc (target, op0));
12739 return target;
12741 case ARM_BUILTIN_WSHUFH:
12742 icode = CODE_FOR_iwmmxt_wshufh;
12743 arg0 = TREE_VALUE (arglist);
12744 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12745 op0 = expand_normal (arg0);
12746 op1 = expand_normal (arg1);
12747 tmode = insn_data[icode].operand[0].mode;
12748 mode1 = insn_data[icode].operand[1].mode;
12749 mode2 = insn_data[icode].operand[2].mode;
12751 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12752 op0 = copy_to_mode_reg (mode1, op0);
12753 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12755 /* @@@ better error message */
12756 error ("mask must be an immediate");
12757 return const0_rtx;
12759 if (target == 0
12760 || GET_MODE (target) != tmode
12761 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12762 target = gen_reg_rtx (tmode);
12763 pat = GEN_FCN (icode) (target, op0, op1);
12764 if (! pat)
12765 return 0;
12766 emit_insn (pat);
12767 return target;
12769 case ARM_BUILTIN_WSADB:
12770 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12771 case ARM_BUILTIN_WSADH:
12772 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12773 case ARM_BUILTIN_WSADBZ:
12774 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12775 case ARM_BUILTIN_WSADHZ:
12776 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12778 /* Several three-argument builtins. */
12779 case ARM_BUILTIN_WMACS:
12780 case ARM_BUILTIN_WMACU:
12781 case ARM_BUILTIN_WALIGN:
12782 case ARM_BUILTIN_TMIA:
12783 case ARM_BUILTIN_TMIAPH:
12784 case ARM_BUILTIN_TMIATT:
12785 case ARM_BUILTIN_TMIATB:
12786 case ARM_BUILTIN_TMIABT:
12787 case ARM_BUILTIN_TMIABB:
12788 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12789 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12790 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12791 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12792 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12793 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12794 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12795 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12796 : CODE_FOR_iwmmxt_walign);
12797 arg0 = TREE_VALUE (arglist);
12798 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12799 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12800 op0 = expand_normal (arg0);
12801 op1 = expand_normal (arg1);
12802 op2 = expand_normal (arg2);
12803 tmode = insn_data[icode].operand[0].mode;
12804 mode0 = insn_data[icode].operand[1].mode;
12805 mode1 = insn_data[icode].operand[2].mode;
12806 mode2 = insn_data[icode].operand[3].mode;
12808 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12809 op0 = copy_to_mode_reg (mode0, op0);
12810 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12811 op1 = copy_to_mode_reg (mode1, op1);
12812 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12813 op2 = copy_to_mode_reg (mode2, op2);
12814 if (target == 0
12815 || GET_MODE (target) != tmode
12816 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12817 target = gen_reg_rtx (tmode);
12818 pat = GEN_FCN (icode) (target, op0, op1, op2);
12819 if (! pat)
12820 return 0;
12821 emit_insn (pat);
12822 return target;
12824 case ARM_BUILTIN_WZERO:
12825 target = gen_reg_rtx (DImode);
12826 emit_insn (gen_iwmmxt_clrdi (target));
12827 return target;
12829 case ARM_BUILTIN_THREAD_POINTER:
12830 return arm_load_tp (target);
12832 default:
12833 break;
12836 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12837 if (d->code == (const enum arm_builtins) fcode)
12838 return arm_expand_binop_builtin (d->icode, arglist, target);
12840 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12841 if (d->code == (const enum arm_builtins) fcode)
12842 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12844 /* @@@ Should really do something sensible here. */
12845 return NULL_RTX;
12848 /* Return the number (counting from 0) of
12849 the least significant set bit in MASK. */
12851 inline static int
12852 number_of_first_bit_set (unsigned mask)
12854 int bit;
12856 for (bit = 0;
12857 (mask & (1 << bit)) == 0;
12858 ++bit)
12859 continue;
12861 return bit;
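/* E.g. number_of_first_bit_set (0x18) == 3.  MASK must be nonzero,
   otherwise the loop above never terminates.  */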
12864 /* Emit code to push or pop registers to or from the stack. F is the
12865 assembly file. MASK is the registers to push or pop. PUSH is
12866 nonzero if we should push, and zero if we should pop. For debugging
12867 output, if pushing, adjust CFA_OFFSET by the amount of space added
12868 to the stack. REAL_REGS should have the same number of bits set as
12869 MASK, and will be used instead (in the same order) to describe which
12870 registers were saved - this is used to mark the save slots when we
12871 push high registers after moving them to low registers. */
12872 static void
12873 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12874 unsigned long real_regs)
12876 int regno;
12877 int lo_mask = mask & 0xFF;
12878 int pushed_words = 0;
12880 gcc_assert (mask);
12882 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12884 /* Special case. Do not generate a POP PC statement here, do it in
12885 thumb_exit(). */
12886 thumb_exit (f, -1);
12887 return;
12890 if (ARM_EABI_UNWIND_TABLES && push)
12892 fprintf (f, "\t.save\t{");
12893 for (regno = 0; regno < 15; regno++)
12895 if (real_regs & (1 << regno))
12897 if (real_regs & ((1 << regno) -1))
12898 fprintf (f, ", ");
12899 asm_fprintf (f, "%r", regno);
12902 fprintf (f, "}\n");
12905 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12907 /* Look at the low registers first. */
12908 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12910 if (lo_mask & 1)
12912 asm_fprintf (f, "%r", regno);
12914 if ((lo_mask & ~1) != 0)
12915 fprintf (f, ", ");
12917 pushed_words++;
12921 if (push && (mask & (1 << LR_REGNUM)))
12923 /* Catch pushing the LR. */
12924 if (mask & 0xFF)
12925 fprintf (f, ", ");
12927 asm_fprintf (f, "%r", LR_REGNUM);
12929 pushed_words++;
12931 else if (!push && (mask & (1 << PC_REGNUM)))
12933 /* Catch popping the PC. */
12934 if (TARGET_INTERWORK || TARGET_BACKTRACE
12935 || current_function_calls_eh_return)
12937 /* The PC is never popped directly; instead
12938 it is popped into r3 and then BX is used. */
12939 fprintf (f, "}\n");
12941 thumb_exit (f, -1);
12943 return;
12945 else
12947 if (mask & 0xFF)
12948 fprintf (f, ", ");
12950 asm_fprintf (f, "%r", PC_REGNUM);
12954 fprintf (f, "}\n");
12956 if (push && pushed_words && dwarf2out_do_frame ())
12958 char *l = dwarf2out_cfi_label ();
12959 int pushed_mask = real_regs;
12961 *cfa_offset += pushed_words * 4;
12962 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12964 pushed_words = 0;
12965 pushed_mask = real_regs;
12966 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12968 if (pushed_mask & 1)
12969 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
12974 /* Generate code to return from a thumb function.
12975 If 'reg_containing_return_addr' is -1, then the return address is
12976 actually on the stack, at the stack pointer. */
12977 static void
12978 thumb_exit (FILE *f, int reg_containing_return_addr)
12980 unsigned regs_available_for_popping;
12981 unsigned regs_to_pop;
12982 int pops_needed;
12983 unsigned available;
12984 unsigned required;
12985 int mode;
12986 int size;
12987 int restore_a4 = FALSE;
12989 /* Compute the registers we need to pop. */
12990 regs_to_pop = 0;
12991 pops_needed = 0;
12993 if (reg_containing_return_addr == -1)
12995 regs_to_pop |= 1 << LR_REGNUM;
12996 ++pops_needed;
12999 if (TARGET_BACKTRACE)
13001 /* Restore the (ARM) frame pointer and stack pointer. */
13002 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
13003 pops_needed += 2;
13006 /* If there is nothing to pop then just emit the BX instruction and
13007 return. */
13008 if (pops_needed == 0)
13010 if (current_function_calls_eh_return)
13011 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13013 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13014 return;
13016 /* Otherwise, if we are not supporting interworking, have not created a
13017 backtrace structure, were not entered in ARM mode, and are not returning
13018 via __builtin_eh_return, just pop the return address straight into the PC. */
13019 else if (!TARGET_INTERWORK
13020 && !TARGET_BACKTRACE
13021 && !is_called_in_ARM_mode (current_function_decl)
13022 && !current_function_calls_eh_return)
13024 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
13025 return;
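/* Rough examples: the two early returns above emit either

       bx	lr

   (return address still in LR, nothing to pop) or a single

       pop	{pc}

   The code below handles the harder cases; e.g. an interworking return
   whose return address is on the stack typically ends up as

       pop	{r0}
       bx	r0

   (illustrative sequences only).  */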
13028 /* Find out how many of the (return) argument registers we can corrupt. */
13029 regs_available_for_popping = 0;
13031 /* If returning via __builtin_eh_return, the bottom three registers
13032 all contain information needed for the return. */
13033 if (current_function_calls_eh_return)
13034 size = 12;
13035 else
13037 /* We can deduce the registers used from the function's
13038 return value. This is more reliable than examining
13039 regs_ever_live[] because that will be set if the register is
13040 ever used in the function, not just if the register is used
13041 to hold a return value. */
13043 if (current_function_return_rtx != 0)
13044 mode = GET_MODE (current_function_return_rtx);
13045 else
13046 mode = DECL_MODE (DECL_RESULT (current_function_decl));
13048 size = GET_MODE_SIZE (mode);
13050 if (size == 0)
13052 /* In a void function we can use any argument register.
13053 In a function that returns a structure on the stack
13054 we can use the second and third argument registers. */
13055 if (mode == VOIDmode)
13056 regs_available_for_popping =
13057 (1 << ARG_REGISTER (1))
13058 | (1 << ARG_REGISTER (2))
13059 | (1 << ARG_REGISTER (3));
13060 else
13061 regs_available_for_popping =
13062 (1 << ARG_REGISTER (2))
13063 | (1 << ARG_REGISTER (3));
13065 else if (size <= 4)
13066 regs_available_for_popping =
13067 (1 << ARG_REGISTER (2))
13068 | (1 << ARG_REGISTER (3));
13069 else if (size <= 8)
13070 regs_available_for_popping =
13071 (1 << ARG_REGISTER (3));
13074 /* Match registers to be popped with registers into which we pop them. */
13075 for (available = regs_available_for_popping,
13076 required = regs_to_pop;
13077 required != 0 && available != 0;
13078 available &= ~(available & - available),
13079 required &= ~(required & - required))
13080 -- pops_needed;
13082 /* If we have any popping registers left over, remove them. */
13083 if (available > 0)
13084 regs_available_for_popping &= ~available;
13086 /* Otherwise if we need another popping register we can use
13087 the fourth argument register. */
13088 else if (pops_needed)
13090 /* If we have not found any free argument registers and
13091 reg a4 contains the return address, we must move it. */
13092 if (regs_available_for_popping == 0
13093 && reg_containing_return_addr == LAST_ARG_REGNUM)
13095 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13096 reg_containing_return_addr = LR_REGNUM;
13098 else if (size > 12)
13100 /* Register a4 is being used to hold part of the return value,
13101 but we have dire need of a free, low register. */
13102 restore_a4 = TRUE;
13104 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
13107 if (reg_containing_return_addr != LAST_ARG_REGNUM)
13109 /* The fourth argument register is available. */
13110 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
13112 --pops_needed;
13116 /* Pop as many registers as we can. */
13117 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13118 regs_available_for_popping);
13120 /* Process the registers we popped. */
13121 if (reg_containing_return_addr == -1)
13123 /* The return address was popped into the lowest numbered register. */
13124 regs_to_pop &= ~(1 << LR_REGNUM);
13126 reg_containing_return_addr =
13127 number_of_first_bit_set (regs_available_for_popping);
13129 /* Remove this register from the mask of available registers, so that
13130 the return address will not be corrupted by further pops. */
13131 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
13134 /* If we popped other registers then handle them here. */
13135 if (regs_available_for_popping)
13137 int frame_pointer;
13139 /* Work out which register currently contains the frame pointer. */
13140 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
13142 /* Move it into the correct place. */
13143 asm_fprintf (f, "\tmov\t%r, %r\n",
13144 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
13146 /* (Temporarily) remove it from the mask of popped registers. */
13147 regs_available_for_popping &= ~(1 << frame_pointer);
13148 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
13150 if (regs_available_for_popping)
13152 int stack_pointer;
13154 /* We popped the stack pointer as well;
13155 find the register that contains it. */
13156 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
13158 /* Move it into the stack register. */
13159 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
13161 /* At this point we have popped all necessary registers, so
13162 do not worry about restoring regs_available_for_popping
13163 to its correct value:
13165 assert (pops_needed == 0)
13166 assert (regs_available_for_popping == (1 << frame_pointer))
13167 assert (regs_to_pop == (1 << STACK_POINTER)) */
13169 else
13171 /* Since we have just moved the popped value into the frame
13172 pointer, the popping register is available for reuse, and
13173 we know that we still have the stack pointer left to pop. */
13174 regs_available_for_popping |= (1 << frame_pointer);
13178 /* If we still have registers left on the stack, but we no longer have
13179 any registers into which we can pop them, then we must move the return
13180 address into the link register and make available the register that
13181 contained it. */
13182 if (regs_available_for_popping == 0 && pops_needed > 0)
13184 regs_available_for_popping |= 1 << reg_containing_return_addr;
13186 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
13187 reg_containing_return_addr);
13189 reg_containing_return_addr = LR_REGNUM;
13192 /* If we have registers left on the stack then pop some more.
13193 We know that at most we will want to pop FP and SP. */
13194 if (pops_needed > 0)
13196 int popped_into;
13197 int move_to;
13199 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13200 regs_available_for_popping);
13202 /* We have popped either FP or SP.
13203 Move whichever one it is into the correct register. */
13204 popped_into = number_of_first_bit_set (regs_available_for_popping);
13205 move_to = number_of_first_bit_set (regs_to_pop);
13207 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
13209 regs_to_pop &= ~(1 << move_to);
13211 --pops_needed;
13214 /* If we still have not popped everything then we must have only
13215 had one register available to us and we are now popping the SP. */
13216 if (pops_needed > 0)
13218 int popped_into;
13220 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13221 regs_available_for_popping);
13223 popped_into = number_of_first_bit_set (regs_available_for_popping);
13225 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
13227 /* assert (regs_to_pop == (1 << STACK_POINTER))
13228 assert (pops_needed == 1) */
13232 /* If necessary restore the a4 register. */
13233 if (restore_a4)
13235 if (reg_containing_return_addr != LR_REGNUM)
13237 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13238 reg_containing_return_addr = LR_REGNUM;
13241 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13244 if (current_function_calls_eh_return)
13245 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13247 /* Return to caller. */
13248 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13252 void
13253 thumb_final_prescan_insn (rtx insn)
13255 if (flag_print_asm_name)
13256 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
13257 INSN_ADDRESSES (INSN_UID (insn)));
13261 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
13263 unsigned HOST_WIDE_INT mask = 0xff;
13264 int i;
13266 if (val == 0) /* XXX */
13267 return 0;
13269 for (i = 0; i < 25; i++)
13270 if ((val & (mask << i)) == val)
13271 return 1;
13273 return 0;
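/* E.g. 0x00ff0000 is accepted (0xff shifted left by 16), while
   0x00ff00ff is rejected because its set bits do not fit within any
   single 8-bit window.  */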
13276 /* Returns nonzero if the current function contains,
13277 or might contain, a far jump. */
13278 static int
13279 thumb_far_jump_used_p (void)
13281 rtx insn;
13283 /* This test is only important for leaf functions. */
13284 /* assert (!leaf_function_p ()); */
13286 /* If we have already decided that far jumps may be used,
13287 do not bother checking again, and always return true even if
13288 it turns out that they are not being used. Once we have made
13289 the decision that far jumps are present (and that hence the link
13290 register will be pushed onto the stack) we cannot go back on it. */
13291 if (cfun->machine->far_jump_used)
13292 return 1;
13294 /* If this function is not being called from the prologue/epilogue
13295 generation code then it must be being called from the
13296 INITIAL_ELIMINATION_OFFSET macro. */
13297 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
13299 /* In this case we know that we are being asked about the elimination
13300 of the arg pointer register. If that register is not being used,
13301 then there are no arguments on the stack, and we do not have to
13302 worry that a far jump might force the prologue to push the link
13303 register, changing the stack offsets. In this case we can just
13304 return false, since the presence of far jumps in the function will
13305 not affect stack offsets.
13307 If the arg pointer is live (or if it was live, but has now been
13308 eliminated and so set to dead) then we do have to test to see if
13309 the function might contain a far jump. This test can lead to some
13310 false negatives, since before reload is completed, the length of
13311 branch instructions is not known, so gcc defaults to returning their
13312 longest length, which in turn sets the far jump attribute to true.
13314 A false negative will not result in bad code being generated, but it
13315 will result in a needless push and pop of the link register. We
13316 hope that this does not occur too often.
13318 If we need doubleword stack alignment this could affect the other
13319 elimination offsets so we can't risk getting it wrong. */
13320 if (regs_ever_live [ARG_POINTER_REGNUM])
13321 cfun->machine->arg_pointer_live = 1;
13322 else if (!cfun->machine->arg_pointer_live)
13323 return 0;
13326 /* Check to see if the function contains a branch
13327 insn with the far jump attribute set. */
13328 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13330 if (GET_CODE (insn) == JUMP_INSN
13331 /* Ignore tablejump patterns. */
13332 && GET_CODE (PATTERN (insn)) != ADDR_VEC
13333 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
13334 && get_attr_far_jump (insn) == FAR_JUMP_YES
13337 /* Record the fact that we have decided that
13338 the function does use far jumps. */
13339 cfun->machine->far_jump_used = 1;
13340 return 1;
13344 return 0;
13347 /* Return nonzero if FUNC must be entered in ARM mode. */
13349 is_called_in_ARM_mode (tree func)
13351 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
13353 /* Ignore the problem about functions whose address is taken. */
13354 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
13355 return TRUE;
13357 #ifdef ARM_PE
13358 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
13359 #else
13360 return FALSE;
13361 #endif
13364 /* The bits which aren't usefully expanded as rtl. */
13365 const char *
13366 thumb_unexpanded_epilogue (void)
13368 int regno;
13369 unsigned long live_regs_mask = 0;
13370 int high_regs_pushed = 0;
13371 int had_to_push_lr;
13372 int size;
13374 if (return_used_this_function)
13375 return "";
13377 if (IS_NAKED (arm_current_func_type ()))
13378 return "";
13380 live_regs_mask = thumb_compute_save_reg_mask ();
13381 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13383 /* We can deduce the registers used from the function's return value.
13384 This is more reliable than examining regs_ever_live[] because that
13385 will be set if the register is ever used in the function, not just if
13386 the register is used to hold a return value. */
13387 size = arm_size_return_regs ();
13389 /* The prologue may have pushed some high registers to use as
13390 work registers. e.g. the testsuite file:
13391 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
13392 compiles to produce:
13393 push {r4, r5, r6, r7, lr}
13394 mov r7, r9
13395 mov r6, r8
13396 push {r6, r7}
13397 as part of the prologue. We have to undo that pushing here. */
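/* For that example the matching undo sequence emitted below is roughly:
       pop	{r4, r5}
       mov	r8, r4
       mov	r9, r5
       pop	{r4, r5, r6, r7, pc}
   (a sketch only; which low registers are used depends on the
   return-value size and the live-register mask).  */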
13399 if (high_regs_pushed)
13401 unsigned long mask = live_regs_mask & 0xff;
13402 int next_hi_reg;
13404 /* The available low registers depend on the size of the value we are
13405 returning. */
13406 if (size <= 12)
13407 mask |= 1 << 3;
13408 if (size <= 8)
13409 mask |= 1 << 2;
13411 if (mask == 0)
13412 /* Oh dear! We have no low registers into which we can pop
13413 high registers! */
13414 internal_error
13415 ("no low registers available for popping high registers");
13417 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13418 if (live_regs_mask & (1 << next_hi_reg))
13419 break;
13421 while (high_regs_pushed)
13423 /* Find lo register(s) into which the high register(s) can
13424 be popped. */
13425 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13427 if (mask & (1 << regno))
13428 high_regs_pushed--;
13429 if (high_regs_pushed == 0)
13430 break;
13433 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13435 /* Pop the values into the low register(s). */
13436 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13438 /* Move the value(s) into the high registers. */
13439 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13441 if (mask & (1 << regno))
13443 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13444 regno);
13446 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13447 if (live_regs_mask & (1 << next_hi_reg))
13448 break;
13452 live_regs_mask &= ~0x0f00;
13455 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13456 live_regs_mask &= 0xff;
13458 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13460 /* Pop the return address into the PC. */
13461 if (had_to_push_lr)
13462 live_regs_mask |= 1 << PC_REGNUM;
13464 /* Either no argument registers were pushed or a backtrace
13465 structure was created which includes an adjusted stack
13466 pointer, so just pop everything. */
13467 if (live_regs_mask)
13468 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13469 live_regs_mask);
13471 /* We have either just popped the return address into the
13472 PC or it was kept in LR for the entire function. */
13473 if (!had_to_push_lr)
13474 thumb_exit (asm_out_file, LR_REGNUM);
13476 else
13478 /* Pop everything but the return address. */
13479 if (live_regs_mask)
13480 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13481 live_regs_mask);
13483 if (had_to_push_lr)
13485 if (size > 12)
13487 /* We have no free low regs, so save one. */
13488 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13489 LAST_ARG_REGNUM);
13492 /* Get the return address into a temporary register. */
13493 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13494 1 << LAST_ARG_REGNUM);
13496 if (size > 12)
13498 /* Move the return address to lr. */
13499 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13500 LAST_ARG_REGNUM);
13501 /* Restore the low register. */
13502 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13503 IP_REGNUM);
13504 regno = LR_REGNUM;
13506 else
13507 regno = LAST_ARG_REGNUM;
13509 else
13510 regno = LR_REGNUM;
13512 /* Remove the argument registers that were pushed onto the stack. */
13513 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13514 SP_REGNUM, SP_REGNUM,
13515 current_function_pretend_args_size);
13517 thumb_exit (asm_out_file, regno);
13520 return "";
13523 /* Functions to save and restore machine-specific function data. */
13524 static struct machine_function *
13525 arm_init_machine_status (void)
13527 struct machine_function *machine;
13528 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13530 #if ARM_FT_UNKNOWN != 0
13531 machine->func_type = ARM_FT_UNKNOWN;
13532 #endif
13533 return machine;
13536 /* Return an RTX indicating where the return address to the
13537 calling function can be found. */
13539 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13541 if (count != 0)
13542 return NULL_RTX;
13544 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13547 /* Do anything needed before RTL is emitted for each function. */
13548 void
13549 arm_init_expanders (void)
13551 /* Arrange to initialize and mark the machine per-function status. */
13552 init_machine_status = arm_init_machine_status;
13554 /* This is to stop the combine pass optimizing away the alignment
13555 adjustment of va_arg. */
13556 /* ??? It is claimed that this should not be necessary. */
13557 if (cfun)
13558 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13562 /* Like arm_compute_initial_elimination_offset. Simpler because there
13563 isn't an ABI specified frame pointer for Thumb. Instead, we set it
13564 to point at the base of the local variables after static stack
13565 space for a function has been allocated. */
13567 HOST_WIDE_INT
13568 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13570 arm_stack_offsets *offsets;
13572 offsets = arm_get_frame_offsets ();
13574 switch (from)
13576 case ARG_POINTER_REGNUM:
13577 switch (to)
13579 case STACK_POINTER_REGNUM:
13580 return offsets->outgoing_args - offsets->saved_args;
13582 case FRAME_POINTER_REGNUM:
13583 return offsets->soft_frame - offsets->saved_args;
13585 case ARM_HARD_FRAME_POINTER_REGNUM:
13586 return offsets->saved_regs - offsets->saved_args;
13588 case THUMB_HARD_FRAME_POINTER_REGNUM:
13589 return offsets->locals_base - offsets->saved_args;
13591 default:
13592 gcc_unreachable ();
13594 break;
13596 case FRAME_POINTER_REGNUM:
13597 switch (to)
13599 case STACK_POINTER_REGNUM:
13600 return offsets->outgoing_args - offsets->soft_frame;
13602 case ARM_HARD_FRAME_POINTER_REGNUM:
13603 return offsets->saved_regs - offsets->soft_frame;
13605 case THUMB_HARD_FRAME_POINTER_REGNUM:
13606 return offsets->locals_base - offsets->soft_frame;
13608 default:
13609 gcc_unreachable ();
13611 break;
13613 default:
13614 gcc_unreachable ();
13619 /* Generate the rest of a function's prologue. */
13620 void
13621 thumb_expand_prologue (void)
13623 rtx insn, dwarf;
13625 HOST_WIDE_INT amount;
13626 arm_stack_offsets *offsets;
13627 unsigned long func_type;
13628 int regno;
13629 unsigned long live_regs_mask;
13631 func_type = arm_current_func_type ();
13633 /* Naked functions don't have prologues. */
13634 if (IS_NAKED (func_type))
13635 return;
13637 if (IS_INTERRUPT (func_type))
13639 error ("interrupt Service Routines cannot be coded in Thumb mode");
13640 return;
13643 live_regs_mask = thumb_compute_save_reg_mask ();
13644 /* Load the pic register before setting the frame pointer,
13645 so we can use r7 as a temporary work register. */
13646 if (flag_pic && arm_pic_register != INVALID_REGNUM)
13647 arm_load_pic_register (live_regs_mask);
13649 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
13650 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13651 stack_pointer_rtx);
13653 offsets = arm_get_frame_offsets ();
13654 amount = offsets->outgoing_args - offsets->saved_regs;
13655 if (amount)
13657 if (amount < 512)
13659 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13660 GEN_INT (- amount)));
13661 RTX_FRAME_RELATED_P (insn) = 1;
13663 else
13665 rtx reg;
13667 /* The stack decrement is too big for an immediate value in a single
13668 insn. In theory we could issue multiple subtracts, but after
13669 three of them it becomes more space efficient to place the full
13670 value in the constant pool and load into a register. (Also the
13671 ARM debugger really likes to see only one stack decrement per
13672 function). So instead we look for a scratch register into which
13673 we can load the decrement, and then we subtract this from the
13674 stack pointer. Unfortunately, on the Thumb the only available
13675 scratch registers are the argument registers, and we cannot use
13676 these as they may hold arguments to the function. Instead we
13677 attempt to locate a call preserved register which is used by this
13678 function. If we can find one, then we know that it will have
13679 been pushed at the start of the prologue and so we can corrupt
13680 it now. */
13681 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13682 if (live_regs_mask & (1 << regno)
13683 && !(frame_pointer_needed
13684 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13685 break;
13687 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13689 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13691 /* Choose an arbitrary, non-argument low register. */
13692 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13694 /* Save it by copying it into a high, scratch register. */
13695 emit_insn (gen_movsi (spare, reg));
13696 /* Add a USE to stop propagate_one_insn() from barfing. */
13697 emit_insn (gen_prologue_use (spare));
13699 /* Decrement the stack. */
13700 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13701 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13702 stack_pointer_rtx, reg));
13703 RTX_FRAME_RELATED_P (insn) = 1;
13704 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13705 plus_constant (stack_pointer_rtx,
13706 -amount));
13707 RTX_FRAME_RELATED_P (dwarf) = 1;
13708 REG_NOTES (insn)
13709 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13710 REG_NOTES (insn));
13712 /* Restore the low register's original value. */
13713 emit_insn (gen_movsi (reg, spare));
13715 /* Emit a USE of the restored scratch register, so that flow
13716 analysis will not consider the restore redundant. The
13717 register won't be used again in this function and isn't
13718 restored by the epilogue. */
13719 emit_insn (gen_prologue_use (reg));
13721 else
13723 reg = gen_rtx_REG (SImode, regno);
13725 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13727 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13728 stack_pointer_rtx, reg));
13729 RTX_FRAME_RELATED_P (insn) = 1;
13730 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13731 plus_constant (stack_pointer_rtx,
13732 -amount));
13733 RTX_FRAME_RELATED_P (dwarf) = 1;
13734 REG_NOTES (insn)
13735 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13736 REG_NOTES (insn));
13741 if (frame_pointer_needed)
13743 amount = offsets->outgoing_args - offsets->locals_base;
13745 if (amount < 1024)
13746 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13747 stack_pointer_rtx, GEN_INT (amount)));
13748 else
13750 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
13751 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13752 hard_frame_pointer_rtx,
13753 stack_pointer_rtx));
13754 dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
13755 plus_constant (stack_pointer_rtx, amount));
13756 RTX_FRAME_RELATED_P (dwarf) = 1;
13757 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13758 REG_NOTES (insn));
13761 RTX_FRAME_RELATED_P (insn) = 1;
13764 /* If we are profiling, make sure no instructions are scheduled before
13765 the call to mcount. Similarly if the user has requested no
13766 scheduling in the prolog. Similarly if we want non-call exceptions
13767 using the EABI unwinder, to prevent faulting instructions from being
13768 swapped with a stack adjustment. */
13769 if (current_function_profile || !TARGET_SCHED_PROLOG
13770 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
13771 emit_insn (gen_blockage ());
13773 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13774 if (live_regs_mask & 0xff)
13775 cfun->machine->lr_save_eliminated = 0;
13777 /* If the link register is being kept alive, with the return address in it,
13778 then make sure that it does not get reused by the ce2 pass. */
13779 if (cfun->machine->lr_save_eliminated)
13780 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
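13783 /* Generate the rest of a function's epilogue. */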
13784 void
13785 thumb_expand_epilogue (void)
13787 HOST_WIDE_INT amount;
13788 arm_stack_offsets *offsets;
13789 int regno;
13791 /* Naked functions don't have epilogues. */
13792 if (IS_NAKED (arm_current_func_type ()))
13793 return;
13795 offsets = arm_get_frame_offsets ();
13796 amount = offsets->outgoing_args - offsets->saved_regs;
13798 if (frame_pointer_needed)
13800 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13801 amount = offsets->locals_base - offsets->saved_regs;
13804 if (amount)
13806 if (amount < 512)
13807 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13808 GEN_INT (amount)));
13809 else
13811 /* r3 is always free in the epilogue. */
13812 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13814 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13815 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13819 /* Emit a USE (stack_pointer_rtx), so that
13820 the stack adjustment will not be deleted. */
13821 emit_insn (gen_prologue_use (stack_pointer_rtx));
13823 if (current_function_profile || !TARGET_SCHED_PROLOG)
13824 emit_insn (gen_blockage ());
13826 /* Emit a clobber for each insn that will be restored in the epilogue,
13827 so that flow2 will get register lifetimes correct. */
13828 for (regno = 0; regno < 13; regno++)
13829 if (regs_ever_live[regno] && !call_used_regs[regno])
13830 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13832 if (! regs_ever_live[LR_REGNUM])
13833 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
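13834 /* Output assembler for the start of a Thumb function: the ARM-mode
13835 entry stub, argument pushes, backtrace structure and register saves. */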
13836 static void
13837 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13839 unsigned long live_regs_mask = 0;
13840 unsigned long l_mask;
13841 unsigned high_regs_pushed = 0;
13842 int cfa_offset = 0;
13843 int regno;
13845 if (IS_NAKED (arm_current_func_type ()))
13846 return;
13848 if (is_called_in_ARM_mode (current_function_decl))
13850 const char * name;
13852 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13853 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13854 == SYMBOL_REF);
13855 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13857 /* Generate code sequence to switch us into Thumb mode. */
13858 /* The .code 32 directive has already been emitted by
13859 ASM_DECLARE_FUNCTION_NAME. */
13860 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13861 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13863 /* Generate a label, so that the debugger will notice the
13864 change in instruction sets. This label is also used by
13865 the assembler to bypass the ARM code when this function
13866 is called from a Thumb encoded function elsewhere in the
13867 same file. Hence the definition of STUB_NAME here must
13868 agree with the definition in gas/config/tc-arm.c. */
13870 #define STUB_NAME ".real_start_of"
13872 fprintf (f, "\t.code\t16\n");
13873 #ifdef ARM_PE
13874 if (arm_dllexport_name_p (name))
13875 name = arm_strip_name_encoding (name);
13876 #endif
13877 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13878 fprintf (f, "\t.thumb_func\n");
13879 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13882 if (current_function_pretend_args_size)
13884 /* Output unwind directive for the stack adjustment. */
13885 if (ARM_EABI_UNWIND_TABLES)
13886 fprintf (f, "\t.pad #%d\n",
13887 current_function_pretend_args_size);
13889 if (cfun->machine->uses_anonymous_args)
13891 int num_pushes;
13893 fprintf (f, "\tpush\t{");
13895 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13897 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13898 regno <= LAST_ARG_REGNUM;
13899 regno++)
13900 asm_fprintf (f, "%r%s", regno,
13901 regno == LAST_ARG_REGNUM ? "" : ", ");
13903 fprintf (f, "}\n");
13905 else
13906 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13907 SP_REGNUM, SP_REGNUM,
13908 current_function_pretend_args_size);
13910 /* We don't need to record the stores for unwinding (would it
13911 help the debugger any if we did?), but record the change in
13912 the stack pointer. */
13913 if (dwarf2out_do_frame ())
13915 char *l = dwarf2out_cfi_label ();
13917 cfa_offset = cfa_offset + current_function_pretend_args_size;
13918 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13922 /* Get the registers we are going to push. */
13923 live_regs_mask = thumb_compute_save_reg_mask ();
13924 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13925 l_mask = live_regs_mask & 0x40ff;
13926 /* Then count how many other high registers will need to be pushed. */
13927 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13929 if (TARGET_BACKTRACE)
13931 unsigned offset;
13932 unsigned work_register;
13934 /* We have been asked to create a stack backtrace structure.
13935 The code looks like this:
13937 0 .align 2
13938 0 func:
13939 0 sub SP, #16 Reserve space for 4 registers.
13940 2 push {R7} Push low registers.
13941 4 add R7, SP, #20 Get the stack pointer before the push.
13942 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13943 8 mov R7, PC Get hold of the start of this code plus 12.
13944 10 str R7, [SP, #16] Store it.
13945 12 mov R7, FP Get hold of the current frame pointer.
13946 14 str R7, [SP, #4] Store it.
13947 16 mov R7, LR Get hold of the current return address.
13948 18 str R7, [SP, #12] Store it.
13949 20 add R7, SP, #16 Point at the start of the backtrace structure.
13950 22 mov FP, R7 Put this value into the frame pointer. */
13952 work_register = thumb_find_work_register (live_regs_mask);
13954 if (ARM_EABI_UNWIND_TABLES)
13955 asm_fprintf (f, "\t.pad #16\n");
13957 asm_fprintf
13958 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13959 SP_REGNUM, SP_REGNUM);
13961 if (dwarf2out_do_frame ())
13963 char *l = dwarf2out_cfi_label ();
13965 cfa_offset = cfa_offset + 16;
13966 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13969 if (l_mask)
13971 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13972 offset = bit_count (l_mask) * UNITS_PER_WORD;
13974 else
13975 offset = 0;
13977 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13978 offset + 16 + current_function_pretend_args_size);
13980 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13981 offset + 4);
13983 /* Make sure that the instruction fetching the PC is in the right place
13984 to calculate "start of backtrace creation code + 12". */
13985 if (l_mask)
13987 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13988 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13989 offset + 12);
13990 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13991 ARM_HARD_FRAME_POINTER_REGNUM);
13992 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13993 offset);
13995 else
13997 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13998 ARM_HARD_FRAME_POINTER_REGNUM);
13999 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14000 offset);
14001 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14002 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14003 offset + 12);
14006 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
14007 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14008 offset + 8);
14009 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14010 offset + 12);
14011 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
14012 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
14014 /* Optimization: If we are not pushing any low registers but we are going
14015 to push some high registers then delay our first push. This will just
14016 be a push of LR and we can combine it with the push of the first high
14017 register. */
14018 else if ((l_mask & 0xff) != 0
14019 || (high_regs_pushed == 0 && l_mask))
14020 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14022 if (high_regs_pushed)
14024 unsigned pushable_regs;
14025 unsigned next_hi_reg;
14027 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
14028 if (live_regs_mask & (1 << next_hi_reg))
14029 break;
14031 pushable_regs = l_mask & 0xff;
14033 if (pushable_regs == 0)
14034 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
14036 while (high_regs_pushed > 0)
14038 unsigned long real_regs_mask = 0;
14040 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
14042 if (pushable_regs & (1 << regno))
14044 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
14046 high_regs_pushed --;
14047 real_regs_mask |= (1 << next_hi_reg);
14049 if (high_regs_pushed)
14051 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
14052 next_hi_reg --)
14053 if (live_regs_mask & (1 << next_hi_reg))
14054 break;
14056 else
14058 pushable_regs &= ~((1 << regno) - 1);
14059 break;
14064 /* If we had to find a work register and we have not yet
14065 saved the LR then add it to the list of regs to push. */
14066 if (l_mask == (1 << LR_REGNUM))
14068 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
14069 1, &cfa_offset,
14070 real_regs_mask | (1 << LR_REGNUM));
14071 l_mask = 0;
14073 else
14074 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
14079 /* Handle the case of a double word load into a low register from
14080 a computed memory address. The computed address may involve a
14081 register which is overwritten by the load. */
14082 const char *
14083 thumb_load_double_from_address (rtx *operands)
14085 rtx addr;
14086 rtx base;
14087 rtx offset;
14088 rtx arg1;
14089 rtx arg2;
14091 gcc_assert (GET_CODE (operands[0]) == REG);
14092 gcc_assert (GET_CODE (operands[1]) == MEM);
14094 /* Get the memory address. */
14095 addr = XEXP (operands[1], 0);
14097 /* Work out how the memory address is computed. */
14098 switch (GET_CODE (addr))
14100 case REG:
14101 operands[2] = adjust_address (operands[1], SImode, 4);
14103 if (REGNO (operands[0]) == REGNO (addr))
14105 output_asm_insn ("ldr\t%H0, %2", operands);
14106 output_asm_insn ("ldr\t%0, %1", operands);
14108 else
14110 output_asm_insn ("ldr\t%0, %1", operands);
14111 output_asm_insn ("ldr\t%H0, %2", operands);
14113 break;
14115 case CONST:
14116 /* Compute <address> + 4 for the high order load. */
14117 operands[2] = adjust_address (operands[1], SImode, 4);
14119 output_asm_insn ("ldr\t%0, %1", operands);
14120 output_asm_insn ("ldr\t%H0, %2", operands);
14121 break;
14123 case PLUS:
14124 arg1 = XEXP (addr, 0);
14125 arg2 = XEXP (addr, 1);
14127 if (CONSTANT_P (arg1))
14128 base = arg2, offset = arg1;
14129 else
14130 base = arg1, offset = arg2;
14132 gcc_assert (GET_CODE (base) == REG);
14134 /* Catch the case of <address> = <reg> + <reg>. */
14135 if (GET_CODE (offset) == REG)
14137 int reg_offset = REGNO (offset);
14138 int reg_base = REGNO (base);
14139 int reg_dest = REGNO (operands[0]);
14141 /* Add the base and offset registers together into the
14142 higher destination register. */
14143 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
14144 reg_dest + 1, reg_base, reg_offset);
14146 /* Load the lower destination register from the address in
14147 the higher destination register. */
14148 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
14149 reg_dest, reg_dest + 1);
14151 /* Load the higher destination register from its own address
14152 plus 4. */
14153 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
14154 reg_dest + 1, reg_dest + 1);
14156 else
14158 /* Compute <address> + 4 for the high order load. */
14159 operands[2] = adjust_address (operands[1], SImode, 4);
14161 /* If the computed address is held in the low order register
14162 then load the high order register first, otherwise always
14163 load the low order register first. */
14164 if (REGNO (operands[0]) == REGNO (base))
14166 output_asm_insn ("ldr\t%H0, %2", operands);
14167 output_asm_insn ("ldr\t%0, %1", operands);
14169 else
14171 output_asm_insn ("ldr\t%0, %1", operands);
14172 output_asm_insn ("ldr\t%H0, %2", operands);
14175 break;
14177 case LABEL_REF:
14178 /* With no registers to worry about we can just load the value
14179 directly. */
14180 operands[2] = adjust_address (operands[1], SImode, 4);
14182 output_asm_insn ("ldr\t%H0, %2", operands);
14183 output_asm_insn ("ldr\t%0, %1", operands);
14184 break;
14186 default:
14187 gcc_unreachable ();
14190 return "";
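14191 /* Output a block move of N (2 or 3) words as an ldmia/stmia pair,
14192 post-incrementing both the source and destination pointers. */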
14193 const char *
14194 thumb_output_move_mem_multiple (int n, rtx *operands)
14196 rtx tmp;
14198 switch (n)
14200 case 2:
14201 if (REGNO (operands[4]) > REGNO (operands[5]))
14203 tmp = operands[4];
14204 operands[4] = operands[5];
14205 operands[5] = tmp;
14207 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
14208 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
14209 break;
14211 case 3:
14212 if (REGNO (operands[4]) > REGNO (operands[5]))
14214 tmp = operands[4];
14215 operands[4] = operands[5];
14216 operands[5] = tmp;
14218 if (REGNO (operands[5]) > REGNO (operands[6]))
14220 tmp = operands[5];
14221 operands[5] = operands[6];
14222 operands[6] = tmp;
14224 if (REGNO (operands[4]) > REGNO (operands[5]))
14226 tmp = operands[4];
14227 operands[4] = operands[5];
14228 operands[5] = tmp;
14231 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
14232 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
14233 break;
14235 default:
14236 gcc_unreachable ();
14239 return "";
14242 /* Output a call-via instruction for thumb state. */
14243 const char *
14244 thumb_call_via_reg (rtx reg)
14246 int regno = REGNO (reg);
14247 rtx *labelp;
14249 gcc_assert (regno < LR_REGNUM);
14251 /* If we are in the normal text section we can use a single instance
14252 per compilation unit. If we are doing function sections, then we need
14253 an entry per section, since we can't rely on reachability. */
14254 if (in_section == text_section)
14256 thumb_call_reg_needed = 1;
14258 if (thumb_call_via_label[regno] == NULL)
14259 thumb_call_via_label[regno] = gen_label_rtx ();
14260 labelp = thumb_call_via_label + regno;
14262 else
14264 if (cfun->machine->call_via[regno] == NULL)
14265 cfun->machine->call_via[regno] = gen_label_rtx ();
14266 labelp = cfun->machine->call_via + regno;
14269 output_asm_insn ("bl\t%a0", labelp);
14270 return "";
14273 /* Routines for generating rtl. */
14274 void
14275 thumb_expand_movmemqi (rtx *operands)
14277 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
14278 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
14279 HOST_WIDE_INT len = INTVAL (operands[2]);
14280 HOST_WIDE_INT offset = 0;
14282 while (len >= 12)
14284 emit_insn (gen_movmem12b (out, in, out, in));
14285 len -= 12;
14288 if (len >= 8)
14290 emit_insn (gen_movmem8b (out, in, out, in));
14291 len -= 8;
14294 if (len >= 4)
14296 rtx reg = gen_reg_rtx (SImode);
14297 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
14298 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
14299 len -= 4;
14300 offset += 4;
14303 if (len >= 2)
14305 rtx reg = gen_reg_rtx (HImode);
14306 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
14307 plus_constant (in, offset))));
14308 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
14309 reg));
14310 len -= 2;
14311 offset += 2;
14314 if (len)
14316 rtx reg = gen_reg_rtx (QImode);
14317 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
14318 plus_constant (in, offset))));
14319 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
14320 reg));
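14322 /* Handle writing a half-word to memory during reload, going through
14323 the scratch register in operands[2]. */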
14324 void
14325 thumb_reload_out_hi (rtx *operands)
14327 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
14330 /* Handle reading a half-word from memory during reload. */
14331 void
14332 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
14334 gcc_unreachable ();
14337 /* Return the length of a function name prefix
14338 that starts with the character 'c'. */
14339 static int
14340 arm_get_strip_length (int c)
14342 switch (c)
14344 ARM_NAME_ENCODING_LENGTHS
14345 default: return 0;
14349 /* Return a pointer to a function's name with any
14350 and all prefix encodings stripped from it. */
14351 const char *
14352 arm_strip_name_encoding (const char *name)
14354 int skip;
14356 while ((skip = arm_get_strip_length (* name)))
14357 name += skip;
14359 return name;
14362 /* If there is a '*' anywhere in the name's prefix, then
14363 emit the stripped name verbatim, otherwise prepend an
14364 underscore if leading underscores are being used. */
14365 void
14366 arm_asm_output_labelref (FILE *stream, const char *name)
14368 int skip;
14369 int verbatim = 0;
14371 while ((skip = arm_get_strip_length (* name)))
14373 verbatim |= (*name == '*');
14374 name += skip;
14377 if (verbatim)
14378 fputs (name, stream);
14379 else
14380 asm_fprintf (stream, "%U%s", name);
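14381 /* At the end of the file, output the call-via-REG branch stubs needed
14382 by functions in the normal text section. */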
14383 static void
14384 arm_file_end (void)
14386 int regno;
14388 if (! thumb_call_reg_needed)
14389 return;
14391 switch_to_section (text_section);
14392 asm_fprintf (asm_out_file, "\t.code 16\n");
14393 ASM_OUTPUT_ALIGN (asm_out_file, 1);
14395 for (regno = 0; regno < LR_REGNUM; regno++)
14397 rtx label = thumb_call_via_label[regno];
14399 if (label != 0)
14401 targetm.asm_out.internal_label (asm_out_file, "L",
14402 CODE_LABEL_NUMBER (label));
14403 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
14408 rtx aof_pic_label;
14410 #ifdef AOF_ASSEMBLER
14411 /* Special functions only needed when producing AOF syntax assembler. */
14413 struct pic_chain
14415 struct pic_chain * next;
14416 const char * symname;
14419 static struct pic_chain * aof_pic_chain = NULL;
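14420 /* Return an RTX for the PIC table entry of X, creating it on first use. */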
14421 rtx
14422 aof_pic_entry (rtx x)
14424 struct pic_chain ** chainp;
14425 int offset;
14427 if (aof_pic_label == NULL_RTX)
14429 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14432 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14433 offset += 4, chainp = &(*chainp)->next)
14434 if ((*chainp)->symname == XSTR (x, 0))
14435 return plus_constant (aof_pic_label, offset);
14437 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14438 (*chainp)->next = NULL;
14439 (*chainp)->symname = XSTR (x, 0);
14440 return plus_constant (aof_pic_label, offset);
14443 void
14444 aof_dump_pic_table (FILE *f)
14446 struct pic_chain * chain;
14448 if (aof_pic_chain == NULL)
14449 return;
14451 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14452 PIC_OFFSET_TABLE_REGNUM,
14453 PIC_OFFSET_TABLE_REGNUM);
14454 fputs ("|x$adcons|\n", f);
14456 for (chain = aof_pic_chain; chain; chain = chain->next)
14458 fputs ("\tDCD\t", f);
14459 assemble_name (f, chain->symname);
14460 fputs ("\n", f);
14464 int arm_text_section_count = 1;
14466 /* A get_unnamed_section callback for switching to the text section. */
14468 static void
14469 aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14471 fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
14472 arm_text_section_count++);
14473 if (flag_pic)
14474 fprintf (asm_out_file, ", PIC, REENTRANT");
14475 fprintf (asm_out_file, "\n");
14478 static int arm_data_section_count = 1;
14480 /* A get_unnamed_section callback for switching to the data section. */
14482 static void
14483 aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14485 fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
14486 arm_data_section_count++);
14489 /* Implement TARGET_ASM_INIT_SECTIONS.
14491 AOF Assembler syntax is a nightmare when it comes to areas, since once
14492 we change from one area to another, we can't go back again. Instead,
14493 we must create a new area with the same attributes and add the new output
14494 to that. Unfortunately, there is nothing we can do here to guarantee that
14495 two areas with the same attributes will be linked adjacently in the
14496 resulting executable, so we have to be careful not to do pc-relative
14497 addressing across such boundaries. */
14499 static void
14500 aof_asm_init_sections (void)
14502 text_section = get_unnamed_section (SECTION_CODE,
14503 aof_output_text_section_asm_op, NULL);
14504 data_section = get_unnamed_section (SECTION_WRITE,
14505 aof_output_data_section_asm_op, NULL);
14506 readonly_data_section = text_section;
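14508 /* Switch output to a fresh zero-initialized (NOINIT) data area. */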
14509 void
14510 zero_init_section (void)
14512 static int zero_init_count = 1;
14514 fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
14515 in_section = NULL;
14518 /* The AOF assembler is religiously strict about declarations of
14519 imported and exported symbols, so that it is impossible to declare
14520 a function as imported near the beginning of the file, and then to
14521 export it later on. It is, however, possible to delay the decision
14522 until all the functions in the file have been compiled. To get
14523 around this, we maintain a list of the imports and exports, and
14524 delete from it any that are subsequently defined. At the end of
14525 compilation we spit the remainder of the list out before the END
14526 directive. */
14528 struct import
14530 struct import * next;
14531 const char * name;
14534 static struct import * imports_list = NULL;
14536 void
14537 aof_add_import (const char *name)
14539 struct import * new;
14541 for (new = imports_list; new; new = new->next)
14542 if (new->name == name)
14543 return;
14545 new = (struct import *) xmalloc (sizeof (struct import));
14546 new->next = imports_list;
14547 imports_list = new;
14548 new->name = name;
14551 void
14552 aof_delete_import (const char *name)
14554 struct import ** old;
14556 for (old = &imports_list; *old; old = & (*old)->next)
14558 if ((*old)->name == name)
14560 *old = (*old)->next;
14561 return;
14566 int arm_main_function = 0;
14568 static void
14569 aof_dump_imports (FILE *f)
14571 /* The AOF assembler needs this to cause the startup code to be extracted
14572 from the library. Bringing in __main causes the whole thing to work
14573 automagically. */
14574 if (arm_main_function)
14576 switch_to_section (text_section);
14577 fputs ("\tIMPORT __main\n", f);
14578 fputs ("\tDCD __main\n", f);
14581 /* Now dump the remaining imports. */
14582 while (imports_list)
14584 fprintf (f, "\tIMPORT\t");
14585 assemble_name (f, imports_list->name);
14586 fputc ('\n', f);
14587 imports_list = imports_list->next;
14591 static void
14592 aof_globalize_label (FILE *stream, const char *name)
14594 default_globalize_label (stream, name);
14595 if (! strcmp (name, "main"))
14596 arm_main_function = 1;
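14597 /* At the start of the file, define the standard AOF register names
14598 (RN for core registers, FN for FPA registers). */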
14599 static void
14600 aof_file_start (void)
14602 fputs ("__r0\tRN\t0\n", asm_out_file);
14603 fputs ("__a1\tRN\t0\n", asm_out_file);
14604 fputs ("__a2\tRN\t1\n", asm_out_file);
14605 fputs ("__a3\tRN\t2\n", asm_out_file);
14606 fputs ("__a4\tRN\t3\n", asm_out_file);
14607 fputs ("__v1\tRN\t4\n", asm_out_file);
14608 fputs ("__v2\tRN\t5\n", asm_out_file);
14609 fputs ("__v3\tRN\t6\n", asm_out_file);
14610 fputs ("__v4\tRN\t7\n", asm_out_file);
14611 fputs ("__v5\tRN\t8\n", asm_out_file);
14612 fputs ("__v6\tRN\t9\n", asm_out_file);
14613 fputs ("__sl\tRN\t10\n", asm_out_file);
14614 fputs ("__fp\tRN\t11\n", asm_out_file);
14615 fputs ("__ip\tRN\t12\n", asm_out_file);
14616 fputs ("__sp\tRN\t13\n", asm_out_file);
14617 fputs ("__lr\tRN\t14\n", asm_out_file);
14618 fputs ("__pc\tRN\t15\n", asm_out_file);
14619 fputs ("__f0\tFN\t0\n", asm_out_file);
14620 fputs ("__f1\tFN\t1\n", asm_out_file);
14621 fputs ("__f2\tFN\t2\n", asm_out_file);
14622 fputs ("__f3\tFN\t3\n", asm_out_file);
14623 fputs ("__f4\tFN\t4\n", asm_out_file);
14624 fputs ("__f5\tFN\t5\n", asm_out_file);
14625 fputs ("__f6\tFN\t6\n", asm_out_file);
14626 fputs ("__f7\tFN\t7\n", asm_out_file);
14627 switch_to_section (text_section);
14630 static void
14631 aof_file_end (void)
14633 if (flag_pic)
14634 aof_dump_pic_table (asm_out_file);
14635 arm_file_end ();
14636 aof_dump_imports (asm_out_file);
14637 fputs ("\tEND\n", asm_out_file);
14639 #endif /* AOF_ASSEMBLER */
14641 #ifndef ARM_PE
14642 /* Symbols in the text segment can be accessed without indirecting via the
14643 constant pool; it may take an extra binary operation, but this is still
14644 faster than indirecting via memory. Don't do this when not optimizing,
14645 since we won't be calculating all of the offsets necessary to do this
14646 simplification. */
14648 static void
14649 arm_encode_section_info (tree decl, rtx rtl, int first)
14651 /* This doesn't work with AOF syntax, since the string table may be in
14652 a different AREA. */
14653 #ifndef AOF_ASSEMBLER
14654 if (optimize > 0 && TREE_CONSTANT (decl))
14655 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14656 #endif
14658 /* If we are referencing a function that is weak then encode a long call
14659 flag in the function name, otherwise if the function is static or
14660 known to be defined in this file then encode a short call flag. */
14661 if (first && DECL_P (decl))
14663 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14664 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14665 else if (! TREE_PUBLIC (decl))
14666 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14669 default_encode_section_info (decl, rtl, first);
14671 #endif /* !ARM_PE */
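14672 /* Output an internal label, resetting the conditional execution state machine if this label was its target. */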
14673 static void
14674 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14676 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14677 && !strcmp (prefix, "L"))
14679 arm_ccfsm_state = 0;
14680 arm_target_insn = NULL;
14682 default_internal_label (stream, prefix, labelno);
14685 /* Output code to add DELTA to the first argument, and then jump
14686 to FUNCTION. Used for C++ multiple inheritance. */
14687 static void
14688 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14689 HOST_WIDE_INT delta,
14690 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14691 tree function)
14693 static int thunk_label = 0;
14694 char label[256];
14695 int mi_delta = delta;
14696 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14697 int shift = 0;
14698 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14699 ? 1 : 0);
14700 if (mi_delta < 0)
14701 mi_delta = - mi_delta;
14702 if (TARGET_THUMB)
14704 int labelno = thunk_label++;
14705 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14706 fputs ("\tldr\tr12, ", file);
14707 assemble_name (file, label);
14708 fputc ('\n', file);
14710 while (mi_delta != 0)
14712 if ((mi_delta & (3 << shift)) == 0)
14713 shift += 2;
14714 else
14716 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14717 mi_op, this_regno, this_regno,
14718 mi_delta & (0xff << shift));
14719 mi_delta &= ~(0xff << shift);
14720 shift += 8;
14723 if (TARGET_THUMB)
14725 fprintf (file, "\tbx\tr12\n");
14726 ASM_OUTPUT_ALIGN (file, 2);
14727 assemble_name (file, label);
14728 fputs (":\n", file);
14729 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14731 else
14733 fputs ("\tb\t", file);
14734 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14735 if (NEED_PLT_RELOC)
14736 fputs ("(PLT)", file);
14737 fputc ('\n', file);
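14739 /* Output the constant vector X as one hexadecimal constant.
14740 Returns nonzero. */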
14741 int
14742 arm_emit_vector_const (FILE *file, rtx x)
14744 int i;
14745 const char * pattern;
14747 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14749 switch (GET_MODE (x))
14751 case V2SImode: pattern = "%08x"; break;
14752 case V4HImode: pattern = "%04x"; break;
14753 case V8QImode: pattern = "%02x"; break;
14754 default: gcc_unreachable ();
14757 fprintf (file, "0x");
14758 for (i = CONST_VECTOR_NUNITS (x); i--;)
14760 rtx element;
14762 element = CONST_VECTOR_ELT (x, i);
14763 fprintf (file, pattern, INTVAL (element));
14766 return 1;
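14767 /* Output a load of an iWMMXt GR register, fixing up loads whose
14768 address is out of range for wldrw by going via a core register. */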
14769 const char *
14770 arm_output_load_gr (rtx *operands)
14772 rtx reg;
14773 rtx offset;
14774 rtx wcgr;
14775 rtx sum;
14777 if (GET_CODE (operands [1]) != MEM
14778 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14779 || GET_CODE (reg = XEXP (sum, 0)) != REG
14780 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14781 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14782 return "wldrw%?\t%0, %1";
14784 /* Fix up an out-of-range load of a GR register. */
14785 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14786 wcgr = operands[0];
14787 operands[0] = reg;
14788 output_asm_insn ("ldr%?\t%0, %1", operands);
14790 operands[0] = wcgr;
14791 operands[1] = reg;
14792 output_asm_insn ("tmcr%?\t%0, %1", operands);
14793 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14795 return "";
14798 static rtx
14799 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14800 int incoming ATTRIBUTE_UNUSED)
14802 #if 0
14803 /* FIXME: The ARM backend has special code to handle structure
14804 returns, and will reserve its own hidden first argument. So
14805 if this macro is enabled a *second* hidden argument will be
14806 reserved, which will break binary compatibility with old
14807 toolchains and also thunk handling. One day this should be
14808 fixed. */
14809 return 0;
14810 #else
14811 /* Register in which address to store a structure value
14812 is passed to a function. */
14813 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14814 #endif
14817 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14819 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14820 named arg and all anonymous args onto the stack.
14821 XXX I know the prologue shouldn't be pushing registers, but it is faster
14822 that way. */
14824 static void
14825 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14826 enum machine_mode mode ATTRIBUTE_UNUSED,
14827 tree type ATTRIBUTE_UNUSED,
14828 int *pretend_size,
14829 int second_time ATTRIBUTE_UNUSED)
14831 cfun->machine->uses_anonymous_args = 1;
14832 if (cum->nregs < NUM_ARG_REGS)
14833 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14836 /* Return nonzero if the CONSUMER instruction (a store) does not need
14837 PRODUCER's value to calculate the address. */
14839 int
14840 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14842 rtx value = PATTERN (producer);
14843 rtx addr = PATTERN (consumer);
14845 if (GET_CODE (value) == COND_EXEC)
14846 value = COND_EXEC_CODE (value);
14847 if (GET_CODE (value) == PARALLEL)
14848 value = XVECEXP (value, 0, 0);
14849 value = XEXP (value, 0);
14850 if (GET_CODE (addr) == COND_EXEC)
14851 addr = COND_EXEC_CODE (addr);
14852 if (GET_CODE (addr) == PARALLEL)
14853 addr = XVECEXP (addr, 0, 0);
14854 addr = XEXP (addr, 0);
14856 return !reg_overlap_mentioned_p (value, addr);
14859 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14860 have an early register shift value or amount dependency on the
14861 result of PRODUCER. */
14863 int
14864 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14866 rtx value = PATTERN (producer);
14867 rtx op = PATTERN (consumer);
14868 rtx early_op;
14870 if (GET_CODE (value) == COND_EXEC)
14871 value = COND_EXEC_CODE (value);
14872 if (GET_CODE (value) == PARALLEL)
14873 value = XVECEXP (value, 0, 0);
14874 value = XEXP (value, 0);
14875 if (GET_CODE (op) == COND_EXEC)
14876 op = COND_EXEC_CODE (op);
14877 if (GET_CODE (op) == PARALLEL)
14878 op = XVECEXP (op, 0, 0);
14879 op = XEXP (op, 1);
14881 early_op = XEXP (op, 0);
14882 /* This is either an actual independent shift, or a shift applied to
14883 the first operand of another operation. We want the whole shift
14884 operation. */
14885 if (GET_CODE (early_op) == REG)
14886 early_op = op;
14888 return !reg_overlap_mentioned_p (value, early_op);
14891 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14892 have an early register shift value dependency on the result of
14893 PRODUCER. */
14895 int
14896 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14898 rtx value = PATTERN (producer);
14899 rtx op = PATTERN (consumer);
14900 rtx early_op;
14902 if (GET_CODE (value) == COND_EXEC)
14903 value = COND_EXEC_CODE (value);
14904 if (GET_CODE (value) == PARALLEL)
14905 value = XVECEXP (value, 0, 0);
14906 value = XEXP (value, 0);
14907 if (GET_CODE (op) == COND_EXEC)
14908 op = COND_EXEC_CODE (op);
14909 if (GET_CODE (op) == PARALLEL)
14910 op = XVECEXP (op, 0, 0);
14911 op = XEXP (op, 1);
14913 early_op = XEXP (op, 0);
14915 /* This is either an actual independent shift, or a shift applied to
14916 the first operand of another operation. We want the value being
14917 shifted, in either case. */
14918 if (GET_CODE (early_op) != REG)
14919 early_op = XEXP (early_op, 0);
14921 return !reg_overlap_mentioned_p (value, early_op);
14924 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14925 have an early register mult dependency on the result of
14926 PRODUCER. */
14928 int
14929 arm_no_early_mul_dep (rtx producer, rtx consumer)
14931 rtx value = PATTERN (producer);
14932 rtx op = PATTERN (consumer);
14934 if (GET_CODE (value) == COND_EXEC)
14935 value = COND_EXEC_CODE (value);
14936 if (GET_CODE (value) == PARALLEL)
14937 value = XVECEXP (value, 0, 0);
14938 value = XEXP (value, 0);
14939 if (GET_CODE (op) == COND_EXEC)
14940 op = COND_EXEC_CODE (op);
14941 if (GET_CODE (op) == PARALLEL)
14942 op = XVECEXP (op, 0, 0);
14943 op = XEXP (op, 1);
14945 return (GET_CODE (op) == PLUS
14946 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14950 /* We can't rely on the caller doing the proper promotion when
14951 using APCS or ATPCS. */
14953 static bool
14954 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14956 return !TARGET_AAPCS_BASED;
14960 /* AAPCS based ABIs use short enums by default. */
14962 static bool
14963 arm_default_short_enums (void)
14965 return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
14969 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14971 static bool
14972 arm_align_anon_bitfield (void)
14974 return TARGET_AAPCS_BASED;
14978 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14980 static tree
14981 arm_cxx_guard_type (void)
14983 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14987 /* The EABI says test the least significant bit of a guard variable. */
14989 static bool
14990 arm_cxx_guard_mask_bit (void)
14992 return TARGET_AAPCS_BASED;
14996 /* The EABI specifies that all array cookies are 8 bytes long. */
14998 static tree
14999 arm_get_cookie_size (tree type)
15001 tree size;
15003 if (!TARGET_AAPCS_BASED)
15004 return default_cxx_get_cookie_size (type);
15006 size = build_int_cst (sizetype, 8);
15007 return size;
15011 /* The EABI says that array cookies should also contain the element size. */
15013 static bool
15014 arm_cookie_has_size (void)
15016 return TARGET_AAPCS_BASED;
15020 /* The EABI says constructors and destructors should return a pointer to
15021 the object constructed/destroyed. */
15023 static bool
15024 arm_cxx_cdtor_returns_this (void)
15026 return TARGET_AAPCS_BASED;
15029 /* The EABI says that an inline function may never be the key
15030 method. */
15032 static bool
15033 arm_cxx_key_method_may_be_inline (void)
15035 return !TARGET_AAPCS_BASED;
15038 static void
15039 arm_cxx_determine_class_data_visibility (tree decl)
15041 if (!TARGET_AAPCS_BASED)
15042 return;
15044 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
15045 is exported. However, on systems without dynamic vague linkage,
15046 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
15047 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
15048 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
15049 else
15050 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
15051 DECL_VISIBILITY_SPECIFIED (decl) = 1;
15054 static bool
15055 arm_cxx_class_data_always_comdat (void)
15057 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
15058 vague linkage if the class has no key function. */
15059 return !TARGET_AAPCS_BASED;
15063 /* The EABI says __aeabi_atexit should be used to register static
15064 destructors. */
15066 static bool
15067 arm_cxx_use_aeabi_atexit (void)
15069 return TARGET_AAPCS_BASED;
15073 void
15074 arm_set_return_address (rtx source, rtx scratch)
15076 arm_stack_offsets *offsets;
15077 HOST_WIDE_INT delta;
15078 rtx addr;
15079 unsigned long saved_regs;
15081 saved_regs = arm_compute_save_reg_mask ();
15083 if ((saved_regs & (1 << LR_REGNUM)) == 0)
15084 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15085 else
15087 if (frame_pointer_needed)
15088 addr = plus_constant (hard_frame_pointer_rtx, -4);
15089 else
15091 /* LR will be the first saved register. */
15092 offsets = arm_get_frame_offsets ();
15093 delta = offsets->outgoing_args - (offsets->frame + 4);
15096 if (delta >= 4096)
15098 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
15099 GEN_INT (delta & ~4095)));
15100 addr = scratch;
15101 delta &= 4095;
15103 else
15104 addr = stack_pointer_rtx;
15106 addr = plus_constant (addr, delta);
15108 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15113 void
15114 thumb_set_return_address (rtx source, rtx scratch)
15116 arm_stack_offsets *offsets;
15117 HOST_WIDE_INT delta;
15118 int reg;
15119 rtx addr;
15120 unsigned long mask;
15122 emit_insn (gen_rtx_USE (VOIDmode, source));
15124 mask = thumb_compute_save_reg_mask ();
15125 if (mask & (1 << LR_REGNUM))
15127 offsets = arm_get_frame_offsets ();
15129 /* Find the saved regs. */
15130 if (frame_pointer_needed)
15132 delta = offsets->soft_frame - offsets->saved_args;
15133 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
15135 else
15137 delta = offsets->outgoing_args - offsets->saved_args;
15138 reg = SP_REGNUM;
15140 /* Allow for the stack frame. */
15141 if (TARGET_BACKTRACE)
15142 delta -= 16;
15143 /* The link register is always the first saved register. */
15144 delta -= 4;
15146 /* Construct the address. */
15147 addr = gen_rtx_REG (SImode, reg);
15148 if ((reg != SP_REGNUM && delta >= 128)
15149 || delta >= 1024)
15151 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
15152 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
15153 addr = scratch;
15155 else
15156 addr = plus_constant (addr, delta);
15158 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15160 else
15161 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15164 /* Implements target hook vector_mode_supported_p. */
15165 bool
15166 arm_vector_mode_supported_p (enum machine_mode mode)
15168 if ((mode == V2SImode)
15169 || (mode == V4HImode)
15170 || (mode == V8QImode))
15171 return true;
15173 return false;
15176 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
15177 ARM insns and therefore guarantee that the shift count is modulo 256.
15178 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
15179 guarantee no particular behavior for out-of-range counts. */
15181 static unsigned HOST_WIDE_INT
15182 arm_shift_truncation_mask (enum machine_mode mode)
15184 return mode == SImode ? 255 : 0;
15188 /* Map internal gcc register numbers to DWARF2 register numbers. */
15190 unsigned int
15191 arm_dbx_register_number (unsigned int regno)
15193 if (regno < 16)
15194 return regno;
15196 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
15197 compatibility. The EABI defines them as registers 96-103. */
15198 if (IS_FPA_REGNUM (regno))
15199 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
15201 if (IS_VFP_REGNUM (regno))
15202 return 64 + regno - FIRST_VFP_REGNUM;
15204 if (IS_IWMMXT_GR_REGNUM (regno))
15205 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
15207 if (IS_IWMMXT_REGNUM (regno))
15208 return 112 + regno - FIRST_IWMMXT_REGNUM;
15210 gcc_unreachable ();
15214 #ifdef TARGET_UNWIND_INFO
15215 /* Emit unwind directives for a store-multiple instruction. This should
15216 only ever be generated by the function prologue code, so we expect it
15217 to have a particular form. */
15219 static void
15220 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
15222 int i;
15223 HOST_WIDE_INT offset;
15224 HOST_WIDE_INT nregs;
15225 int reg_size;
15226 unsigned reg;
15227 unsigned lastreg;
15228 rtx e;
15230 /* First insn will adjust the stack pointer. */
15231 e = XVECEXP (p, 0, 0);
15232 if (GET_CODE (e) != SET
15233 || GET_CODE (XEXP (e, 0)) != REG
15234 || REGNO (XEXP (e, 0)) != SP_REGNUM
15235 || GET_CODE (XEXP (e, 1)) != PLUS)
15236 abort ();
15238 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
15239 nregs = XVECLEN (p, 0) - 1;
15241 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
15242 if (reg < 16)
15244 /* The function prologue may also push pc, but does not annotate it,
15245 as it is never restored. We turn this into a stack pointer adjustment. */
15246 if (nregs * 4 == offset - 4)
15248 fprintf (asm_out_file, "\t.pad #4\n");
15249 offset -= 4;
15251 reg_size = 4;
15253 else if (IS_VFP_REGNUM (reg))
15255 /* VFP register saves via FSTMX use an additional word. */
15256 offset -= 4;
15257 reg_size = 8;
15259 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
15261 /* FPA registers are done differently. */
15262 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
15263 return;
15265 else
15266 /* Unknown register type. */
15267 abort ();
15269 /* If the stack increment doesn't match the size of the saved registers,
15270 something has gone horribly wrong. */
15271 if (offset != nregs * reg_size)
15272 abort ();
15274 fprintf (asm_out_file, "\t.save {");
15276 offset = 0;
15277 lastreg = 0;
15278 /* The remaining insns will describe the stores. */
15279 for (i = 1; i <= nregs; i++)
15281 /* Expect (set (mem <addr>) (reg)).
15282 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
15283 e = XVECEXP (p, 0, i);
15284 if (GET_CODE (e) != SET
15285 || GET_CODE (XEXP (e, 0)) != MEM
15286 || GET_CODE (XEXP (e, 1)) != REG)
15287 abort ();
15289 reg = REGNO (XEXP (e, 1));
15290 if (reg < lastreg)
15291 abort ();
15293 if (i != 1)
15294 fprintf (asm_out_file, ", ");
15295 /* We can't use %r for vfp because we need to use the
15296 double precision register names. */
15297 if (IS_VFP_REGNUM (reg))
15298 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
15299 else
15300 asm_fprintf (asm_out_file, "%r", reg);
15302 #ifdef ENABLE_CHECKING
15303 /* Check that the addresses are consecutive. */
15304 e = XEXP (XEXP (e, 0), 0);
15305 if (GET_CODE (e) == PLUS)
15307 offset += reg_size;
15308 if (GET_CODE (XEXP (e, 0)) != REG
15309 || REGNO (XEXP (e, 0)) != SP_REGNUM
15310 || GET_CODE (XEXP (e, 1)) != CONST_INT
15311 || offset != INTVAL (XEXP (e, 1)))
15312 abort ();
15314 else if (i != 1
15315 || GET_CODE (e) != REG
15316 || REGNO (e) != SP_REGNUM)
15317 abort ();
15318 #endif
15320 fprintf (asm_out_file, "}\n");
15323 /* Emit unwind directives for a SET. */
15325 static void
15326 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
15328 rtx e0;
15329 rtx e1;
15331 e0 = XEXP (p, 0);
15332 e1 = XEXP (p, 1);
15333 switch (GET_CODE (e0))
15335 case MEM:
15336 /* Pushing a single register. */
15337 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
15338 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
15339 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
15340 abort ();
15342 asm_fprintf (asm_out_file, "\t.save ");
15343 if (IS_VFP_REGNUM (REGNO (e1)))
15344 asm_fprintf (asm_out_file, "{d%d}\n",
15345 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
15346 else
15347 asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
15348 break;
15350 case REG:
15351 if (REGNO (e0) == SP_REGNUM)
15353 /* A stack increment. */
15354 if (GET_CODE (e1) != PLUS
15355 || GET_CODE (XEXP (e1, 0)) != REG
15356 || REGNO (XEXP (e1, 0)) != SP_REGNUM
15357 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15358 abort ();
15360 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
15361 -INTVAL (XEXP (e1, 1)));
15363 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
15365 HOST_WIDE_INT offset;
15366 unsigned reg;
15368 if (GET_CODE (e1) == PLUS)
15370 if (GET_CODE (XEXP (e1, 0)) != REG
15371 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15372 abort ();
15373 reg = REGNO (XEXP (e1, 0));
15374 offset = INTVAL (XEXP (e1, 1));
15375 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
15376 HARD_FRAME_POINTER_REGNUM, reg,
15377 offset);
15379 else if (GET_CODE (e1) == REG)
15381 reg = REGNO (e1);
15382 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
15383 HARD_FRAME_POINTER_REGNUM, reg);
15385 else
15386 abort ();
15388 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
15390 /* Move from sp to reg. */
15391 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
15393 else
15394 abort ();
15395 break;
15397 default:
15398 abort ();
15403 /* Emit unwind directives for the given insn. */
15405 static void
15406 arm_unwind_emit (FILE * asm_out_file, rtx insn)
15408 rtx pat;
15410 if (!ARM_EABI_UNWIND_TABLES)
15411 return;
15413 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
15414 return;
15416 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
15417 if (pat)
15418 pat = XEXP (pat, 0);
15419 else
15420 pat = PATTERN (insn);
15422 switch (GET_CODE (pat))
15424 case SET:
15425 arm_unwind_emit_set (asm_out_file, pat);
15426 break;
15428 case SEQUENCE:
15429 /* Store multiple. */
15430 arm_unwind_emit_stm (asm_out_file, pat);
15431 break;
15433 default:
15434 abort ();
15439 /* Output a reference from a function exception table to the type_info
15440 object X. The EABI specifies that the symbol should be relocated by
15441 an R_ARM_TARGET2 relocation. */
15443 static bool
15444 arm_output_ttype (rtx x)
15446 fputs ("\t.word\t", asm_out_file);
15447 output_addr_const (asm_out_file, x);
15448 /* Use special relocations for symbol references. */
15449 if (GET_CODE (x) != CONST_INT)
15450 fputs ("(TARGET2)", asm_out_file);
15451 fputc ('\n', asm_out_file);
15453 return TRUE;
15455 #endif /* TARGET_UNWIND_INFO */
15458 /* Output unwind directives for the start/end of a function. */
15460 void
15461 arm_output_fn_unwind (FILE * f, bool prologue)
15463 if (!ARM_EABI_UNWIND_TABLES)
15464 return;
15466 if (prologue)
15467 fputs ("\t.fnstart\n", f);
15468 else
15469 fputs ("\t.fnend\n", f);
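15471 /* Output the operand of an UNSPEC_TLS, decorated with its relocation. */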
15472 static bool
15473 arm_emit_tls_decoration (FILE *fp, rtx x)
15475 enum tls_reloc reloc;
15476 rtx val;
15478 val = XVECEXP (x, 0, 0);
15479 reloc = INTVAL (XVECEXP (x, 0, 1));
15481 output_addr_const (fp, val);
15483 switch (reloc)
15485 case TLS_GD32:
15486 fputs ("(tlsgd)", fp);
15487 break;
15488 case TLS_LDM32:
15489 fputs ("(tlsldm)", fp);
15490 break;
15491 case TLS_LDO32:
15492 fputs ("(tlsldo)", fp);
15493 break;
15494 case TLS_IE32:
15495 fputs ("(gottpoff)", fp);
15496 break;
15497 case TLS_LE32:
15498 fputs ("(tpoff)", fp);
15499 break;
15500 default:
15501 gcc_unreachable ();
15504 switch (reloc)
15506 case TLS_GD32:
15507 case TLS_LDM32:
15508 case TLS_IE32:
15509 fputs (" + (. - ", fp);
15510 output_addr_const (fp, XVECEXP (x, 0, 2));
15511 fputs (" - ", fp);
15512 output_addr_const (fp, XVECEXP (x, 0, 3));
15513 fputc (')', fp);
15514 break;
15515 default:
15516 break;
15519 return TRUE;
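15520 /* Output address constants that need special handling: TLS unspecs,
15521 PIC labels and vector constants. */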
15522 bool
15523 arm_output_addr_const_extra (FILE *fp, rtx x)
15525 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
15526 return arm_emit_tls_decoration (fp, x);
15527 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
15529 char label[256];
15530 int labelno = INTVAL (XVECEXP (x, 0, 0));
15532 ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
15533 assemble_name_raw (fp, label);
15535 return TRUE;
15537 else if (GET_CODE (x) == CONST_VECTOR)
15538 return arm_emit_vector_const (fp, x);
15540 return FALSE;
15543 #include "gt-arm.h"