/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];
/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static rtx emit_set_insn (rtx, rtx);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
static void aof_asm_init_sections (void);
#endif
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
static bool arm_cannot_copy_insn_p (rtx);
static bool arm_tls_symbol_p (rtx x);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef TARGET_NARROW_VOLATILE_BITFIELD
#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false

#undef TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE *asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Which thread pointer model to use.  */
enum arm_tp_type target_thread_pointer = TP_AUTO;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;
/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus.  */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply.  */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support.  */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support.  */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4.  */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5.  */
#define FL_THUMB      (1 << 6)        /* Thumb aware.  */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary.  */
#define FL_STRONG     (1 << 8)        /* StrongARM.  */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5.  */
#define FL_XSCALE     (1 << 10)       /* XScale.  */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */
#define FL_ARCH6K     (1 << 15)       /* Architecture rel 6 K extensions.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   (FL_FOR_ARCH6 | FL_ARCH6K)
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6K
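
/* For illustration (editor's example, not in the original source): each
   FL_FOR_ARCHn value is the union of the previous architecture's flags
   and the new feature bits, so expanding the definitions above gives,
   for example,

     FL_FOR_ARCH5TE == (FL_MODE32 | FL_ARCH3M | FL_ARCH4
                        | FL_ARCH5 | FL_ARCH5E | FL_THUMB)

   which is why a v5TE core automatically satisfies any test for a v4
   or v5 capability.  */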
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip supports the ARM 6K extensions.  */
int arm_arch6k = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
unsigned arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)
/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,        "-mcpu=",        all_cores },
  { NULL,        "-march=",       all_architectures },
  { NULL,        "-mtune=",       all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2
/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";
struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",      FPUTYPE_FPA},
  {"fpe2",     FPUTYPE_FPA_EMU2},
  {"fpe3",     FPUTYPE_FPA_EMU3},
  {"maverick", FPUTYPE_MAVERICK},
  {"vfp",      FPUTYPE_VFP}
};
/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",   ARM_FLOAT_ABI_SOFT},
  {"softfp", ARM_FLOAT_ABI_SOFTFP},
  {"hard",   ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",    ARM_ABI_APCS},
  {"atpcs",       ARM_ABI_ATPCS},
  {"aapcs",       ARM_ABI_AAPCS},
  {"iwmmxt",      ARM_ABI_IWMMXT},
  {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
};

/* Supported TLS relocations.  */

enum tls_reloc {
  TLS_GD32,
  TLS_LDM32,
  TLS_LDO32,
  TLS_IE32,
  TLS_LE32
};
/* Emit an insn that's a simple single-set.  Both the operands must be known
   to be valid.  */
inline static rtx
emit_set_insn (rtx x, rtx y)
{
  return emit_insn (gen_rtx_SET (VOIDmode, x, y));
}
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
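
/* A worked example of the bit-clearing trick above (illustrative, not
   part of the original source): bit_count (0x29) sees the value shrink
   0x29 -> 0x28 -> 0x20 -> 0, one iteration per set bit, and so
   returns 3.  */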
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  /* For SImode division the ABI provides div-without-mod routines,
     which are faster.  */
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
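
/* For illustration (editor's example, not in the original source):
   after this setup a BPABI target compiles

     int quot (int a, int b) { return a / b; }

   into a call to __aeabi_idiv, while a use of the % operator goes
   through __aeabi_idivmod, as registered above.  */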
/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}
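
/* For illustration (editor's example, not in the original source):
   given "-march=armv5te -mtune=xscale" on the command line, this hook
   runs twice and leaves arm_select[1].string == "armv5te" and
   arm_select[2].string == "xscale" for arm_override_options to
   process.  */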
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;
  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switch that requires certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              gcc_assert (best_fit);
              sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }
  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking");
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic && TARGET_SINGLE_PIC_BASE)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");
  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");
  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified an FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
         if ((insn_flags & FL_VFP) != 0)
           arm_fpu_arch = FPUTYPE_VFP;
         else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }
  if (target_float_abi_name != NULL)
    {
      /* The user specified an FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* FPA and iWMMXt are incompatible because the insn encodings overlap.
     VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
     will ever exist.  GCC makes no attempt to support this combination.  */
  if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
    sorry ("iWMMXt and hardware floating point");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;
  if (target_thread_switch)
    {
      if (strcmp (target_thread_switch, "soft") == 0)
        target_thread_pointer = TP_SOFT;
      else if (strcmp (target_thread_switch, "auto") == 0)
        target_thread_pointer = TP_AUTO;
      else if (strcmp (target_thread_switch, "cp15") == 0)
        target_thread_pointer = TP_CP15;
      else
        error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
    }

  /* Use the cp15 method if it is available.  */
  if (target_thread_pointer == TP_AUTO)
    {
      if (arm_arch6k && !TARGET_THUMB)
        target_thread_pointer = TP_CP15;
      else
        target_thread_pointer = TP_SOFT;
    }

  if (TARGET_HARD_TP && TARGET_THUMB)
    error ("can not use -mtp=cp15 with -mthumb");
  /* Override the default structure alignment for AAPCS ABI.  */
  if (TARGET_AAPCS_BASED)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }
  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
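
/* Usage sketch (illustrative; `irq_handler' is a hypothetical function
   name, not from the original source): a declaration such as

     void irq_handler (void) __attribute__ ((interrupt ("IRQ")));

   is matched against the table above by arm_isr_value below, yielding
   ARM_FT_ISR.  */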
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && (TREE_NOTHROW (current_function_decl)
          || !(flag_unwind_tables
               || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return ().  */
      || current_function_calls_eh_return
      /* Or if the function calls alloca.  */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ...  */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic
          && arm_pic_register != INVALID_REGNUM
          && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
           && ((i & ~0xc000003f) == 0
               || (i & ~0xf000000f) == 0
               || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}
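
/* For illustration (editor's note, not from the original source): an
   ARM data-processing immediate is an 8-bit value rotated right by an
   even amount, so 0x000000ff, 0x00ff0000 and 0xff000000 are all valid
   here, while 0x00ff00ff and 0x00000101 are not and must be
   synthesized from several operations by arm_gen_constant below.  */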
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */
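
/* Illustrative example (editor's note, not part of the original
   source): a SET of 0x00ff00ff cannot be encoded as a single
   immediate, so it is split into two insns, e.g.

     mov  rd, #0x000000ff
     orr  rd, rd, #0x00ff0000

   both of whose immediates satisfy const_ok_for_arm above.  */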
1662 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1663 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1665 rtx cond;
1667 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1668 cond = COND_EXEC_TEST (PATTERN (insn));
1669 else
1670 cond = NULL_RTX;
1672 if (subtargets || code == SET
1673 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1674 && REGNO (target) != REGNO (source)))
1676 /* After arm_reorg has been called, we can't fix up expensive
1677 constants by pushing them into memory so we must synthesize
1678 them in-line, regardless of the cost. This is only likely to
1679 be more costly on chips that have load delay slots and we are
1680 compiling without running the scheduler (so no splitting
1681 occurred before the final instruction emission).
1683 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1685 if (!after_arm_reorg
1686 && !cond
1687 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1688 1, 0)
1689 > arm_constant_limit + (code != SET)))
1691 if (code == SET)
1693 /* Currently SET is the only monadic value for CODE; all
1694 the rest are dyadic. */
1695 emit_set_insn (target, GEN_INT (val));
1696 return 1;
1698 else
1700 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1702 emit_set_insn (temp, GEN_INT (val));
1703 /* For MINUS, the value is the minuend (we compute VAL - SOURCE),
1704 since we never have subtraction of a constant. */
1705 if (code == MINUS)
1706 emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
1707 else
1708 emit_set_insn (target,
1709 gen_rtx_fmt_ee (code, mode, source, temp));
1710 return 2;
1715 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1716 1);
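/* Example of the in-line synthesis this can fall back to (editor's
   sketch; register names are placeholders): 0x00ff00ff is not a
   valid immediate, but it splits into two that are, so a SET becomes

       mov rD, #0x000000ff
       orr rD, rD, #0x00ff0000   @ rD = 0x00ff00ff  */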
1719 static int
1720 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1722 HOST_WIDE_INT temp1;
1723 int num_insns = 0;
1724 do
1725 {
1726 int end;
1728 if (i <= 0)
1729 i += 32;
1730 if (remainder & (3 << (i - 2)))
1732 end = i - 8;
1733 if (end < 0)
1734 end += 32;
1735 temp1 = remainder & ((0x0ff << end)
1736 | ((i < end) ? (0xff >> (32 - end)) : 0));
1737 remainder &= ~temp1;
1738 num_insns++;
1739 i -= 6;
1741 i -= 2;
1742 } while (remainder);
1743 return num_insns;
1746 /* Emit an instruction with the indicated PATTERN. If COND is
1747 non-NULL, conditionalize the execution of the instruction on COND
1748 being true. */
1750 static void
1751 emit_constant_insn (rtx cond, rtx pattern)
1753 if (cond)
1754 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1755 emit_insn (pattern);
1758 /* As above, but with an extra parameter GENERATE which, if clear, suppresses
1759 RTL generation. */
1761 static int
1762 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1763 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1764 int generate)
1766 int can_invert = 0;
1767 int can_negate = 0;
1768 int can_negate_initial = 0;
1769 int can_shift = 0;
1770 int i;
1771 int num_bits_set = 0;
1772 int set_sign_bit_copies = 0;
1773 int clear_sign_bit_copies = 0;
1774 int clear_zero_bit_copies = 0;
1775 int set_zero_bit_copies = 0;
1776 int insns = 0;
1777 unsigned HOST_WIDE_INT temp1, temp2;
1778 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1780 /* Find out which operations are safe for a given CODE. Also do a quick
1781 check for degenerate cases; these can occur when DImode operations
1782 are split. */
1783 switch (code)
1785 case SET:
1786 can_invert = 1;
1787 can_shift = 1;
1788 can_negate = 1;
1789 break;
1791 case PLUS:
1792 can_negate = 1;
1793 can_negate_initial = 1;
1794 break;
1796 case IOR:
1797 if (remainder == 0xffffffff)
1799 if (generate)
1800 emit_constant_insn (cond,
1801 gen_rtx_SET (VOIDmode, target,
1802 GEN_INT (ARM_SIGN_EXTEND (val))));
1803 return 1;
1805 if (remainder == 0)
1807 if (reload_completed && rtx_equal_p (target, source))
1808 return 0;
1809 if (generate)
1810 emit_constant_insn (cond,
1811 gen_rtx_SET (VOIDmode, target, source));
1812 return 1;
1814 break;
1816 case AND:
1817 if (remainder == 0)
1819 if (generate)
1820 emit_constant_insn (cond,
1821 gen_rtx_SET (VOIDmode, target, const0_rtx));
1822 return 1;
1824 if (remainder == 0xffffffff)
1826 if (reload_completed && rtx_equal_p (target, source))
1827 return 0;
1828 if (generate)
1829 emit_constant_insn (cond,
1830 gen_rtx_SET (VOIDmode, target, source));
1831 return 1;
1833 can_invert = 1;
1834 break;
1836 case XOR:
1837 if (remainder == 0)
1839 if (reload_completed && rtx_equal_p (target, source))
1840 return 0;
1841 if (generate)
1842 emit_constant_insn (cond,
1843 gen_rtx_SET (VOIDmode, target, source));
1844 return 1;
1847 /* We don't know how to handle other cases yet. */
1848 gcc_assert (remainder == 0xffffffff);
1850 if (generate)
1851 emit_constant_insn (cond,
1852 gen_rtx_SET (VOIDmode, target,
1853 gen_rtx_NOT (mode, source)));
1854 return 1;
1856 case MINUS:
1857 /* We treat MINUS as (val - source), since (source - val) is always
1858 passed as (source + (-val)). */
1859 if (remainder == 0)
1861 if (generate)
1862 emit_constant_insn (cond,
1863 gen_rtx_SET (VOIDmode, target,
1864 gen_rtx_NEG (mode, source)));
1865 return 1;
1867 if (const_ok_for_arm (val))
1869 if (generate)
1870 emit_constant_insn (cond,
1871 gen_rtx_SET (VOIDmode, target,
1872 gen_rtx_MINUS (mode, GEN_INT (val),
1873 source)));
1874 return 1;
1876 can_negate = 1;
1878 break;
1880 default:
1881 gcc_unreachable ();
1884 /* If we can do it in one insn get out quickly. */
1885 if (const_ok_for_arm (val)
1886 || (can_negate_initial && const_ok_for_arm (-val))
1887 || (can_invert && const_ok_for_arm (~val)))
1889 if (generate)
1890 emit_constant_insn (cond,
1891 gen_rtx_SET (VOIDmode, target,
1892 (source
1893 ? gen_rtx_fmt_ee (code, mode, source,
1894 GEN_INT (val))
1895 : GEN_INT (val))));
1896 return 1;
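/* For instance (editor's sketch): a SET of 0xffffff00 fails
   const_ok_for_arm, but its complement 0xff passes the can_invert
   test above, so the single emitted insn amounts to MVN rD, #0xff.  */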
1899 /* Calculate a few attributes that may be useful for specific
1900 optimizations. */
1901 for (i = 31; i >= 0; i--)
1903 if ((remainder & (1 << i)) == 0)
1904 clear_sign_bit_copies++;
1905 else
1906 break;
1909 for (i = 31; i >= 0; i--)
1911 if ((remainder & (1 << i)) != 0)
1912 set_sign_bit_copies++;
1913 else
1914 break;
1917 for (i = 0; i <= 31; i++)
1919 if ((remainder & (1 << i)) == 0)
1920 clear_zero_bit_copies++;
1921 else
1922 break;
1925 for (i = 0; i <= 31; i++)
1927 if ((remainder & (1 << i)) != 0)
1928 set_zero_bit_copies++;
1929 else
1930 break;
1933 switch (code)
1935 case SET:
1936 /* See if we can do this by sign_extending a constant that is known
1937 to be negative. This is a good way of doing it, since the shift
1938 may well merge into a subsequent insn. */
1939 if (set_sign_bit_copies > 1)
1941 if (const_ok_for_arm
1942 (temp1 = ARM_SIGN_EXTEND (remainder
1943 << (set_sign_bit_copies - 1))))
1945 if (generate)
1947 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1948 emit_constant_insn (cond,
1949 gen_rtx_SET (VOIDmode, new_src,
1950 GEN_INT (temp1)));
1951 emit_constant_insn (cond,
1952 gen_ashrsi3 (target, new_src,
1953 GEN_INT (set_sign_bit_copies - 1)));
1955 return 2;
1957 /* For an inverted constant, we will need to set the low bits,
1958 these will be shifted out of harm's way. */
1959 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1960 if (const_ok_for_arm (~temp1))
1962 if (generate)
1964 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1965 emit_constant_insn (cond,
1966 gen_rtx_SET (VOIDmode, new_src,
1967 GEN_INT (temp1)));
1968 emit_constant_insn (cond,
1969 gen_ashrsi3 (target, new_src,
1970 GEN_INT (set_sign_bit_copies - 1)));
1972 return 2;
1976 /* See if we can calculate the value as the difference between two
1977 valid immediates. */
1978 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1980 int topshift = clear_sign_bit_copies & ~1;
1982 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1983 & (0xff000000 >> topshift));
1985 /* If temp1 is zero, then that means the 9 most significant
1986 bits of remainder were 1 and we've caused it to overflow.
1987 When topshift is 0 we don't need to do anything since we
1988 can borrow from 'bit 32'. */
1989 if (temp1 == 0 && topshift != 0)
1990 temp1 = 0x80000000 >> (topshift - 1);
1992 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
1994 if (const_ok_for_arm (temp2))
1996 if (generate)
1998 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1999 emit_constant_insn (cond,
2000 gen_rtx_SET (VOIDmode, new_src,
2001 GEN_INT (temp1)));
2002 emit_constant_insn (cond,
2003 gen_addsi3 (target, new_src,
2004 GEN_INT (-temp2)));
2007 return 2;
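/* Worked example (editor's sketch): remainder = 0x00000fff gives
   topshift = 20; temp1 comes out as 0x1000 (via the overflow fixup
   above) and temp2 = 1, both valid immediates, so two insns suffice:

       mov rD, #0x1000
       sub rD, rD, #1    @ rD = 0x00000fff  */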
2011 /* See if we can generate this by setting the bottom (or the top)
2012 16 bits, and then shifting these into the other half of the
2013 word. We only look for the simplest cases, to do more would cost
2014 too much. Be careful, however, not to generate this when the
2015 alternative would take fewer insns. */
2016 if (val & 0xffff0000)
2018 temp1 = remainder & 0xffff0000;
2019 temp2 = remainder & 0x0000ffff;
2021 /* Overlaps outside this range are best done using other methods. */
2022 for (i = 9; i < 24; i++)
2024 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2025 && !const_ok_for_arm (temp2))
2027 rtx new_src = (subtargets
2028 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2029 : target);
2030 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2031 source, subtargets, generate);
2032 source = new_src;
2033 if (generate)
2034 emit_constant_insn
2035 (cond,
2036 gen_rtx_SET
2037 (VOIDmode, target,
2038 gen_rtx_IOR (mode,
2039 gen_rtx_ASHIFT (mode, source,
2040 GEN_INT (i)),
2041 source)));
2042 return insns + 1;
2046 /* Don't duplicate cases already considered. */
2047 for (i = 17; i < 24; i++)
2049 if (((temp1 | (temp1 >> i)) == remainder)
2050 && !const_ok_for_arm (temp1))
2052 rtx new_src = (subtargets
2053 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2054 : target);
2055 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
2056 source, subtargets, generate);
2057 source = new_src;
2058 if (generate)
2059 emit_constant_insn
2060 (cond,
2061 gen_rtx_SET (VOIDmode, target,
2062 gen_rtx_IOR
2063 (mode,
2064 gen_rtx_LSHIFTRT (mode, source,
2065 GEN_INT (i)),
2066 source)));
2067 return insns + 1;
2071 break;
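/* Example of the halves trick (editor's sketch): 0x01210121 repeats
   the invalid immediate 0x0121 in both halves, so it can be built
   roughly as

       mov rD, #0x21
       orr rD, rD, #0x100          @ rD = 0x00000121
       orr rD, rD, rD, lsl #16     @ rD = 0x01210121

   three insns instead of the four a chunk-by-chunk build would need.  */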
2073 case IOR:
2074 case XOR:
2075 /* If we have IOR or XOR, and the constant can be loaded in a
2076 single instruction, and we can find a temporary to put it in,
2077 then this can be done in two instructions instead of 3-4. */
2078 if (subtargets
2079 /* TARGET can't be NULL if SUBTARGETS is 0. */
2080 || (reload_completed && !reg_mentioned_p (target, source)))
2082 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2084 if (generate)
2086 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2088 emit_constant_insn (cond,
2089 gen_rtx_SET (VOIDmode, sub,
2090 GEN_INT (val)));
2091 emit_constant_insn (cond,
2092 gen_rtx_SET (VOIDmode, target,
2093 gen_rtx_fmt_ee (code, mode,
2094 source, sub)));
2096 return 2;
2100 if (code == XOR)
2101 break;
2103 if (set_sign_bit_copies > 8
2104 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2106 if (generate)
2108 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2109 rtx shift = GEN_INT (set_sign_bit_copies);
2111 emit_constant_insn
2112 (cond,
2113 gen_rtx_SET (VOIDmode, sub,
2114 gen_rtx_NOT (mode,
2115 gen_rtx_ASHIFT (mode,
2116 source,
2117 shift))));
2118 emit_constant_insn
2119 (cond,
2120 gen_rtx_SET (VOIDmode, target,
2121 gen_rtx_NOT (mode,
2122 gen_rtx_LSHIFTRT (mode, sub,
2123 shift))));
2125 return 2;
2128 if (set_zero_bit_copies > 8
2129 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2131 if (generate)
2133 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2134 rtx shift = GEN_INT (set_zero_bit_copies);
2136 emit_constant_insn
2137 (cond,
2138 gen_rtx_SET (VOIDmode, sub,
2139 gen_rtx_NOT (mode,
2140 gen_rtx_LSHIFTRT (mode,
2141 source,
2142 shift))));
2143 emit_constant_insn
2144 (cond,
2145 gen_rtx_SET (VOIDmode, target,
2146 gen_rtx_NOT (mode,
2147 gen_rtx_ASHIFT (mode, sub,
2148 shift))));
2150 return 2;
2153 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2155 if (generate)
2157 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2158 emit_constant_insn (cond,
2159 gen_rtx_SET (VOIDmode, sub,
2160 gen_rtx_NOT (mode, source)));
2161 source = sub;
2162 if (subtargets)
2163 sub = gen_reg_rtx (mode);
2164 emit_constant_insn (cond,
2165 gen_rtx_SET (VOIDmode, sub,
2166 gen_rtx_AND (mode, source,
2167 GEN_INT (temp1))));
2168 emit_constant_insn (cond,
2169 gen_rtx_SET (VOIDmode, target,
2170 gen_rtx_NOT (mode, sub)));
2172 return 3;
2174 break;
2176 case AND:
2177 /* See if two shifts will do 2 or more insn's worth of work. */
2178 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2180 HOST_WIDE_INT shift_mask = ((0xffffffff
2181 << (32 - clear_sign_bit_copies))
2182 & 0xffffffff);
2184 if ((remainder | shift_mask) != 0xffffffff)
2186 if (generate)
2188 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2189 insns = arm_gen_constant (AND, mode, cond,
2190 remainder | shift_mask,
2191 new_src, source, subtargets, 1);
2192 source = new_src;
2194 else
2196 rtx targ = subtargets ? NULL_RTX : target;
2197 insns = arm_gen_constant (AND, mode, cond,
2198 remainder | shift_mask,
2199 targ, source, subtargets, 0);
2203 if (generate)
2205 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2206 rtx shift = GEN_INT (clear_sign_bit_copies);
2208 emit_insn (gen_ashlsi3 (new_src, source, shift));
2209 emit_insn (gen_lshrsi3 (target, new_src, shift));
2212 return insns + 2;
2215 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2217 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2219 if ((remainder | shift_mask) != 0xffffffff)
2221 if (generate)
2223 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2225 insns = arm_gen_constant (AND, mode, cond,
2226 remainder | shift_mask,
2227 new_src, source, subtargets, 1);
2228 source = new_src;
2230 else
2232 rtx targ = subtargets ? NULL_RTX : target;
2234 insns = arm_gen_constant (AND, mode, cond,
2235 remainder | shift_mask,
2236 targ, source, subtargets, 0);
2240 if (generate)
2242 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2243 rtx shift = GEN_INT (clear_zero_bit_copies);
2245 emit_insn (gen_lshrsi3 (new_src, source, shift));
2246 emit_insn (gen_ashlsi3 (target, new_src, shift));
2249 return insns + 2;
2252 break;
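/* Example of the two-shift trick (editor's sketch): for x & 0x0000ffff
   (sixteen clear sign-bit copies) no mask constant is needed at all:

       mov rD, rN, lsl #16
       mov rD, rD, lsr #16   @ clears bits 31..16  */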
2254 default:
2255 break;
2258 for (i = 0; i < 32; i++)
2259 if (remainder & (1 << i))
2260 num_bits_set++;
2262 if (code == AND || (can_invert && num_bits_set > 16))
2263 remainder = (~remainder) & 0xffffffff;
2264 else if (code == PLUS && num_bits_set > 16)
2265 remainder = (-remainder) & 0xffffffff;
2266 else
2268 can_invert = 0;
2269 can_negate = 0;
2272 /* Now try to find a way of doing the job in either two or three
2273 instructions.
2274 We start by looking for the largest block of zeros that are aligned on
2275 a 2-bit boundary; we then fill up the temps, wrapping around to the
2276 top of the word when we drop off the bottom.
2277 In the worst case this code should produce no more than four insns. */
2279 int best_start = 0;
2280 int best_consecutive_zeros = 0;
2282 for (i = 0; i < 32; i += 2)
2284 int consecutive_zeros = 0;
2286 if (!(remainder & (3 << i)))
2288 while ((i < 32) && !(remainder & (3 << i)))
2290 consecutive_zeros += 2;
2291 i += 2;
2293 if (consecutive_zeros > best_consecutive_zeros)
2295 best_consecutive_zeros = consecutive_zeros;
2296 best_start = i - consecutive_zeros;
2298 i -= 2;
2302 /* So long as it won't require any more insns to do so, it's
2303 desirable to emit a small constant (in bits 0...9) in the last
2304 insn. This way there is more chance that it can be combined with
2305 a later addressing insn to form a pre-indexed load or store
2306 operation. Consider:
2308 *((volatile int *)0xe0000100) = 1;
2309 *((volatile int *)0xe0000110) = 2;
2311 We want this to wind up as:
2313 mov rA, #0xe0000000
2314 mov rB, #1
2315 str rB, [rA, #0x100]
2316 mov rB, #2
2317 str rB, [rA, #0x110]
2319 rather than having to synthesize both large constants from scratch.
2321 Therefore, we calculate how many insns would be required to emit
2322 the constant starting from `best_start', and also starting from
2323 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2324 yield a shorter sequence, we may as well use zero. */
2325 if (best_start != 0
2326 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2327 && (count_insns_for_constant (remainder, 0) <=
2328 count_insns_for_constant (remainder, best_start)))
2329 best_start = 0;
2331 /* Now start emitting the insns. */
2332 i = best_start;
2333 do
2334 {
2335 int end;
2337 if (i <= 0)
2338 i += 32;
2339 if (remainder & (3 << (i - 2)))
2341 end = i - 8;
2342 if (end < 0)
2343 end += 32;
2344 temp1 = remainder & ((0x0ff << end)
2345 | ((i < end) ? (0xff >> (32 - end)) : 0));
2346 remainder &= ~temp1;
2348 if (generate)
2350 rtx new_src, temp1_rtx;
2352 if (code == SET || code == MINUS)
2354 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2355 if (can_invert && code != MINUS)
2356 temp1 = ~temp1;
2358 else
2360 if (remainder && subtargets)
2361 new_src = gen_reg_rtx (mode);
2362 else
2363 new_src = target;
2364 if (can_invert)
2365 temp1 = ~temp1;
2366 else if (can_negate)
2367 temp1 = -temp1;
2370 temp1 = trunc_int_for_mode (temp1, mode);
2371 temp1_rtx = GEN_INT (temp1);
2373 if (code == SET)
2374 ;
2375 else if (code == MINUS)
2376 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2377 else
2378 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2380 emit_constant_insn (cond,
2381 gen_rtx_SET (VOIDmode, new_src,
2382 temp1_rtx));
2383 source = new_src;
2386 if (code == SET)
2388 can_invert = 0;
2389 code = PLUS;
2391 else if (code == MINUS)
2392 code = PLUS;
2394 insns++;
2395 i -= 6;
2397 i -= 2;
2398 }
2399 while (remainder);
2402 return insns;
2405 /* Canonicalize a comparison so that we are more likely to recognize it.
2406 This can be done for a few constant compares, where we can make the
2407 immediate value easier to load. */
2409 enum rtx_code
2410 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2411 rtx * op1)
2413 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2414 unsigned HOST_WIDE_INT maxval;
2415 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2417 switch (code)
2419 case EQ:
2420 case NE:
2421 return code;
2423 case GT:
2424 case LE:
2425 if (i != maxval
2426 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2428 *op1 = GEN_INT (i + 1);
2429 return code == GT ? GE : LT;
2431 break;
2433 case GE:
2434 case LT:
2435 if (i != ~maxval
2436 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2438 *op1 = GEN_INT (i - 1);
2439 return code == GE ? GT : LE;
2441 break;
2443 case GTU:
2444 case LEU:
2445 if (i != ~((unsigned HOST_WIDE_INT) 0)
2446 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2448 *op1 = GEN_INT (i + 1);
2449 return code == GTU ? GEU : LTU;
2451 break;
2453 case GEU:
2454 case LTU:
2455 if (i != 0
2456 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2458 *op1 = GEN_INT (i - 1);
2459 return code == GEU ? GTU : LEU;
2461 break;
2463 default:
2464 gcc_unreachable ();
2467 return code;
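/* Worked example (editor's sketch): 4095 is not a valid ARM immediate
   but 4096 is, so a comparison (x > 4095) is canonicalized here to
   (x >= 4096), i.e. GT becomes GE with *op1 = 4096, and the compare
   then fits a single CMP.  */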
2471 /* Define how to find the value returned by a function. */
2473 rtx
2474 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2476 enum machine_mode mode;
2477 int unsignedp ATTRIBUTE_UNUSED;
2478 rtx r ATTRIBUTE_UNUSED;
2480 mode = TYPE_MODE (type);
2481 /* Promote integer types. */
2482 if (INTEGRAL_TYPE_P (type))
2483 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2485 /* Promote small structs returned in a register to full-word size
2486 for big-endian AAPCS. */
2487 if (arm_return_in_msb (type))
2489 HOST_WIDE_INT size = int_size_in_bytes (type);
2490 if (size % UNITS_PER_WORD != 0)
2492 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2493 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2497 return LIBCALL_VALUE(mode);
2500 /* Determine the amount of memory needed to store the possible return
2501 registers of an untyped call. */
2502 int
2503 arm_apply_result_size (void)
2505 int size = 16;
2507 if (TARGET_ARM)
2509 if (TARGET_HARD_FLOAT_ABI)
2511 if (TARGET_FPA)
2512 size += 12;
2513 if (TARGET_MAVERICK)
2514 size += 8;
2516 if (TARGET_IWMMXT_ABI)
2517 size += 8;
2520 return size;
2523 /* Decide whether a type should be returned in memory (true)
2524 or in a register (false). This is called by the macro
2525 RETURN_IN_MEMORY. */
2526 int
2527 arm_return_in_memory (tree type)
2529 HOST_WIDE_INT size;
2531 if (!AGGREGATE_TYPE_P (type) &&
2532 (TREE_CODE (type) != VECTOR_TYPE) &&
2533 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2534 /* All simple types are returned in registers.
2535 For AAPCS, complex types are treated the same as aggregates. */
2536 return 0;
2538 size = int_size_in_bytes (type);
2540 if (arm_abi != ARM_ABI_APCS)
2542 /* ATPCS and later return aggregate types in memory only if they are
2543 larger than a word (or are variable size). */
2544 return (size < 0 || size > UNITS_PER_WORD);
2547 /* To maximize backwards compatibility with previous versions of gcc,
2548 return vectors up to 4 words in registers. */
2549 if (TREE_CODE (type) == VECTOR_TYPE)
2550 return (size < 0 || size > (4 * UNITS_PER_WORD));
2552 /* For the arm-wince targets we choose to be compatible with Microsoft's
2553 ARM and Thumb compilers, which always return aggregates in memory. */
2554 #ifndef ARM_WINCE
2555 /* All structures/unions bigger than one word are returned in memory.
2556 Also catch the case where int_size_in_bytes returns -1. In this case
2557 the aggregate is either huge or of variable size, and in either case
2558 we will want to return it via memory and not in a register. */
2559 if (size < 0 || size > UNITS_PER_WORD)
2560 return 1;
2562 if (TREE_CODE (type) == RECORD_TYPE)
2564 tree field;
2566 /* For a struct the APCS says that we only return in a register
2567 if the type is 'integer like' and every addressable element
2568 has an offset of zero. For practical purposes this means
2569 that the structure can have at most one non bit-field element
2570 and that this element must be the first one in the structure. */
2572 /* Find the first field, ignoring non FIELD_DECL things which will
2573 have been created by C++. */
2574 for (field = TYPE_FIELDS (type);
2575 field && TREE_CODE (field) != FIELD_DECL;
2576 field = TREE_CHAIN (field))
2577 continue;
2579 if (field == NULL)
2580 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2582 /* Check that the first field is valid for returning in a register. */
2584 /* ... Floats are not allowed */
2585 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2586 return 1;
2588 /* ... Aggregates that are not themselves valid for returning in
2589 a register are not allowed. */
2590 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2591 return 1;
2593 /* Now check the remaining fields, if any. Only bitfields are allowed,
2594 since they are not addressable. */
2595 for (field = TREE_CHAIN (field);
2596 field;
2597 field = TREE_CHAIN (field))
2599 if (TREE_CODE (field) != FIELD_DECL)
2600 continue;
2602 if (!DECL_BIT_FIELD_TYPE (field))
2603 return 1;
2606 return 0;
2609 if (TREE_CODE (type) == UNION_TYPE)
2611 tree field;
2613 /* Unions can be returned in registers if every element is
2614 integral, or can be returned in an integer register. */
2615 for (field = TYPE_FIELDS (type);
2616 field;
2617 field = TREE_CHAIN (field))
2619 if (TREE_CODE (field) != FIELD_DECL)
2620 continue;
2622 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2623 return 1;
2625 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2626 return 1;
2629 return 0;
2631 #endif /* not ARM_WINCE */
2633 /* Return all other types in memory. */
2634 return 1;
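/* Illustrative outcomes of the APCS rules above (editor's sketch,
   hypothetical types):

       struct { int i; }                    register (integer-like)
       struct { float f; }                  memory   (first field is a float)
       struct { short a; short b; }         memory   (b is addressable)
       struct { unsigned a : 16, b : 16; }  register (trailing bitfields)  */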
2637 /* Indicate whether or not words of a double are in big-endian order. */
2639 int
2640 arm_float_words_big_endian (void)
2642 if (TARGET_MAVERICK)
2643 return 0;
2645 /* For FPA, float words are always big-endian. For VFP, float words
2646 follow the memory system mode. */
2648 if (TARGET_FPA)
2650 return 1;
2653 if (TARGET_VFP)
2654 return (TARGET_BIG_END ? 1 : 0);
2656 return 1;
2659 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2660 for a call to a function whose data type is FNTYPE.
2661 For a library call, FNTYPE is NULL. */
2662 void
2663 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2664 rtx libname ATTRIBUTE_UNUSED,
2665 tree fndecl ATTRIBUTE_UNUSED)
2667 /* On the ARM, the offset starts at 0. */
2668 pcum->nregs = 0;
2669 pcum->iwmmxt_nregs = 0;
2670 pcum->can_split = true;
2672 pcum->call_cookie = CALL_NORMAL;
2674 if (TARGET_LONG_CALLS)
2675 pcum->call_cookie = CALL_LONG;
2677 /* Check for long call/short call attributes. The attributes
2678 override any command line option. */
2679 if (fntype)
2681 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2682 pcum->call_cookie = CALL_SHORT;
2683 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2684 pcum->call_cookie = CALL_LONG;
2687 /* Varargs vectors are treated the same as long long.
2688 named_count avoids having to change the way arm handles 'named'. */
2689 pcum->named_count = 0;
2690 pcum->nargs = 0;
2692 if (TARGET_REALLY_IWMMXT && fntype)
2694 tree fn_arg;
2696 for (fn_arg = TYPE_ARG_TYPES (fntype);
2697 fn_arg;
2698 fn_arg = TREE_CHAIN (fn_arg))
2699 pcum->named_count += 1;
2701 if (! pcum->named_count)
2702 pcum->named_count = INT_MAX;
2707 /* Return true if mode/type need doubleword alignment. */
2708 bool
2709 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2711 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2712 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
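/* For example (editor's sketch): with a 32-bit PARM_BOUNDARY, DImode
   and DFmode arguments (64-bit alignment) need doubleword alignment,
   while SImode arguments do not.  */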
2716 /* Determine where to put an argument to a function.
2717 Value is zero to push the argument on the stack,
2718 or a hard register in which to store the argument.
2720 MODE is the argument's machine mode.
2721 TYPE is the data type of the argument (as a tree).
2722 This is null for libcalls where that information may
2723 not be available.
2724 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2725 the preceding args and about the function being called.
2726 NAMED is nonzero if this argument is a named parameter
2727 (otherwise it is an extra parameter matching an ellipsis). */
2730 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2731 tree type, int named)
2733 int nregs;
2735 /* Varargs vectors are treated the same as long long.
2736 named_count avoids having to change the way arm handles 'named'. */
2737 if (TARGET_IWMMXT_ABI
2738 && arm_vector_mode_supported_p (mode)
2739 && pcum->named_count > pcum->nargs + 1)
2741 if (pcum->iwmmxt_nregs <= 9)
2742 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2743 else
2745 pcum->can_split = false;
2746 return NULL_RTX;
2750 /* Put doubleword aligned quantities in even register pairs. */
2751 if (pcum->nregs & 1
2752 && ARM_DOUBLEWORD_ALIGN
2753 && arm_needs_doubleword_align (mode, type))
2754 pcum->nregs++;
2756 if (mode == VOIDmode)
2757 /* Compute operand 2 of the call insn. */
2758 return GEN_INT (pcum->call_cookie);
2760 /* Only allow splitting an arg between regs and memory if all preceding
2761 args were allocated to regs. For args passed by reference we only count
2762 the reference pointer. */
2763 if (pcum->can_split)
2764 nregs = 1;
2765 else
2766 nregs = ARM_NUM_REGS2 (mode, type);
2768 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2769 return NULL_RTX;
2771 return gen_rtx_REG (mode, pcum->nregs);
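/* Example allocation when ARM_DOUBLEWORD_ALIGN holds (editor's
   sketch): for f (int a, long long b), a is passed in r0; b needs an
   even register pair, so r1 is skipped and b occupies r2/r3.  A
   further integer argument would then go on the stack.  */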
2774 static int
2775 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2776 tree type, bool named ATTRIBUTE_UNUSED)
2778 int nregs = pcum->nregs;
2780 if (arm_vector_mode_supported_p (mode))
2781 return 0;
2783 if (NUM_ARG_REGS > nregs
2784 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2785 && pcum->can_split)
2786 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2788 return 0;
2791 /* Variable sized types are passed by reference. This is a GCC
2792 extension to the ARM ABI. */
2794 static bool
2795 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2796 enum machine_mode mode ATTRIBUTE_UNUSED,
2797 tree type, bool named ATTRIBUTE_UNUSED)
2799 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2802 /* Encode the current state of the #pragma [no_]long_calls. */
2803 typedef enum
2805 OFF, /* No #pragma [no_]long_calls is in effect. */
2806 LONG, /* #pragma long_calls is in effect. */
2807 SHORT /* #pragma no_long_calls is in effect. */
2808 } arm_pragma_enum;
2810 static arm_pragma_enum arm_pragma_long_calls = OFF;
2812 void
2813 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2815 arm_pragma_long_calls = LONG;
2818 void
2819 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2821 arm_pragma_long_calls = SHORT;
2824 void
2825 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2827 arm_pragma_long_calls = OFF;
2830 /* Table of machine attributes. */
2831 const struct attribute_spec arm_attribute_table[] =
2833 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2834 /* Function calls made to this symbol must be done indirectly, because
2835 it may lie outside of the 26 bit addressing range of a normal function
2836 call. */
2837 { "long_call", 0, 0, false, true, true, NULL },
2838 /* Whereas these functions are always known to reside within the 26 bit
2839 addressing range. */
2840 { "short_call", 0, 0, false, true, true, NULL },
2841 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2842 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2843 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2844 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2845 #ifdef ARM_PE
2846 /* ARM/PE has three new attributes:
2847 interfacearm - ?
2848 dllexport - for exporting a function/variable that will live in a dll
2849 dllimport - for importing a function/variable from a dll
2851 Microsoft allows multiple declspecs in one __declspec, separating
2852 them with spaces. We do NOT support this. Instead, use __declspec
2853 multiple times.
2855 { "dllimport", 0, 0, true, false, false, NULL },
2856 { "dllexport", 0, 0, true, false, false, NULL },
2857 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2858 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2859 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2860 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2861 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2862 #endif
2863 { NULL, 0, 0, false, false, false, NULL }
2866 /* Handle an attribute requiring a FUNCTION_DECL;
2867 arguments as in struct attribute_spec.handler. */
2868 static tree
2869 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2870 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2872 if (TREE_CODE (*node) != FUNCTION_DECL)
2874 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2875 IDENTIFIER_POINTER (name));
2876 *no_add_attrs = true;
2879 return NULL_TREE;
2882 /* Handle an "interrupt" or "isr" attribute;
2883 arguments as in struct attribute_spec.handler. */
2884 static tree
2885 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2886 bool *no_add_attrs)
2888 if (DECL_P (*node))
2890 if (TREE_CODE (*node) != FUNCTION_DECL)
2892 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2893 IDENTIFIER_POINTER (name));
2894 *no_add_attrs = true;
2896 /* FIXME: the argument, if any, is checked for type attributes;
2897 should it be checked for decl ones? */
2899 else
2901 if (TREE_CODE (*node) == FUNCTION_TYPE
2902 || TREE_CODE (*node) == METHOD_TYPE)
2904 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2906 warning (OPT_Wattributes, "%qs attribute ignored",
2907 IDENTIFIER_POINTER (name));
2908 *no_add_attrs = true;
2911 else if (TREE_CODE (*node) == POINTER_TYPE
2912 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2913 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2914 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2916 *node = build_variant_type_copy (*node);
2917 TREE_TYPE (*node) = build_type_attribute_variant
2918 (TREE_TYPE (*node),
2919 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2920 *no_add_attrs = true;
2922 else
2924 /* Possibly pass this attribute on from the type to a decl. */
2925 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2926 | (int) ATTR_FLAG_FUNCTION_NEXT
2927 | (int) ATTR_FLAG_ARRAY_NEXT))
2929 *no_add_attrs = true;
2930 return tree_cons (name, args, NULL_TREE);
2932 else
2934 warning (OPT_Wattributes, "%qs attribute ignored",
2935 IDENTIFIER_POINTER (name));
2940 return NULL_TREE;
2943 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2944 /* Handle the "notshared" attribute. This attribute is another way of
2945 requesting hidden visibility. ARM's compiler supports
2946 "__declspec(notshared)"; we support the same thing via an
2947 attribute. */
2949 static tree
2950 arm_handle_notshared_attribute (tree *node,
2951 tree name ATTRIBUTE_UNUSED,
2952 tree args ATTRIBUTE_UNUSED,
2953 int flags ATTRIBUTE_UNUSED,
2954 bool *no_add_attrs)
2956 tree decl = TYPE_NAME (*node);
2958 if (decl)
2960 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2961 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2962 *no_add_attrs = false;
2964 return NULL_TREE;
2966 #endif
2968 /* Return 0 if the attributes for two types are incompatible, 1 if they
2969 are compatible, and 2 if they are nearly compatible (which causes a
2970 warning to be generated). */
2971 static int
2972 arm_comp_type_attributes (tree type1, tree type2)
2974 int l1, l2, s1, s2;
2976 /* Check for mismatch of non-default calling convention. */
2977 if (TREE_CODE (type1) != FUNCTION_TYPE)
2978 return 1;
2980 /* Check for mismatched call attributes. */
2981 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2982 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2983 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2984 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2986 /* Only bother to check if an attribute is defined. */
2987 if (l1 | l2 | s1 | s2)
2989 /* If one type has an attribute, the other must have the same attribute. */
2990 if ((l1 != l2) || (s1 != s2))
2991 return 0;
2993 /* Disallow mixed attributes. */
2994 if ((l1 & s2) || (l2 & s1))
2995 return 0;
2998 /* Check for mismatched ISR attribute. */
2999 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
3000 if (! l1)
3001 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
3002 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
3003 if (! l2)
3004 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
3005 if (l1 != l2)
3006 return 0;
3008 return 1;
3011 /* Encode long_call or short_call attribute by prefixing
3012 symbol name in DECL with a special character FLAG. */
3013 void
3014 arm_encode_call_attribute (tree decl, int flag)
3016 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
3017 int len = strlen (str);
3018 char * newstr;
3020 /* Do not allow weak functions to be treated as short call. */
3021 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
3022 return;
3024 newstr = alloca (len + 2);
3025 newstr[0] = flag;
3026 strcpy (newstr + 1, str);
3028 newstr = (char *) ggc_alloc_string (newstr, len + 1);
3029 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
3032 /* Assigns default attributes to newly defined type. This is used to
3033 set short_call/long_call attributes for function types of
3034 functions defined inside corresponding #pragma scopes. */
3035 static void
3036 arm_set_default_type_attributes (tree type)
3038 /* Add __attribute__ ((long_call)) to all functions, when
3039 inside #pragma long_calls or __attribute__ ((short_call)),
3040 when inside #pragma no_long_calls. */
3041 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
3043 tree type_attr_list, attr_name;
3044 type_attr_list = TYPE_ATTRIBUTES (type);
3046 if (arm_pragma_long_calls == LONG)
3047 attr_name = get_identifier ("long_call");
3048 else if (arm_pragma_long_calls == SHORT)
3049 attr_name = get_identifier ("short_call");
3050 else
3051 return;
3053 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
3054 TYPE_ATTRIBUTES (type) = type_attr_list;
3058 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
3059 defined within the current compilation unit. If this cannot be
3060 determined, then 0 is returned. */
3061 static int
3062 current_file_function_operand (rtx sym_ref)
3064 /* This is a bit of a fib. A function will have a short call flag
3065 applied to its name if it has the short call attribute, or it has
3066 already been defined within the current compilation unit. */
3067 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
3068 return 1;
3070 /* The current function is always defined within the current compilation
3071 unit. If it is a weak definition, however, then this may not be the real
3072 definition of the function, and so we have to say no. */
3073 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
3074 && !DECL_WEAK (current_function_decl))
3075 return 1;
3077 /* We cannot make the determination - default to returning 0. */
3078 return 0;
3081 /* Return nonzero if a 32 bit "long_call" should be generated for
3082 this call. We generate a long_call if the function:
3084 a. has an __attribute__((long_call))
3085 or b. is within the scope of a #pragma long_calls
3086 or c. the -mlong-calls command line switch has been specified
3087 and either:
3088 1. -ffunction-sections is in effect
3089 or 2. the current function has __attribute__ ((section))
3090 or 3. the target function has __attribute__ ((section))
3092 However we do not generate a long call if the function:
3094 d. has an __attribute__ ((short_call))
3095 or e. is inside the scope of a #pragma no_long_calls
3096 or f. is defined within the current compilation unit.
3098 This function will be called by C fragments contained in the machine
3099 description file. SYM_REF and CALL_COOKIE correspond to the matched
3100 rtl operands. CALL_SYMBOL is used to distinguish between
3101 two different callers of the function. It is set to 1 in the
3102 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3103 and "call_value" patterns. This is because of the difference in the
3104 SYM_REFs passed by these patterns. */
3105 int
3106 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3108 if (!call_symbol)
3110 if (GET_CODE (sym_ref) != MEM)
3111 return 0;
3113 sym_ref = XEXP (sym_ref, 0);
3116 if (GET_CODE (sym_ref) != SYMBOL_REF)
3117 return 0;
3119 if (call_cookie & CALL_SHORT)
3120 return 0;
3122 if (TARGET_LONG_CALLS)
3124 if (flag_function_sections
3125 || DECL_SECTION_NAME (current_function_decl))
3126 /* c.3 is handled by the definition of the
3127 ARM_DECLARE_FUNCTION_SIZE macro. */
3128 return 1;
3131 if (current_file_function_operand (sym_ref))
3132 return 0;
3134 return (call_cookie & CALL_LONG)
3135 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3136 || TARGET_LONG_CALLS;
3139 /* Return nonzero if it is ok to make a tail-call to DECL. */
3140 static bool
3141 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3143 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3145 if (cfun->machine->sibcall_blocked)
3146 return false;
3148 /* Never tailcall something for which we have no decl, or if we
3149 are in Thumb mode. */
3150 if (decl == NULL || TARGET_THUMB)
3151 return false;
3153 /* Get the calling method. */
3154 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3155 call_type = CALL_SHORT;
3156 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3157 call_type = CALL_LONG;
3159 /* Cannot tail-call to long calls, since these are out of range of
3160 a branch instruction. However, if not compiling PIC, we know
3161 we can reach the symbol if it is in this compilation unit. */
3162 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3163 return false;
3165 /* If we are interworking and the function is not declared static
3166 then we can't tail-call it unless we know that it exists in this
3167 compilation unit (since it might be a Thumb routine). */
3168 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3169 return false;
3171 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3172 if (IS_INTERRUPT (arm_current_func_type ()))
3173 return false;
3175 /* Everything else is ok. */
3176 return true;
3180 /* Addressing mode support functions. */
3182 /* Return nonzero if X is a legitimate immediate operand when compiling
3183 for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
3184 int
3185 legitimate_pic_operand_p (rtx x)
3187 if (GET_CODE (x) == SYMBOL_REF
3188 || (GET_CODE (x) == CONST
3189 && GET_CODE (XEXP (x, 0)) == PLUS
3190 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
3191 return 0;
3193 return 1;
3196 rtx
3197 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3199 if (GET_CODE (orig) == SYMBOL_REF
3200 || GET_CODE (orig) == LABEL_REF)
3202 #ifndef AOF_ASSEMBLER
3203 rtx pic_ref, address;
3204 #endif
3205 rtx insn;
3206 int subregs = 0;
3208 /* If this function doesn't have a pic register, create one now.
3209 A lot of the logic here is made obscure by the fact that this
3210 routine gets called as part of the rtx cost estimation
3211 process. We don't want those calls to affect any assumptions
3212 about the real function; and further, we can't call
3213 entry_of_function() until we start the real expansion
3214 process. */
3215 if (!current_function_uses_pic_offset_table)
3217 gcc_assert (!no_new_pseudos);
3218 if (arm_pic_register != INVALID_REGNUM)
3220 cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
3222 /* Play games to avoid marking the function as needing pic
3223 if we are being called as part of the cost-estimation
3224 process. */
3225 if (!ir_type())
3226 current_function_uses_pic_offset_table = 1;
3228 else
3230 rtx seq;
3232 cfun->machine->pic_reg = gen_reg_rtx (Pmode);
3234 /* Play games to avoid marking the function as needing pic
3235 if we are being called as part of the cost-estimation
3236 process. */
3237 if (!ir_type())
3239 current_function_uses_pic_offset_table = 1;
3240 start_sequence ();
3242 arm_load_pic_register (0UL);
3244 seq = get_insns ();
3245 end_sequence ();
3246 emit_insn_after (seq, entry_of_function ());
3251 if (reg == 0)
3253 gcc_assert (!no_new_pseudos);
3254 reg = gen_reg_rtx (Pmode);
3256 subregs = 1;
3259 #ifdef AOF_ASSEMBLER
3260 /* The AOF assembler can generate relocations for these directly, and
3261 understands that the PIC register has to be added into the offset. */
3262 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3263 #else
3264 if (subregs)
3265 address = gen_reg_rtx (Pmode);
3266 else
3267 address = reg;
3269 if (TARGET_ARM)
3270 emit_insn (gen_pic_load_addr_arm (address, orig));
3271 else
3272 emit_insn (gen_pic_load_addr_thumb (address, orig));
3274 if ((GET_CODE (orig) == LABEL_REF
3275 || (GET_CODE (orig) == SYMBOL_REF &&
3276 SYMBOL_REF_LOCAL_P (orig)))
3277 && NEED_GOT_RELOC)
3278 pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
3279 else
3281 pic_ref = gen_const_mem (Pmode,
3282 gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
3283 address));
3286 insn = emit_move_insn (reg, pic_ref);
3287 #endif
3288 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3289 by the loop optimizer. */
3290 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3291 REG_NOTES (insn));
3292 return reg;
3294 else if (GET_CODE (orig) == CONST)
3296 rtx base, offset;
3298 if (GET_CODE (XEXP (orig, 0)) == PLUS
3299 && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
3300 return orig;
3302 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
3303 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
3304 return orig;
3306 if (reg == 0)
3308 gcc_assert (!no_new_pseudos);
3309 reg = gen_reg_rtx (Pmode);
3312 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3314 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3315 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3316 base == reg ? 0 : reg);
3318 if (GET_CODE (offset) == CONST_INT)
3320 /* The base register doesn't really matter; we only want to
3321 test the index for the appropriate mode. */
3322 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3324 gcc_assert (!no_new_pseudos);
3325 offset = force_reg (Pmode, offset);
3328 if (GET_CODE (offset) == CONST_INT)
3329 return plus_constant (base, INTVAL (offset));
3332 if (GET_MODE_SIZE (mode) > 4
3333 && (GET_MODE_CLASS (mode) == MODE_INT
3334 || TARGET_SOFT_FLOAT))
3336 emit_insn (gen_addsi3 (reg, base, offset));
3337 return reg;
3340 return gen_rtx_PLUS (Pmode, base, offset);
3343 return orig;
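/* The ARM sequence produced above for a global symbol is roughly
   (editor's sketch; rT and rPIC are placeholder registers):

       ldr rT, .Lc           @ .Lc holds the symbol's GOT offset
       ldr rD, [rPIC, rT]    @ load &sym through the GOT

   whereas local symbols and labels use rPIC + rT directly.  */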
3349 /* Find a spare low register to use during the prologue of a function. */
3349 static int
3350 thumb_find_work_register (unsigned long pushed_regs_mask)
3352 int reg;
3354 /* Check the argument registers first as these are call-used. The
3355 register allocation order means that sometimes r3 might be used
3356 but earlier argument registers might not, so check them all. */
3357 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3358 if (!regs_ever_live[reg])
3359 return reg;
3361 /* Before going on to check the call-saved registers we can try a couple
3362 more ways of deducing that r3 is available. The first is when we are
3363 pushing anonymous arguments onto the stack and we have fewer than 4
3364 registers' worth of fixed arguments(*). In this case r3 will be part of
3365 the variable argument list and so we can be sure that it will be
3366 pushed right at the start of the function. Hence it will be available
3367 for the rest of the prologue.
3368 (*): i.e. current_function_pretend_args_size is greater than 0. */
3369 if (cfun->machine->uses_anonymous_args
3370 && current_function_pretend_args_size > 0)
3371 return LAST_ARG_REGNUM;
3373 /* The other case is when we have fixed arguments but fewer than 4 registers'
3374 worth. In this case r3 might be used in the body of the function, but
3375 it is not being used to convey an argument into the function. In theory
3376 we could just check current_function_args_size to see how many bytes are
3377 being passed in argument registers, but it seems to be unreliable.
3378 Sometimes it will have the value 0 when in fact arguments are being
3379 passed. (See testcase execute/20021111-1.c for an example.) So we
3380 also check the args_info.nregs field. The problem with this field is
3381 that it makes no allowances for arguments that are passed to the
3382 function but which are not used. Hence we could miss an opportunity
3383 when a function has an unused argument in r3. But it is better to be
3384 safe than to be sorry. */
3385 if (! cfun->machine->uses_anonymous_args
3386 && current_function_args_size >= 0
3387 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3388 && cfun->args_info.nregs < 4)
3389 return LAST_ARG_REGNUM;
3391 /* Otherwise look for a call-saved register that is going to be pushed. */
3392 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3393 if (pushed_regs_mask & (1 << reg))
3394 return reg;
3396 /* Something went wrong - thumb_compute_save_reg_mask()
3397 should have arranged for a suitable register to be pushed. */
3398 gcc_unreachable ();
3401 static GTY(()) int pic_labelno;
3403 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3404 low register. */
3406 void
3407 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3409 #ifndef AOF_ASSEMBLER
3410 rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx;
3411 rtx global_offset_table;
3413 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3414 return;
3416 gcc_assert (flag_pic);
3418 /* We use an UNSPEC rather than a LABEL_REF because this label never appears
3419 in the code stream. */
3421 labelno = GEN_INT (pic_labelno++);
3422 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3423 l1 = gen_rtx_CONST (VOIDmode, l1);
3425 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3426 /* On the ARM the PC register contains 'dot + 8' at the time of the
3427 addition; on the Thumb it is 'dot + 4'. */
3428 pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
3429 if (GOT_PCREL)
3430 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3431 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3432 else
3433 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3435 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3437 if (TARGET_ARM)
3439 emit_insn (gen_pic_load_addr_arm (cfun->machine->pic_reg, pic_rtx));
3440 emit_insn (gen_pic_add_dot_plus_eight (cfun->machine->pic_reg,
3441 cfun->machine->pic_reg, labelno));
3443 else
3445 if (arm_pic_register != INVALID_REGNUM
3446 && REGNO (cfun->machine->pic_reg) > LAST_LO_REGNUM)
3448 /* We will have pushed the pic register, so we should always be
3449 able to find a work register. */
3450 pic_tmp = gen_rtx_REG (SImode,
3451 thumb_find_work_register (saved_regs));
3452 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3453 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3455 else
3456 emit_insn (gen_pic_load_addr_thumb (cfun->machine->pic_reg, pic_rtx));
3457 emit_insn (gen_pic_add_dot_plus_four (cfun->machine->pic_reg,
3458 cfun->machine->pic_reg, labelno));
3461 /* Need to emit this whether or not we obey regdecls,
3462 since setjmp/longjmp can cause life info to screw up. */
3463 emit_insn (gen_rtx_USE (VOIDmode, cfun->machine->pic_reg));
3464 #endif /* AOF_ASSEMBLER */
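/* In ARM state the emitted prologue sequence is roughly (editor's
   sketch):

       ldr rPIC, .Ln       @ .Ln: .word _GLOBAL_OFFSET_TABLE_ - (.LPICm + 8)
   .LPICm:
       add rPIC, pc, rPIC  @ rPIC now addresses the GOT

   which relies on the PC reading as 'dot + 8' as noted above.  */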
3468 /* Return nonzero if X is valid as an ARM state addressing register. */
3469 static int
3470 arm_address_register_rtx_p (rtx x, int strict_p)
3472 int regno;
3474 if (GET_CODE (x) != REG)
3475 return 0;
3477 regno = REGNO (x);
3479 if (strict_p)
3480 return ARM_REGNO_OK_FOR_BASE_P (regno);
3482 return (regno <= LAST_ARM_REGNUM
3483 || regno >= FIRST_PSEUDO_REGISTER
3484 || regno == FRAME_POINTER_REGNUM
3485 || regno == ARG_POINTER_REGNUM);
3488 /* Return TRUE if this rtx is the difference of a symbol and a label,
3489 and will reduce to a PC-relative relocation in the object file.
3490 Expressions like this can be left alone when generating PIC, rather
3491 than forced through the GOT. */
3492 static int
3493 pcrel_constant_p (rtx x)
3495 if (GET_CODE (x) == MINUS)
3496 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
3498 return FALSE;
3501 /* Return nonzero if X is a valid ARM state address operand. */
3502 int
3503 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3504 int strict_p)
3506 bool use_ldrd;
3507 enum rtx_code code = GET_CODE (x);
3509 if (arm_address_register_rtx_p (x, strict_p))
3510 return 1;
3512 use_ldrd = (TARGET_LDRD
3513 && (mode == DImode
3514 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3516 if (code == POST_INC || code == PRE_DEC
3517 || ((code == PRE_INC || code == POST_DEC)
3518 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3519 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3521 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3522 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3523 && GET_CODE (XEXP (x, 1)) == PLUS
3524 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3526 rtx addend = XEXP (XEXP (x, 1), 1);
3528 /* Don't allow ldrd post increment by register because it's hard
3529 to fix up invalid register choices. */
3530 if (use_ldrd
3531 && GET_CODE (x) == POST_MODIFY
3532 && GET_CODE (addend) == REG)
3533 return 0;
3535 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3536 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3539 /* After reload, constants split into minipools will have addresses
3540 from a LABEL_REF. */
3541 else if (reload_completed
3542 && (code == LABEL_REF
3543 || (code == CONST
3544 && GET_CODE (XEXP (x, 0)) == PLUS
3545 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3546 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3547 return 1;
3549 else if (mode == TImode)
3550 return 0;
3552 else if (code == PLUS)
3554 rtx xop0 = XEXP (x, 0);
3555 rtx xop1 = XEXP (x, 1);
3557 return ((arm_address_register_rtx_p (xop0, strict_p)
3558 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3559 || (arm_address_register_rtx_p (xop1, strict_p)
3560 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3563 #if 0
3564 /* Reload currently can't handle MINUS, so disable this for now. */
3565 else if (GET_CODE (x) == MINUS)
3567 rtx xop0 = XEXP (x, 0);
3568 rtx xop1 = XEXP (x, 1);
3570 return (arm_address_register_rtx_p (xop0, strict_p)
3571 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3573 #endif
3575 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3576 && code == SYMBOL_REF
3577 && CONSTANT_POOL_ADDRESS_P (x)
3578 && ! (flag_pic
3579 && symbol_mentioned_p (get_pool_constant (x))
3580 && ! pcrel_constant_p (get_pool_constant (x))))
3581 return 1;
3583 return 0;
3586 /* Return nonzero if INDEX is valid for an address index operand in
3587 ARM state. */
3588 static int
3589 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3590 int strict_p)
3592 HOST_WIDE_INT range;
3593 enum rtx_code code = GET_CODE (index);
3595 /* Standard coprocessor addressing modes. */
3596 if (TARGET_HARD_FLOAT
3597 && (TARGET_FPA || TARGET_MAVERICK)
3598 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3599 || (TARGET_MAVERICK && mode == DImode)))
3600 return (code == CONST_INT && INTVAL (index) < 1024
3601 && INTVAL (index) > -1024
3602 && (INTVAL (index) & 3) == 0);
3604 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3605 return (code == CONST_INT
3606 && INTVAL (index) < 1024
3607 && INTVAL (index) > -1024
3608 && (INTVAL (index) & 3) == 0);
3610 if (arm_address_register_rtx_p (index, strict_p)
3611 && (GET_MODE_SIZE (mode) <= 4))
3612 return 1;
3614 if (mode == DImode || mode == DFmode)
3616 if (code == CONST_INT)
3618 HOST_WIDE_INT val = INTVAL (index);
3620 if (TARGET_LDRD)
3621 return val > -256 && val < 256;
3622 else
3623 return val > -4096 && val < 4092;
3626 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3629 if (GET_MODE_SIZE (mode) <= 4
3630 && ! (arm_arch4
3631 && (mode == HImode
3632 || (mode == QImode && outer == SIGN_EXTEND))))
3634 if (code == MULT)
3636 rtx xiop0 = XEXP (index, 0);
3637 rtx xiop1 = XEXP (index, 1);
3639 return ((arm_address_register_rtx_p (xiop0, strict_p)
3640 && power_of_two_operand (xiop1, SImode))
3641 || (arm_address_register_rtx_p (xiop1, strict_p)
3642 && power_of_two_operand (xiop0, SImode)));
3644 else if (code == LSHIFTRT || code == ASHIFTRT
3645 || code == ASHIFT || code == ROTATERT)
3647 rtx op = XEXP (index, 1);
3649 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3650 && GET_CODE (op) == CONST_INT
3651 && INTVAL (op) > 0
3652 && INTVAL (op) <= 31);
3656 /* For ARM v4 we may be doing a sign-extend operation during the
3657 load. */
3658 if (arm_arch4)
3660 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3661 range = 256;
3662 else
3663 range = 4096;
3665 else
3666 range = (mode == HImode) ? 4095 : 4096;
3668 return (code == CONST_INT
3669 && INTVAL (index) < range
3670 && INTVAL (index) > -range);
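/* Examples of index operands accepted above in ARM state (editor's
   sketch): for SImode, an immediate in (-4096, 4096) as in
   [r0, #4095], a register as in [r0, r1], or a scaled register as in
   [r0, r1, lsl #2]; for DImode with LDRD the immediate is limited to
   (-256, 256).  */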
3673 /* Return nonzero if X is valid as a Thumb state base register. */
3674 static int
3675 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3677 int regno;
3679 if (GET_CODE (x) != REG)
3680 return 0;
3682 regno = REGNO (x);
3684 if (strict_p)
3685 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3687 return (regno <= LAST_LO_REGNUM
3688 || regno > LAST_VIRTUAL_REGISTER
3689 || regno == FRAME_POINTER_REGNUM
3690 || (GET_MODE_SIZE (mode) >= 4
3691 && (regno == STACK_POINTER_REGNUM
3692 || regno >= FIRST_PSEUDO_REGISTER
3693 || x == hard_frame_pointer_rtx
3694 || x == arg_pointer_rtx)));
3697 /* Return nonzero if x is a legitimate index register. This is the case
3698 for any base register that can access a QImode object. */
3699 inline static int
3700 thumb_index_register_rtx_p (rtx x, int strict_p)
3702 return thumb_base_register_rtx_p (x, QImode, strict_p);
3705 /* Return nonzero if x is a legitimate Thumb-state address.
3707 The AP may be eliminated to either the SP or the FP, so we use the
3708 least common denominator, e.g. SImode, and offsets from 0 to 64.
3710 ??? Verify whether the above is the right approach.
3712 ??? Also, the FP may be eliminated to the SP, so perhaps that
3713 needs special handling also.
3715 ??? Look at how the mips16 port solves this problem. It probably uses
3716 better ways to solve some of these problems.
3718 Although it is not incorrect, we don't accept QImode and HImode
3719 addresses based on the frame pointer or arg pointer until the
3720 reload pass starts. This is so that eliminating such addresses
3721 into stack based ones won't produce impossible code. */
3722 int
3723 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3725 /* ??? Not clear if this is right. Experiment. */
3726 if (GET_MODE_SIZE (mode) < 4
3727 && !(reload_in_progress || reload_completed)
3728 && (reg_mentioned_p (frame_pointer_rtx, x)
3729 || reg_mentioned_p (arg_pointer_rtx, x)
3730 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3731 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3732 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3733 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3734 return 0;
3736 /* Accept any base register. SP only in SImode or larger. */
3737 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3738 return 1;
3740 /* This is PC relative data before arm_reorg runs. */
3741 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3742 && GET_CODE (x) == SYMBOL_REF
3743 && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
3744 return 1;
3746 /* This is PC relative data after arm_reorg runs. */
3747 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3748 && (GET_CODE (x) == LABEL_REF
3749 || (GET_CODE (x) == CONST
3750 && GET_CODE (XEXP (x, 0)) == PLUS
3751 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3752 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3753 return 1;
3755 /* Post-inc indexing only supported for SImode and larger. */
3756 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3757 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3758 return 1;
3760 else if (GET_CODE (x) == PLUS)
3762 /* REG+REG address can be any two index registers. */
3763 /* We disallow FRAME+REG addressing since we know that FRAME
3764 will be replaced with STACK, and SP relative addressing only
3765 permits SP+OFFSET. */
3766 if (GET_MODE_SIZE (mode) <= 4
3767 && XEXP (x, 0) != frame_pointer_rtx
3768 && XEXP (x, 1) != frame_pointer_rtx
3769 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3770 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3771 return 1;
3773 /* REG+const has 5-7 bit offset for non-SP registers. */
3774 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3775 || XEXP (x, 0) == arg_pointer_rtx)
3776 && GET_CODE (XEXP (x, 1)) == CONST_INT
3777 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3778 return 1;
3780 /* REG+const has a 10-bit offset for SP, but only SImode and
3781 larger are supported. */
3782 /* ??? Should probably check for DI/DFmode overflow here
3783 just like GO_IF_LEGITIMATE_OFFSET does. */
3784 else if (GET_CODE (XEXP (x, 0)) == REG
3785 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3786 && GET_MODE_SIZE (mode) >= 4
3787 && GET_CODE (XEXP (x, 1)) == CONST_INT
3788 && INTVAL (XEXP (x, 1)) >= 0
3789 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3790 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3791 return 1;
3793 else if (GET_CODE (XEXP (x, 0)) == REG
3794 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3795 && GET_MODE_SIZE (mode) >= 4
3796 && GET_CODE (XEXP (x, 1)) == CONST_INT
3797 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3798 return 1;
3801 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3802 && GET_MODE_SIZE (mode) == 4
3803 && GET_CODE (x) == SYMBOL_REF
3804 && CONSTANT_POOL_ADDRESS_P (x)
3805 && ! (flag_pic
3806 && symbol_mentioned_p (get_pool_constant (x))
3807 && ! pcrel_constant_p (get_pool_constant (x))))
3808 return 1;
3810 return 0;
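/* For reference, an illustrative summary of the Thumb address forms
   accepted above (assumed syntax, not an exhaustive list):

     [rN]          any base register (SP only for SImode or larger)
     [rN, rM]      two index registers, neither frame-based
     [rN, #imm]    small scaled offset, see thumb_legitimate_offset_p
     [sp, #imm]    word-aligned, 0..1020, SImode or larger
     label/pool    PC-relative literal-pool and label references */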
3813 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3814 instruction of mode MODE. */
3816 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3818 switch (GET_MODE_SIZE (mode))
3820 case 1:
3821 return val >= 0 && val < 32;
3823 case 2:
3824 return val >= 0 && val < 64 && (val & 1) == 0;
3826 default:
3827 return (val >= 0
3828 && (val + GET_MODE_SIZE (mode)) <= 128
3829 && (val & 3) == 0);
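/* Worked examples for the ranges above: QImode accepts offsets 0..31,
   HImode accepts even offsets 0..62, and the default case accepts
   multiples of 4 with val + size <= 128, i.e. 0..124 for SImode.
   These correspond to the 5-bit scaled immediate fields of the
   Thumb-1 ldrb/ldrh/ldr encodings. */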
3833 /* Build the SYMBOL_REF for __tls_get_addr. */
3835 static GTY(()) rtx tls_get_addr_libfunc;
3837 static rtx
3838 get_tls_get_addr (void)
3840 if (!tls_get_addr_libfunc)
3841 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
3842 return tls_get_addr_libfunc;
3845 static rtx
3846 arm_load_tp (rtx target)
3848 if (!target)
3849 target = gen_reg_rtx (SImode);
3851 if (TARGET_HARD_TP)
3853 /* Can return in any reg. */
3854 emit_insn (gen_load_tp_hard (target));
3856 else
3858 /* Always returned in r0. Immediately copy the result into a pseudo;
3859 otherwise other uses of r0 (e.g. setting up function arguments) may
3860 clobber the value. */
3862 rtx tmp;
3864 emit_insn (gen_load_tp_soft ());
3866 tmp = gen_rtx_REG (SImode, 0);
3867 emit_move_insn (target, tmp);
3869 return target;
3872 static rtx
3873 load_tls_operand (rtx x, rtx reg)
3875 rtx tmp;
3877 if (reg == NULL_RTX)
3878 reg = gen_reg_rtx (SImode);
3880 tmp = gen_rtx_CONST (SImode, x);
3882 emit_move_insn (reg, tmp);
3884 return reg;
3887 static rtx
3888 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
3890 rtx insns, label, labelno, sum;
3892 start_sequence ();
3894 labelno = GEN_INT (pic_labelno++);
3895 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3896 label = gen_rtx_CONST (VOIDmode, label);
3898 sum = gen_rtx_UNSPEC (Pmode,
3899 gen_rtvec (4, x, GEN_INT (reloc), label,
3900 GEN_INT (TARGET_ARM ? 8 : 4)),
3901 UNSPEC_TLS);
3902 reg = load_tls_operand (sum, reg);
3904 if (TARGET_ARM)
3905 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
3906 else
3907 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3909 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
3910 Pmode, 1, reg, Pmode);
3912 insns = get_insns ();
3913 end_sequence ();
3915 return insns;
3919 legitimize_tls_address (rtx x, rtx reg)
3921 rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
3922 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
3924 switch (model)
3926 case TLS_MODEL_GLOBAL_DYNAMIC:
3927 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
3928 dest = gen_reg_rtx (Pmode);
3929 emit_libcall_block (insns, dest, ret, x);
3930 return dest;
3932 case TLS_MODEL_LOCAL_DYNAMIC:
3933 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
3935 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3936 share the LDM result with other LD model accesses. */
3937 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
3938 UNSPEC_TLS);
3939 dest = gen_reg_rtx (Pmode);
3940 emit_libcall_block (insns, dest, ret, eqv);
3942 /* Load the addend. */
3943 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
3944 UNSPEC_TLS);
3945 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
3946 return gen_rtx_PLUS (Pmode, dest, addend);
3948 case TLS_MODEL_INITIAL_EXEC:
3949 labelno = GEN_INT (pic_labelno++);
3950 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3951 label = gen_rtx_CONST (VOIDmode, label);
3952 sum = gen_rtx_UNSPEC (Pmode,
3953 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
3954 GEN_INT (TARGET_ARM ? 8 : 4)),
3955 UNSPEC_TLS);
3956 reg = load_tls_operand (sum, reg);
3958 if (TARGET_ARM)
3959 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
3960 else
3962 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3963 emit_move_insn (reg, gen_const_mem (SImode, reg));
3966 tp = arm_load_tp (NULL_RTX);
3968 return gen_rtx_PLUS (Pmode, tp, reg);
3970 case TLS_MODEL_LOCAL_EXEC:
3971 tp = arm_load_tp (NULL_RTX);
3973 reg = gen_rtx_UNSPEC (Pmode,
3974 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
3975 UNSPEC_TLS);
3976 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
3978 return gen_rtx_PLUS (Pmode, tp, reg);
3980 default:
3981 abort ();
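/* As an illustration (a sketch, not verbatim output), a C access like

     __thread int x;
     int get_x (void) { return x; }

   is handled by one of the cases above: the global- and local-dynamic
   models call __tls_get_addr, initial-exec loads the variable's offset
   from the GOT and adds it to the thread pointer, and local-exec adds
   a link-time-constant offset to the thread pointer directly. */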
3985 /* Try machine-dependent ways of modifying an illegitimate address
3986 to be legitimate. If we find one, return the new, valid address. */
3988 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3990 if (arm_tls_symbol_p (x))
3991 return legitimize_tls_address (x, NULL_RTX);
3993 if (GET_CODE (x) == PLUS)
3995 rtx xop0 = XEXP (x, 0);
3996 rtx xop1 = XEXP (x, 1);
3998 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3999 xop0 = force_reg (SImode, xop0);
4001 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
4002 xop1 = force_reg (SImode, xop1);
4004 if (ARM_BASE_REGISTER_RTX_P (xop0)
4005 && GET_CODE (xop1) == CONST_INT)
4007 HOST_WIDE_INT n, low_n;
4008 rtx base_reg, val;
4009 n = INTVAL (xop1);
4011 /* VFP addressing modes actually allow greater offsets, but for
4012 now we just stick with the lowest common denominator. */
4013 if (mode == DImode
4014 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
4016 low_n = n & 0x0f;
4017 n &= ~0x0f;
4018 if (low_n > 4)
4020 n += 16;
4021 low_n -= 16;
4024 else
4026 low_n = ((mode) == TImode ? 0
4027 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
4028 n -= low_n;
4031 base_reg = gen_reg_rtx (SImode);
4032 val = force_operand (plus_constant (xop0, n), NULL_RTX);
4033 emit_move_insn (base_reg, val);
4034 x = plus_constant (base_reg, low_n);
4036 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4037 x = gen_rtx_PLUS (SImode, xop0, xop1);
4040 /* XXX We don't allow MINUS any more -- see comment in
4041 arm_legitimate_address_p (). */
4042 else if (GET_CODE (x) == MINUS)
4044 rtx xop0 = XEXP (x, 0);
4045 rtx xop1 = XEXP (x, 1);
4047 if (CONSTANT_P (xop0))
4048 xop0 = force_reg (SImode, xop0);
4050 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
4051 xop1 = force_reg (SImode, xop1);
4053 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4054 x = gen_rtx_MINUS (SImode, xop0, xop1);
4057 /* Make sure to take full advantage of the pre-indexed addressing mode
4058 with absolute addresses, which often allows the base register to be
4059 factored out across multiple adjacent memory references and might
4060 even allow the minipool to be avoided entirely. */
4061 else if (GET_CODE (x) == CONST_INT && optimize > 0)
4063 unsigned int bits;
4064 HOST_WIDE_INT mask, base, index;
4065 rtx base_reg;
4067 /* ldr and ldrb can use a 12-bit index, ldrsb and the rest can only
4068 use an 8-bit index. So let's use a 12-bit index for SImode only and
4069 hope that arm_gen_constant will enable ldrb to use more bits. */
4070 bits = (mode == SImode) ? 12 : 8;
4071 mask = (1 << bits) - 1;
4072 base = INTVAL (x) & ~mask;
4073 index = INTVAL (x) & mask;
4074 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
4076 /* It'll most probably be more efficient to generate the base
4077 with more bits set and use a negative index instead. */
4078 base |= mask;
4079 index -= mask;
4081 base_reg = force_reg (SImode, GEN_INT (base));
4082 x = plus_constant (base_reg, index);
4085 if (flag_pic)
4087 /* We need to find and carefully transform any SYMBOL and LABEL
4088 references, so go back to the original address expression. */
4089 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4091 if (new_x != orig_x)
4092 x = new_x;
4095 return x;
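/* A worked example of the CONST_INT splitting above (when optimizing)
   for SImode (bits == 12): the absolute address 0x12345678 gives
   mask == 0xfff, base == 0x12345000 and index == 0x678, so the access
   becomes roughly (assumed syntax)

     ldr r0, =0x12345000     @ base built by arm_gen_constant
     ldr r1, [r0, #0x678]    @ 12-bit index

   which lets neighbouring absolute references reuse the base. */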
4099 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4100 to be legitimate. If we find one, return the new, valid address. */
4102 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4104 if (arm_tls_symbol_p (x))
4105 return legitimize_tls_address (x, NULL_RTX);
4107 if (GET_CODE (x) == PLUS
4108 && GET_CODE (XEXP (x, 1)) == CONST_INT
4109 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
4110 || INTVAL (XEXP (x, 1)) < 0))
4112 rtx xop0 = XEXP (x, 0);
4113 rtx xop1 = XEXP (x, 1);
4114 HOST_WIDE_INT offset = INTVAL (xop1);
4116 /* Try to fold the offset into a biasing of the base register and
4117 then offset that. Don't do this when optimizing for space
4118 since it can cause too many CSEs. */
4119 if (optimize_size && offset >= 0
4120 && offset < 256 + 31 * GET_MODE_SIZE (mode))
4122 HOST_WIDE_INT delta;
4124 if (offset >= 256)
4125 delta = offset - (256 - GET_MODE_SIZE (mode));
4126 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
4127 delta = 31 * GET_MODE_SIZE (mode);
4128 else
4129 delta = offset & (~31 * GET_MODE_SIZE (mode));
4131 xop0 = force_operand (plus_constant (xop0, offset - delta),
4132 NULL_RTX);
4133 x = plus_constant (xop0, delta);
4135 else if (offset < 0 && offset > -256)
4136 /* Small negative offsets are best done with a subtract before the
4137 dereference, since forcing these into a register normally takes two
4138 instructions. */
4139 x = force_operand (x, NULL_RTX);
4140 else
4142 /* For the remaining cases, force the constant into a register. */
4143 xop1 = force_reg (SImode, xop1);
4144 x = gen_rtx_PLUS (SImode, xop0, xop1);
4147 else if (GET_CODE (x) == PLUS
4148 && s_register_operand (XEXP (x, 1), SImode)
4149 && !s_register_operand (XEXP (x, 0), SImode))
4151 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
4153 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
4156 if (flag_pic)
4158 /* We need to find and carefully transform any SYMBOL and LABEL
4159 references, so go back to the original address expression. */
4160 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4162 if (new_x != orig_x)
4163 x = new_x;
4166 return x;
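/* A worked example of the rebiasing above: for an SImode access at
   base + 260 when optimizing for size, delta = 260 - (256 - 4) = 8,
   so base + 252 is materialized in a register and the access becomes
   [reg, #8], instead of forcing the whole constant 260 into a
   register. */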
4170 thumb_legitimize_reload_address (rtx *x_p,
4171 enum machine_mode mode,
4172 int opnum, int type,
4173 int ind_levels ATTRIBUTE_UNUSED)
4175 rtx x = *x_p;
4177 if (GET_CODE (x) == PLUS
4178 && GET_MODE_SIZE (mode) < 4
4179 && REG_P (XEXP (x, 0))
4180 && XEXP (x, 0) == stack_pointer_rtx
4181 && GET_CODE (XEXP (x, 1)) == CONST_INT
4182 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4184 rtx orig_x = x;
4186 x = copy_rtx (x);
4187 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4188 Pmode, VOIDmode, 0, 0, opnum, type);
4189 return x;
4192 /* If both registers are hi-regs, then it's better to reload the
4193 entire expression rather than each register individually. That
4194 only requires one reload register rather than two. */
4195 if (GET_CODE (x) == PLUS
4196 && REG_P (XEXP (x, 0))
4197 && REG_P (XEXP (x, 1))
4198 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
4199 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
4201 rtx orig_x = x;
4203 x = copy_rtx (x);
4204 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4205 Pmode, VOIDmode, 0, 0, opnum, type);
4206 return x;
4209 return NULL;
4212 /* Test for various thread-local symbols. */
4214 /* Return TRUE if X is a thread-local symbol. */
4216 static bool
4217 arm_tls_symbol_p (rtx x)
4219 if (! TARGET_HAVE_TLS)
4220 return false;
4222 if (GET_CODE (x) != SYMBOL_REF)
4223 return false;
4225 return SYMBOL_REF_TLS_MODEL (x) != 0;
4228 /* Helper for arm_tls_referenced_p. */
4230 static int
4231 arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4233 if (GET_CODE (*x) == SYMBOL_REF)
4234 return SYMBOL_REF_TLS_MODEL (*x) != 0;
4236 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
4237 TLS offsets, not real symbol references. */
4238 if (GET_CODE (*x) == UNSPEC
4239 && XINT (*x, 1) == UNSPEC_TLS)
4240 return -1;
4242 return 0;
4245 /* Return TRUE if X contains any TLS symbol references. */
4247 bool
4248 arm_tls_referenced_p (rtx x)
4250 if (! TARGET_HAVE_TLS)
4251 return false;
4253 return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
4256 #define REG_OR_SUBREG_REG(X) \
4257 (GET_CODE (X) == REG \
4258 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
4260 #define REG_OR_SUBREG_RTX(X) \
4261 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
4263 #ifndef COSTS_N_INSNS
4264 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
4265 #endif
4266 static inline int
4267 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
4269 enum machine_mode mode = GET_MODE (x);
4271 switch (code)
4273 case ASHIFT:
4274 case ASHIFTRT:
4275 case LSHIFTRT:
4276 case ROTATERT:
4277 case PLUS:
4278 case MINUS:
4279 case COMPARE:
4280 case NEG:
4281 case NOT:
4282 return COSTS_N_INSNS (1);
4284 case MULT:
4285 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4287 int cycles = 0;
4288 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
4290 while (i)
4292 i >>= 2;
4293 cycles++;
4295 return COSTS_N_INSNS (2) + cycles;
4297 return COSTS_N_INSNS (1) + 16;
4299 case SET:
4300 return (COSTS_N_INSNS (1)
4301 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
4302 + (GET_CODE (SET_DEST (x)) == MEM)));
4304 case CONST_INT:
4305 if (outer == SET)
4307 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
4308 return 0;
4309 if (thumb_shiftable_const (INTVAL (x)))
4310 return COSTS_N_INSNS (2);
4311 return COSTS_N_INSNS (3);
4313 else if ((outer == PLUS || outer == COMPARE)
4314 && INTVAL (x) < 256 && INTVAL (x) > -256)
4315 return 0;
4316 else if (outer == AND
4317 && INTVAL (x) < 256 && INTVAL (x) >= -256)
4318 return COSTS_N_INSNS (1);
4319 else if (outer == ASHIFT || outer == ASHIFTRT
4320 || outer == LSHIFTRT)
4321 return 0;
4322 return COSTS_N_INSNS (2);
4324 case CONST:
4325 case CONST_DOUBLE:
4326 case LABEL_REF:
4327 case SYMBOL_REF:
4328 return COSTS_N_INSNS (3);
4330 case UDIV:
4331 case UMOD:
4332 case DIV:
4333 case MOD:
4334 return 100;
4336 case TRUNCATE:
4337 return 99;
4339 case AND:
4340 case XOR:
4341 case IOR:
4342 /* XXX guess. */
4343 return 8;
4345 case MEM:
4346 /* XXX another guess. */
4347 /* Memory costs quite a lot for the first word, but subsequent words
4348 load at the equivalent of a single insn each. */
4349 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4350 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4351 ? 4 : 0));
4353 case IF_THEN_ELSE:
4354 /* XXX a guess. */
4355 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4356 return 14;
4357 return 2;
4359 case ZERO_EXTEND:
4360 /* XXX still guessing. */
4361 switch (GET_MODE (XEXP (x, 0)))
4363 case QImode:
4364 return (1 + (mode == DImode ? 4 : 0)
4365 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4367 case HImode:
4368 return (4 + (mode == DImode ? 4 : 0)
4369 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4371 case SImode:
4372 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4374 default:
4375 return 99;
4378 default:
4379 return 99;
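/* Worked example of the MULT cost above: a constant multiplier of 100
   is shifted right two bits per iteration (100 -> 25 -> 6 -> 1 -> 0),
   so cycles == 4 and the result is COSTS_N_INSNS (2) + 4. */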
4384 /* Worker routine for arm_rtx_costs. */
4385 static inline int
4386 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4388 enum machine_mode mode = GET_MODE (x);
4389 enum rtx_code subcode;
4390 int extra_cost;
4392 switch (code)
4394 case MEM:
4395 /* Memory costs quite a lot for the first word, but subsequent words
4396 load at the equivalent of a single insn each. */
4397 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4398 + (GET_CODE (x) == SYMBOL_REF
4399 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4401 case DIV:
4402 case MOD:
4403 case UDIV:
4404 case UMOD:
4405 return optimize_size ? COSTS_N_INSNS (2) : 100;
4407 case ROTATE:
4408 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4409 return 4;
4410 /* Fall through */
4411 case ROTATERT:
4412 if (mode != SImode)
4413 return 8;
4414 /* Fall through */
4415 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4416 if (mode == DImode)
4417 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4418 + ((GET_CODE (XEXP (x, 0)) == REG
4419 || (GET_CODE (XEXP (x, 0)) == SUBREG
4420 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4421 ? 0 : 8));
4422 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4423 || (GET_CODE (XEXP (x, 0)) == SUBREG
4424 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4425 ? 0 : 4)
4426 + ((GET_CODE (XEXP (x, 1)) == REG
4427 || (GET_CODE (XEXP (x, 1)) == SUBREG
4428 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4429 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4430 ? 0 : 4));
4432 case MINUS:
4433 if (mode == DImode)
4434 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4435 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4436 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4437 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4438 ? 0 : 8));
4440 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4441 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4442 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4443 && arm_const_double_rtx (XEXP (x, 1))))
4444 ? 0 : 8)
4445 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4446 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4447 && arm_const_double_rtx (XEXP (x, 0))))
4448 ? 0 : 8));
4450 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4451 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4452 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4453 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4454 || subcode == ASHIFTRT || subcode == LSHIFTRT
4455 || subcode == ROTATE || subcode == ROTATERT
4456 || (subcode == MULT
4457 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4458 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4459 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4460 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4461 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4462 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4463 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4464 return 1;
4465 /* Fall through */
4467 case PLUS:
4468 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4469 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4470 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4471 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4472 && arm_const_double_rtx (XEXP (x, 1))))
4473 ? 0 : 8));
4475 /* Fall through */
4476 case AND: case XOR: case IOR:
4477 extra_cost = 0;
4479 /* Normally the frame registers will be split into reg+const during
4480 reload, so it is a bad idea to combine them with other instructions,
4481 since then they might not be moved outside of loops. As a compromise
4482 we allow integration with ops that have a constant as their second
4483 operand. */
4484 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4485 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4486 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4487 || (REG_OR_SUBREG_REG (XEXP (x, 0))
4488 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
4489 extra_cost = 4;
4491 if (mode == DImode)
4492 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4493 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4494 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4495 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4496 ? 0 : 8));
4498 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4499 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4500 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4501 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4502 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4503 ? 0 : 4));
4505 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4506 return (1 + extra_cost
4507 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4508 || subcode == LSHIFTRT || subcode == ASHIFTRT
4509 || subcode == ROTATE || subcode == ROTATERT
4510 || (subcode == MULT
4511 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4512 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4513 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4514 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4515 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4516 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4517 ? 0 : 4));
4519 return 8;
4521 case MULT:
4522 /* This should have been handled by the CPU specific routines. */
4523 gcc_unreachable ();
4525 case TRUNCATE:
4526 if (arm_arch3m && mode == SImode
4527 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4528 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4529 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4530 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4531 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4532 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4533 return 8;
4534 return 99;
4536 case NEG:
4537 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4538 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4539 /* Fall through */
4540 case NOT:
4541 if (mode == DImode)
4542 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4544 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4546 case IF_THEN_ELSE:
4547 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4548 return 14;
4549 return 2;
4551 case COMPARE:
4552 return 1;
4554 case ABS:
4555 return 4 + (mode == DImode ? 4 : 0);
4557 case SIGN_EXTEND:
4558 if (GET_MODE (XEXP (x, 0)) == QImode)
4559 return (4 + (mode == DImode ? 4 : 0)
4560 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4561 /* Fall through */
4562 case ZERO_EXTEND:
4563 switch (GET_MODE (XEXP (x, 0)))
4565 case QImode:
4566 return (1 + (mode == DImode ? 4 : 0)
4567 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4569 case HImode:
4570 return (4 + (mode == DImode ? 4 : 0)
4571 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4573 case SImode:
4574 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4576 case V8QImode:
4577 case V4HImode:
4578 case V2SImode:
4579 case V4QImode:
4580 case V2HImode:
4581 return 1;
4583 default:
4584 gcc_unreachable ();
4586 gcc_unreachable ();
4588 case CONST_INT:
4589 if (const_ok_for_arm (INTVAL (x)))
4590 return outer == SET ? 2 : -1;
4591 else if (outer == AND
4592 && const_ok_for_arm (~INTVAL (x)))
4593 return -1;
4594 else if ((outer == COMPARE
4595 || outer == PLUS || outer == MINUS)
4596 && const_ok_for_arm (-INTVAL (x)))
4597 return -1;
4598 else
4599 return 5;
4601 case CONST:
4602 case LABEL_REF:
4603 case SYMBOL_REF:
4604 return 6;
4606 case CONST_DOUBLE:
4607 if (arm_const_double_rtx (x))
4608 return outer == SET ? 2 : -1;
4609 else if ((outer == COMPARE || outer == PLUS)
4610 && neg_const_double_rtx_ok_for_fpa (x))
4611 return -1;
4612 return 7;
4614 default:
4615 return 99;
4619 /* RTX costs when optimizing for size. */
4620 static bool
4621 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4623 enum machine_mode mode = GET_MODE (x);
4625 if (TARGET_THUMB)
4627 /* XXX TBD. For now, use the standard costs. */
4628 *total = thumb_rtx_costs (x, code, outer_code);
4629 return true;
4632 switch (code)
4634 case MEM:
4635 /* A memory access costs 1 insn if the mode is small or the address is
4636 a single register; otherwise it costs one insn per word. */
4637 if (REG_P (XEXP (x, 0)))
4638 *total = COSTS_N_INSNS (1);
4639 else
4640 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4641 return true;
4643 case DIV:
4644 case MOD:
4645 case UDIV:
4646 case UMOD:
4647 /* Needs a libcall, so it costs about this. */
4648 *total = COSTS_N_INSNS (2);
4649 return false;
4651 case ROTATE:
4652 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4654 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4655 return true;
4657 /* Fall through */
4658 case ROTATERT:
4659 case ASHIFT:
4660 case LSHIFTRT:
4661 case ASHIFTRT:
4662 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4664 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4665 return true;
4667 else if (mode == SImode)
4669 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4670 /* Slightly disparage register shifts, but not by much. */
4671 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4672 *total += 1 + rtx_cost (XEXP (x, 1), code);
4673 return true;
4676 /* Needs a libcall. */
4677 *total = COSTS_N_INSNS (2);
4678 return false;
4680 case MINUS:
4681 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4683 *total = COSTS_N_INSNS (1);
4684 return false;
4687 if (mode == SImode)
4689 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4690 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4692 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4693 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4694 || subcode1 == ROTATE || subcode1 == ROTATERT
4695 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4696 || subcode1 == ASHIFTRT)
4698 /* It's just the cost of the two operands. */
4699 *total = 0;
4700 return false;
4703 *total = COSTS_N_INSNS (1);
4704 return false;
4707 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4708 return false;
4710 case PLUS:
4711 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4713 *total = COSTS_N_INSNS (1);
4714 return false;
4717 /* Fall through */
4718 case AND: case XOR: case IOR:
4719 if (mode == SImode)
4721 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4723 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4724 || subcode == LSHIFTRT || subcode == ASHIFTRT
4725 || (code == AND && subcode == NOT))
4727 /* It's just the cost of the two operands. */
4728 *total = 0;
4729 return false;
4733 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4734 return false;
4736 case MULT:
4737 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4738 return false;
4740 case NEG:
4741 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4742 *total = COSTS_N_INSNS (1);
4743 /* Fall through */
4744 case NOT:
4745 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4747 return false;
4749 case IF_THEN_ELSE:
4750 *total = 0;
4751 return false;
4753 case COMPARE:
4754 if (cc_register (XEXP (x, 0), VOIDmode))
4755 *total = 0;
4756 else
4757 *total = COSTS_N_INSNS (1);
4758 return false;
4760 case ABS:
4761 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4762 *total = COSTS_N_INSNS (1);
4763 else
4764 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4765 return false;
4767 case SIGN_EXTEND:
4768 *total = 0;
4769 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4771 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4772 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4774 if (mode == DImode)
4775 *total += COSTS_N_INSNS (1);
4776 return false;
4778 case ZERO_EXTEND:
4779 *total = 0;
4780 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4782 switch (GET_MODE (XEXP (x, 0)))
4784 case QImode:
4785 *total += COSTS_N_INSNS (1);
4786 break;
4788 case HImode:
4789 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4791 case SImode:
4792 break;
4794 default:
4795 *total += COSTS_N_INSNS (2);
4799 if (mode == DImode)
4800 *total += COSTS_N_INSNS (1);
4802 return false;
4804 case CONST_INT:
4805 if (const_ok_for_arm (INTVAL (x)))
4806 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4807 else if (const_ok_for_arm (~INTVAL (x)))
4808 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4809 else if (const_ok_for_arm (-INTVAL (x)))
4811 if (outer_code == COMPARE || outer_code == PLUS
4812 || outer_code == MINUS)
4813 *total = 0;
4814 else
4815 *total = COSTS_N_INSNS (1);
4817 else
4818 *total = COSTS_N_INSNS (2);
4819 return true;
4821 case CONST:
4822 case LABEL_REF:
4823 case SYMBOL_REF:
4824 *total = COSTS_N_INSNS (2);
4825 return true;
4827 case CONST_DOUBLE:
4828 *total = COSTS_N_INSNS (4);
4829 return true;
4831 default:
4832 if (mode != VOIDmode)
4833 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4834 else
4835 *total = COSTS_N_INSNS (4); /* Who knows? */
4836 return false;
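/* Example of the CONST_INT classification above: 255 is a valid ARM
   immediate, so it costs one insn in a SET and nothing as an operand.
   257 (0x101) spans nine bits, and neither it nor its complement or
   negation is encodable, so it is costed at two insns. */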
4840 /* RTX costs for cores with a slow MUL implementation. */
4842 static bool
4843 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4845 enum machine_mode mode = GET_MODE (x);
4847 if (TARGET_THUMB)
4849 *total = thumb_rtx_costs (x, code, outer_code);
4850 return true;
4853 switch (code)
4855 case MULT:
4856 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4857 || mode == DImode)
4859 *total = 30;
4860 return true;
4863 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4865 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4866 & (unsigned HOST_WIDE_INT) 0xffffffff);
4867 int cost, const_ok = const_ok_for_arm (i);
4868 int j, booth_unit_size;
4870 /* Tune as appropriate. */
4871 cost = const_ok ? 4 : 8;
4872 booth_unit_size = 2;
4873 for (j = 0; i && j < 32; j += booth_unit_size)
4875 i >>= booth_unit_size;
4876 cost += 2;
4879 *total = cost;
4880 return true;
4883 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4884 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4885 return true;
4887 default:
4888 *total = arm_rtx_costs_1 (x, code, outer_code);
4889 return true;
4894 /* RTX cost for cores with a fast multiply unit (M variants). */
4896 static bool
4897 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4899 enum machine_mode mode = GET_MODE (x);
4901 if (TARGET_THUMB)
4903 *total = thumb_rtx_costs (x, code, outer_code);
4904 return true;
4907 switch (code)
4909 case MULT:
4910 /* There is no point basing this on the tuning, since it is always the
4911 fast variant if it exists at all. */
4912 if (mode == DImode
4913 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4914 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4915 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4917 *total = 8;
4918 return true;
4922 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4923 || mode == DImode)
4925 *total = 30;
4926 return true;
4929 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4931 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4932 & (unsigned HOST_WIDE_INT) 0xffffffff);
4933 int cost, const_ok = const_ok_for_arm (i);
4934 int j, booth_unit_size;
4936 /* Tune as appropriate. */
4937 cost = const_ok ? 4 : 8;
4938 booth_unit_size = 8;
4939 for (j = 0; i && j < 32; j += booth_unit_size)
4941 i >>= booth_unit_size;
4942 cost += 2;
4945 *total = cost;
4946 return true;
4949 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4950 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4951 return true;
4953 default:
4954 *total = arm_rtx_costs_1 (x, code, outer_code);
4955 return true;
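/* The only difference from arm_slowmul_rtx_costs is booth_unit_size:
   the slow multiplier retires 2 bits of the constant per step, the
   fast (M-variant) unit 8. For example, with the multiplier 0xff00
   (a valid immediate, so the base cost is 4) the slow loop iterates
   8 times giving 4 + 16, the fast loop twice giving 4 + 4. */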
4960 /* RTX cost for XScale CPUs. */
4962 static bool
4963 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4965 enum machine_mode mode = GET_MODE (x);
4967 if (TARGET_THUMB)
4969 *total = thumb_rtx_costs (x, code, outer_code);
4970 return true;
4973 switch (code)
4975 case MULT:
4976 /* There is no point basing this on the tuning, since it is always the
4977 fast variant if it exists at all. */
4978 if (mode == DImode
4979 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4980 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4981 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4983 *total = 8;
4984 return true;
4988 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4989 || mode == DImode)
4991 *total = 30;
4992 return true;
4995 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4997 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4998 & (unsigned HOST_WIDE_INT) 0xffffffff);
4999 int cost, const_ok = const_ok_for_arm (i);
5000 unsigned HOST_WIDE_INT masked_const;
5002 /* The cost will be related to two insns.
5003 First a load of the constant (MOV or LDR), then a multiply. */
5004 cost = 2;
5005 if (! const_ok)
5006 cost += 1; /* LDR is probably more expensive because
5007 of longer result latency. */
5008 masked_const = i & 0xffff8000;
5009 if (masked_const != 0 && masked_const != 0xffff8000)
5011 masked_const = i & 0xf8000000;
5012 if (masked_const == 0 || masked_const == 0xf8000000)
5013 cost += 1;
5014 else
5015 cost += 2;
5017 *total = cost;
5018 return true;
5021 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5022 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5023 return true;
5025 case COMPARE:
5026 /* A COMPARE of a MULT is slow on XScale; the muls instruction
5027 will stall until the multiplication is complete. */
5028 if (GET_CODE (XEXP (x, 0)) == MULT)
5029 *total = 4 + rtx_cost (XEXP (x, 0), code);
5030 else
5031 *total = arm_rtx_costs_1 (x, code, outer_code);
5032 return true;
5034 default:
5035 *total = arm_rtx_costs_1 (x, code, outer_code);
5036 return true;
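/* Worked example of the XScale multiply costing above: for the
   constant 0x12345 (not a valid immediate, so the cost starts at
   2 + 1 for the LDR), i & 0xffff8000 == 0x10000, which is neither 0
   nor 0xffff8000, and i & 0xf8000000 == 0, so one more unit is
   added, for a total of 4. */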
5041 /* RTX costs for 9e (and later) cores. */
5043 static bool
5044 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
5046 enum machine_mode mode = GET_MODE (x);
5047 int nonreg_cost;
5048 int cost;
5050 if (TARGET_THUMB)
5052 switch (code)
5054 case MULT:
5055 *total = COSTS_N_INSNS (3);
5056 return true;
5058 default:
5059 *total = thumb_rtx_costs (x, code, outer_code);
5060 return true;
5064 switch (code)
5066 case MULT:
5067 /* There is no point basing this on the tuning, since it is always the
5068 fast variant if it exists at all. */
5069 if (mode == DImode
5070 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5071 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5072 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5074 *total = 3;
5075 return true;
5079 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5081 *total = 30;
5082 return true;
5084 if (mode == DImode)
5086 cost = 7;
5087 nonreg_cost = 8;
5089 else
5091 cost = 2;
5092 nonreg_cost = 4;
5096 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
5097 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
5098 return true;
5100 default:
5101 *total = arm_rtx_costs_1 (x, code, outer_code);
5102 return true;
5105 /* All address computations that can be done are free, but rtx_cost returns
5106 nearly the same value for all of them. So we weight the different types
5107 of address here in the order (most preferred first):
5108 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
5109 static inline int
5110 arm_arm_address_cost (rtx x)
5112 enum rtx_code c = GET_CODE (x);
5114 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
5115 return 0;
5116 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
5117 return 10;
5119 if (c == PLUS || c == MINUS)
5121 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5122 return 2;
5124 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
5125 return 3;
5127 return 4;
5130 return 6;
5133 static inline int
5134 arm_thumb_address_cost (rtx x)
5136 enum rtx_code c = GET_CODE (x);
5138 if (c == REG)
5139 return 1;
5140 if (c == PLUS
5141 && GET_CODE (XEXP (x, 0)) == REG
5142 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5143 return 1;
5145 return 2;
5148 static int
5149 arm_address_cost (rtx x)
5151 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
5154 static int
5155 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
5157 rtx i_pat, d_pat;
5159 /* Some true dependencies can have a higher cost depending
5160 on precisely how certain input operands are used. */
5161 if (arm_tune_xscale
5162 && REG_NOTE_KIND (link) == 0
5163 && recog_memoized (insn) >= 0
5164 && recog_memoized (dep) >= 0)
5166 int shift_opnum = get_attr_shift (insn);
5167 enum attr_type attr_type = get_attr_type (dep);
5169 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5170 operand for INSN. If we have a shifted input operand and the
5171 instruction we depend on is another ALU instruction, then we may
5172 have to account for an additional stall. */
5173 if (shift_opnum != 0
5174 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
5176 rtx shifted_operand;
5177 int opno;
5179 /* Get the shifted operand. */
5180 extract_insn (insn);
5181 shifted_operand = recog_data.operand[shift_opnum];
5183 /* Iterate over all the operands in DEP. If we write an operand
5184 that overlaps with SHIFTED_OPERAND, then we have to increase the
5185 cost of this dependency. */
5186 extract_insn (dep);
5187 preprocess_constraints ();
5188 for (opno = 0; opno < recog_data.n_operands; opno++)
5190 /* We can ignore strict inputs. */
5191 if (recog_data.operand_type[opno] == OP_IN)
5192 continue;
5194 if (reg_overlap_mentioned_p (recog_data.operand[opno],
5195 shifted_operand))
5196 return 2;
5201 /* XXX This is not strictly true for the FPA. */
5202 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
5203 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
5204 return 0;
5206 /* Call insns don't incur a stall, even if they follow a load. */
5207 if (REG_NOTE_KIND (link) == 0
5208 && GET_CODE (insn) == CALL_INSN)
5209 return 1;
5211 if ((i_pat = single_set (insn)) != NULL
5212 && GET_CODE (SET_SRC (i_pat)) == MEM
5213 && (d_pat = single_set (dep)) != NULL
5214 && GET_CODE (SET_DEST (d_pat)) == MEM)
5216 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
5217 /* This is a load after a store; there is no conflict if the load reads
5218 from a cached area. Assume that loads from the stack and from the
5219 constant pool are cached, and that others will miss. This is a
5220 hack. */
5222 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
5223 || reg_mentioned_p (stack_pointer_rtx, src_mem)
5224 || reg_mentioned_p (frame_pointer_rtx, src_mem)
5225 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
5226 return 1;
5229 return cost;
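/* An illustration of the XScale shifted-operand case above (assumed
   syntax):

     add r2, r3, r4, lsl #1    @ DEP: an alu_shift insn writing r2
     add r5, r6, r2, lsl #2    @ INSN: r2 feeds the shifter

   INSN needs r2 in its shifter earlier than a plain ALU operand
   would be needed, so the dependency cost is raised to 2. */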
5232 static int fp_consts_inited = 0;
5234 /* Only zero is valid for VFP. Other values are also valid for FPA. */
5235 static const char * const strings_fp[8] =
5237 "0", "1", "2", "3",
5238 "4", "5", "0.5", "10"
5241 static REAL_VALUE_TYPE values_fp[8];
5243 static void
5244 init_fp_table (void)
5246 int i;
5247 REAL_VALUE_TYPE r;
5249 if (TARGET_VFP)
5250 fp_consts_inited = 1;
5251 else
5252 fp_consts_inited = 8;
5254 for (i = 0; i < fp_consts_inited; i++)
5256 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
5257 values_fp[i] = r;
5261 /* Return TRUE if rtx X is a valid immediate FP constant. */
5263 arm_const_double_rtx (rtx x)
5265 REAL_VALUE_TYPE r;
5266 int i;
5268 if (!fp_consts_inited)
5269 init_fp_table ();
5271 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5272 if (REAL_VALUE_MINUS_ZERO (r))
5273 return 0;
5275 for (i = 0; i < fp_consts_inited; i++)
5276 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5277 return 1;
5279 return 0;
5282 /* Return TRUE if rtx X is a valid immediate FPA constant. */
5284 neg_const_double_rtx_ok_for_fpa (rtx x)
5286 REAL_VALUE_TYPE r;
5287 int i;
5289 if (!fp_consts_inited)
5290 init_fp_table ();
5292 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5293 r = REAL_VALUE_NEGATE (r);
5294 if (REAL_VALUE_MINUS_ZERO (r))
5295 return 0;
5297 for (i = 0; i < 8; i++)
5298 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5299 return 1;
5301 return 0;
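/* Example for the two predicates above: with the FPA table
   { 0, 1, 2, 3, 4, 5, 0.5, 10 }, the constant 2.0 is directly valid,
   while -3.0 only satisfies neg_const_double_rtx_ok_for_fpa and so is
   only usable by patterns that can negate the constant. -0.0 is
   rejected in both cases, presumably because it compares equal to 0.0
   without being the same value. */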
5304 /* Predicates for `match_operand' and `match_operator'. */
5306 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5308 cirrus_memory_offset (rtx op)
5310 /* Reject eliminable registers. */
5311 if (! (reload_in_progress || reload_completed)
5312 && ( reg_mentioned_p (frame_pointer_rtx, op)
5313 || reg_mentioned_p (arg_pointer_rtx, op)
5314 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5315 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5316 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5317 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5318 return 0;
5320 if (GET_CODE (op) == MEM)
5322 rtx ind;
5324 ind = XEXP (op, 0);
5326 /* Match: (mem (reg)). */
5327 if (GET_CODE (ind) == REG)
5328 return 1;
5330 /* Match:
5331 (mem (plus (reg)
5332 (const))). */
5333 if (GET_CODE (ind) == PLUS
5334 && GET_CODE (XEXP (ind, 0)) == REG
5335 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5336 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
5337 return 1;
5340 return 0;
5343 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5344 WB is true if writeback address modes are allowed. */
5347 arm_coproc_mem_operand (rtx op, bool wb)
5349 rtx ind;
5351 /* Reject eliminable registers. */
5352 if (! (reload_in_progress || reload_completed)
5353 && ( reg_mentioned_p (frame_pointer_rtx, op)
5354 || reg_mentioned_p (arg_pointer_rtx, op)
5355 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5356 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5357 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5358 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5359 return FALSE;
5361 /* Constants are converted into offsets from labels. */
5362 if (GET_CODE (op) != MEM)
5363 return FALSE;
5365 ind = XEXP (op, 0);
5367 if (reload_completed
5368 && (GET_CODE (ind) == LABEL_REF
5369 || (GET_CODE (ind) == CONST
5370 && GET_CODE (XEXP (ind, 0)) == PLUS
5371 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5372 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5373 return TRUE;
5375 /* Match: (mem (reg)). */
5376 if (GET_CODE (ind) == REG)
5377 return arm_address_register_rtx_p (ind, 0);
5379 /* Autoincrement addressing modes. */
5380 if (wb
5381 && (GET_CODE (ind) == PRE_INC
5382 || GET_CODE (ind) == POST_INC
5383 || GET_CODE (ind) == PRE_DEC
5384 || GET_CODE (ind) == POST_DEC))
5385 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5387 if (wb
5388 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5389 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5390 && GET_CODE (XEXP (ind, 1)) == PLUS
5391 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5392 ind = XEXP (ind, 1);
5394 /* Match:
5395 (plus (reg)
5396 (const)). */
5397 if (GET_CODE (ind) == PLUS
5398 && GET_CODE (XEXP (ind, 0)) == REG
5399 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5400 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5401 && INTVAL (XEXP (ind, 1)) > -1024
5402 && INTVAL (XEXP (ind, 1)) < 1024
5403 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5404 return TRUE;
5406 return FALSE;
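/* Sketch of the coprocessor addresses accepted above (illustrative):

     [rN]              plain base register
     [rN, #+/-1020]    word-aligned offset strictly within +/-1024
     [rN], #imm etc.   POST_INC/PRE_DEC and friends, only when WB
     label             constant-pool references after reload

   The +/-1020 range matches the 8-bit, word-scaled offset field of
   coprocessor load/store encodings such as those used by VFP. */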
5409 /* Return true if X is a register that will be eliminated later on. */
5411 arm_eliminable_register (rtx x)
5413 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5414 || REGNO (x) == ARG_POINTER_REGNUM
5415 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5416 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5419 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
5420 VFP registers. Otherwise return NO_REGS. */
5422 enum reg_class
5423 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5425 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5426 return NO_REGS;
5428 return GENERAL_REGS;
5431 /* Values which must be returned in the most-significant end of the return
5432 register. */
5434 static bool
5435 arm_return_in_msb (tree valtype)
5437 return (TARGET_AAPCS_BASED
5438 && BYTES_BIG_ENDIAN
5439 && (AGGREGATE_TYPE_P (valtype)
5440 || TREE_CODE (valtype) == COMPLEX_TYPE));
5443 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5444 Used by the Cirrus Maverick code, which has to work around
5445 a hardware bug triggered by such instructions. */
5446 static bool
5447 arm_memory_load_p (rtx insn)
5449 rtx body, lhs, rhs;
5451 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5452 return false;
5454 body = PATTERN (insn);
5456 if (GET_CODE (body) != SET)
5457 return false;
5459 lhs = XEXP (body, 0);
5460 rhs = XEXP (body, 1);
5462 lhs = REG_OR_SUBREG_RTX (lhs);
5464 /* If the destination is not a general purpose
5465 register we do not have to worry. */
5466 if (GET_CODE (lhs) != REG
5467 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5468 return false;
5470 /* As well as loads from memory we also have to react
5471 to loads of invalid constants which will be turned
5472 into loads from the minipool. */
5473 return (GET_CODE (rhs) == MEM
5474 || GET_CODE (rhs) == SYMBOL_REF
5475 || note_invalid_constants (insn, -1, false));
5478 /* Return TRUE if INSN is a Cirrus instruction. */
5479 static bool
5480 arm_cirrus_insn_p (rtx insn)
5482 enum attr_cirrus attr;
5484 /* get_attr cannot accept USE or CLOBBER. */
5485 if (!insn
5486 || GET_CODE (insn) != INSN
5487 || GET_CODE (PATTERN (insn)) == USE
5488 || GET_CODE (PATTERN (insn)) == CLOBBER)
5489 return 0;
5491 attr = get_attr_cirrus (insn);
5493 return attr != CIRRUS_NOT;
5496 /* Cirrus reorg for invalid instruction combinations. */
5497 static void
5498 cirrus_reorg (rtx first)
5500 enum attr_cirrus attr;
5501 rtx body = PATTERN (first);
5502 rtx t;
5503 int nops;
5505 /* Any branch must be followed by 2 non-Cirrus instructions. */
5506 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5508 nops = 0;
5509 t = next_nonnote_insn (first);
5511 if (arm_cirrus_insn_p (t))
5512 ++ nops;
5514 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5515 ++ nops;
5517 while (nops --)
5518 emit_insn_after (gen_nop (), first);
5520 return;
5523 /* (float (blah)) is in parallel with a clobber. */
5524 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5525 body = XVECEXP (body, 0, 0);
5527 if (GET_CODE (body) == SET)
5529 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5531 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5532 be followed by a non-Cirrus insn. */
5533 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5535 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5536 emit_insn_after (gen_nop (), first);
5538 return;
5540 else if (arm_memory_load_p (first))
5542 unsigned int arm_regno;
5544 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5545 ldr/cfmv64hr combination where the Rd field is the same
5546 in both instructions must be split with a non-Cirrus
5547 insn. Example:
5549 ldr r0, blah
5551 cfmvsr mvf0, r0. */
5553 /* Get Arm register number for ldr insn. */
5554 if (GET_CODE (lhs) == REG)
5555 arm_regno = REGNO (lhs);
5556 else
5558 gcc_assert (GET_CODE (rhs) == REG);
5559 arm_regno = REGNO (rhs);
5562 /* Next insn. */
5563 first = next_nonnote_insn (first);
5565 if (! arm_cirrus_insn_p (first))
5566 return;
5568 body = PATTERN (first);
5570 /* (float (blah)) is in parallel with a clobber. */
5571 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5572 body = XVECEXP (body, 0, 0);
5574 if (GET_CODE (body) == FLOAT)
5575 body = XEXP (body, 0);
5577 if (get_attr_cirrus (first) == CIRRUS_MOVE
5578 && GET_CODE (XEXP (body, 1)) == REG
5579 && arm_regno == REGNO (XEXP (body, 1)))
5580 emit_insn_after (gen_nop (), first);
5582 return;
5586 /* get_attr cannot accept USE or CLOBBER. */
5587 if (!first
5588 || GET_CODE (first) != INSN
5589 || GET_CODE (PATTERN (first)) == USE
5590 || GET_CODE (PATTERN (first)) == CLOBBER)
5591 return;
5593 attr = get_attr_cirrus (first);
5595 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5596 must be followed by a non-coprocessor instruction. */
5597 if (attr == CIRRUS_COMPARE)
5599 nops = 0;
5601 t = next_nonnote_insn (first);
5603 if (arm_cirrus_insn_p (t))
5604 ++ nops;
5606 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5607 ++ nops;
5609 while (nops --)
5610 emit_insn_after (gen_nop (), first);
5612 return;
5616 /* Return TRUE if X references a SYMBOL_REF. */
5618 symbol_mentioned_p (rtx x)
5620 const char * fmt;
5621 int i;
5623 if (GET_CODE (x) == SYMBOL_REF)
5624 return 1;
5626 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
5627 are constant offsets, not symbols. */
5628 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5629 return 0;
5631 fmt = GET_RTX_FORMAT (GET_CODE (x));
5633 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5635 if (fmt[i] == 'E')
5637 int j;
5639 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5640 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5641 return 1;
5643 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5644 return 1;
5647 return 0;
5650 /* Return TRUE if X references a LABEL_REF. */
5652 label_mentioned_p (rtx x)
5654 const char * fmt;
5655 int i;
5657 if (GET_CODE (x) == LABEL_REF)
5658 return 1;
5660 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
5661 instruction, but they are constant offsets, not symbols. */
5662 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5663 return 0;
5665 fmt = GET_RTX_FORMAT (GET_CODE (x));
5666 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5668 if (fmt[i] == 'E')
5670 int j;
5672 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5673 if (label_mentioned_p (XVECEXP (x, i, j)))
5674 return 1;
5676 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5677 return 1;
5680 return 0;
5684 tls_mentioned_p (rtx x)
5686 switch (GET_CODE (x))
5688 case CONST:
5689 return tls_mentioned_p (XEXP (x, 0));
5691 case UNSPEC:
5692 if (XINT (x, 1) == UNSPEC_TLS)
5693 return 1;
5695 default:
5696 return 0;
5700 /* Must not copy a SET whose source operand is PC-relative. */
5702 static bool
5703 arm_cannot_copy_insn_p (rtx insn)
5705 rtx pat = PATTERN (insn);
5707 if (GET_CODE (pat) == PARALLEL
5708 && GET_CODE (XVECEXP (pat, 0, 0)) == SET)
5710 rtx rhs = SET_SRC (XVECEXP (pat, 0, 0));
5712 if (GET_CODE (rhs) == UNSPEC
5713 && XINT (rhs, 1) == UNSPEC_PIC_BASE)
5714 return TRUE;
5716 if (GET_CODE (rhs) == MEM
5717 && GET_CODE (XEXP (rhs, 0)) == UNSPEC
5718 && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
5719 return TRUE;
5722 return FALSE;
5725 enum rtx_code
5726 minmax_code (rtx x)
5728 enum rtx_code code = GET_CODE (x);
5730 switch (code)
5732 case SMAX:
5733 return GE;
5734 case SMIN:
5735 return LE;
5736 case UMIN:
5737 return LEU;
5738 case UMAX:
5739 return GEU;
5740 default:
5741 gcc_unreachable ();
5745 /* Return 1 if memory locations are adjacent. */
5747 adjacent_mem_locations (rtx a, rtx b)
5749 /* We don't guarantee to preserve the order of these memory refs. */
5750 if (volatile_refs_p (a) || volatile_refs_p (b))
5751 return 0;
5753 if ((GET_CODE (XEXP (a, 0)) == REG
5754 || (GET_CODE (XEXP (a, 0)) == PLUS
5755 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5756 && (GET_CODE (XEXP (b, 0)) == REG
5757 || (GET_CODE (XEXP (b, 0)) == PLUS
5758 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5760 HOST_WIDE_INT val0 = 0, val1 = 0;
5761 rtx reg0, reg1;
5762 int val_diff;
5764 if (GET_CODE (XEXP (a, 0)) == PLUS)
5766 reg0 = XEXP (XEXP (a, 0), 0);
5767 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5769 else
5770 reg0 = XEXP (a, 0);
5772 if (GET_CODE (XEXP (b, 0)) == PLUS)
5774 reg1 = XEXP (XEXP (b, 0), 0);
5775 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5777 else
5778 reg1 = XEXP (b, 0);
5780 /* Don't accept any offset that will require multiple
5781 instructions to handle, since this would cause the
5782 arith_adjacentmem pattern to output an overlong sequence. */
5783 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5784 return 0;
5786 /* Don't allow an eliminable register: register elimination can make
5787 the offset too large. */
5788 if (arm_eliminable_register (reg0))
5789 return 0;
5791 val_diff = val1 - val0;
5793 if (arm_ld_sched)
5795 /* If the target has load delay slots, then there's no benefit
5796 to using an ldm instruction unless the offset is zero and
5797 we are optimizing for size. */
5798 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5799 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5800 && (val_diff == 4 || val_diff == -4));
5803 return ((REGNO (reg0) == REGNO (reg1))
5804 && (val_diff == 4 || val_diff == -4));
5807 return 0;
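/* Example for the function above: MEMs at [r4, #4] and [r4, #8] share
   a base and differ by exactly 4, so they can count as adjacent
   (subject to the arm_ld_sched heuristic); [r4] and [r4, #8] never
   do, nor does anything based on an eliminable register. */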
5811 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5812 HOST_WIDE_INT *load_offset)
5814 int unsorted_regs[4];
5815 HOST_WIDE_INT unsorted_offsets[4];
5816 int order[4];
5817 int base_reg = -1;
5818 int i;
5820 /* Can only handle 2, 3, or 4 insns at present,
5821 though could be easily extended if required. */
5822 gcc_assert (nops >= 2 && nops <= 4);
5824 /* Loop over the operands and check that the memory references are
5825 suitable (i.e. immediate offsets from the same base register). At
5826 the same time, extract the target register, and the memory
5827 offsets. */
5828 for (i = 0; i < nops; i++)
5830 rtx reg;
5831 rtx offset;
5833 /* Convert a subreg of a mem into the mem itself. */
5834 if (GET_CODE (operands[nops + i]) == SUBREG)
5835 operands[nops + i] = alter_subreg (operands + (nops + i));
5837 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5839 /* Don't reorder volatile memory references; it doesn't seem worth
5840 looking for the case where the order is ok anyway. */
5841 if (MEM_VOLATILE_P (operands[nops + i]))
5842 return 0;
5844 offset = const0_rtx;
5846 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5847 || (GET_CODE (reg) == SUBREG
5848 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5849 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5850 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5851 == REG)
5852 || (GET_CODE (reg) == SUBREG
5853 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5854 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5855 == CONST_INT)))
5857 if (i == 0)
5859 base_reg = REGNO (reg);
5860 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5861 ? REGNO (operands[i])
5862 : REGNO (SUBREG_REG (operands[i])));
5863 order[0] = 0;
5865 else
5867 if (base_reg != (int) REGNO (reg))
5868 /* Not addressed from the same base register. */
5869 return 0;
5871 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5872 ? REGNO (operands[i])
5873 : REGNO (SUBREG_REG (operands[i])));
5874 if (unsorted_regs[i] < unsorted_regs[order[0]])
5875 order[0] = i;
5878 /* If it isn't an integer register, or if it overwrites the
5879 base register but isn't the last insn in the list, then
5880 we can't do this. */
5881 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5882 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5883 return 0;
5885 unsorted_offsets[i] = INTVAL (offset);
5887 else
5888 /* Not a suitable memory address. */
5889 return 0;
5892 /* All the useful information has now been extracted from the
5893 operands into unsorted_regs and unsorted_offsets; additionally,
5894 order[0] has been set to the lowest numbered register in the
5895 list. Sort the registers into order, and check that the memory
5896 offsets are ascending and adjacent. */
5898 for (i = 1; i < nops; i++)
5900 int j;
5902 order[i] = order[i - 1];
5903 for (j = 0; j < nops; j++)
5904 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5905 && (order[i] == order[i - 1]
5906 || unsorted_regs[j] < unsorted_regs[order[i]]))
5907 order[i] = j;
5909 /* Have we found a suitable register? If not, one must be used more
5910 than once. */
5911 if (order[i] == order[i - 1])
5912 return 0;
5914 /* Are the memory offsets adjacent and ascending? */
5915 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5916 return 0;
5919 if (base)
5921 *base = base_reg;
5923 for (i = 0; i < nops; i++)
5924 regs[i] = unsorted_regs[order[i]];
5926 *load_offset = unsorted_offsets[order[0]];
5929 if (unsorted_offsets[order[0]] == 0)
5930 return 1; /* ldmia */
5932 if (unsorted_offsets[order[0]] == 4)
5933 return 2; /* ldmib */
5935 if (unsorted_offsets[order[nops - 1]] == 0)
5936 return 3; /* ldmda */
5938 if (unsorted_offsets[order[nops - 1]] == -4)
5939 return 4; /* ldmdb */
5941 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5942 if the offset isn't small enough. The reason 2 ldrs are faster
5943 is because these ARMs are able to do more than one cache access
5944 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5945 whilst the ARM8 has a double bandwidth cache. This means that
5946 these cores can do both an instruction fetch and a data fetch in
5947 a single cycle, so the trick of calculating the address into a
5948 scratch register (one of the result regs) and then doing a load
5949 multiple actually becomes slower (and no smaller in code size).
5950 That is the transformation
5952 ldr rd1, [rbase + offset]
5953 ldr rd2, [rbase + offset + 4]
5955 to
5957 add rd1, rbase, offset
5958 ldmia rd1, {rd1, rd2}
5960 produces worse code -- '3 cycles + any stalls on rd2' instead of
5961 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5962 access per cycle, the first sequence could never complete in less
5963 than 6 cycles, whereas the ldm sequence would only take 5 and
5964 would make better use of sequential accesses if not hitting the
5965 cache.
5967 We cheat here and test 'arm_ld_sched' which we currently know to
5968 only be true for the ARM8, ARM9 and StrongARM. If this ever
5969 changes, then the test below needs to be reworked. */
5970 if (nops == 2 && arm_ld_sched)
5971 return 0;
5973 /* Can't do it without setting up the offset, only do this if it takes
5974 no more than one insn. */
5975 return (const_ok_for_arm (unsorted_offsets[order[0]])
5976 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5979 const char *
5980 emit_ldm_seq (rtx *operands, int nops)
5982 int regs[4];
5983 int base_reg;
5984 HOST_WIDE_INT offset;
5985 char buf[100];
5986 int i;
5988 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5990 case 1:
5991 strcpy (buf, "ldm%?ia\t");
5992 break;
5994 case 2:
5995 strcpy (buf, "ldm%?ib\t");
5996 break;
5998 case 3:
5999 strcpy (buf, "ldm%?da\t");
6000 break;
6002 case 4:
6003 strcpy (buf, "ldm%?db\t");
6004 break;
6006 case 5:
6007 if (offset >= 0)
6008 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6009 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6010 (long) offset);
6011 else
6012 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6013 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6014 (long) -offset);
6015 output_asm_insn (buf, operands);
6016 base_reg = regs[0];
6017 strcpy (buf, "ldm%?ia\t");
6018 break;
6020 default:
6021 gcc_unreachable ();
6024 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6025 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6027 for (i = 1; i < nops; i++)
6028 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6029 reg_names[regs[i]]);
6031 strcat (buf, "}\t%@ phole ldm");
6033 output_asm_insn (buf, operands);
6034 return "";
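/* A sketch of the case-5 path above (register numbers are
   illustrative): with base r3, offset #16 and destination registers
   {r0, r1}, the output is "add r0, r3, #16" followed by
   "ldmia r0, {r0, r1}". Reusing the first destination register as
   the scratch base is safe because ldm consumes the base address
   before overwriting r0. */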
6037 int
6038 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6039 HOST_WIDE_INT * load_offset)
6041 int unsorted_regs[4];
6042 HOST_WIDE_INT unsorted_offsets[4];
6043 int order[4];
6044 int base_reg = -1;
6045 int i;
6047 /* Can only handle 2, 3, or 4 insns at present, though could be easily
6048 extended if required. */
6049 gcc_assert (nops >= 2 && nops <= 4);
6051 /* Loop over the operands and check that the memory references are
6052 suitable (i.e. immediate offsets from the same base register). At
6053 the same time, extract the target register, and the memory
6054 offsets. */
6055 for (i = 0; i < nops; i++)
6057 rtx reg;
6058 rtx offset;
6060 /* Convert a subreg of a mem into the mem itself. */
6061 if (GET_CODE (operands[nops + i]) == SUBREG)
6062 operands[nops + i] = alter_subreg (operands + (nops + i));
6064 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6066 /* Don't reorder volatile memory references; it doesn't seem worth
6067 looking for the case where the order is ok anyway. */
6068 if (MEM_VOLATILE_P (operands[nops + i]))
6069 return 0;
6071 offset = const0_rtx;
6073 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6074 || (GET_CODE (reg) == SUBREG
6075 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6076 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6077 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6078 == REG)
6079 || (GET_CODE (reg) == SUBREG
6080 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6081 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6082 == CONST_INT)))
6084 if (i == 0)
6086 base_reg = REGNO (reg);
6087 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6088 ? REGNO (operands[i])
6089 : REGNO (SUBREG_REG (operands[i])));
6090 order[0] = 0;
6092 else
6094 if (base_reg != (int) REGNO (reg))
6095 /* Not addressed from the same base register. */
6096 return 0;
6098 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6099 ? REGNO (operands[i])
6100 : REGNO (SUBREG_REG (operands[i])));
6101 if (unsorted_regs[i] < unsorted_regs[order[0]])
6102 order[0] = i;
6105 /* If it isn't an integer register, then we can't do this. */
6106 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
6107 return 0;
6109 unsorted_offsets[i] = INTVAL (offset);
6111 else
6112 /* Not a suitable memory address. */
6113 return 0;
6116 /* All the useful information has now been extracted from the
6117 operands into unsorted_regs and unsorted_offsets; additionally,
6118 order[0] has been set to the lowest numbered register in the
6119 list. Sort the registers into order, and check that the memory
6120 offsets are ascending and adjacent. */
6122 for (i = 1; i < nops; i++)
6124 int j;
6126 order[i] = order[i - 1];
6127 for (j = 0; j < nops; j++)
6128 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6129 && (order[i] == order[i - 1]
6130 || unsorted_regs[j] < unsorted_regs[order[i]]))
6131 order[i] = j;
6133 /* Have we found a suitable register? If not, one must be used more
6134 than once. */
6135 if (order[i] == order[i - 1])
6136 return 0;
6138 /* Are the memory addresses adjacent and ascending? */
6139 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6140 return 0;
6143 if (base)
6145 *base = base_reg;
6147 for (i = 0; i < nops; i++)
6148 regs[i] = unsorted_regs[order[i]];
6150 *load_offset = unsorted_offsets[order[0]];
6153 if (unsorted_offsets[order[0]] == 0)
6154 return 1; /* stmia */
6156 if (unsorted_offsets[order[0]] == 4)
6157 return 2; /* stmib */
6159 if (unsorted_offsets[order[nops - 1]] == 0)
6160 return 3; /* stmda */
6162 if (unsorted_offsets[order[nops - 1]] == -4)
6163 return 4; /* stmdb */
6165 return 0;
6168 const char *
6169 emit_stm_seq (rtx *operands, int nops)
6171 int regs[4];
6172 int base_reg;
6173 HOST_WIDE_INT offset;
6174 char buf[100];
6175 int i;
6177 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6179 case 1:
6180 strcpy (buf, "stm%?ia\t");
6181 break;
6183 case 2:
6184 strcpy (buf, "stm%?ib\t");
6185 break;
6187 case 3:
6188 strcpy (buf, "stm%?da\t");
6189 break;
6191 case 4:
6192 strcpy (buf, "stm%?db\t");
6193 break;
6195 default:
6196 gcc_unreachable ();
6199 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6200 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6202 for (i = 1; i < nops; i++)
6203 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6204 reg_names[regs[i]]);
6206 strcat (buf, "}\t%@ phole stm");
6208 output_asm_insn (buf, operands);
6209 return "";
6212 /* Routines for use in generating RTL. */
6214 rtx
6215 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
6216 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6218 HOST_WIDE_INT offset = *offsetp;
6219 int i = 0, j;
6220 rtx result;
6221 int sign = up ? 1 : -1;
6222 rtx mem, addr;
6224 /* XScale has load-store double instructions, but they have stricter
6225 alignment requirements than load-store multiple, so we cannot
6226 use them.
6228 For XScale ldm requires 2 + NREGS cycles to complete and blocks
6229 the pipeline until completion.
6231 NREGS CYCLES
6232 1 3
6233 2 4
6234 3 5
6235 4 6
6237 An ldr instruction takes 1-3 cycles, but does not block the
6238 pipeline.
6240 NREGS CYCLES
6241 1 1-3
6242 2 2-6
6243 3 3-9
6244 4 4-12
6246 Best case ldr will always win. However, the more ldr instructions
6247 we issue, the less likely we are to be able to schedule them well.
6248 Using ldr instructions also increases code size.
6250 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6251 for counts of 3 or 4 regs. */
6252 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6254 rtx seq;
6256 start_sequence ();
6258 for (i = 0; i < count; i++)
6260 addr = plus_constant (from, i * 4 * sign);
6261 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6262 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
6263 offset += 4 * sign;
6266 if (write_back)
6268 emit_move_insn (from, plus_constant (from, count * 4 * sign));
6269 *offsetp = offset;
6272 seq = get_insns ();
6273 end_sequence ();
6275 return seq;
6278 result = gen_rtx_PARALLEL (VOIDmode,
6279 rtvec_alloc (count + (write_back ? 1 : 0)));
6280 if (write_back)
6282 XVECEXP (result, 0, 0)
6283 = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
6284 i = 1;
6285 count++;
6288 for (j = 0; i < count; i++, j++)
6290 addr = plus_constant (from, j * 4 * sign);
6291 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6292 XVECEXP (result, 0, i)
6293 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
6294 offset += 4 * sign;
6297 if (write_back)
6298 *offsetp = offset;
6300 return result;
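/* The non-XScale path above returns a single PARALLEL. For example,
   a three-register load with write-back would look roughly like
   (parallel [(set from (plus from (const_int 12)))
   (set (reg r0) (mem from))
   (set (reg r1) (mem (plus from (const_int 4))))
   (set (reg r2) (mem (plus from (const_int 8))))])
   which the load-multiple patterns in arm.md are expected to match.
   Register numbers here are illustrative only. */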
6303 rtx
6304 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
6305 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6307 HOST_WIDE_INT offset = *offsetp;
6308 int i = 0, j;
6309 rtx result;
6310 int sign = up ? 1 : -1;
6311 rtx mem, addr;
6313 /* See arm_gen_load_multiple for discussion of
6314 the pros/cons of ldm/stm usage for XScale. */
6315 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6317 rtx seq;
6319 start_sequence ();
6321 for (i = 0; i < count; i++)
6323 addr = plus_constant (to, i * 4 * sign);
6324 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6325 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6326 offset += 4 * sign;
6329 if (write_back)
6331 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6332 *offsetp = offset;
6335 seq = get_insns ();
6336 end_sequence ();
6338 return seq;
6341 result = gen_rtx_PARALLEL (VOIDmode,
6342 rtvec_alloc (count + (write_back ? 1 : 0)));
6343 if (write_back)
6345 XVECEXP (result, 0, 0)
6346 = gen_rtx_SET (VOIDmode, to,
6347 plus_constant (to, count * 4 * sign));
6348 i = 1;
6349 count++;
6352 for (j = 0; i < count; i++, j++)
6354 addr = plus_constant (to, j * 4 * sign);
6355 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6356 XVECEXP (result, 0, i)
6357 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6358 offset += 4 * sign;
6361 if (write_back)
6362 *offsetp = offset;
6364 return result;
6367 int
6368 arm_gen_movmemqi (rtx *operands)
6370 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6371 HOST_WIDE_INT srcoffset, dstoffset;
6372 int i;
6373 rtx src, dst, srcbase, dstbase;
6374 rtx part_bytes_reg = NULL;
6375 rtx mem;
6377 if (GET_CODE (operands[2]) != CONST_INT
6378 || GET_CODE (operands[3]) != CONST_INT
6379 || INTVAL (operands[2]) > 64
6380 || INTVAL (operands[3]) & 3)
6381 return 0;
6383 dstbase = operands[0];
6384 srcbase = operands[1];
6386 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
6387 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
6389 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6390 out_words_to_go = INTVAL (operands[2]) / 4;
6391 last_bytes = INTVAL (operands[2]) & 3;
6392 dstoffset = srcoffset = 0;
6394 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6395 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6397 for (i = 0; in_words_to_go >= 2; i+=4)
6399 if (in_words_to_go > 4)
6400 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6401 srcbase, &srcoffset));
6402 else
6403 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6404 FALSE, srcbase, &srcoffset));
6406 if (out_words_to_go)
6408 if (out_words_to_go > 4)
6409 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6410 dstbase, &dstoffset));
6411 else if (out_words_to_go != 1)
6412 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6413 dst, TRUE,
6414 (last_bytes == 0
6415 ? FALSE : TRUE),
6416 dstbase, &dstoffset));
6417 else
6419 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6420 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6421 if (last_bytes != 0)
6423 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6424 dstoffset += 4;
6429 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6430 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6433 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6434 if (out_words_to_go)
6436 rtx sreg;
6438 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6439 sreg = copy_to_reg (mem);
6441 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6442 emit_move_insn (mem, sreg);
6443 in_words_to_go--;
6445 gcc_assert (!in_words_to_go); /* Sanity check */
6448 if (in_words_to_go)
6450 gcc_assert (in_words_to_go > 0);
6452 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6453 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6456 gcc_assert (!last_bytes || part_bytes_reg);
6458 if (BYTES_BIG_ENDIAN && last_bytes)
6460 rtx tmp = gen_reg_rtx (SImode);
6462 /* The bytes we want are in the top end of the word. */
6463 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6464 GEN_INT (8 * (4 - last_bytes))));
6465 part_bytes_reg = tmp;
6467 while (last_bytes)
6469 mem = adjust_automodify_address (dstbase, QImode,
6470 plus_constant (dst, last_bytes - 1),
6471 dstoffset + last_bytes - 1);
6472 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6474 if (--last_bytes)
6476 tmp = gen_reg_rtx (SImode);
6477 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6478 part_bytes_reg = tmp;
6483 else
6485 if (last_bytes > 1)
6487 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6488 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6489 last_bytes -= 2;
6490 if (last_bytes)
6492 rtx tmp = gen_reg_rtx (SImode);
6493 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6494 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6495 part_bytes_reg = tmp;
6496 dstoffset += 2;
6500 if (last_bytes)
6502 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6503 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6507 return 1;
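/* A worked example of the flow above: a 14-byte copy yields
   in_words_to_go = 4, out_words_to_go = 3 and last_bytes = 2, so we
   emit a 4-register load multiple into r0-r3, a 3-register store
   multiple with write-back, and finally (on a little-endian target)
   a halfword store taken from the spare word left in r3. */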
6510 /* Select a dominance comparison mode if possible for a test of the general
6511 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6512 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6513 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6514 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6515 In all cases OP will be either EQ or NE, but we don't need to know which
6516 here. If we are unable to support a dominance comparison we return
6517 CC mode. This will then fail to match for the RTL expressions that
6518 generate this call. */
6519 enum machine_mode
6520 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6522 enum rtx_code cond1, cond2;
6523 int swapped = 0;
6525 /* Currently we will probably get the wrong result if the individual
6526 comparisons are not simple. This also ensures that it is safe to
6527 reverse a comparison if necessary. */
6528 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6529 != CCmode)
6530 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6531 != CCmode))
6532 return CCmode;
6534 /* The if_then_else variant of this tests the second condition if the
6535 first passes, but is true if the first fails. Reverse the first
6536 condition to get a true "inclusive-or" expression. */
6537 if (cond_or == DOM_CC_NX_OR_Y)
6538 cond1 = reverse_condition (cond1);
6540 /* If the comparisons are not equal, and one doesn't dominate the other,
6541 then we can't do this. */
6542 if (cond1 != cond2
6543 && !comparison_dominates_p (cond1, cond2)
6544 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6545 return CCmode;
6547 if (swapped)
6549 enum rtx_code temp = cond1;
6550 cond1 = cond2;
6551 cond2 = temp;
6554 switch (cond1)
6556 case EQ:
6557 if (cond_or == DOM_CC_X_AND_Y)
6558 return CC_DEQmode;
6560 switch (cond2)
6562 case EQ: return CC_DEQmode;
6563 case LE: return CC_DLEmode;
6564 case LEU: return CC_DLEUmode;
6565 case GE: return CC_DGEmode;
6566 case GEU: return CC_DGEUmode;
6567 default: gcc_unreachable ();
6570 case LT:
6571 if (cond_or == DOM_CC_X_AND_Y)
6572 return CC_DLTmode;
6574 switch (cond2)
6576 case LT:
6577 return CC_DLTmode;
6578 case LE:
6579 return CC_DLEmode;
6580 case NE:
6581 return CC_DNEmode;
6582 default:
6583 gcc_unreachable ();
6586 case GT:
6587 if (cond_or == DOM_CC_X_AND_Y)
6588 return CC_DGTmode;
6590 switch (cond2)
6592 case GT:
6593 return CC_DGTmode;
6594 case GE:
6595 return CC_DGEmode;
6596 case NE:
6597 return CC_DNEmode;
6598 default:
6599 gcc_unreachable ();
6602 case LTU:
6603 if (cond_or == DOM_CC_X_AND_Y)
6604 return CC_DLTUmode;
6606 switch (cond2)
6608 case LTU:
6609 return CC_DLTUmode;
6610 case LEU:
6611 return CC_DLEUmode;
6612 case NE:
6613 return CC_DNEmode;
6614 default:
6615 gcc_unreachable ();
6618 case GTU:
6619 if (cond_or == DOM_CC_X_AND_Y)
6620 return CC_DGTUmode;
6622 switch (cond2)
6624 case GTU:
6625 return CC_DGTUmode;
6626 case GEU:
6627 return CC_DGEUmode;
6628 case NE:
6629 return CC_DNEmode;
6630 default:
6631 gcc_unreachable ();
6634 /* The remaining cases only occur when both comparisons are the
6635 same. */
6636 case NE:
6637 gcc_assert (cond1 == cond2);
6638 return CC_DNEmode;
6640 case LE:
6641 gcc_assert (cond1 == cond2);
6642 return CC_DLEmode;
6644 case GE:
6645 gcc_assert (cond1 == cond2);
6646 return CC_DGEmode;
6648 case LEU:
6649 gcc_assert (cond1 == cond2);
6650 return CC_DLEUmode;
6652 case GEU:
6653 gcc_assert (cond1 == cond2);
6654 return CC_DGEUmode;
6656 default:
6657 gcc_unreachable ();
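/* For example, (lt x) || (le y) with COND_OR == DOM_CC_X_OR_Y selects
   CC_DLEmode above, since LE dominates LT and one compare plus one
   conditional compare can test both. Codes where neither dominates
   the other (say LT with GT) were already rejected with CCmode
   before the switch. */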
6661 enum machine_mode
6662 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6664 /* All floating point compares return CCFP if it is an equality
6665 comparison, and CCFPE otherwise. */
6666 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6668 switch (op)
6670 case EQ:
6671 case NE:
6672 case UNORDERED:
6673 case ORDERED:
6674 case UNLT:
6675 case UNLE:
6676 case UNGT:
6677 case UNGE:
6678 case UNEQ:
6679 case LTGT:
6680 return CCFPmode;
6682 case LT:
6683 case LE:
6684 case GT:
6685 case GE:
6686 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6687 return CCFPmode;
6688 return CCFPEmode;
6690 default:
6691 gcc_unreachable ();
6695 /* A compare with a shifted operand. Because of canonicalization, the
6696 comparison will have to be swapped when we emit the assembler. */
6697 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6698 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6699 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6700 || GET_CODE (x) == ROTATERT))
6701 return CC_SWPmode;
6703 /* This operation is performed swapped, but since we only rely on the Z
6704 flag we don't need an additional mode. */
6705 if (GET_MODE (y) == SImode && REG_P (y)
6706 && GET_CODE (x) == NEG
6707 && (op == EQ || op == NE))
6708 return CC_Zmode;
6710 /* This is a special case that is used by combine to allow a
6711 comparison of a shifted byte load to be split into a zero-extend
6712 followed by a comparison of the shifted integer (only valid for
6713 equalities and unsigned inequalities). */
6714 if (GET_MODE (x) == SImode
6715 && GET_CODE (x) == ASHIFT
6716 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6717 && GET_CODE (XEXP (x, 0)) == SUBREG
6718 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6719 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6720 && (op == EQ || op == NE
6721 || op == GEU || op == GTU || op == LTU || op == LEU)
6722 && GET_CODE (y) == CONST_INT)
6723 return CC_Zmode;
6725 /* A construct for a conditional compare, if the false arm contains
6726 0, then both conditions must be true, otherwise either condition
6727 must be true. Not all conditions are possible, so CCmode is
6728 returned if it can't be done. */
6729 if (GET_CODE (x) == IF_THEN_ELSE
6730 && (XEXP (x, 2) == const0_rtx
6731 || XEXP (x, 2) == const1_rtx)
6732 && COMPARISON_P (XEXP (x, 0))
6733 && COMPARISON_P (XEXP (x, 1)))
6734 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6735 INTVAL (XEXP (x, 2)));
6737 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6738 if (GET_CODE (x) == AND
6739 && COMPARISON_P (XEXP (x, 0))
6740 && COMPARISON_P (XEXP (x, 1)))
6741 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6742 DOM_CC_X_AND_Y);
6744 if (GET_CODE (x) == IOR
6745 && COMPARISON_P (XEXP (x, 0))
6746 && COMPARISON_P (XEXP (x, 1)))
6747 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6748 DOM_CC_X_OR_Y);
6750 /* An operation (on Thumb) where we want to test for a single bit.
6751 This is done by shifting that bit up into the top bit of a
6752 scratch register; we can then branch on the sign bit. */
6753 if (TARGET_THUMB
6754 && GET_MODE (x) == SImode
6755 && (op == EQ || op == NE)
6756 && GET_CODE (x) == ZERO_EXTRACT
6757 && XEXP (x, 1) == const1_rtx)
6758 return CC_Nmode;
6760 /* An operation that sets the condition codes as a side-effect, the
6761 V flag is not set correctly, so we can only use comparisons where
6762 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6763 instead.) */
6764 if (GET_MODE (x) == SImode
6765 && y == const0_rtx
6766 && (op == EQ || op == NE || op == LT || op == GE)
6767 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6768 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6769 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6770 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6771 || GET_CODE (x) == LSHIFTRT
6772 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6773 || GET_CODE (x) == ROTATERT
6774 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6775 return CC_NOOVmode;
6777 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6778 return CC_Zmode;
6780 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6781 && GET_CODE (x) == PLUS
6782 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6783 return CC_Cmode;
6785 return CCmode;
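/* Two illustrative cases for the selection above: comparing
   (ashift r1 (const_int 2)) against r2 yields CC_SWPmode, because
   canonical RTL puts the shifted operand first while the ARM compare
   must be emitted the other way round; and (plus r1 r2) compared
   against r2 with LTU yields CC_Cmode, i.e. a test of the carry out
   of the addition. */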
6788 /* X and Y are two things to compare using CODE. Emit the compare insn and
6789 return the rtx for register 0 in the proper mode. */
6791 rtx
6792 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6794 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6795 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6797 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
6799 return cc_reg;
6802 /* Generate a sequence of insns that will generate the correct return
6803 address mask depending on the physical architecture that the program
6804 is running on. */
6805 rtx
6806 arm_gen_return_addr_mask (void)
6808 rtx reg = gen_reg_rtx (Pmode);
6810 emit_insn (gen_return_addr_mask (reg));
6811 return reg;
6814 void
6815 arm_reload_in_hi (rtx *operands)
6817 rtx ref = operands[1];
6818 rtx base, scratch;
6819 HOST_WIDE_INT offset = 0;
6821 if (GET_CODE (ref) == SUBREG)
6823 offset = SUBREG_BYTE (ref);
6824 ref = SUBREG_REG (ref);
6827 if (GET_CODE (ref) == REG)
6829 /* We have a pseudo which has been spilt onto the stack; there
6830 are two cases here: the first where there is a simple
6831 stack-slot replacement and a second where the stack-slot is
6832 out of range, or is used as a subreg. */
6833 if (reg_equiv_mem[REGNO (ref)])
6835 ref = reg_equiv_mem[REGNO (ref)];
6836 base = find_replacement (&XEXP (ref, 0));
6838 else
6839 /* The slot is out of range, or was dressed up in a SUBREG. */
6840 base = reg_equiv_address[REGNO (ref)];
6842 else
6843 base = find_replacement (&XEXP (ref, 0));
6845 /* Handle the case where the address is too complex to be offset by 1. */
6846 if (GET_CODE (base) == MINUS
6847 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6849 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6851 emit_set_insn (base_plus, base);
6852 base = base_plus;
6854 else if (GET_CODE (base) == PLUS)
6856 /* The addend must be CONST_INT, or we would have dealt with it above. */
6857 HOST_WIDE_INT hi, lo;
6859 offset += INTVAL (XEXP (base, 1));
6860 base = XEXP (base, 0);
6862 /* Rework the address into a legal sequence of insns. */
6863 /* Valid range for lo is -4095 -> 4095 */
6864 lo = (offset >= 0
6865 ? (offset & 0xfff)
6866 : -((-offset) & 0xfff));
6868 /* Corner case, if lo is the max offset then we would be out of range
6869 once we have added the additional 1 below, so bump the msb into the
6870 pre-loading insn(s). */
6871 if (lo == 4095)
6872 lo &= 0x7ff;
6874 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6875 ^ (HOST_WIDE_INT) 0x80000000)
6876 - (HOST_WIDE_INT) 0x80000000);
6878 gcc_assert (hi + lo == offset);
6880 if (hi != 0)
6882 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6884 /* Get the base address; addsi3 knows how to handle constants
6885 that require more than one insn. */
6886 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6887 base = base_plus;
6888 offset = lo;
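/* Worked example of the hi/lo split above: offset = 0x1234 gives
   lo = 0x234 and hi = 0x1000; the hi part is folded into the base
   with addsi3, and the remaining lo offset (and lo + 1 for the
   second byte) stays within the +/-4095 ldrb range. The lo == 4095
   special case keeps offset + 1 representable as well. */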
6892 /* Operands[2] may overlap operands[0] (though it won't overlap
6893 operands[1]), that's why we asked for a DImode reg -- so we can
6894 use the bit that does not overlap. */
6895 if (REGNO (operands[2]) == REGNO (operands[0]))
6896 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6897 else
6898 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6900 emit_insn (gen_zero_extendqisi2 (scratch,
6901 gen_rtx_MEM (QImode,
6902 plus_constant (base,
6903 offset))));
6904 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6905 gen_rtx_MEM (QImode,
6906 plus_constant (base,
6907 offset + 1))));
6908 if (!BYTES_BIG_ENDIAN)
6909 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6910 gen_rtx_IOR (SImode,
6911 gen_rtx_ASHIFT
6912 (SImode,
6913 gen_rtx_SUBREG (SImode, operands[0], 0),
6914 GEN_INT (8)),
6915 scratch));
6916 else
6917 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6918 gen_rtx_IOR (SImode,
6919 gen_rtx_ASHIFT (SImode, scratch,
6920 GEN_INT (8)),
6921 gen_rtx_SUBREG (SImode, operands[0], 0)));
6924 /* Handle storing a half-word to memory during reload by synthesizing as two
6925 byte stores. Take care not to clobber the input values until after we
6926 have moved them somewhere safe. This code assumes that if the DImode
6927 scratch in operands[2] overlaps either the input value or output address
6928 in some way, then that value must die in this insn (we absolutely need
6929 two scratch registers for some corner cases). */
6930 void
6931 arm_reload_out_hi (rtx *operands)
6933 rtx ref = operands[0];
6934 rtx outval = operands[1];
6935 rtx base, scratch;
6936 HOST_WIDE_INT offset = 0;
6938 if (GET_CODE (ref) == SUBREG)
6940 offset = SUBREG_BYTE (ref);
6941 ref = SUBREG_REG (ref);
6944 if (GET_CODE (ref) == REG)
6946 /* We have a pseudo which has been spilt onto the stack; there
6947 are two cases here: the first where there is a simple
6948 stack-slot replacement and a second where the stack-slot is
6949 out of range, or is used as a subreg. */
6950 if (reg_equiv_mem[REGNO (ref)])
6952 ref = reg_equiv_mem[REGNO (ref)];
6953 base = find_replacement (&XEXP (ref, 0));
6955 else
6956 /* The slot is out of range, or was dressed up in a SUBREG. */
6957 base = reg_equiv_address[REGNO (ref)];
6959 else
6960 base = find_replacement (&XEXP (ref, 0));
6962 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6964 /* Handle the case where the address is too complex to be offset by 1. */
6965 if (GET_CODE (base) == MINUS
6966 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6968 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6970 /* Be careful not to destroy OUTVAL. */
6971 if (reg_overlap_mentioned_p (base_plus, outval))
6973 /* Updating base_plus might destroy outval, see if we can
6974 swap the scratch and base_plus. */
6975 if (!reg_overlap_mentioned_p (scratch, outval))
6977 rtx tmp = scratch;
6978 scratch = base_plus;
6979 base_plus = tmp;
6981 else
6983 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6985 /* Be conservative and copy OUTVAL into the scratch now,
6986 this should only be necessary if outval is a subreg
6987 of something larger than a word. */
6988 /* XXX Might this clobber base? I can't see how it can,
6989 since scratch is known to overlap with OUTVAL, and
6990 must be wider than a word. */
6991 emit_insn (gen_movhi (scratch_hi, outval));
6992 outval = scratch_hi;
6996 emit_set_insn (base_plus, base);
6997 base = base_plus;
6999 else if (GET_CODE (base) == PLUS)
7001 /* The addend must be CONST_INT, or we would have dealt with it above. */
7002 HOST_WIDE_INT hi, lo;
7004 offset += INTVAL (XEXP (base, 1));
7005 base = XEXP (base, 0);
7007 /* Rework the address into a legal sequence of insns. */
7008 /* Valid range for lo is -4095 -> 4095 */
7009 lo = (offset >= 0
7010 ? (offset & 0xfff)
7011 : -((-offset) & 0xfff));
7013 /* Corner case, if lo is the max offset then we would be out of range
7014 once we have added the additional 1 below, so bump the msb into the
7015 pre-loading insn(s). */
7016 if (lo == 4095)
7017 lo &= 0x7ff;
7019 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7020 ^ (HOST_WIDE_INT) 0x80000000)
7021 - (HOST_WIDE_INT) 0x80000000);
7023 gcc_assert (hi + lo == offset);
7025 if (hi != 0)
7027 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7029 /* Be careful not to destroy OUTVAL. */
7030 if (reg_overlap_mentioned_p (base_plus, outval))
7032 /* Updating base_plus might destroy outval, see if we
7033 can swap the scratch and base_plus. */
7034 if (!reg_overlap_mentioned_p (scratch, outval))
7036 rtx tmp = scratch;
7037 scratch = base_plus;
7038 base_plus = tmp;
7040 else
7042 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7044 /* Be conservative and copy outval into scratch now,
7045 this should only be necessary if outval is a
7046 subreg of something larger than a word. */
7047 /* XXX Might this clobber base? I can't see how it
7048 can, since scratch is known to overlap with
7049 outval. */
7050 emit_insn (gen_movhi (scratch_hi, outval));
7051 outval = scratch_hi;
7055 /* Get the base address; addsi3 knows how to handle constants
7056 that require more than one insn. */
7057 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7058 base = base_plus;
7059 offset = lo;
7063 if (BYTES_BIG_ENDIAN)
7065 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7066 plus_constant (base, offset + 1)),
7067 gen_lowpart (QImode, outval)));
7068 emit_insn (gen_lshrsi3 (scratch,
7069 gen_rtx_SUBREG (SImode, outval, 0),
7070 GEN_INT (8)));
7071 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7072 gen_lowpart (QImode, scratch)));
7074 else
7076 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7077 gen_lowpart (QImode, outval)));
7078 emit_insn (gen_lshrsi3 (scratch,
7079 gen_rtx_SUBREG (SImode, outval, 0),
7080 GEN_INT (8)));
7081 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7082 plus_constant (base, offset + 1)),
7083 gen_lowpart (QImode, scratch)));
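/* On a little-endian target the else arm above emits, in effect:
   strb outval, [base, #offset]
   mov scratch, outval, lsr #8
   strb scratch, [base, #offset + 1]
   while the big-endian arm writes the two bytes in the opposite
   order. The assembly is shown for illustration only; the actual
   insns are the movqi/lshrsi3 RTL generated above. */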
7087 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
7088 (padded to the size of a word) should be passed in a register. */
7090 static bool
7091 arm_must_pass_in_stack (enum machine_mode mode, tree type)
7093 if (TARGET_AAPCS_BASED)
7094 return must_pass_in_stack_var_size (mode, type);
7095 else
7096 return must_pass_in_stack_var_size_or_pad (mode, type);
7100 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
7101 Return true if an argument passed on the stack should be padded upwards,
7102 i.e. if the least-significant byte has useful data.
7103 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
7104 aggregate types are placed in the lowest memory address. */
7106 bool
7107 arm_pad_arg_upward (enum machine_mode mode, tree type)
7109 if (!TARGET_AAPCS_BASED)
7110 return DEFAULT_FUNCTION_ARG_PADDING (mode, type) == upward;
7112 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
7113 return false;
7115 return true;
7119 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
7120 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
7121 byte of the register has useful data, and return the opposite if the
7122 most significant byte does.
7123 For AAPCS, small aggregates and small complex types are always padded
7124 upwards. */
7126 bool
7127 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
7128 tree type, int first ATTRIBUTE_UNUSED)
7130 if (TARGET_AAPCS_BASED
7131 && BYTES_BIG_ENDIAN
7132 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
7133 && int_size_in_bytes (type) <= 4)
7134 return true;
7136 /* Otherwise, use default padding. */
7137 return !BYTES_BIG_ENDIAN;
7141 /* Print a symbolic form of X to the debug file, F. */
7142 static void
7143 arm_print_value (FILE *f, rtx x)
7145 switch (GET_CODE (x))
7147 case CONST_INT:
7148 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
7149 return;
7151 case CONST_DOUBLE:
7152 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
7153 return;
7155 case CONST_VECTOR:
7157 int i;
7159 fprintf (f, "<");
7160 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
7162 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
7163 if (i < (CONST_VECTOR_NUNITS (x) - 1))
7164 fputc (',', f);
7166 fprintf (f, ">");
7168 return;
7170 case CONST_STRING:
7171 fprintf (f, "\"%s\"", XSTR (x, 0));
7172 return;
7174 case SYMBOL_REF:
7175 fprintf (f, "`%s'", XSTR (x, 0));
7176 return;
7178 case LABEL_REF:
7179 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
7180 return;
7182 case CONST:
7183 arm_print_value (f, XEXP (x, 0));
7184 return;
7186 case PLUS:
7187 arm_print_value (f, XEXP (x, 0));
7188 fprintf (f, "+");
7189 arm_print_value (f, XEXP (x, 1));
7190 return;
7192 case PC:
7193 fprintf (f, "pc");
7194 return;
7196 default:
7197 fprintf (f, "????");
7198 return;
7202 /* Routines for manipulation of the constant pool. */
7204 /* Arm instructions cannot load a large constant directly into a
7205 register; they have to come from a pc relative load. The constant
7206 must therefore be placed in the addressable range of the pc
7207 relative load. Depending on the precise pc relative load
7208 instruction the range is somewhere between 256 bytes and 4k. This
7209 means that we often have to dump a constant inside a function, and
7210 generate code to branch around it.
7212 It is important to minimize this, since the branches will slow
7213 things down and make the code larger.
7215 Normally we can hide the table after an existing unconditional
7216 branch so that there is no interruption of the flow, but in the
7217 worst case the code looks like this:
7219 ldr rn, L1
7220 ...
7221 b L2
7222 align
7223 L1: .long value
7224 L2:
7225 ...
7227 ldr rn, L3
7228 ...
7229 b L4
7230 align
7231 L3: .long value
7232 L4:
7233 ...
7235 We fix this by performing a scan after scheduling, which notices
7236 which instructions need to have their operands fetched from the
7237 constant table and builds the table.
7239 The algorithm starts by building a table of all the constants that
7240 need fixing up and all the natural barriers in the function (places
7241 where a constant table can be dropped without breaking the flow).
7242 For each fixup we note how far the pc-relative replacement will be
7243 able to reach and the offset of the instruction into the function.
7245 Having built the table we then group the fixes together to form
7246 tables that are as large as possible (subject to addressing
7247 constraints) and emit each table of constants after the last
7248 barrier that is within range of all the instructions in the group.
7249 If a group does not contain a barrier, then we forcibly create one
7250 by inserting a jump instruction into the flow. Once the table has
7251 been inserted, the insns are then modified to reference the
7252 relevant entry in the pool.
7254 Possible enhancements to the algorithm (not implemented) are:
7256 1) For some processors and object formats, there may be benefit in
7257 aligning the pools to the start of cache lines; this alignment
7258 would need to be taken into account when calculating addressability
7259 of a pool. */
7261 /* These typedefs are located at the start of this file, so that
7262 they can be used in the prototypes there. This comment is to
7263 remind readers of that fact so that the following structures
7264 can be understood more easily.
7266 typedef struct minipool_node Mnode;
7267 typedef struct minipool_fixup Mfix; */
7269 struct minipool_node
7271 /* Doubly linked chain of entries. */
7272 Mnode * next;
7273 Mnode * prev;
7274 /* The maximum offset into the code that this entry can be placed. While
7275 pushing fixes for forward references, all entries are sorted in order
7276 of increasing max_address. */
7277 HOST_WIDE_INT max_address;
7278 /* Similarly for an entry inserted for a backwards ref. */
7279 HOST_WIDE_INT min_address;
7280 /* The number of fixes referencing this entry. This can become zero
7281 if we "unpush" an entry. In this case we ignore the entry when we
7282 come to emit the code. */
7283 int refcount;
7284 /* The offset from the start of the minipool. */
7285 HOST_WIDE_INT offset;
7286 /* The value in the table. */
7287 rtx value;
7288 /* The mode of value. */
7289 enum machine_mode mode;
7290 /* The size of the value. With iWMMXt enabled
7291 sizes > 4 also imply an alignment of 8 bytes. */
7292 int fix_size;
7295 struct minipool_fixup
7297 Mfix * next;
7298 rtx insn;
7299 HOST_WIDE_INT address;
7300 rtx * loc;
7301 enum machine_mode mode;
7302 int fix_size;
7303 rtx value;
7304 Mnode * minipool;
7305 HOST_WIDE_INT forwards;
7306 HOST_WIDE_INT backwards;
7309 /* Fixes less than a word need padding out to a word boundary. */
7310 #define MINIPOOL_FIX_SIZE(mode) \
7311 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
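/* So, for example, MINIPOOL_FIX_SIZE (HImode) and
   MINIPOOL_FIX_SIZE (SImode) are both 4, while
   MINIPOOL_FIX_SIZE (DImode) is 8. */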
7313 static Mnode * minipool_vector_head;
7314 static Mnode * minipool_vector_tail;
7315 static rtx minipool_vector_label;
7316 static int minipool_pad;
7318 /* The linked list of all minipool fixes required for this function. */
7319 Mfix * minipool_fix_head;
7320 Mfix * minipool_fix_tail;
7321 /* The fix entry for the current minipool, once it has been placed. */
7322 Mfix * minipool_barrier;
7324 /* Determines if INSN is the start of a jump table. Returns the end
7325 of the TABLE or NULL_RTX. */
7326 static rtx
7327 is_jump_table (rtx insn)
7329 rtx table;
7331 if (GET_CODE (insn) == JUMP_INSN
7332 && JUMP_LABEL (insn) != NULL
7333 && ((table = next_real_insn (JUMP_LABEL (insn)))
7334 == next_real_insn (insn))
7335 && table != NULL
7336 && GET_CODE (table) == JUMP_INSN
7337 && (GET_CODE (PATTERN (table)) == ADDR_VEC
7338 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
7339 return table;
7341 return NULL_RTX;
7344 #ifndef JUMP_TABLES_IN_TEXT_SECTION
7345 #define JUMP_TABLES_IN_TEXT_SECTION 0
7346 #endif
7348 static HOST_WIDE_INT
7349 get_jump_table_size (rtx insn)
7351 /* ADDR_VECs only take room if read-only data goes into the text
7352 section. */
7353 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
7355 rtx body = PATTERN (insn);
7356 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7358 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
7361 return 0;
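/* For example, an ADDR_DIFF_VEC in HImode with ten entries occupies
   10 * 2 = 20 bytes when jump tables (or read-only data) live in the
   text section, and contributes nothing otherwise. */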
7364 /* Move a minipool fix MP from its current location to before MAX_MP.
7365 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7366 constraints may need updating. */
7367 static Mnode *
7368 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7369 HOST_WIDE_INT max_address)
7371 /* The code below assumes these are different. */
7372 gcc_assert (mp != max_mp);
7374 if (max_mp == NULL)
7376 if (max_address < mp->max_address)
7377 mp->max_address = max_address;
7379 else
7381 if (max_address > max_mp->max_address - mp->fix_size)
7382 mp->max_address = max_mp->max_address - mp->fix_size;
7383 else
7384 mp->max_address = max_address;
7386 /* Unlink MP from its current position. Since max_mp is non-null,
7387 mp->prev must be non-null. */
7388 mp->prev->next = mp->next;
7389 if (mp->next != NULL)
7390 mp->next->prev = mp->prev;
7391 else
7392 minipool_vector_tail = mp->prev;
7394 /* Re-insert it before MAX_MP. */
7395 mp->next = max_mp;
7396 mp->prev = max_mp->prev;
7397 max_mp->prev = mp;
7399 if (mp->prev != NULL)
7400 mp->prev->next = mp;
7401 else
7402 minipool_vector_head = mp;
7405 /* Save the new entry. */
7406 max_mp = mp;
7408 /* Scan over the preceding entries and adjust their addresses as
7409 required. */
7410 while (mp->prev != NULL
7411 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7413 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7414 mp = mp->prev;
7417 return max_mp;
7420 /* Add a constant to the minipool for a forward reference. Returns the
7421 node added or NULL if the constant will not fit in this pool. */
7422 static Mnode *
7423 add_minipool_forward_ref (Mfix *fix)
7425 /* If set, max_mp is the first pool_entry that has a lower
7426 constraint than the one we are trying to add. */
7427 Mnode * max_mp = NULL;
7428 HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
7429 Mnode * mp;
7431 /* If the minipool starts before the end of FIX->INSN then this FIX
7432 cannot be placed into the current pool. Furthermore, adding the
7433 new constant pool entry may cause the pool to start FIX_SIZE bytes
7434 earlier. */
7435 if (minipool_vector_head &&
7436 (fix->address + get_attr_length (fix->insn)
7437 >= minipool_vector_head->max_address - fix->fix_size))
7438 return NULL;
7440 /* Scan the pool to see if a constant with the same value has
7441 already been added. While we are doing this, also note the
7442 location where we must insert the constant if it doesn't already
7443 exist. */
7444 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7446 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7447 && fix->mode == mp->mode
7448 && (GET_CODE (fix->value) != CODE_LABEL
7449 || (CODE_LABEL_NUMBER (fix->value)
7450 == CODE_LABEL_NUMBER (mp->value)))
7451 && rtx_equal_p (fix->value, mp->value))
7453 /* More than one fix references this entry. */
7454 mp->refcount++;
7455 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7458 /* Note the insertion point if necessary. */
7459 if (max_mp == NULL
7460 && mp->max_address > max_address)
7461 max_mp = mp;
7463 /* If we are inserting an 8-byte aligned quantity and
7464 we have not already found an insertion point, then
7465 make sure that all such 8-byte aligned quantities are
7466 placed at the start of the pool. */
7467 if (ARM_DOUBLEWORD_ALIGN
7468 && max_mp == NULL
7469 && fix->fix_size == 8
7470 && mp->fix_size != 8)
7472 max_mp = mp;
7473 max_address = mp->max_address;
7477 /* The value is not currently in the minipool, so we need to create
7478 a new entry for it. If MAX_MP is NULL, the entry will be put on
7479 the end of the list since the placement is less constrained than
7480 any existing entry. Otherwise, we insert the new fix before
7481 MAX_MP and, if necessary, adjust the constraints on the other
7482 entries. */
7483 mp = XNEW (Mnode);
7484 mp->fix_size = fix->fix_size;
7485 mp->mode = fix->mode;
7486 mp->value = fix->value;
7487 mp->refcount = 1;
7488 /* Not yet required for a backwards ref. */
7489 mp->min_address = -65536;
7491 if (max_mp == NULL)
7493 mp->max_address = max_address;
7494 mp->next = NULL;
7495 mp->prev = minipool_vector_tail;
7497 if (mp->prev == NULL)
7499 minipool_vector_head = mp;
7500 minipool_vector_label = gen_label_rtx ();
7502 else
7503 mp->prev->next = mp;
7505 minipool_vector_tail = mp;
7507 else
7509 if (max_address > max_mp->max_address - mp->fix_size)
7510 mp->max_address = max_mp->max_address - mp->fix_size;
7511 else
7512 mp->max_address = max_address;
7514 mp->next = max_mp;
7515 mp->prev = max_mp->prev;
7516 max_mp->prev = mp;
7517 if (mp->prev != NULL)
7518 mp->prev->next = mp;
7519 else
7520 minipool_vector_head = mp;
7523 /* Save the new entry. */
7524 max_mp = mp;
7526 /* Scan over the preceding entries and adjust their addresses as
7527 required. */
7528 while (mp->prev != NULL
7529 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7531 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7532 mp = mp->prev;
7535 return max_mp;
7538 static Mnode *
7539 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7540 HOST_WIDE_INT min_address)
7542 HOST_WIDE_INT offset;
7544 /* The code below assumes these are different. */
7545 gcc_assert (mp != min_mp);
7547 if (min_mp == NULL)
7549 if (min_address > mp->min_address)
7550 mp->min_address = min_address;
7552 else
7554 /* We will adjust this below if it is too loose. */
7555 mp->min_address = min_address;
7557 /* Unlink MP from its current position. Since min_mp is non-null,
7558 mp->next must be non-null. */
7559 mp->next->prev = mp->prev;
7560 if (mp->prev != NULL)
7561 mp->prev->next = mp->next;
7562 else
7563 minipool_vector_head = mp->next;
7565 /* Reinsert it after MIN_MP. */
7566 mp->prev = min_mp;
7567 mp->next = min_mp->next;
7568 min_mp->next = mp;
7569 if (mp->next != NULL)
7570 mp->next->prev = mp;
7571 else
7572 minipool_vector_tail = mp;
7575 min_mp = mp;
7577 offset = 0;
7578 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7580 mp->offset = offset;
7581 if (mp->refcount > 0)
7582 offset += mp->fix_size;
7584 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7585 mp->next->min_address = mp->min_address + mp->fix_size;
7588 return min_mp;
7591 /* Add a constant to the minipool for a backward reference. Returns the
7592 node added or NULL if the constant will not fit in this pool.
7594 Note that the code for insertion for a backwards reference can be
7595 somewhat confusing because the calculated offsets for each fix do
7596 not take into account the size of the pool (which is still under
7597 construction). */
7598 static Mnode *
7599 add_minipool_backward_ref (Mfix *fix)
7601 /* If set, min_mp is the last pool_entry that has a lower constraint
7602 than the one we are trying to add. */
7603 Mnode *min_mp = NULL;
7604 /* This can be negative, since it is only a constraint. */
7605 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7606 Mnode *mp;
7608 /* If we can't reach the current pool from this insn, or if we can't
7609 insert this entry at the end of the pool without pushing other
7610 fixes out of range, then we don't try. This ensures that we
7611 can't fail later on. */
7612 if (min_address >= minipool_barrier->address
7613 || (minipool_vector_tail->min_address + fix->fix_size
7614 >= minipool_barrier->address))
7615 return NULL;
7617 /* Scan the pool to see if a constant with the same value has
7618 already been added. While we are doing this, also note the
7619 location where we must insert the constant if it doesn't already
7620 exist. */
7621 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7623 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7624 && fix->mode == mp->mode
7625 && (GET_CODE (fix->value) != CODE_LABEL
7626 || (CODE_LABEL_NUMBER (fix->value)
7627 == CODE_LABEL_NUMBER (mp->value)))
7628 && rtx_equal_p (fix->value, mp->value)
7629 /* Check that there is enough slack to move this entry to the
7630 end of the table (this is conservative). */
7631 && (mp->max_address
7632 > (minipool_barrier->address
7633 + minipool_vector_tail->offset
7634 + minipool_vector_tail->fix_size)))
7636 mp->refcount++;
7637 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7640 if (min_mp != NULL)
7641 mp->min_address += fix->fix_size;
7642 else
7644 /* Note the insertion point if necessary. */
7645 if (mp->min_address < min_address)
7647 /* For now, we do not allow the insertion of 8-byte alignment
7648 requiring nodes anywhere but at the start of the pool. */
7649 if (ARM_DOUBLEWORD_ALIGN
7650 && fix->fix_size == 8 && mp->fix_size != 8)
7651 return NULL;
7652 else
7653 min_mp = mp;
7655 else if (mp->max_address
7656 < minipool_barrier->address + mp->offset + fix->fix_size)
7658 /* Inserting before this entry would push the fix beyond
7659 its maximum address (which can happen if we have
7660 re-located a forwards fix); force the new fix to come
7661 after it. */
7662 min_mp = mp;
7663 min_address = mp->min_address + fix->fix_size;
7665 /* If we are inserting an 8-byte aligned quantity and
7666 we have not already found an insertion point, then
7667 make sure that all such 8-byte aligned quantities are
7668 placed at the start of the pool. */
7669 else if (ARM_DOUBLEWORD_ALIGN
7670 && min_mp == NULL
7671 && fix->fix_size == 8
7672 && mp->fix_size < 8)
7674 min_mp = mp;
7675 min_address = mp->min_address + fix->fix_size;
7680 /* We need to create a new entry. */
7681 mp = XNEW (Mnode);
7682 mp->fix_size = fix->fix_size;
7683 mp->mode = fix->mode;
7684 mp->value = fix->value;
7685 mp->refcount = 1;
7686 mp->max_address = minipool_barrier->address + 65536;
7688 mp->min_address = min_address;
7690 if (min_mp == NULL)
7692 mp->prev = NULL;
7693 mp->next = minipool_vector_head;
7695 if (mp->next == NULL)
7697 minipool_vector_tail = mp;
7698 minipool_vector_label = gen_label_rtx ();
7700 else
7701 mp->next->prev = mp;
7703 minipool_vector_head = mp;
7705 else
7707 mp->next = min_mp->next;
7708 mp->prev = min_mp;
7709 min_mp->next = mp;
7711 if (mp->next != NULL)
7712 mp->next->prev = mp;
7713 else
7714 minipool_vector_tail = mp;
7717 /* Save the new entry. */
7718 min_mp = mp;
7720 if (mp->prev)
7721 mp = mp->prev;
7722 else
7723 mp->offset = 0;
7725 /* Scan over the following entries and adjust their offsets. */
7726 while (mp->next != NULL)
7728 if (mp->next->min_address < mp->min_address + mp->fix_size)
7729 mp->next->min_address = mp->min_address + mp->fix_size;
7731 if (mp->refcount)
7732 mp->next->offset = mp->offset + mp->fix_size;
7733 else
7734 mp->next->offset = mp->offset;
7736 mp = mp->next;
7739 return min_mp;
7742 static void
7743 assign_minipool_offsets (Mfix *barrier)
7745 HOST_WIDE_INT offset = 0;
7746 Mnode *mp;
7748 minipool_barrier = barrier;
7750 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7752 mp->offset = offset;
7754 if (mp->refcount > 0)
7755 offset += mp->fix_size;
7759 /* Output the literal table. */
7760 static void
7761 dump_minipool (rtx scan)
7763 Mnode * mp;
7764 Mnode * nmp;
7765 int align64 = 0;
7767 if (ARM_DOUBLEWORD_ALIGN)
7768 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7769 if (mp->refcount > 0 && mp->fix_size == 8)
7771 align64 = 1;
7772 break;
7775 if (dump_file)
7776 fprintf (dump_file,
7777 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7778 INSN_UID (scan), (long) minipool_barrier->address, align64 ? 8 : 4);
7780 scan = emit_label_after (gen_label_rtx (), scan);
7781 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7782 scan = emit_label_after (minipool_vector_label, scan);
7784 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7786 if (mp->refcount > 0)
7788 if (dump_file)
7790 fprintf (dump_file,
7791 ";; Offset %u, min %ld, max %ld ",
7792 (unsigned) mp->offset, (long) mp->min_address,
7793 (long) mp->max_address);
7794 arm_print_value (dump_file, mp->value);
7795 fputc ('\n', dump_file);
7798 switch (mp->fix_size)
7800 #ifdef HAVE_consttable_1
7801 case 1:
7802 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7803 break;
7805 #endif
7806 #ifdef HAVE_consttable_2
7807 case 2:
7808 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7809 break;
7811 #endif
7812 #ifdef HAVE_consttable_4
7813 case 4:
7814 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7815 break;
7817 #endif
7818 #ifdef HAVE_consttable_8
7819 case 8:
7820 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7821 break;
7823 #endif
7824 default:
7825 gcc_unreachable ();
7829 nmp = mp->next;
7830 free (mp);
7833 minipool_vector_head = minipool_vector_tail = NULL;
7834 scan = emit_insn_after (gen_consttable_end (), scan);
7835 scan = emit_barrier_after (scan);
7838 /* Return the cost of forcibly inserting a barrier after INSN. */
7839 static int
7840 arm_barrier_cost (rtx insn)
7842 /* Basing the location of the pool on the loop depth is preferable,
7843 but at the moment, the basic block information seems to be
7844 corrupt by this stage of the compilation. */
7845 int base_cost = 50;
7846 rtx next = next_nonnote_insn (insn);
7848 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7849 base_cost -= 20;
7851 switch (GET_CODE (insn))
7853 case CODE_LABEL:
7854 /* It will always be better to place the table before the label, rather
7855 than after it. */
7856 return 50;
7858 case INSN:
7859 case CALL_INSN:
7860 return base_cost;
7862 case JUMP_INSN:
7863 return base_cost - 10;
7865 default:
7866 return base_cost + 10;
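/* For example, a JUMP_INSN that is immediately followed by a
   CODE_LABEL costs 50 - 20 - 10 = 20, one of the cheapest places to
   force a barrier, whereas splitting at a CODE_LABEL itself always
   costs 50. */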
7870 /* Find the best place in the insn stream in the range
7871 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7872 Create the barrier by inserting a jump and add a new fix entry for
7873 it. */
7874 static Mfix *
7875 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7877 HOST_WIDE_INT count = 0;
7878 rtx barrier;
7879 rtx from = fix->insn;
7880 /* The instruction after which we will insert the jump. */
7881 rtx selected = NULL;
7882 int selected_cost;
7883 /* The address at which the jump instruction will be placed. */
7884 HOST_WIDE_INT selected_address;
7885 Mfix * new_fix;
7886 HOST_WIDE_INT max_count = max_address - fix->address;
7887 rtx label = gen_label_rtx ();
7889 selected_cost = arm_barrier_cost (from);
7890 selected_address = fix->address;
7892 while (from && count < max_count)
7894 rtx tmp;
7895 int new_cost;
7897 /* This code shouldn't have been called if there was a natural barrier
7898 within range. */
7899 gcc_assert (GET_CODE (from) != BARRIER);
7901 /* Count the length of this insn. */
7902 count += get_attr_length (from);
7904 /* If there is a jump table, add its length. */
7905 tmp = is_jump_table (from);
7906 if (tmp != NULL)
7908 count += get_jump_table_size (tmp);
7910 /* Jump tables aren't in a basic block, so base the cost on
7911 the dispatch insn. If we select this location, we will
7912 still put the pool after the table. */
7913 new_cost = arm_barrier_cost (from);
7915 if (count < max_count
7916 && (!selected || new_cost <= selected_cost))
7918 selected = tmp;
7919 selected_cost = new_cost;
7920 selected_address = fix->address + count;
7923 /* Continue after the dispatch table. */
7924 from = NEXT_INSN (tmp);
7925 continue;
7928 new_cost = arm_barrier_cost (from);
7930 if (count < max_count
7931 && (!selected || new_cost <= selected_cost))
7933 selected = from;
7934 selected_cost = new_cost;
7935 selected_address = fix->address + count;
7938 from = NEXT_INSN (from);
7941 /* Make sure that we found a place to insert the jump. */
7942 gcc_assert (selected);
7944 /* Create a new JUMP_INSN that branches around a barrier. */
7945 from = emit_jump_insn_after (gen_jump (label), selected);
7946 JUMP_LABEL (from) = label;
7947 barrier = emit_barrier_after (from);
7948 emit_label_after (label, barrier);
7950 /* Create a minipool barrier entry for the new barrier. */
7951 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7952 new_fix->insn = barrier;
7953 new_fix->address = selected_address;
7954 new_fix->next = fix->next;
7955 fix->next = new_fix;
7957 return new_fix;
7960 /* Record that there is a natural barrier in the insn stream at
7961 ADDRESS. */
7962 static void
7963 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7965 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7967 fix->insn = insn;
7968 fix->address = address;
7970 fix->next = NULL;
7971 if (minipool_fix_head != NULL)
7972 minipool_fix_tail->next = fix;
7973 else
7974 minipool_fix_head = fix;
7976 minipool_fix_tail = fix;
7979 /* Record INSN, which will need fixing up to load a value from the
7980 minipool. ADDRESS is the offset of the insn since the start of the
7981 function; LOC is a pointer to the part of the insn which requires
7982 fixing; VALUE is the constant that must be loaded, which is of type
7983 MODE. */
7984 static void
7985 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7986 enum machine_mode mode, rtx value)
7988 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7990 #ifdef AOF_ASSEMBLER
7991 /* PIC symbol references need to be converted into offsets into the
7992 based area. */
7993 /* XXX This shouldn't be done here. */
7994 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7995 value = aof_pic_entry (value);
7996 #endif /* AOF_ASSEMBLER */
7998 fix->insn = insn;
7999 fix->address = address;
8000 fix->loc = loc;
8001 fix->mode = mode;
8002 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
8003 fix->value = value;
8004 fix->forwards = get_attr_pool_range (insn);
8005 fix->backwards = get_attr_neg_pool_range (insn);
8006 fix->minipool = NULL;
8008 /* If an insn doesn't have a range defined for it, then it isn't
8009 expecting to be reworked by this code. Better to stop now than
8010 to generate duff assembly code. */
8011 gcc_assert (fix->forwards || fix->backwards);
8013 /* If an entry requires 8-byte alignment then assume all constant pools
8014 require 4 bytes of padding. Trying to do this later on a per-pool
8015 basis is awkward because existing pool entries have to be modified. */
8016 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
8017 minipool_pad = 4;
8019 if (dump_file)
8021 fprintf (dump_file,
8022 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
8023 GET_MODE_NAME (mode),
8024 INSN_UID (insn), (unsigned long) address,
8025 -1 * (long)fix->backwards, (long)fix->forwards);
8026 arm_print_value (dump_file, fix->value);
8027 fprintf (dump_file, "\n");
8030 /* Add it to the chain of fixes. */
8031 fix->next = NULL;
8033 if (minipool_fix_head != NULL)
8034 minipool_fix_tail->next = fix;
8035 else
8036 minipool_fix_head = fix;
8038 minipool_fix_tail = fix;
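/* As a hypothetical illustration of the dump format above, a 32-bit load
   whose pool range is 4096 bytes forward and 4084 bytes backward might be
   reported as:

	;; SImode fixup for i57; addr 104, range (-4084,4096): 0x12345678

   (the insn uid, address and value here are invented).  */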
8041 /* Return the cost of synthesizing a 64-bit constant VAL inline.
8042 Returns the number of insns needed, or 99 if we don't know how to
8043 do it. */
8044 int
8045 arm_const_double_inline_cost (rtx val)
8047 rtx lowpart, highpart;
8048 enum machine_mode mode;
8050 mode = GET_MODE (val);
8052 if (mode == VOIDmode)
8053 mode = DImode;
8055 gcc_assert (GET_MODE_SIZE (mode) == 8);
8057 lowpart = gen_lowpart (SImode, val);
8058 highpart = gen_highpart_mode (SImode, mode, val);
8060 gcc_assert (GET_CODE (lowpart) == CONST_INT);
8061 gcc_assert (GET_CODE (highpart) == CONST_INT);
8063 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
8064 NULL_RTX, NULL_RTX, 0, 0)
8065 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
8066 NULL_RTX, NULL_RTX, 0, 0));
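/* Worked example (illustrative): for the DImode constant
   0x0000000500000003, the low part 3 and the high part 5 are each valid
   immediates, so arm_gen_constant charges one insn apiece and the total
   cost is 2.  A value whose halves each need several data operations
   costs correspondingly more.  */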
8069 /* Return true if it is worthwhile to split a 64-bit constant into two
8070 32-bit operations. This is the case if optimizing for size, or
8071 if we have load delay slots, or if one 32-bit part can be done with
8072 a single data operation. */
8073 bool
8074 arm_const_double_by_parts (rtx val)
8076 enum machine_mode mode = GET_MODE (val);
8077 rtx part;
8079 if (optimize_size || arm_ld_sched)
8080 return true;
8082 if (mode == VOIDmode)
8083 mode = DImode;
8085 part = gen_highpart_mode (SImode, mode, val);
8087 gcc_assert (GET_CODE (part) == CONST_INT);
8089 if (const_ok_for_arm (INTVAL (part))
8090 || const_ok_for_arm (~INTVAL (part)))
8091 return true;
8093 part = gen_lowpart (SImode, val);
8095 gcc_assert (GET_CODE (part) == CONST_INT);
8097 if (const_ok_for_arm (INTVAL (part))
8098 || const_ok_for_arm (~INTVAL (part)))
8099 return true;
8101 return false;
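/* Worked example (illustrative): for the DImode constant
   0xFF00000000000001 the high word 0xFF000000 is a valid rotated
   immediate, so one half can be built with a single data operation and
   the function returns true even when not optimizing for size and
   without load delay slots.  */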
8104 /* Scan INSN and note any of its operands that need fixing.
8105 If DO_PUSHES is false we do not actually push any of the fixups
8106 needed. The function returns TRUE if any fixups were needed/pushed.
8107 This is used by arm_memory_load_p() which needs to know about loads
8108 of constants that will be converted into minipool loads. */
8109 static bool
8110 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
8112 bool result = false;
8113 int opno;
8115 extract_insn (insn);
8117 if (!constrain_operands (1))
8118 fatal_insn_not_found (insn);
8120 if (recog_data.n_alternatives == 0)
8121 return false;
8123 /* Fill in recog_op_alt with information about the constraints of
8124 this insn. */
8125 preprocess_constraints ();
8127 for (opno = 0; opno < recog_data.n_operands; opno++)
8129 /* Things we need to fix can only occur in inputs. */
8130 if (recog_data.operand_type[opno] != OP_IN)
8131 continue;
8133 /* If this alternative is a memory reference, then any mention
8134 of constants in this alternative is really to fool reload
8135 into allowing us to accept one there. We need to fix them up
8136 now so that we output the right code. */
8137 if (recog_op_alt[opno][which_alternative].memory_ok)
8139 rtx op = recog_data.operand[opno];
8141 if (CONSTANT_P (op))
8143 if (do_pushes)
8144 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
8145 recog_data.operand_mode[opno], op);
8146 result = true;
8148 else if (GET_CODE (op) == MEM
8149 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
8150 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
8152 if (do_pushes)
8154 rtx cop = avoid_constant_pool_reference (op);
8156 /* Casting the address of something to a mode narrower
8157 than a word can cause avoid_constant_pool_reference()
8158 to return the pool reference itself. That's no good to
8160 us here. Let's just hope that we can use the
8160 constant pool value directly. */
8161 if (op == cop)
8162 cop = get_pool_constant (XEXP (op, 0));
8164 push_minipool_fix (insn, address,
8165 recog_data.operand_loc[opno],
8166 recog_data.operand_mode[opno], cop);
8169 result = true;
8174 return result;
8177 /* GCC puts the pool in the wrong place for ARM, since we can only
8178 load addresses a limited distance around the pc. We do some
8179 special munging to move the constant pool values to the correct
8180 point in the code. */
8181 static void
8182 arm_reorg (void)
8184 rtx insn;
8185 HOST_WIDE_INT address = 0;
8186 Mfix * fix;
8188 minipool_fix_head = minipool_fix_tail = NULL;
8190 /* The first insn must always be a note, or the code below won't
8191 scan it properly. */
8192 insn = get_insns ();
8193 gcc_assert (GET_CODE (insn) == NOTE);
8194 minipool_pad = 0;
8196 /* Scan all the insns and record the operands that will need fixing. */
8197 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
8199 if (TARGET_CIRRUS_FIX_INVALID_INSNS
8200 && (arm_cirrus_insn_p (insn)
8201 || GET_CODE (insn) == JUMP_INSN
8202 || arm_memory_load_p (insn)))
8203 cirrus_reorg (insn);
8205 if (GET_CODE (insn) == BARRIER)
8206 push_minipool_barrier (insn, address);
8207 else if (INSN_P (insn))
8209 rtx table;
8211 note_invalid_constants (insn, address, true);
8212 address += get_attr_length (insn);
8214 /* If the insn is a vector jump, add the size of the table
8215 and skip the table. */
8216 if ((table = is_jump_table (insn)) != NULL)
8218 address += get_jump_table_size (table);
8219 insn = table;
8224 fix = minipool_fix_head;
8226 /* Now scan the fixups and perform the required changes. */
8227 while (fix)
8229 Mfix * ftmp;
8230 Mfix * fdel;
8231 Mfix * last_added_fix;
8232 Mfix * last_barrier = NULL;
8233 Mfix * this_fix;
8235 /* Skip any further barriers before the next fix. */
8236 while (fix && GET_CODE (fix->insn) == BARRIER)
8237 fix = fix->next;
8239 /* No more fixes. */
8240 if (fix == NULL)
8241 break;
8243 last_added_fix = NULL;
8245 for (ftmp = fix; ftmp; ftmp = ftmp->next)
8247 if (GET_CODE (ftmp->insn) == BARRIER)
8249 if (ftmp->address >= minipool_vector_head->max_address)
8250 break;
8252 last_barrier = ftmp;
8254 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
8255 break;
8257 last_added_fix = ftmp; /* Keep track of the last fix added. */
8260 /* If we found a barrier, drop back to that; any fixes that we
8261 could have reached but come after the barrier will now go in
8262 the next mini-pool. */
8263 if (last_barrier != NULL)
8265 /* Reduce the refcount for those fixes that won't go into this
8266 pool after all. */
8267 for (fdel = last_barrier->next;
8268 fdel && fdel != ftmp;
8269 fdel = fdel->next)
8271 fdel->minipool->refcount--;
8272 fdel->minipool = NULL;
8275 ftmp = last_barrier;
8277 else
8279 /* ftmp is the first fix that we can't fit into this pool, and
8280 there are no natural barriers that we could use. Insert a
8281 new barrier in the code somewhere between the previous
8282 fix and this one, and arrange to jump around it. */
8283 HOST_WIDE_INT max_address;
8285 /* The last item on the list of fixes must be a barrier, so
8286 we can never run off the end of the list of fixes without
8287 last_barrier being set. */
8288 gcc_assert (ftmp);
8290 max_address = minipool_vector_head->max_address;
8291 /* Check that there isn't another fix that is in range that
8292 we couldn't fit into this pool because the pool was
8293 already too large: we need to put the pool before such an
8294 instruction. The pool itself may come just after the
8295 fix because create_fix_barrier also allows space for a
8296 jump instruction. */
8297 if (ftmp->address < max_address)
8298 max_address = ftmp->address + 1;
8300 last_barrier = create_fix_barrier (last_added_fix, max_address);
8303 assign_minipool_offsets (last_barrier);
8305 while (ftmp)
8307 if (GET_CODE (ftmp->insn) != BARRIER
8308 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
8309 == NULL))
8310 break;
8312 ftmp = ftmp->next;
8315 /* Scan over the fixes we have identified for this pool, fixing them
8316 up and adding the constants to the pool itself. */
8317 for (this_fix = fix; this_fix && ftmp != this_fix;
8318 this_fix = this_fix->next)
8319 if (GET_CODE (this_fix->insn) != BARRIER)
8321 rtx addr
8322 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
8323 minipool_vector_label),
8324 this_fix->minipool->offset);
8325 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
8328 dump_minipool (last_barrier->insn);
8329 fix = ftmp;
8332 /* From now on we must synthesize any constants that we can't handle
8333 directly. This can happen if the RTL gets split during final
8334 instruction generation. */
8335 after_arm_reorg = 1;
8337 /* Free the minipool memory. */
8338 obstack_free (&minipool_obstack, minipool_startobj);
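/* Sketch of the overall effect (editorial, with an invented label): a
   constant load that final renders as

	ldr	r0, .L5

   is only valid if .L5 lands within the insn's pool range (roughly 4KB
   either way for an ARM-mode ldr), so the pass above breaks the single
   end-of-function constant pool into minipools placed close to the
   insns that reference them.  */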
8341 /* Routines to output assembly language. */
8343 /* If the rtx is a valid FPA constant, return its string representation.
8344 In this way we can ensure that valid double constants are generated even
8345 when cross compiling. */
8346 const char *
8347 fp_immediate_constant (rtx x)
8349 REAL_VALUE_TYPE r;
8350 int i;
8352 if (!fp_consts_inited)
8353 init_fp_table ();
8355 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8356 for (i = 0; i < 8; i++)
8357 if (REAL_VALUES_EQUAL (r, values_fp[i]))
8358 return strings_fp[i];
8360 gcc_unreachable ();
8363 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
8364 static const char *
8365 fp_const_from_val (REAL_VALUE_TYPE *r)
8367 int i;
8369 if (!fp_consts_inited)
8370 init_fp_table ();
8372 for (i = 0; i < 8; i++)
8373 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
8374 return strings_fp[i];
8376 gcc_unreachable ();
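/* For reference (an assumption about init_fp_table, which is defined
   elsewhere in this file): the eight entries of values_fp/strings_fp are
   conventionally 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 0.5 and 10.0, the
   constants that FPA instructions can encode directly.  */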
8379 /* Output the operands of a LDM/STM instruction to STREAM.
8380 MASK is the ARM register set mask of which only bits 0-15 are important.
8381 REG is the base register, either the frame pointer or the stack pointer.
8382 INSTR is the possibly suffixed load or store instruction. */
8384 static void
8385 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
8386 unsigned long mask)
8388 unsigned i;
8389 bool not_first = FALSE;
8391 fputc ('\t', stream);
8392 asm_fprintf (stream, instr, reg);
8393 fputs (", {", stream);
8395 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8396 if (mask & (1 << i))
8398 if (not_first)
8399 fprintf (stream, ", ");
8401 asm_fprintf (stream, "%r", i);
8402 not_first = TRUE;
8405 fprintf (stream, "}\n");
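/* Example output (hypothetical register set): called with INSTR
   "ldmfd\t%r!", REG = SP_REGNUM and MASK covering r4, r5 and lr, this
   prints

	ldmfd	sp!, {r4, r5, lr}
*/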
8409 /* Output a FLDMX instruction to STREAM.
8410 BASE is the register containing the address.
8411 REG and COUNT specify the register range.
8412 Extra registers may be added to avoid hardware bugs. */
8414 static void
8415 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
8417 int i;
8419 /* Workaround ARM10 VFPr1 bug. */
8420 if (count == 2 && !arm_arch6)
8422 if (reg == 15)
8423 reg--;
8424 count++;
8427 fputc ('\t', stream);
8428 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
8430 for (i = reg; i < reg + count; i++)
8432 if (i > reg)
8433 fputs (", ", stream);
8434 asm_fprintf (stream, "d%d", i);
8436 fputs ("}\n", stream);
8441 /* Output the assembly for a store multiple. */
8443 const char *
8444 vfp_output_fstmx (rtx * operands)
8446 char pattern[100];
8447 int p;
8448 int base;
8449 int i;
8451 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8452 p = strlen (pattern);
8454 gcc_assert (GET_CODE (operands[1]) == REG);
8456 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8457 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8459 p += sprintf (&pattern[p], ", d%d", base + i);
8461 strcpy (&pattern[p], "}");
8463 output_asm_insn (pattern, operands);
8464 return "";
8468 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
8469 number of bytes pushed. */
8471 static int
8472 vfp_emit_fstmx (int base_reg, int count)
8474 rtx par;
8475 rtx dwarf;
8476 rtx tmp, reg;
8477 int i;
8479 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8480 register pairs are stored by a store multiple insn. We avoid this
8481 by pushing an extra pair. */
8482 if (count == 2 && !arm_arch6)
8484 if (base_reg == LAST_VFP_REGNUM - 3)
8485 base_reg -= 2;
8486 count++;
8489 /* ??? The frame layout is implementation defined. We describe
8490 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8491 We really need some way of representing the whole block so that the
8492 unwinder can figure it out at runtime. */
8493 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8494 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8496 reg = gen_rtx_REG (DFmode, base_reg);
8497 base_reg += 2;
8499 XVECEXP (par, 0, 0)
8500 = gen_rtx_SET (VOIDmode,
8501 gen_frame_mem (BLKmode,
8502 gen_rtx_PRE_DEC (BLKmode,
8503 stack_pointer_rtx)),
8504 gen_rtx_UNSPEC (BLKmode,
8505 gen_rtvec (1, reg),
8506 UNSPEC_PUSH_MULT));
8508 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8509 plus_constant (stack_pointer_rtx, -(count * 8 + 4)));
8510 RTX_FRAME_RELATED_P (tmp) = 1;
8511 XVECEXP (dwarf, 0, 0) = tmp;
8513 tmp = gen_rtx_SET (VOIDmode,
8514 gen_frame_mem (DFmode, stack_pointer_rtx),
8515 reg);
8516 RTX_FRAME_RELATED_P (tmp) = 1;
8517 XVECEXP (dwarf, 0, 1) = tmp;
8519 for (i = 1; i < count; i++)
8521 reg = gen_rtx_REG (DFmode, base_reg);
8522 base_reg += 2;
8523 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8525 tmp = gen_rtx_SET (VOIDmode,
8526 gen_frame_mem (DFmode,
8527 plus_constant (stack_pointer_rtx,
8528 i * 8)),
8529 reg);
8530 RTX_FRAME_RELATED_P (tmp) = 1;
8531 XVECEXP (dwarf, 0, i + 1) = tmp;
8534 par = emit_insn (par);
8535 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8536 REG_NOTES (par));
8537 RTX_FRAME_RELATED_P (par) = 1;
8539 return count * 8 + 4;
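/* Worked example (illustrative): pushing two register pairs on a pre-v6
   core triggers the VFPr1 workaround above, so three pairs are stored
   and the function returns 3 * 8 + 4 = 28: eight bytes per pair plus
   the single pad word of the FSTMX format.  */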
8543 /* Output a 'call' insn. */
8544 const char *
8545 output_call (rtx *operands)
8547 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8549 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8550 if (REGNO (operands[0]) == LR_REGNUM)
8552 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8553 output_asm_insn ("mov%?\t%0, %|lr", operands);
8556 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8558 if (TARGET_INTERWORK || arm_arch4t)
8559 output_asm_insn ("bx%?\t%0", operands);
8560 else
8561 output_asm_insn ("mov%?\t%|pc, %0", operands);
8563 return "";
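/* Example sequences (with a hypothetical target register r3): on an
   interworking or ARMv4T target this emits

	mov	lr, pc
	bx	r3

   and otherwise

	mov	lr, pc
	mov	pc, r3
*/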
8566 /* Output a 'call' insn whose target is a reference in memory. */
8567 const char *
8568 output_call_mem (rtx *operands)
8570 if (TARGET_INTERWORK && !arm_arch5)
8572 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8573 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8574 output_asm_insn ("bx%?\t%|ip", operands);
8576 else if (regno_use_in (LR_REGNUM, operands[0]))
8578 /* LR is used in the memory address. We load the address in the
8579 first instruction. It's safe to use IP as the target of the
8580 load since the call will kill it anyway. */
8581 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8582 if (arm_arch5)
8583 output_asm_insn ("blx%?\t%|ip", operands);
8584 else
8586 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8587 if (arm_arch4t)
8588 output_asm_insn ("bx%?\t%|ip", operands);
8589 else
8590 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8593 else
8595 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8596 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8599 return "";
8603 /* Output a move from arm registers to an fpa register.
8604 OPERANDS[0] is an fpa register.
8605 OPERANDS[1] is the first register of an arm register pair. */
8606 const char *
8607 output_mov_long_double_fpa_from_arm (rtx *operands)
8609 int arm_reg0 = REGNO (operands[1]);
8610 rtx ops[3];
8612 gcc_assert (arm_reg0 != IP_REGNUM);
8614 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8615 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8616 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8618 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8619 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8621 return "";
8624 /* Output a move from an fpa register to arm registers.
8625 OPERANDS[0] is the first register of an arm register pair.
8626 OPERANDS[1] is an fpa register. */
8627 const char *
8628 output_mov_long_double_arm_from_fpa (rtx *operands)
8630 int arm_reg0 = REGNO (operands[0]);
8631 rtx ops[3];
8633 gcc_assert (arm_reg0 != IP_REGNUM);
8635 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8636 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8637 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8639 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8640 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8641 return "";
8644 /* Output a move from arm registers to arm registers of a long double.
8645 OPERANDS[0] is the destination.
8646 OPERANDS[1] is the source. */
8647 const char *
8648 output_mov_long_double_arm_from_arm (rtx *operands)
8650 /* We have to be careful here because the two might overlap. */
8651 int dest_start = REGNO (operands[0]);
8652 int src_start = REGNO (operands[1]);
8653 rtx ops[2];
8654 int i;
8656 if (dest_start < src_start)
8658 for (i = 0; i < 3; i++)
8660 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8661 ops[1] = gen_rtx_REG (SImode, src_start + i);
8662 output_asm_insn ("mov%?\t%0, %1", ops);
8665 else
8667 for (i = 2; i >= 0; i--)
8669 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8670 ops[1] = gen_rtx_REG (SImode, src_start + i);
8671 output_asm_insn ("mov%?\t%0, %1", ops);
8675 return "";
8679 /* Output a move from arm registers to an fpa register.
8680 OPERANDS[0] is an fpa register.
8681 OPERANDS[1] is the first register of an arm register pair. */
8682 const char *
8683 output_mov_double_fpa_from_arm (rtx *operands)
8685 int arm_reg0 = REGNO (operands[1]);
8686 rtx ops[2];
8688 gcc_assert (arm_reg0 != IP_REGNUM);
8690 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8691 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8692 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8693 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8694 return "";
8697 /* Output a move from an fpa register to arm registers.
8698 OPERANDS[0] is the first register of an arm register pair.
8699 OPERANDS[1] is an fpa register. */
8700 const char *
8701 output_mov_double_arm_from_fpa (rtx *operands)
8703 int arm_reg0 = REGNO (operands[0]);
8704 rtx ops[2];
8706 gcc_assert (arm_reg0 != IP_REGNUM);
8708 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8709 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8710 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8711 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8712 return "";
8715 /* Output a move between double words.
8716 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8717 or MEM<-REG and all MEMs must be offsettable addresses. */
8718 const char *
8719 output_move_double (rtx *operands)
8721 enum rtx_code code0 = GET_CODE (operands[0]);
8722 enum rtx_code code1 = GET_CODE (operands[1]);
8723 rtx otherops[3];
8725 if (code0 == REG)
8727 int reg0 = REGNO (operands[0]);
8729 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8731 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8733 switch (GET_CODE (XEXP (operands[1], 0)))
8735 case REG:
8736 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8737 break;
8739 case PRE_INC:
8740 gcc_assert (TARGET_LDRD);
8741 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8742 break;
8744 case PRE_DEC:
8745 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8746 break;
8748 case POST_INC:
8749 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8750 break;
8752 case POST_DEC:
8753 gcc_assert (TARGET_LDRD);
8754 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8755 break;
8757 case PRE_MODIFY:
8758 case POST_MODIFY:
8759 otherops[0] = operands[0];
8760 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8761 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8763 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8765 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8767 /* Registers overlap so split out the increment. */
8768 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8769 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8771 else
8772 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8774 else
8776 /* We only allow constant increments, so this is safe. */
8777 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8779 break;
8781 case LABEL_REF:
8782 case CONST:
8783 output_asm_insn ("adr%?\t%0, %1", operands);
8784 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8785 break;
8787 default:
8788 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8789 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8791 otherops[0] = operands[0];
8792 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8793 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8795 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8797 if (GET_CODE (otherops[2]) == CONST_INT)
8799 switch ((int) INTVAL (otherops[2]))
8801 case -8:
8802 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8803 return "";
8804 case -4:
8805 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8806 return "";
8807 case 4:
8808 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8809 return "";
8812 if (TARGET_LDRD
8813 && (GET_CODE (otherops[2]) == REG
8814 || (GET_CODE (otherops[2]) == CONST_INT
8815 && INTVAL (otherops[2]) > -256
8816 && INTVAL (otherops[2]) < 256)))
8818 if (reg_overlap_mentioned_p (otherops[0],
8819 otherops[2]))
8821 /* Swap base and index registers over to
8822 avoid a conflict. */
8823 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8824 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8826 /* If both registers conflict, it will usually
8827 have been fixed by a splitter. */
8828 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8830 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8831 output_asm_insn ("ldr%?d\t%0, [%1]",
8832 otherops);
8834 else
8835 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8836 return "";
8839 if (GET_CODE (otherops[2]) == CONST_INT)
8841 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8842 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8843 else
8844 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8846 else
8847 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8849 else
8850 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8852 return "ldm%?ia\t%0, %M0";
8854 else
8856 otherops[1] = adjust_address (operands[1], SImode, 4);
8857 /* Take care of overlapping base/data reg. */
8858 if (reg_mentioned_p (operands[0], operands[1]))
8860 output_asm_insn ("ldr%?\t%0, %1", otherops);
8861 output_asm_insn ("ldr%?\t%0, %1", operands);
8863 else
8865 output_asm_insn ("ldr%?\t%0, %1", operands);
8866 output_asm_insn ("ldr%?\t%0, %1", otherops);
8871 else
8873 /* Constraints should ensure this. */
8874 gcc_assert (code0 == MEM && code1 == REG);
8875 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8877 switch (GET_CODE (XEXP (operands[0], 0)))
8879 case REG:
8880 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8881 break;
8883 case PRE_INC:
8884 gcc_assert (TARGET_LDRD);
8885 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8886 break;
8888 case PRE_DEC:
8889 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8890 break;
8892 case POST_INC:
8893 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8894 break;
8896 case POST_DEC:
8897 gcc_assert (TARGET_LDRD);
8898 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8899 break;
8901 case PRE_MODIFY:
8902 case POST_MODIFY:
8903 otherops[0] = operands[1];
8904 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8905 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8907 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8908 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8909 else
8910 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8911 break;
8913 case PLUS:
8914 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8915 if (GET_CODE (otherops[2]) == CONST_INT)
8917 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8919 case -8:
8920 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8921 return "";
8923 case -4:
8924 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8925 return "";
8927 case 4:
8928 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8929 return "";
8932 if (TARGET_LDRD
8933 && (GET_CODE (otherops[2]) == REG
8934 || (GET_CODE (otherops[2]) == CONST_INT
8935 && INTVAL (otherops[2]) > -256
8936 && INTVAL (otherops[2]) < 256)))
8938 otherops[0] = operands[1];
8939 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8940 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8941 return "";
8943 /* Fall through */
8945 default:
8946 otherops[0] = adjust_address (operands[0], SImode, 4);
8947 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8948 output_asm_insn ("str%?\t%1, %0", operands);
8949 output_asm_insn ("str%?\t%1, %0", otherops);
8953 return "";
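/* Summary of the small-offset load cases above (illustrative, with r2
   as the base and r0/r1 as the destination pair):

	offset -8:	ldmdb	r2, {r0, r1}
	offset -4:	ldmda	r2, {r0, r1}
	offset  4:	ldmib	r2, {r0, r1}

   Other in-range offsets use a single ldrd on TARGET_LDRD cores, or an
   add/sub of the offset followed by ldmia otherwise.  */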
8956 /* Output an ADD r, s, #n where n may be too big for one instruction.
8957 If adding zero and the registers are the same, output nothing. */
8958 const char *
8959 output_add_immediate (rtx *operands)
8961 HOST_WIDE_INT n = INTVAL (operands[2]);
8963 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8965 if (n < 0)
8966 output_multi_immediate (operands,
8967 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8968 -n);
8969 else
8970 output_multi_immediate (operands,
8971 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8975 return "";
8978 /* Output a multiple immediate operation.
8979 OPERANDS is the vector of operands referred to in the output patterns.
8980 INSTR1 is the output pattern to use for the first constant.
8981 INSTR2 is the output pattern to use for subsequent constants.
8982 IMMED_OP is the index of the constant slot in OPERANDS.
8983 N is the constant value. */
8984 static const char *
8985 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8986 int immed_op, HOST_WIDE_INT n)
8988 #if HOST_BITS_PER_WIDE_INT > 32
8989 n &= 0xffffffff;
8990 #endif
8992 if (n == 0)
8994 /* Quick and easy output. */
8995 operands[immed_op] = const0_rtx;
8996 output_asm_insn (instr1, operands);
8998 else
9000 int i;
9001 const char * instr = instr1;
9003 /* Note that n is never zero here (which would give no output). */
9004 for (i = 0; i < 32; i += 2)
9006 if (n & (3 << i))
9008 operands[immed_op] = GEN_INT (n & (255 << i));
9009 output_asm_insn (instr, operands);
9010 instr = instr2;
9011 i += 6;
9016 return "";
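/* Worked example (illustrative): for N = 0x10001 the scan above finds
   two 8-bit chunks and emits

	add	r0, r1, #1
	add	r0, r0, #65536

   whereas N = 0x104 (65 << 2) is a single rotated immediate and needs
   only one add.  */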
9019 /* Return the appropriate ARM instruction for the operation code.
9020 The returned result should not be overwritten. OP is the rtx of the
9021 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
9022 was shifted. */
9023 const char *
9024 arithmetic_instr (rtx op, int shift_first_arg)
9026 switch (GET_CODE (op))
9028 case PLUS:
9029 return "add";
9031 case MINUS:
9032 return shift_first_arg ? "rsb" : "sub";
9034 case IOR:
9035 return "orr";
9037 case XOR:
9038 return "eor";
9040 case AND:
9041 return "and";
9043 default:
9044 gcc_unreachable ();
9048 /* Ensure valid constant shifts and return the appropriate shift mnemonic
9049 for the operation code. The returned result should not be overwritten.
9050 OP is the rtx code of the shift.
9051 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
9052 constant amount of the shift otherwise. */
9053 static const char *
9054 shift_op (rtx op, HOST_WIDE_INT *amountp)
9056 const char * mnem;
9057 enum rtx_code code = GET_CODE (op);
9059 switch (GET_CODE (XEXP (op, 1)))
9061 case REG:
9062 case SUBREG:
9063 *amountp = -1;
9064 break;
9066 case CONST_INT:
9067 *amountp = INTVAL (XEXP (op, 1));
9068 break;
9070 default:
9071 gcc_unreachable ();
9074 switch (code)
9076 case ASHIFT:
9077 mnem = "asl";
9078 break;
9080 case ASHIFTRT:
9081 mnem = "asr";
9082 break;
9084 case LSHIFTRT:
9085 mnem = "lsr";
9086 break;
9088 case ROTATE:
9089 gcc_assert (*amountp != -1);
9090 *amountp = 32 - *amountp;
9092 /* Fall through. */
9094 case ROTATERT:
9095 mnem = "ror";
9096 break;
9098 case MULT:
9099 /* We never have to worry about the amount being other than a
9100 power of 2, since this case can never be reloaded from a reg. */
9101 gcc_assert (*amountp != -1);
9102 *amountp = int_log2 (*amountp);
9103 return "asl";
9105 default:
9106 gcc_unreachable ();
9109 if (*amountp != -1)
9111 /* This is not 100% correct, but follows from the desire to merge
9112 multiplication by a power of 2 with the recognizer for a
9113 shift. >=32 is not a valid shift for "asl", so we must try to
9114 output a shift that produces the correct arithmetical result.
9115 Using lsr #32 is identical except for the fact that the carry bit
9116 is not set correctly if we set the flags; but we never use the
9117 carry bit from such an operation, so we can ignore that. */
9118 if (code == ROTATERT)
9119 /* Rotate is just modulo 32. */
9120 *amountp &= 31;
9121 else if (*amountp != (*amountp & 31))
9123 if (code == ASHIFT)
9124 mnem = "lsr";
9125 *amountp = 32;
9128 /* Shifts of 0 are no-ops. */
9129 if (*amountp == 0)
9130 return NULL;
9133 return mnem;
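/* Examples (illustrative): (mult x 8) is output as "asl" with *AMOUNTP
   set to int_log2 (8) == 3, and (rotate x 8) becomes "ror" with
   *AMOUNTP rewritten to 32 - 8 == 24, since the hardware only rotates
   right.  */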
9136 /* Obtain the shift count from POWER, which must be a power of two. */
9138 static HOST_WIDE_INT
9139 int_log2 (HOST_WIDE_INT power)
9141 HOST_WIDE_INT shift = 0;
9143 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
9145 gcc_assert (shift <= 31);
9146 shift++;
9149 return shift;
9152 /* Output a .ascii pseudo-op, keeping track of lengths. This is
9153 because /bin/as is horribly restrictive. The judgement about
9154 whether or not each character is 'printable' (and can be output as
9155 is) or not (and must be printed with an octal escape) must be made
9156 with reference to the *host* character set -- the situation is
9157 similar to that discussed in the comments above pp_c_char in
9158 c-pretty-print.c. */
9160 #define MAX_ASCII_LEN 51
9162 void
9163 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
9165 int i;
9166 int len_so_far = 0;
9168 fputs ("\t.ascii\t\"", stream);
9170 for (i = 0; i < len; i++)
9172 int c = p[i];
9174 if (len_so_far >= MAX_ASCII_LEN)
9176 fputs ("\"\n\t.ascii\t\"", stream);
9177 len_so_far = 0;
9180 if (ISPRINT (c))
9182 if (c == '\\' || c == '\"')
9184 putc ('\\', stream);
9185 len_so_far++;
9187 putc (c, stream);
9188 len_so_far++;
9190 else
9192 fprintf (stream, "\\%03o", c);
9193 len_so_far += 4;
9197 fputs ("\"\n", stream);
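/* Example (illustrative): for the input bytes

	He said "hi"<newline>

   the function emits

	.ascii	"He said \"hi\"\012"

   escaping the quote characters and printing the non-printable newline
   as an octal escape.  */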
9200 /* Compute the register save mask for registers 0 through 12
9201 inclusive. This code is used by arm_compute_save_reg_mask. */
9203 static unsigned long
9204 arm_compute_save_reg0_reg12_mask (void)
9206 unsigned long func_type = arm_current_func_type ();
9207 unsigned long save_reg_mask = 0;
9208 unsigned int reg;
9210 if (IS_INTERRUPT (func_type))
9212 unsigned int max_reg;
9213 /* Interrupt functions must not corrupt any registers,
9214 even call-clobbered ones. If this is a leaf function
9215 we can just examine the registers used by the RTL, but
9216 otherwise we have to assume that whatever function is
9217 called might clobber anything, and so we have to save
9218 all the call-clobbered registers as well. */
9219 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
9220 /* FIQ handlers have registers r8 - r12 banked, so
9221 we only need to check r0 - r7. Normal ISRs only
9222 bank r14 and r15, so we must check up to r12.
9223 r13 is the stack pointer which is always preserved,
9224 so we do not need to consider it here. */
9225 max_reg = 7;
9226 else
9227 max_reg = 12;
9229 for (reg = 0; reg <= max_reg; reg++)
9230 if (regs_ever_live[reg]
9231 || (! current_function_is_leaf && call_used_regs [reg]))
9232 save_reg_mask |= (1 << reg);
9234 /* Also save the pic base register if necessary. */
9235 if (flag_pic
9236 && !TARGET_SINGLE_PIC_BASE
9237 && arm_pic_register != INVALID_REGNUM
9238 && current_function_uses_pic_offset_table)
9239 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9241 else
9243 /* In the normal case we only need to save those registers
9244 which are call saved and which are used by this function. */
9245 for (reg = 0; reg <= 10; reg++)
9246 if (regs_ever_live[reg] && ! call_used_regs [reg])
9247 save_reg_mask |= (1 << reg);
9249 /* Handle the frame pointer as a special case. */
9250 if (! TARGET_APCS_FRAME
9251 && ! frame_pointer_needed
9252 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
9253 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9254 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9256 /* If we aren't loading the PIC register,
9257 don't stack it even though it may be live. */
9258 if (flag_pic
9259 && !TARGET_SINGLE_PIC_BASE
9260 && arm_pic_register != INVALID_REGNUM
9261 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
9262 || current_function_uses_pic_offset_table))
9263 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9266 /* Save registers so the exception handler can modify them. */
9267 if (current_function_calls_eh_return)
9269 unsigned int i;
9271 for (i = 0; ; i++)
9273 reg = EH_RETURN_DATA_REGNO (i);
9274 if (reg == INVALID_REGNUM)
9275 break;
9276 save_reg_mask |= 1 << reg;
9280 return save_reg_mask;
9283 /* Compute a bit mask of which registers need to be
9284 saved on the stack for the current function. */
9286 static unsigned long
9287 arm_compute_save_reg_mask (void)
9289 unsigned int save_reg_mask = 0;
9290 unsigned long func_type = arm_current_func_type ();
9292 if (IS_NAKED (func_type))
9293 /* This should never really happen. */
9294 return 0;
9296 /* If we are creating a stack frame, then we must save the frame pointer,
9297 IP (which will hold the old stack pointer), LR and the PC. */
9298 if (frame_pointer_needed)
9299 save_reg_mask |=
9300 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9301 | (1 << IP_REGNUM)
9302 | (1 << LR_REGNUM)
9303 | (1 << PC_REGNUM);
9305 /* Volatile functions do not return, so there
9306 is no need to save any other registers. */
9307 if (IS_VOLATILE (func_type))
9308 return save_reg_mask;
9310 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9312 /* Decide if we need to save the link register.
9313 Interrupt routines have their own banked link register,
9314 so they never need to save it.
9315 Otherwise if we do not use the link register we do not need to save
9316 it. If we are pushing other registers onto the stack however, we
9317 can save an instruction in the epilogue by pushing the link register
9318 now and then popping it back into the PC. This incurs extra memory
9319 accesses though, so we only do it when optimizing for size, and only
9320 if we know that we will not need a fancy return sequence. */
9321 if (regs_ever_live [LR_REGNUM]
9322 || (save_reg_mask
9323 && optimize_size
9324 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9325 && !current_function_calls_eh_return))
9326 save_reg_mask |= 1 << LR_REGNUM;
9328 if (cfun->machine->lr_save_eliminated)
9329 save_reg_mask &= ~ (1 << LR_REGNUM);
9331 if (TARGET_REALLY_IWMMXT
9332 && ((bit_count (save_reg_mask)
9333 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
9335 unsigned int reg;
9337 /* The total number of registers that are going to be pushed
9338 onto the stack is odd. We need to ensure that the stack
9339 is 64-bit aligned before we start to save iWMMXt registers,
9340 and also before we start to create locals. (A local variable
9341 might be a double or long long which we will load/store using
9342 an iWMMXt instruction). Therefore we need to push another
9343 ARM register, so that the stack will be 64-bit aligned. We
9344 try to avoid using the arg registers (r0 - r3) as they might be
9345 used to pass values in a tail call. */
9346 for (reg = 4; reg <= 12; reg++)
9347 if ((save_reg_mask & (1 << reg)) == 0)
9348 break;
9350 if (reg <= 12)
9351 save_reg_mask |= (1 << reg);
9352 else
9354 cfun->machine->sibcall_blocked = 1;
9355 save_reg_mask |= (1 << 3);
9359 return save_reg_mask;
9363 /* Compute a bit mask of which registers need to be
9364 saved on the stack for the current function. */
9365 static unsigned long
9366 thumb_compute_save_reg_mask (void)
9368 unsigned long mask;
9369 unsigned reg;
9371 mask = 0;
9372 for (reg = 0; reg < 12; reg ++)
9373 if (regs_ever_live[reg] && !call_used_regs[reg])
9374 mask |= 1 << reg;
9376 if (flag_pic
9377 && !TARGET_SINGLE_PIC_BASE
9378 && arm_pic_register != INVALID_REGNUM
9379 && current_function_uses_pic_offset_table)
9380 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9382 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
9383 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
9384 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
9386 /* LR will also be pushed if any lo regs are pushed. */
9387 if (mask & 0xff || thumb_force_lr_save ())
9388 mask |= (1 << LR_REGNUM);
9390 /* Make sure we have a low work register if we need one.
9391 We will need one if we are going to push a high register,
9392 but we are not currently intending to push a low register. */
9393 if ((mask & 0xff) == 0
9394 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9396 /* Use thumb_find_work_register to choose which register
9397 we will use. If the register is live then we will
9398 have to push it. Use LAST_LO_REGNUM as our fallback
9399 choice for the register to select. */
9400 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9402 if (! call_used_regs[reg])
9403 mask |= 1 << reg;
9406 return mask;
9410 /* Return the number of bytes required to save VFP registers. */
9411 static int
9412 arm_get_vfp_saved_size (void)
9414 unsigned int regno;
9415 int count;
9416 int saved;
9418 saved = 0;
9419 /* Space for saved VFP registers. */
9420 if (TARGET_HARD_FLOAT && TARGET_VFP)
9422 count = 0;
9423 for (regno = FIRST_VFP_REGNUM;
9424 regno < LAST_VFP_REGNUM;
9425 regno += 2)
9427 if ((!regs_ever_live[regno] || call_used_regs[regno])
9428 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9430 if (count > 0)
9432 /* Workaround ARM10 VFPr1 bug. */
9433 if (count == 2 && !arm_arch6)
9434 count++;
9435 saved += count * 8 + 4;
9437 count = 0;
9439 else
9440 count++;
9442 if (count > 0)
9444 if (count == 2 && !arm_arch6)
9445 count++;
9446 saved += count * 8 + 4;
9449 return saved;
9453 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9454 everything bar the final return instruction. */
9455 const char *
9456 output_return_instruction (rtx operand, int really_return, int reverse)
9458 char conditional[10];
9459 char instr[100];
9460 unsigned reg;
9461 unsigned long live_regs_mask;
9462 unsigned long func_type;
9463 arm_stack_offsets *offsets;
9465 func_type = arm_current_func_type ();
9467 if (IS_NAKED (func_type))
9468 return "";
9470 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9472 /* If this function was declared non-returning, and we have
9473 found a tail call, then we have to trust that the called
9474 function won't return. */
9475 if (really_return)
9477 rtx ops[2];
9479 /* Otherwise, trap an attempted return by aborting. */
9480 ops[0] = operand;
9481 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9482 : "abort");
9483 assemble_external_libcall (ops[1]);
9484 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9487 return "";
9490 gcc_assert (!current_function_calls_alloca || really_return);
9492 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9494 return_used_this_function = 1;
9496 live_regs_mask = arm_compute_save_reg_mask ();
9498 if (live_regs_mask)
9500 const char * return_reg;
9502 /* If we do not have any special requirements for function exit
9503 (e.g. interworking, or ISR) then we can load the return address
9504 directly into the PC. Otherwise we must load it into LR. */
9505 if (really_return
9506 && ! TARGET_INTERWORK)
9507 return_reg = reg_names[PC_REGNUM];
9508 else
9509 return_reg = reg_names[LR_REGNUM];
9511 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9513 /* There are three possible reasons for the IP register
9514 being saved. 1) a stack frame was created, in which case
9515 IP contains the old stack pointer, or 2) an ISR routine
9516 corrupted it, or 3) it was saved to align the stack on
9517 iWMMXt. In case 1, restore IP into SP, otherwise just
9518 restore IP. */
9519 if (frame_pointer_needed)
9521 live_regs_mask &= ~ (1 << IP_REGNUM);
9522 live_regs_mask |= (1 << SP_REGNUM);
9524 else
9525 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9528 /* On some ARM architectures it is faster to use LDR rather than
9529 LDM to load a single register. On other architectures, the
9530 cost is the same. In 26 bit mode, or for exception handlers,
9531 we have to use LDM to load the PC so that the CPSR is also
9532 restored. */
9533 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9534 if (live_regs_mask == (1U << reg))
9535 break;
9537 if (reg <= LAST_ARM_REGNUM
9538 && (reg != LR_REGNUM
9539 || ! really_return
9540 || ! IS_INTERRUPT (func_type)))
9542 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9543 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9545 else
9547 char *p;
9548 int first = 1;
9550 /* Generate the load multiple instruction to restore the
9551 registers. Note we can get here, even if
9552 frame_pointer_needed is true, but only if sp already
9553 points to the base of the saved core registers. */
9554 if (live_regs_mask & (1 << SP_REGNUM))
9556 unsigned HOST_WIDE_INT stack_adjust;
9558 offsets = arm_get_frame_offsets ();
9559 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9560 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9562 if (stack_adjust && arm_arch5)
9563 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9564 else
9566 /* If we can't use ldmib (SA110 bug),
9567 then try to pop r3 instead. */
9568 if (stack_adjust)
9569 live_regs_mask |= 1 << 3;
9570 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9573 else
9574 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9576 p = instr + strlen (instr);
9578 for (reg = 0; reg <= SP_REGNUM; reg++)
9579 if (live_regs_mask & (1 << reg))
9581 int l = strlen (reg_names[reg]);
9583 if (first)
9584 first = 0;
9585 else
9587 memcpy (p, ", ", 2);
9588 p += 2;
9591 memcpy (p, "%|", 2);
9592 memcpy (p + 2, reg_names[reg], l);
9593 p += l + 2;
9596 if (live_regs_mask & (1 << LR_REGNUM))
9598 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9599 /* If returning from an interrupt, restore the CPSR. */
9600 if (IS_INTERRUPT (func_type))
9601 strcat (p, "^");
9603 else
9604 strcpy (p, "}");
9607 output_asm_insn (instr, & operand);
9609 /* See if we need to generate an extra instruction to
9610 perform the actual function return. */
9611 if (really_return
9612 && func_type != ARM_FT_INTERWORKED
9613 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9615 /* The return has already been handled
9616 by loading the LR into the PC. */
9617 really_return = 0;
9621 if (really_return)
9623 switch ((int) ARM_FUNC_TYPE (func_type))
9625 case ARM_FT_ISR:
9626 case ARM_FT_FIQ:
9627 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9628 break;
9630 case ARM_FT_INTERWORKED:
9631 sprintf (instr, "bx%s\t%%|lr", conditional);
9632 break;
9634 case ARM_FT_EXCEPTION:
9635 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9636 break;
9638 default:
9639 /* Use bx if it's available. */
9640 if (arm_arch5 || arm_arch4t)
9641 sprintf (instr, "bx%s\t%%|lr", conditional);
9642 else
9643 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9644 break;
9647 output_asm_insn (instr, & operand);
9650 return "";
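/* Example returns (hypothetical register masks): a normal function that
   saved r4 and lr can return with the single instruction

	ldmfd	sp!, {r4, pc}

   while an ARM_FT_INTERWORKED function restores lr instead and finishes
   with

	ldmfd	sp!, {r4, lr}
	bx	lr
*/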
9653 /* Write the function name into the code section, directly preceding
9654 the function prologue.
9656 Code will be output similar to this:
9658 .ascii "arm_poke_function_name", 0
9659 .align
9661 .word 0xff000000 + (t1 - t0)
9662 arm_poke_function_name
9663 mov ip, sp
9664 stmfd sp!, {fp, ip, lr, pc}
9665 sub fp, ip, #4
9667 When performing a stack backtrace, code can inspect the value
9668 of 'pc' stored at 'fp' + 0. If the trace function then looks
9669 at location pc - 12 and the top 8 bits are set, then we know
9670 that there is a function name embedded immediately preceding this
9671 location, whose length is ((pc[-3]) & ~0xff000000).
9673 We assume that pc is declared as a pointer to an unsigned long.
9675 It is of no benefit to output the function name if we are assembling
9676 a leaf function. These function types will not contain a stack
9677 backtrace structure, therefore it is not possible to determine the
9678 function name. */
9679 void
9680 arm_poke_function_name (FILE *stream, const char *name)
9682 unsigned long alignlength;
9683 unsigned long length;
9684 rtx x;
9686 length = strlen (name) + 1;
9687 alignlength = ROUND_UP_WORD (length);
9689 ASM_OUTPUT_ASCII (stream, name, length);
9690 ASM_OUTPUT_ALIGN (stream, 2);
9691 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9692 assemble_aligned_integer (UNITS_PER_WORD, x);
9695 /* Place some comments into the assembler stream
9696 describing the current function. */
9697 static void
9698 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9700 unsigned long func_type;
9702 if (!TARGET_ARM)
9704 thumb_output_function_prologue (f, frame_size);
9705 return;
9708 /* Sanity check. */
9709 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9711 func_type = arm_current_func_type ();
9713 switch ((int) ARM_FUNC_TYPE (func_type))
9715 default:
9716 case ARM_FT_NORMAL:
9717 break;
9718 case ARM_FT_INTERWORKED:
9719 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9720 break;
9721 case ARM_FT_ISR:
9722 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9723 break;
9724 case ARM_FT_FIQ:
9725 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9726 break;
9727 case ARM_FT_EXCEPTION:
9728 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9729 break;
9732 if (IS_NAKED (func_type))
9733 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9735 if (IS_VOLATILE (func_type))
9736 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9738 if (IS_NESTED (func_type))
9739 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9741 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9742 current_function_args_size,
9743 current_function_pretend_args_size, frame_size);
9745 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9746 frame_pointer_needed,
9747 cfun->machine->uses_anonymous_args);
9749 if (cfun->machine->lr_save_eliminated)
9750 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9752 if (current_function_calls_eh_return)
9753 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9755 #ifdef AOF_ASSEMBLER
9756 if (flag_pic)
9757 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9758 #endif
9760 return_used_this_function = 0;
9763 const char *
9764 arm_output_epilogue (rtx sibling)
9766 int reg;
9767 unsigned long saved_regs_mask;
9768 unsigned long func_type;
9769 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9770 frame that is $fp + 4 for a non-variadic function. */
9771 int floats_offset = 0;
9772 rtx operands[3];
9773 FILE * f = asm_out_file;
9774 unsigned int lrm_count = 0;
9775 int really_return = (sibling == NULL);
9776 int start_reg;
9777 arm_stack_offsets *offsets;
9779 /* If we have already generated the return instruction
9780 then it is futile to generate anything else. */
9781 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9782 return "";
9784 func_type = arm_current_func_type ();
9786 if (IS_NAKED (func_type))
9787 /* Naked functions don't have epilogues. */
9788 return "";
9790 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9792 rtx op;
9794 /* A volatile function should never return. Call abort. */
9795 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9796 assemble_external_libcall (op);
9797 output_asm_insn ("bl\t%a0", &op);
9799 return "";
9802 /* If we are throwing an exception, then we really must be doing a
9803 return, so we can't tail-call. */
9804 gcc_assert (!current_function_calls_eh_return || really_return);
9806 offsets = arm_get_frame_offsets ();
9807 saved_regs_mask = arm_compute_save_reg_mask ();
9809 if (TARGET_IWMMXT)
9810 lrm_count = bit_count (saved_regs_mask);
9812 floats_offset = offsets->saved_args;
9813 /* Compute how far away the floats will be. */
9814 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9815 if (saved_regs_mask & (1 << reg))
9816 floats_offset += 4;
9818 if (frame_pointer_needed)
9820 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9821 int vfp_offset = offsets->frame;
9823 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9825 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9826 if (regs_ever_live[reg] && !call_used_regs[reg])
9828 floats_offset += 12;
9829 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9830 reg, FP_REGNUM, floats_offset - vfp_offset);
9833 else
9835 start_reg = LAST_FPA_REGNUM;
9837 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9839 if (regs_ever_live[reg] && !call_used_regs[reg])
9841 floats_offset += 12;
9843 /* We can't unstack more than four registers at once. */
9844 if (start_reg - reg == 3)
9846 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9847 reg, FP_REGNUM, floats_offset - vfp_offset);
9848 start_reg = reg - 1;
9851 else
9853 if (reg != start_reg)
9854 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9855 reg + 1, start_reg - reg,
9856 FP_REGNUM, floats_offset - vfp_offset);
9857 start_reg = reg - 1;
9861 /* Just in case the last register checked also needs unstacking. */
9862 if (reg != start_reg)
9863 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9864 reg + 1, start_reg - reg,
9865 FP_REGNUM, floats_offset - vfp_offset);
9868 if (TARGET_HARD_FLOAT && TARGET_VFP)
9870 int saved_size;
9872 /* The fldmx insn does not have base+offset addressing modes,
9873 so we use IP to hold the address. */
9874 saved_size = arm_get_vfp_saved_size ();
9876 if (saved_size > 0)
9878 floats_offset += saved_size;
9879 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9880 FP_REGNUM, floats_offset - vfp_offset);
9882 start_reg = FIRST_VFP_REGNUM;
9883 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9885 if ((!regs_ever_live[reg] || call_used_regs[reg])
9886 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9888 if (start_reg != reg)
9889 arm_output_fldmx (f, IP_REGNUM,
9890 (start_reg - FIRST_VFP_REGNUM) / 2,
9891 (reg - start_reg) / 2);
9892 start_reg = reg + 2;
9895 if (start_reg != reg)
9896 arm_output_fldmx (f, IP_REGNUM,
9897 (start_reg - FIRST_VFP_REGNUM) / 2,
9898 (reg - start_reg) / 2);
9901 if (TARGET_IWMMXT)
9903 /* The frame pointer is guaranteed to be non-double-word aligned.
9904 This is because it is set to (old_stack_pointer - 4) and the
9905 old_stack_pointer was double word aligned. Thus the offset to
9906 the iWMMXt registers to be loaded must also be non-double-word
9907 sized, so that the resultant address *is* double-word aligned.
9908 We can ignore floats_offset since that was already included in
9909 the live_regs_mask. */
9910 lrm_count += (lrm_count % 2 ? 2 : 1);
9912 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9913 if (regs_ever_live[reg] && !call_used_regs[reg])
9915 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9916 reg, FP_REGNUM, lrm_count * 4);
9917 lrm_count += 2;
9921 /* saved_regs_mask should contain the IP, which at the time of stack
9922 frame generation actually contains the old stack pointer. So a
9923 quick way to unwind the stack is just pop the IP register directly
9924 into the stack pointer. */
9925 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9926 saved_regs_mask &= ~ (1 << IP_REGNUM);
9927 saved_regs_mask |= (1 << SP_REGNUM);
9929 /* There are two registers left in saved_regs_mask - LR and PC. We
9930 only need to restore the LR register (the return address), but to
9931 save time we can load it directly into the PC, unless we need a
9932 special function exit sequence, or we are not really returning. */
9933 if (really_return
9934 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9935 && !current_function_calls_eh_return)
9936 /* Delete the LR from the register mask, so that the LR on
9937 the stack is loaded into the PC in the register mask. */
9938 saved_regs_mask &= ~ (1 << LR_REGNUM);
9939 else
9940 saved_regs_mask &= ~ (1 << PC_REGNUM);
9942 /* We must use SP as the base register, because SP is one of the
9943 registers being restored. If an interrupt or page fault
9944 happens in the ldm instruction, the SP might or might not
9945 have been restored. That would be bad, as then SP will no
9946 longer indicate the safe area of stack, and we can get stack
9947 corruption. Using SP as the base register means that it will
9948 be reset correctly to the original value, should an interrupt
9949 occur. If the stack pointer already points at the right
9950 place, then omit the subtraction. */
9951 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9952 || current_function_calls_alloca)
9953 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9954 4 * bit_count (saved_regs_mask));
9955 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9957 if (IS_INTERRUPT (func_type))
9958 /* Interrupt handlers will have pushed the
9959 IP onto the stack, so restore it now. */
9960 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9962 else
9964 /* Restore stack pointer if necessary. */
9965 if (offsets->outgoing_args != offsets->saved_regs)
9967 operands[0] = operands[1] = stack_pointer_rtx;
9968 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9969 output_add_immediate (operands);
9972 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9974 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9975 if (regs_ever_live[reg] && !call_used_regs[reg])
9976 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9977 reg, SP_REGNUM);
9979 else
9981 start_reg = FIRST_FPA_REGNUM;
9983 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9985 if (regs_ever_live[reg] && !call_used_regs[reg])
9987 if (reg - start_reg == 3)
9989 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9990 start_reg, SP_REGNUM);
9991 start_reg = reg + 1;
9994 else
9996 if (reg != start_reg)
9997 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9998 start_reg, reg - start_reg,
9999 SP_REGNUM);
10001 start_reg = reg + 1;
10005 /* Just in case the last register checked also needs unstacking. */
10006 if (reg != start_reg)
10007 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10008 start_reg, reg - start_reg, SP_REGNUM);
10011 if (TARGET_HARD_FLOAT && TARGET_VFP)
10013 start_reg = FIRST_VFP_REGNUM;
10014 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10016 if ((!regs_ever_live[reg] || call_used_regs[reg])
10017 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10019 if (start_reg != reg)
10020 arm_output_fldmx (f, SP_REGNUM,
10021 (start_reg - FIRST_VFP_REGNUM) / 2,
10022 (reg - start_reg) / 2);
10023 start_reg = reg + 2;
10026 if (start_reg != reg)
10027 arm_output_fldmx (f, SP_REGNUM,
10028 (start_reg - FIRST_VFP_REGNUM) / 2,
10029 (reg - start_reg) / 2);
10031 if (TARGET_IWMMXT)
10032 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10033 if (regs_ever_live[reg] && !call_used_regs[reg])
10034 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
10036 /* If we can, restore the LR into the PC. */
10037 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
10038 && really_return
10039 && current_function_pretend_args_size == 0
10040 && saved_regs_mask & (1 << LR_REGNUM)
10041 && !current_function_calls_eh_return)
10043 saved_regs_mask &= ~ (1 << LR_REGNUM);
10044 saved_regs_mask |= (1 << PC_REGNUM);
10047 /* Load the registers off the stack. If we only have one register
10048 to load use the LDR instruction - it is faster. */
10049 if (saved_regs_mask == (1 << LR_REGNUM))
10051 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
10053 else if (saved_regs_mask)
10055 if (saved_regs_mask & (1 << SP_REGNUM))
10056 /* Note - write back to the stack register is not enabled
10057 (i.e. "ldmfd sp!..."). We know that the stack pointer is
10058 in the list of registers and if we add writeback the
10059 instruction becomes UNPREDICTABLE. */
10060 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
10061 else
10062 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
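/* Illustration of the distinction above (assumed masks, not generated
   output): restoring {r4, sp, pc} must use "ldmfd sp, {r4, sp, pc}"
   with no writeback, because SP is in the register list, whereas
   {r4, pc} alone may use the writeback form "ldmfd sp!, {r4, pc}".  */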
10065 if (current_function_pretend_args_size)
10067 /* Unwind the pre-pushed regs. */
10068 operands[0] = operands[1] = stack_pointer_rtx;
10069 operands[2] = GEN_INT (current_function_pretend_args_size);
10070 output_add_immediate (operands);
10074 /* We may have already restored PC directly from the stack. */
10075 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
10076 return "";
10078 /* Stack adjustment for exception handler. */
10079 if (current_function_calls_eh_return)
10080 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
10081 ARM_EH_STACKADJ_REGNUM);
10083 /* Generate the return instruction. */
10084 switch ((int) ARM_FUNC_TYPE (func_type))
10086 case ARM_FT_ISR:
10087 case ARM_FT_FIQ:
10088 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
10089 break;
10091 case ARM_FT_EXCEPTION:
10092 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10093 break;
10095 case ARM_FT_INTERWORKED:
10096 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10097 break;
10099 default:
10100 if (arm_arch5 || arm_arch4t)
10101 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10102 else
10103 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10104 break;
10107 return "";
10110 static void
10111 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10112 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
10114 arm_stack_offsets *offsets;
10116 if (TARGET_THUMB)
10118 int regno;
10120 /* Emit any call-via-reg trampolines that are needed for v4t support
10121 of call_reg and call_value_reg type insns. */
10122 for (regno = 0; regno < LR_REGNUM; regno++)
10124 rtx label = cfun->machine->call_via[regno];
10126 if (label != NULL)
10128 switch_to_section (function_section (current_function_decl));
10129 targetm.asm_out.internal_label (asm_out_file, "L",
10130 CODE_LABEL_NUMBER (label));
10131 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
10135 /* ??? Probably not safe to set this here, since it assumes that a
10136 function will be emitted as assembly immediately after we generate
10137 RTL for it. This does not happen for inline functions. */
10138 return_used_this_function = 0;
10140 else
10142 /* We need to take into account any stack-frame rounding. */
10143 offsets = arm_get_frame_offsets ();
10145 gcc_assert (!use_return_insn (FALSE, NULL)
10146 || !return_used_this_function
10147 || offsets->saved_regs == offsets->outgoing_args
10148 || frame_pointer_needed);
10150 /* Reset the ARM-specific per-function variables. */
10151 after_arm_reorg = 0;
10155 /* Generate and emit an insn that we will recognize as a push_multi.
10156 Unfortunately, since this insn does not reflect very well the actual
10157 semantics of the operation, we need to annotate the insn for the benefit
10158 of DWARF2 frame unwind information. */
10159 static rtx
10160 emit_multi_reg_push (unsigned long mask)
10162 int num_regs = 0;
10163 int num_dwarf_regs;
10164 int i, j;
10165 rtx par;
10166 rtx dwarf;
10167 int dwarf_par_index;
10168 rtx tmp, reg;
10170 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10171 if (mask & (1 << i))
10172 num_regs++;
10174 gcc_assert (num_regs && num_regs <= 16);
10176 /* We don't record the PC in the dwarf frame information. */
10177 num_dwarf_regs = num_regs;
10178 if (mask & (1 << PC_REGNUM))
10179 num_dwarf_regs--;
10181 /* For the body of the insn we are going to generate an UNSPEC in
10182 parallel with several USEs. This allows the insn to be recognized
10183 by the push_multi pattern in the arm.md file. The insn looks
10184 something like this:
10186 (parallel [
10187 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
10188 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
10189 (use (reg:SI 11 fp))
10190 (use (reg:SI 12 ip))
10191 (use (reg:SI 14 lr))
10192 (use (reg:SI 15 pc))
10193 ])
10195 For the frame note however, we try to be more explicit and actually
10196 show each register being stored into the stack frame, plus a (single)
10197 decrement of the stack pointer. We do it this way in order to be
10198 friendly to the stack unwinding code, which only wants to see a single
10199 stack decrement per instruction. The RTL we generate for the note looks
10200 something like this:
10202 (sequence [
10203 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
10204 (set (mem:SI (reg:SI sp)) (reg:SI r4))
10205 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
10206 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
10207 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
10208 ])
10210 This sequence is used both by the code to support stack unwinding for
10211 exception handlers and the code to generate dwarf2 frame debugging. */
10213 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10214 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10215 dwarf_par_index = 1;
10217 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10219 if (mask & (1 << i))
10221 reg = gen_rtx_REG (SImode, i);
10223 XVECEXP (par, 0, 0)
10224 = gen_rtx_SET (VOIDmode,
10225 gen_frame_mem (BLKmode,
10226 gen_rtx_PRE_DEC (BLKmode,
10227 stack_pointer_rtx)),
10228 gen_rtx_UNSPEC (BLKmode,
10229 gen_rtvec (1, reg),
10230 UNSPEC_PUSH_MULT));
10232 if (i != PC_REGNUM)
10234 tmp = gen_rtx_SET (VOIDmode,
10235 gen_frame_mem (SImode, stack_pointer_rtx),
10236 reg);
10237 RTX_FRAME_RELATED_P (tmp) = 1;
10238 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10239 dwarf_par_index++;
10242 break;
10246 for (j = 1, i++; j < num_regs; i++)
10248 if (mask & (1 << i))
10250 reg = gen_rtx_REG (SImode, i);
10252 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10254 if (i != PC_REGNUM)
10256 tmp
10257 = gen_rtx_SET (VOIDmode,
10258 gen_frame_mem (SImode,
10259 plus_constant (stack_pointer_rtx,
10260 4 * j)),
10261 reg);
10262 RTX_FRAME_RELATED_P (tmp) = 1;
10263 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10266 j++;
10270 par = emit_insn (par);
10272 tmp = gen_rtx_SET (VOIDmode,
10273 stack_pointer_rtx,
10274 plus_constant (stack_pointer_rtx, -4 * num_regs));
10275 RTX_FRAME_RELATED_P (tmp) = 1;
10276 XVECEXP (dwarf, 0, 0) = tmp;
10278 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10279 REG_NOTES (par));
10280 return par;
10283 /* Calculate the size of the return value that is passed in registers. */
10284 static int
10285 arm_size_return_regs (void)
10287 enum machine_mode mode;
10289 if (current_function_return_rtx != 0)
10290 mode = GET_MODE (current_function_return_rtx);
10291 else
10292 mode = DECL_MODE (DECL_RESULT (current_function_decl));
10294 return GET_MODE_SIZE (mode);
10297 static rtx
10298 emit_sfm (int base_reg, int count)
10300 rtx par;
10301 rtx dwarf;
10302 rtx tmp, reg;
10303 int i;
10305 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10306 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10308 reg = gen_rtx_REG (XFmode, base_reg++);
10310 XVECEXP (par, 0, 0)
10311 = gen_rtx_SET (VOIDmode,
10312 gen_frame_mem (BLKmode,
10313 gen_rtx_PRE_DEC (BLKmode,
10314 stack_pointer_rtx)),
10315 gen_rtx_UNSPEC (BLKmode,
10316 gen_rtvec (1, reg),
10317 UNSPEC_PUSH_MULT));
10318 tmp = gen_rtx_SET (VOIDmode,
10319 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
10320 RTX_FRAME_RELATED_P (tmp) = 1;
10321 XVECEXP (dwarf, 0, 1) = tmp;
10323 for (i = 1; i < count; i++)
10325 reg = gen_rtx_REG (XFmode, base_reg++);
10326 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10328 tmp = gen_rtx_SET (VOIDmode,
10329 gen_frame_mem (XFmode,
10330 plus_constant (stack_pointer_rtx,
10331 i * 12)),
10332 reg);
10333 RTX_FRAME_RELATED_P (tmp) = 1;
10334 XVECEXP (dwarf, 0, i + 1) = tmp;
10337 tmp = gen_rtx_SET (VOIDmode,
10338 stack_pointer_rtx,
10339 plus_constant (stack_pointer_rtx, -12 * count));
10341 RTX_FRAME_RELATED_P (tmp) = 1;
10342 XVECEXP (dwarf, 0, 0) = tmp;
10344 par = emit_insn (par);
10345 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10346 REG_NOTES (par));
10347 return par;
10351 /* Return true if the current function needs to save/restore LR. */
10353 static bool
10354 thumb_force_lr_save (void)
10356 return !cfun->machine->lr_save_eliminated
10357 && (!leaf_function_p ()
10358 || thumb_far_jump_used_p ()
10359 || regs_ever_live [LR_REGNUM]);
10363 /* Compute the distance from register FROM to register TO.
10364 These can be the arg pointer (26), the soft frame pointer (25),
10365 the stack pointer (13) or the hard frame pointer (11).
10366 In thumb mode r7 is used as the soft frame pointer, if needed.
10367 Typical stack layout looks like this:
10369 old stack pointer -> |    |
10370                          ----
10371                         |    | \
10372                         |    |   saved arguments for
10373                         |    |   vararg functions
10374                         |    | /
10375                          ----
10376 hard FP & arg pointer -> |    | \
10377                         |    |   stack
10378                         |    |   frame
10379                         |    | /
10380                          ----
10381                         |    | \
10382                         |    |   call saved
10383                         |    |   registers
10384 soft frame pointer ->   |    | /
10385                          ----
10386                         |    | \
10387                         |    |   local
10388                         |    |   variables
10389 locals base pointer ->  |    | /
10390                          ----
10391                         |    | \
10392                         |    |   outgoing
10393                         |    |   arguments
10394 current stack pointer -> |    | /
10395                          ----
10397 For a given function some or all of these stack components
10398 may not be needed, giving rise to the possibility of
10399 eliminating some of the registers.
10401 The values returned by this function must reflect the behavior
10402 of arm_expand_prologue() and arm_compute_save_reg_mask().
10404 The sign of the number returned reflects the direction of stack
10405 growth, so the values are positive for all eliminations except
10406 from the soft frame pointer to the hard frame pointer.
10408 SFP may point just inside the local variables block to ensure correct
10409 alignment. */
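/* A worked example with hypothetical numbers (and assuming a zero
   CALLER_INTERWORKING_SLOT_SIZE): no pretend args, 16 bytes of call
   saved registers, 8 bytes of locals and 8 bytes of outgoing
   arguments give saved_args = 0, saved_regs = 16, soft_frame = 16,
   locals_base = 24 and outgoing_args = 32, so the elimination from
   the arg pointer to the stack pointer is 32 - (0 + 4) = 28.  */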
10412 /* Calculate stack offsets. These are used to calculate register elimination
10413 offsets and in prologue/epilogue code. */
10415 static arm_stack_offsets *
10416 arm_get_frame_offsets (void)
10418 struct arm_stack_offsets *offsets;
10419 unsigned long func_type;
10420 int leaf;
10421 int saved;
10422 HOST_WIDE_INT frame_size;
10424 offsets = &cfun->machine->stack_offsets;
10426 /* We need to know if we are a leaf function. Unfortunately, it
10427 is possible to be called after start_sequence has been called,
10428 which causes get_insns to return the insns for the sequence,
10429 not the function, which will cause leaf_function_p to return
10430 the incorrect result.
10431 We work around this by caching the offsets; we only need
10432 to know about leaf functions once reload has completed, and the
10433 frame size cannot be changed after that time, so we can safely
10434 use the cached value. */
10436 if (reload_completed)
10437 return offsets;
10439 /* Initially this is the size of the local variables. It will be translated
10440 into an offset once we have determined the size of preceding data. */
10441 frame_size = ROUND_UP_WORD (get_frame_size ());
10443 leaf = leaf_function_p ();
10445 /* Space for variadic functions. */
10446 offsets->saved_args = current_function_pretend_args_size;
10448 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10450 if (TARGET_ARM)
10452 unsigned int regno;
10454 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10456 /* We know that SP will be doubleword aligned on entry, and we must
10457 preserve that condition at any subroutine call. We also require the
10458 soft frame pointer to be doubleword aligned. */
10460 if (TARGET_REALLY_IWMMXT)
10462 /* Check for the call-saved iWMMXt registers. */
10463 for (regno = FIRST_IWMMXT_REGNUM;
10464 regno <= LAST_IWMMXT_REGNUM;
10465 regno++)
10466 if (regs_ever_live [regno] && ! call_used_regs [regno])
10467 saved += 8;
10470 func_type = arm_current_func_type ();
10471 if (! IS_VOLATILE (func_type))
10473 /* Space for saved FPA registers. */
10474 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10475 if (regs_ever_live[regno] && ! call_used_regs[regno])
10476 saved += 12;
10478 /* Space for saved VFP registers. */
10479 if (TARGET_HARD_FLOAT && TARGET_VFP)
10480 saved += arm_get_vfp_saved_size ();
10483 else /* TARGET_THUMB */
10485 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10486 if (TARGET_BACKTRACE)
10487 saved += 16;
10490 /* Saved registers include the stack frame. */
10491 offsets->saved_regs = offsets->saved_args + saved;
10492 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10493 /* A leaf function does not need any stack alignment if it has nothing
10494 on the stack. */
10495 if (leaf && frame_size == 0)
10497 offsets->outgoing_args = offsets->soft_frame;
10498 return offsets;
10501 /* Ensure SFP has the correct alignment. */
10502 if (ARM_DOUBLEWORD_ALIGN
10503 && (offsets->soft_frame & 7))
10504 offsets->soft_frame += 4;
10506 offsets->locals_base = offsets->soft_frame + frame_size;
10507 offsets->outgoing_args = (offsets->locals_base
10508 + current_function_outgoing_args_size);
10510 if (ARM_DOUBLEWORD_ALIGN)
10512 /* Ensure SP remains doubleword aligned. */
10513 if (offsets->outgoing_args & 7)
10514 offsets->outgoing_args += 4;
10515 gcc_assert (!(offsets->outgoing_args & 7));
10518 return offsets;
10522 /* Calculate the relative offsets for the different stack pointers. Positive
10523 offsets are in the direction of stack growth. */
10525 HOST_WIDE_INT
10526 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10528 arm_stack_offsets *offsets;
10530 offsets = arm_get_frame_offsets ();
10532 /* OK, now we have enough information to compute the distances.
10533 There must be an entry in these switch tables for each pair
10534 of registers in ELIMINABLE_REGS, even if some of the entries
10535 seem to be redundant or useless. */
10536 switch (from)
10538 case ARG_POINTER_REGNUM:
10539 switch (to)
10541 case THUMB_HARD_FRAME_POINTER_REGNUM:
10542 return 0;
10544 case FRAME_POINTER_REGNUM:
10545 /* This is the reverse of the soft frame pointer
10546 to hard frame pointer elimination below. */
10547 return offsets->soft_frame - offsets->saved_args;
10549 case ARM_HARD_FRAME_POINTER_REGNUM:
10550 /* If there is no stack frame then the hard
10551 frame pointer and the arg pointer coincide. */
10552 if (offsets->frame == offsets->saved_regs)
10553 return 0;
10554 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10555 return (frame_pointer_needed
10556 && cfun->static_chain_decl != NULL
10557 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10559 case STACK_POINTER_REGNUM:
10560 /* If nothing has been pushed on the stack at all
10561 then this will return -4. This *is* correct! */
10562 return offsets->outgoing_args - (offsets->saved_args + 4);
10564 default:
10565 gcc_unreachable ();
10567 gcc_unreachable ();
10569 case FRAME_POINTER_REGNUM:
10570 switch (to)
10572 case THUMB_HARD_FRAME_POINTER_REGNUM:
10573 return 0;
10575 case ARM_HARD_FRAME_POINTER_REGNUM:
10576 /* The hard frame pointer points to the top entry in the
10577 stack frame. The soft frame pointer points to the bottom entry
10578 in the stack frame. If there is no stack frame at all,
10579 then they are identical. */
10581 return offsets->frame - offsets->soft_frame;
10583 case STACK_POINTER_REGNUM:
10584 return offsets->outgoing_args - offsets->soft_frame;
10586 default:
10587 gcc_unreachable ();
10589 gcc_unreachable ();
10591 default:
10592 /* You cannot eliminate from the stack pointer.
10593 In theory you could eliminate from the hard frame
10594 pointer to the stack pointer, but this will never
10595 happen, since if a stack frame is not needed the
10596 hard frame pointer will never be used. */
10597 gcc_unreachable ();
10602 /* Generate the prologue instructions for entry into an ARM function. */
10603 void
10604 arm_expand_prologue (void)
10606 int reg;
10607 rtx amount;
10608 rtx insn;
10609 rtx ip_rtx;
10610 unsigned long live_regs_mask;
10611 unsigned long func_type;
10612 int fp_offset = 0;
10613 int saved_pretend_args = 0;
10614 int saved_regs = 0;
10615 unsigned HOST_WIDE_INT args_to_push;
10616 arm_stack_offsets *offsets;
10618 func_type = arm_current_func_type ();
10620 /* Naked functions don't have prologues. */
10621 if (IS_NAKED (func_type))
10622 return;
10624 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10625 args_to_push = current_function_pretend_args_size;
10627 /* Compute which registers we will have to save onto the stack. */
10628 live_regs_mask = arm_compute_save_reg_mask ();
10630 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10632 if (frame_pointer_needed)
10634 if (IS_INTERRUPT (func_type))
10636 /* Interrupt functions must not corrupt any registers.
10637 Creating a frame pointer, however, corrupts the IP
10638 register, so we must push it first. */
10639 insn = emit_multi_reg_push (1 << IP_REGNUM);
10641 /* Do not set RTX_FRAME_RELATED_P on this insn.
10642 The dwarf stack unwinding code only wants to see one
10643 stack decrement per function, and this is not it. If
10644 this instruction is labeled as being part of the frame
10645 creation sequence then dwarf2out_frame_debug_expr will
10646 die when it encounters the assignment of IP to FP
10647 later on, since the use of SP here establishes SP as
10648 the CFA register and not IP.
10650 Anyway this instruction is not really part of the stack
10651 frame creation although it is part of the prologue. */
10653 else if (IS_NESTED (func_type))
10655 /* The static chain register is the same as the IP register,
10656 which is used as a scratch register during stack frame creation.
10657 To get around this we need to find somewhere to store IP
10658 whilst the frame is being created. We try the following
10659 places in order:
10661 1. The last argument register.
10662 2. A slot on the stack above the frame. (This only
10663 works if the function is not a varargs function).
10664 3. Register r3, after pushing the argument registers
10665 onto the stack.
10667 Note - we only need to tell the dwarf2 backend about the SP
10668 adjustment in the second variant; the static chain register
10669 doesn't need to be unwound, as it doesn't contain a value
10670 inherited from the caller. */
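/* As a hand-written illustration of the three cases above (assumed
   code, not verbatim output):

     1:	mov	r3, ip
     2:	str	ip, [sp, #-4]!
     3:	stmfd	sp!, {r0-r3}	@ push the anonymous args first
	mov	r3, ip

   The code below selects between them.  */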
10672 if (regs_ever_live[3] == 0)
10673 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10674 else if (args_to_push == 0)
10676 rtx dwarf;
10678 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10679 insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
10680 fp_offset = 4;
10682 /* Just tell the dwarf backend that we adjusted SP. */
10683 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10684 plus_constant (stack_pointer_rtx,
10685 -fp_offset));
10686 RTX_FRAME_RELATED_P (insn) = 1;
10687 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10688 dwarf, REG_NOTES (insn));
10690 else
10692 /* Store the args on the stack. */
10693 if (cfun->machine->uses_anonymous_args)
10694 insn = emit_multi_reg_push
10695 ((0xf0 >> (args_to_push / 4)) & 0xf);
10696 else
10697 insn = emit_insn
10698 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10699 GEN_INT (- args_to_push)));
10701 RTX_FRAME_RELATED_P (insn) = 1;
10703 saved_pretend_args = 1;
10704 fp_offset = args_to_push;
10705 args_to_push = 0;
10707 /* Now reuse r3 to preserve IP. */
10708 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10712 insn = emit_set_insn (ip_rtx,
10713 plus_constant (stack_pointer_rtx, fp_offset));
10714 RTX_FRAME_RELATED_P (insn) = 1;
10717 if (args_to_push)
10719 /* Push the argument registers, or reserve space for them. */
10720 if (cfun->machine->uses_anonymous_args)
10721 insn = emit_multi_reg_push
10722 ((0xf0 >> (args_to_push / 4)) & 0xf);
10723 else
10724 insn = emit_insn
10725 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10726 GEN_INT (- args_to_push)));
10727 RTX_FRAME_RELATED_P (insn) = 1;
10730 /* If this is an interrupt service routine, and the link register
10731 is going to be pushed, and we are not creating a stack frame
10732 (which would involve an extra push of IP and a pop in the epilogue),
10733 then subtracting four from LR now will mean that the function return
10734 can be done with a single instruction. */
10735 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10736 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10737 && ! frame_pointer_needed)
10739 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
10741 emit_set_insn (lr, plus_constant (lr, -4));
10744 if (live_regs_mask)
10746 insn = emit_multi_reg_push (live_regs_mask);
10747 saved_regs += bit_count (live_regs_mask) * 4;
10748 RTX_FRAME_RELATED_P (insn) = 1;
10751 if (TARGET_IWMMXT)
10752 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10753 if (regs_ever_live[reg] && ! call_used_regs [reg])
10755 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10756 insn = gen_frame_mem (V2SImode, insn);
10757 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
10758 RTX_FRAME_RELATED_P (insn) = 1;
10759 saved_regs += 8;
10762 if (! IS_VOLATILE (func_type))
10764 int start_reg;
10766 /* Save any floating point call-saved registers used by this
10767 function. */
10768 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10770 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10771 if (regs_ever_live[reg] && !call_used_regs[reg])
10773 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10774 insn = gen_frame_mem (XFmode, insn);
10775 insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
10776 RTX_FRAME_RELATED_P (insn) = 1;
10777 saved_regs += 12;
10780 else
10782 start_reg = LAST_FPA_REGNUM;
10784 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10786 if (regs_ever_live[reg] && !call_used_regs[reg])
10788 if (start_reg - reg == 3)
10790 insn = emit_sfm (reg, 4);
10791 RTX_FRAME_RELATED_P (insn) = 1;
10792 saved_regs += 48;
10793 start_reg = reg - 1;
10796 else
10798 if (start_reg != reg)
10800 insn = emit_sfm (reg + 1, start_reg - reg);
10801 RTX_FRAME_RELATED_P (insn) = 1;
10802 saved_regs += (start_reg - reg) * 12;
10804 start_reg = reg - 1;
10808 if (start_reg != reg)
10810 insn = emit_sfm (reg + 1, start_reg - reg);
10811 saved_regs += (start_reg - reg) * 12;
10812 RTX_FRAME_RELATED_P (insn) = 1;
10815 if (TARGET_HARD_FLOAT && TARGET_VFP)
10817 start_reg = FIRST_VFP_REGNUM;
10819 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10821 if ((!regs_ever_live[reg] || call_used_regs[reg])
10822 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10824 if (start_reg != reg)
10825 saved_regs += vfp_emit_fstmx (start_reg,
10826 (reg - start_reg) / 2);
10827 start_reg = reg + 2;
10830 if (start_reg != reg)
10831 saved_regs += vfp_emit_fstmx (start_reg,
10832 (reg - start_reg) / 2);
10836 if (frame_pointer_needed)
10838 /* Create the new frame pointer. */
10839 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10840 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10841 RTX_FRAME_RELATED_P (insn) = 1;
10843 if (IS_NESTED (func_type))
10845 /* Recover the static chain register. */
10846 if (regs_ever_live [3] == 0
10847 || saved_pretend_args)
10848 insn = gen_rtx_REG (SImode, 3);
10849 else /* if (current_function_pretend_args_size == 0) */
10851 insn = plus_constant (hard_frame_pointer_rtx, 4);
10852 insn = gen_frame_mem (SImode, insn);
10855 emit_set_insn (ip_rtx, insn);
10856 /* Add a USE to stop propagate_one_insn() from barfing. */
10857 emit_insn (gen_prologue_use (ip_rtx));
10861 offsets = arm_get_frame_offsets ();
10862 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10864 /* This add can produce multiple insns for a large constant, so we
10865 need to get tricky. */
10866 rtx last = get_last_insn ();
10868 amount = GEN_INT (offsets->saved_args + saved_regs
10869 - offsets->outgoing_args);
10871 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10872 amount));
10873 do
10874 {
10875 last = last ? NEXT_INSN (last) : get_insns ();
10876 RTX_FRAME_RELATED_P (last) = 1;
10877 }
10878 while (last != insn);
10880 /* If the frame pointer is needed, emit a special barrier that
10881 will prevent the scheduler from moving stores to the frame
10882 before the stack adjustment. */
10883 if (frame_pointer_needed)
10884 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10885 hard_frame_pointer_rtx));
10889 if (flag_pic && arm_pic_register != INVALID_REGNUM)
10890 arm_load_pic_register (0UL);
10892 /* If we are profiling, make sure no instructions are scheduled before
10893 the call to mcount. Similarly if the user has requested no
10894 scheduling in the prologue. Similarly if we want non-call exceptions
10895 using the EABI unwinder, to prevent faulting instructions from being
10896 swapped with a stack adjustment. */
10897 if (current_function_profile || !TARGET_SCHED_PROLOG
10898 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
10899 emit_insn (gen_blockage ());
10901 /* If the link register is being kept alive, with the return address in it,
10902 then make sure that it does not get reused by the ce2 pass. */
10903 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10905 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10906 cfun->machine->lr_save_eliminated = 1;
10910 /* If CODE is 'd', then X is a condition operand and the instruction
10911 should only be executed if the condition is true.
10912 If CODE is 'D', then X is a condition operand and the instruction
10913 should only be executed if the condition is false: however, if the mode
10914 of the comparison is CCFPEmode, then always execute the instruction -- we
10915 do this because in these circumstances !GE does not necessarily imply LT;
10916 in these cases the instruction pattern will take care to make sure that
10917 an instruction containing %d will follow, thereby undoing the effects of
10918 doing this instruction unconditionally.
10919 If CODE is 'N' then X is a floating point operand that must be negated
10920 before output.
10921 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10922 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
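/* For example (illustrative only): for r0 holding a DImode value,
   "%M0" in an output template prints "{r0-r1}"; for a comparison
   operand such as (ge ...), "%d" prints "ge" while "%D" prints the
   inverse condition "lt".  */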
10923 void
10924 arm_print_operand (FILE *stream, rtx x, int code)
10926 switch (code)
10928 case '@':
10929 fputs (ASM_COMMENT_START, stream);
10930 return;
10932 case '_':
10933 fputs (user_label_prefix, stream);
10934 return;
10936 case '|':
10937 fputs (REGISTER_PREFIX, stream);
10938 return;
10940 case '?':
10941 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10943 if (TARGET_THUMB)
10945 output_operand_lossage ("predicated Thumb instruction");
10946 break;
10948 if (current_insn_predicate != NULL)
10950 output_operand_lossage
10951 ("predicated instruction in conditional sequence");
10952 break;
10955 fputs (arm_condition_codes[arm_current_cc], stream);
10957 else if (current_insn_predicate)
10959 enum arm_cond_code code;
10961 if (TARGET_THUMB)
10963 output_operand_lossage ("predicated Thumb instruction");
10964 break;
10967 code = get_arm_condition_code (current_insn_predicate);
10968 fputs (arm_condition_codes[code], stream);
10970 return;
10972 case 'N':
10974 REAL_VALUE_TYPE r;
10975 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10976 r = REAL_VALUE_NEGATE (r);
10977 fprintf (stream, "%s", fp_const_from_val (&r));
10979 return;
10981 case 'B':
10982 if (GET_CODE (x) == CONST_INT)
10984 HOST_WIDE_INT val;
10985 val = ARM_SIGN_EXTEND (~INTVAL (x));
10986 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10988 else
10990 putc ('~', stream);
10991 output_addr_const (stream, x);
10993 return;
10995 case 'i':
10996 fprintf (stream, "%s", arithmetic_instr (x, 1));
10997 return;
10999 /* Truncate Cirrus shift counts. */
11000 case 's':
11001 if (GET_CODE (x) == CONST_INT)
11003 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
11004 return;
11006 arm_print_operand (stream, x, 0);
11007 return;
11009 case 'I':
11010 fprintf (stream, "%s", arithmetic_instr (x, 0));
11011 return;
11013 case 'S':
11015 HOST_WIDE_INT val;
11016 const char * shift = shift_op (x, &val);
11018 if (shift)
11020 fprintf (stream, ", %s ", shift_op (x, &val));
11021 if (val == -1)
11022 arm_print_operand (stream, XEXP (x, 1), 0);
11023 else
11024 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
11027 return;
11029 /* An explanation of the 'Q', 'R' and 'H' register operands:
11031 In a pair of registers containing a DI or DF value the 'Q'
11032 operand returns the register number of the register containing
11033 the least significant part of the value. The 'R' operand returns
11034 the register number of the register containing the most
11035 significant part of the value.
11037 The 'H' operand returns the higher of the two register numbers.
11038 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
11039 same as the 'Q' operand, since the most significant part of the
11040 value is held in the lower number register. The reverse is true
11041 on systems where WORDS_BIG_ENDIAN is false.
11043 The purpose of these operands is to distinguish between cases
11044 where the endian-ness of the values is important (for example
11045 when they are added together), and cases where the endian-ness
11046 is irrelevant, but the order of register operations is important.
11047 For example when loading a value from memory into a register
11048 pair, the endian-ness does not matter. Provided that the value
11049 from the lower memory address is put into the lower numbered
11050 register, and the value from the higher address is put into the
11051 higher numbered register, the load will work regardless of whether
11052 the value being loaded is big-wordian or little-wordian. The
11053 order of the two register loads can matter however, if the address
11054 of the memory location is actually held in one of the registers
11055 being overwritten by the load. */
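/* For example (illustrative): for a DImode value held in r0/r1 on a
   little-endian target, %Q prints r0 (the least significant word),
   %R prints r1 (the most significant word) and %H prints r1, the
   higher-numbered register, whatever the word order.  */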
11056 case 'Q':
11057 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11059 output_operand_lossage ("invalid operand for code '%c'", code);
11060 return;
11063 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
11064 return;
11066 case 'R':
11067 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11069 output_operand_lossage ("invalid operand for code '%c'", code);
11070 return;
11073 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
11074 return;
11076 case 'H':
11077 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11079 output_operand_lossage ("invalid operand for code '%c'", code);
11080 return;
11083 asm_fprintf (stream, "%r", REGNO (x) + 1);
11084 return;
11086 case 'm':
11087 asm_fprintf (stream, "%r",
11088 GET_CODE (XEXP (x, 0)) == REG
11089 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
11090 return;
11092 case 'M':
11093 asm_fprintf (stream, "{%r-%r}",
11094 REGNO (x),
11095 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
11096 return;
11098 case 'd':
11099 /* CONST_TRUE_RTX means always -- that's the default. */
11100 if (x == const_true_rtx)
11101 return;
11103 if (!COMPARISON_P (x))
11105 output_operand_lossage ("invalid operand for code '%c'", code);
11106 return;
11109 fputs (arm_condition_codes[get_arm_condition_code (x)],
11110 stream);
11111 return;
11113 case 'D':
11114 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11115 want to do that. */
11116 if (x == const_true_rtx)
11118 output_operand_lossage ("instruction never executed");
11119 return;
11121 if (!COMPARISON_P (x))
11123 output_operand_lossage ("invalid operand for code '%c'", code);
11124 return;
11127 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
11128 (get_arm_condition_code (x))],
11129 stream);
11130 return;
11132 /* Cirrus registers can be accessed in a variety of ways:
11133 single floating point (f)
11134 double floating point (d)
11135 32bit integer (fx)
11136 64bit integer (dx). */
11137 case 'W': /* Cirrus register in F mode. */
11138 case 'X': /* Cirrus register in D mode. */
11139 case 'Y': /* Cirrus register in FX mode. */
11140 case 'Z': /* Cirrus register in DX mode. */
11141 gcc_assert (GET_CODE (x) == REG
11142 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
11144 fprintf (stream, "mv%s%s",
11145 code == 'W' ? "f"
11146 : code == 'X' ? "d"
11147 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
11149 return;
11151 /* Print a Cirrus register in the format implied by the register's mode. */
11152 case 'V':
11154 int mode = GET_MODE (x);
11156 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
11158 output_operand_lossage ("invalid operand for code '%c'", code);
11159 return;
11162 fprintf (stream, "mv%s%s",
11163 mode == DFmode ? "d"
11164 : mode == SImode ? "fx"
11165 : mode == DImode ? "dx"
11166 : "f", reg_names[REGNO (x)] + 2);
11168 return;
11171 case 'U':
11172 if (GET_CODE (x) != REG
11173 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
11174 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
11175 /* Bad value for wCG register number. */
11177 output_operand_lossage ("invalid operand for code '%c'", code);
11178 return;
11181 else
11182 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
11183 return;
11185 /* Print an iWMMXt control register name. */
11186 case 'w':
11187 if (GET_CODE (x) != CONST_INT
11188 || INTVAL (x) < 0
11189 || INTVAL (x) >= 16)
11190 /* Bad value for wC register number. */
11192 output_operand_lossage ("invalid operand for code '%c'", code);
11193 return;
11196 else
11198 static const char * wc_reg_names [16] =
11199 {
11200 "wCID", "wCon", "wCSSF", "wCASF",
11201 "wC4", "wC5", "wC6", "wC7",
11202 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
11203 "wC12", "wC13", "wC14", "wC15"
11204 };
11206 fprintf (stream, wc_reg_names [INTVAL (x)]);
11208 return;
11210 /* Print a VFP double precision register name. */
11211 case 'P':
11213 int mode = GET_MODE (x);
11214 int num;
11216 if (mode != DImode && mode != DFmode)
11218 output_operand_lossage ("invalid operand for code '%c'", code);
11219 return;
11222 if (GET_CODE (x) != REG
11223 || !IS_VFP_REGNUM (REGNO (x)))
11225 output_operand_lossage ("invalid operand for code '%c'", code);
11226 return;
11229 num = REGNO(x) - FIRST_VFP_REGNUM;
11230 if (num & 1)
11232 output_operand_lossage ("invalid operand for code '%c'", code);
11233 return;
11236 fprintf (stream, "d%d", num >> 1);
11238 return;
11240 default:
11241 if (x == 0)
11243 output_operand_lossage ("missing operand");
11244 return;
11247 switch (GET_CODE (x))
11249 case REG:
11250 asm_fprintf (stream, "%r", REGNO (x));
11251 break;
11253 case MEM:
11254 output_memory_reference_mode = GET_MODE (x);
11255 output_address (XEXP (x, 0));
11256 break;
11258 case CONST_DOUBLE:
11259 fprintf (stream, "#%s", fp_immediate_constant (x));
11260 break;
11262 default:
11263 gcc_assert (GET_CODE (x) != NEG);
11264 fputc ('#', stream);
11265 output_addr_const (stream, x);
11266 break;
11271 #ifndef AOF_ASSEMBLER
11272 /* Target hook for assembling integer objects. The ARM version needs to
11273 handle word-sized values specially. */
11274 static bool
11275 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
11277 if (size == UNITS_PER_WORD && aligned_p)
11279 fputs ("\t.word\t", asm_out_file);
11280 output_addr_const (asm_out_file, x);
11282 /* Mark symbols as position independent. We only do this in the
11283 .text segment, not in the .data segment. */
11284 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
11285 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
11287 if (GET_CODE (x) == SYMBOL_REF
11288 && (CONSTANT_POOL_ADDRESS_P (x)
11289 || SYMBOL_REF_LOCAL_P (x)))
11290 fputs ("(GOTOFF)", asm_out_file);
11291 else if (GET_CODE (x) == LABEL_REF)
11292 fputs ("(GOTOFF)", asm_out_file);
11293 else
11294 fputs ("(GOT)", asm_out_file);
11296 fputc ('\n', asm_out_file);
11297 return true;
11300 if (arm_vector_mode_supported_p (GET_MODE (x)))
11302 int i, units;
11304 gcc_assert (GET_CODE (x) == CONST_VECTOR);
11306 units = CONST_VECTOR_NUNITS (x);
11308 switch (GET_MODE (x))
11310 case V2SImode: size = 4; break;
11311 case V4HImode: size = 2; break;
11312 case V8QImode: size = 1; break;
11313 default:
11314 gcc_unreachable ();
11317 for (i = 0; i < units; i++)
11319 rtx elt;
11321 elt = CONST_VECTOR_ELT (x, i);
11322 assemble_integer
11323 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
11326 return true;
11329 return default_assemble_integer (x, size, aligned_p);
11333 /* Add a function to the list of static constructors. */
11335 static void
11336 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
11338 if (!TARGET_AAPCS_BASED)
11340 default_named_section_asm_out_constructor (symbol, priority);
11341 return;
11344 /* Put these in the .init_array section, using a special relocation. */
11345 switch_to_section (ctors_section);
11346 assemble_align (POINTER_SIZE);
11347 fputs ("\t.word\t", asm_out_file);
11348 output_addr_const (asm_out_file, symbol);
11349 fputs ("(target1)\n", asm_out_file);
11351 #endif
11353 /* A finite state machine takes care of noticing whether or not instructions
11354 can be conditionally executed, and thus decreases execution time and code
11355 size by deleting branch instructions. The fsm is controlled by
11356 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
11358 /* The states of the fsm controlling condition codes are:
11359 0: normal, do nothing special
11360 1: make ASM_OUTPUT_OPCODE not output this instruction
11361 2: make ASM_OUTPUT_OPCODE not output this instruction
11362 3: make instructions conditional
11363 4: make instructions conditional
11365 State transitions (state->state by whom under condition):
11366 0 -> 1 final_prescan_insn if the `target' is a label
11367 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
11368 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
11369 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
11370 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
11371 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
11372 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
11373 (the target insn is arm_target_insn).
11375 If the jump clobbers the conditions then we use states 2 and 4.
11377 A similar thing can be done with conditional return insns.
11379 XXX In case the `target' is an unconditional branch, this conditionalising
11380 of the instructions always reduces code size, but not always execution
11381 time. But then, I want to reduce the code size to somewhere near what
11382 /bin/cc produces. */
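/* As a hand-written illustration (not compiler output), the fsm
   turns a branch-over sequence such as

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   into the branchless

	cmp	r0, #0
	addne	r1, r1, #1

   provided the skipped insns can be conditionalized and are few
   enough (see max_insns_skipped below).  */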
11384 /* Returns the index of the ARM condition code string in
11385 `arm_condition_codes'. COMPARISON should be an rtx like
11386 `(eq (...) (...))'. */
11387 static enum arm_cond_code
11388 get_arm_condition_code (rtx comparison)
11390 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11391 int code;
11392 enum rtx_code comp_code = GET_CODE (comparison);
11394 if (GET_MODE_CLASS (mode) != MODE_CC)
11395 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11396 XEXP (comparison, 1));
11398 switch (mode)
11400 case CC_DNEmode: code = ARM_NE; goto dominance;
11401 case CC_DEQmode: code = ARM_EQ; goto dominance;
11402 case CC_DGEmode: code = ARM_GE; goto dominance;
11403 case CC_DGTmode: code = ARM_GT; goto dominance;
11404 case CC_DLEmode: code = ARM_LE; goto dominance;
11405 case CC_DLTmode: code = ARM_LT; goto dominance;
11406 case CC_DGEUmode: code = ARM_CS; goto dominance;
11407 case CC_DGTUmode: code = ARM_HI; goto dominance;
11408 case CC_DLEUmode: code = ARM_LS; goto dominance;
11409 case CC_DLTUmode: code = ARM_CC;
11411 dominance:
11412 gcc_assert (comp_code == EQ || comp_code == NE);
11414 if (comp_code == EQ)
11415 return ARM_INVERSE_CONDITION_CODE (code);
11416 return code;
11418 case CC_NOOVmode:
11419 switch (comp_code)
11421 case NE: return ARM_NE;
11422 case EQ: return ARM_EQ;
11423 case GE: return ARM_PL;
11424 case LT: return ARM_MI;
11425 default: gcc_unreachable ();
11428 case CC_Zmode:
11429 switch (comp_code)
11431 case NE: return ARM_NE;
11432 case EQ: return ARM_EQ;
11433 default: gcc_unreachable ();
11436 case CC_Nmode:
11437 switch (comp_code)
11439 case NE: return ARM_MI;
11440 case EQ: return ARM_PL;
11441 default: gcc_unreachable ();
11444 case CCFPEmode:
11445 case CCFPmode:
11446 /* These encodings assume that AC=1 in the FPA system control
11447 byte. This allows us to handle all cases except UNEQ and
11448 LTGT. */
11449 switch (comp_code)
11451 case GE: return ARM_GE;
11452 case GT: return ARM_GT;
11453 case LE: return ARM_LS;
11454 case LT: return ARM_MI;
11455 case NE: return ARM_NE;
11456 case EQ: return ARM_EQ;
11457 case ORDERED: return ARM_VC;
11458 case UNORDERED: return ARM_VS;
11459 case UNLT: return ARM_LT;
11460 case UNLE: return ARM_LE;
11461 case UNGT: return ARM_HI;
11462 case UNGE: return ARM_PL;
11463 /* UNEQ and LTGT do not have a representation. */
11464 case UNEQ: /* Fall through. */
11465 case LTGT: /* Fall through. */
11466 default: gcc_unreachable ();
11469 case CC_SWPmode:
11470 switch (comp_code)
11472 case NE: return ARM_NE;
11473 case EQ: return ARM_EQ;
11474 case GE: return ARM_LE;
11475 case GT: return ARM_LT;
11476 case LE: return ARM_GE;
11477 case LT: return ARM_GT;
11478 case GEU: return ARM_LS;
11479 case GTU: return ARM_CC;
11480 case LEU: return ARM_CS;
11481 case LTU: return ARM_HI;
11482 default: gcc_unreachable ();
11485 case CC_Cmode:
11486 switch (comp_code)
11488 case LTU: return ARM_CS;
11489 case GEU: return ARM_CC;
11490 default: gcc_unreachable ();
11493 case CCmode:
11494 switch (comp_code)
11496 case NE: return ARM_NE;
11497 case EQ: return ARM_EQ;
11498 case GE: return ARM_GE;
11499 case GT: return ARM_GT;
11500 case LE: return ARM_LE;
11501 case LT: return ARM_LT;
11502 case GEU: return ARM_CS;
11503 case GTU: return ARM_HI;
11504 case LEU: return ARM_LS;
11505 case LTU: return ARM_CC;
11506 default: gcc_unreachable ();
11509 default: gcc_unreachable ();
11513 void
11514 arm_final_prescan_insn (rtx insn)
11516 /* BODY will hold the body of INSN. */
11517 rtx body = PATTERN (insn);
11519 /* This will be 1 if trying to repeat the trick, and things need to be
11520 reversed if it appears to fail. */
11521 int reverse = 0;
11523 /* If JUMP_CLOBBERS is one, then the condition codes are clobbered
11524 if a branch is taken, even if the rtl suggests otherwise. It also
11525 means that we have to grub around within the jump expression to find
11526 out what the conditions are when the jump isn't taken. */
11527 int jump_clobbers = 0;
11529 /* If we start with a return insn, we only succeed if we find another one. */
11530 int seeking_return = 0;
11532 /* START_INSN will hold the insn from where we start looking. This is the
11533 first insn after the following code_label if REVERSE is true. */
11534 rtx start_insn = insn;
11536 /* If in state 4, check if the target branch is reached, in order to
11537 change back to state 0. */
11538 if (arm_ccfsm_state == 4)
11540 if (insn == arm_target_insn)
11542 arm_target_insn = NULL;
11543 arm_ccfsm_state = 0;
11545 return;
11548 /* If in state 3, it is possible to repeat the trick, if this insn is an
11549 unconditional branch to a label, and immediately following this branch
11550 is the previous target label which is only used once, and the label this
11551 branch jumps to is not too far off. */
11552 if (arm_ccfsm_state == 3)
11554 if (simplejump_p (insn))
11556 start_insn = next_nonnote_insn (start_insn);
11557 if (GET_CODE (start_insn) == BARRIER)
11559 /* XXX Isn't this always a barrier? */
11560 start_insn = next_nonnote_insn (start_insn);
11562 if (GET_CODE (start_insn) == CODE_LABEL
11563 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11564 && LABEL_NUSES (start_insn) == 1)
11565 reverse = TRUE;
11566 else
11567 return;
11569 else if (GET_CODE (body) == RETURN)
11571 start_insn = next_nonnote_insn (start_insn);
11572 if (GET_CODE (start_insn) == BARRIER)
11573 start_insn = next_nonnote_insn (start_insn);
11574 if (GET_CODE (start_insn) == CODE_LABEL
11575 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11576 && LABEL_NUSES (start_insn) == 1)
11578 reverse = TRUE;
11579 seeking_return = 1;
11581 else
11582 return;
11584 else
11585 return;
11588 gcc_assert (!arm_ccfsm_state || reverse);
11589 if (GET_CODE (insn) != JUMP_INSN)
11590 return;
11592 /* This jump might be paralleled with a clobber of the condition codes;
11593 the jump should always come first. */
11594 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11595 body = XVECEXP (body, 0, 0);
11597 if (reverse
11598 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11599 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11601 int insns_skipped;
11602 int fail = FALSE, succeed = FALSE;
11603 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11604 int then_not_else = TRUE;
11605 rtx this_insn = start_insn, label = 0;
11607 /* If the jump cannot be done with one instruction, we cannot
11608 conditionally execute the instruction in the inverse case. */
11609 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11611 jump_clobbers = 1;
11612 return;
11615 /* Register the insn jumped to. */
11616 if (reverse)
11618 if (!seeking_return)
11619 label = XEXP (SET_SRC (body), 0);
11621 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11622 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11623 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11625 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11626 then_not_else = FALSE;
11628 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11629 seeking_return = 1;
11630 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11632 seeking_return = 1;
11633 then_not_else = FALSE;
11635 else
11636 gcc_unreachable ();
11638 /* See how many insns this branch skips, and what kind of insns. If all
11639 insns are okay, and the label or unconditional branch to the same
11640 label is not too far away, succeed. */
11641 for (insns_skipped = 0;
11642 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11644 rtx scanbody;
11646 this_insn = next_nonnote_insn (this_insn);
11647 if (!this_insn)
11648 break;
11650 switch (GET_CODE (this_insn))
11652 case CODE_LABEL:
11653 /* Succeed if it is the target label, otherwise fail since
11654 control falls in from somewhere else. */
11655 if (this_insn == label)
11657 if (jump_clobbers)
11659 arm_ccfsm_state = 2;
11660 this_insn = next_nonnote_insn (this_insn);
11662 else
11663 arm_ccfsm_state = 1;
11664 succeed = TRUE;
11666 else
11667 fail = TRUE;
11668 break;
11670 case BARRIER:
11671 /* Succeed if the following insn is the target label.
11672 Otherwise fail.
11673 If return insns are used then the last insn in a function
11674 will be a barrier. */
11675 this_insn = next_nonnote_insn (this_insn);
11676 if (this_insn && this_insn == label)
11678 if (jump_clobbers)
11680 arm_ccfsm_state = 2;
11681 this_insn = next_nonnote_insn (this_insn);
11683 else
11684 arm_ccfsm_state = 1;
11685 succeed = TRUE;
11687 else
11688 fail = TRUE;
11689 break;
11691 case CALL_INSN:
11692 /* The AAPCS says that conditional calls should not be
11693 used since they make interworking inefficient (the
11694 linker can't transform BL<cond> into BLX). That's
11695 only a problem if the machine has BLX. */
11696 if (arm_arch5)
11698 fail = TRUE;
11699 break;
11702 /* Succeed if the following insn is the target label, or
11703 if the following two insns are a barrier and the
11704 target label. */
11705 this_insn = next_nonnote_insn (this_insn);
11706 if (this_insn && GET_CODE (this_insn) == BARRIER)
11707 this_insn = next_nonnote_insn (this_insn);
11709 if (this_insn && this_insn == label
11710 && insns_skipped < max_insns_skipped)
11712 if (jump_clobbers)
11714 arm_ccfsm_state = 2;
11715 this_insn = next_nonnote_insn (this_insn);
11717 else
11718 arm_ccfsm_state = 1;
11719 succeed = TRUE;
11721 else
11722 fail = TRUE;
11723 break;
11725 case JUMP_INSN:
11726 /* If this is an unconditional branch to the same label, succeed.
11727 If it is to another label, do nothing. If it is conditional,
11728 fail. */
11729 /* XXX Probably, the tests for SET and the PC are
11730 unnecessary. */
11732 scanbody = PATTERN (this_insn);
11733 if (GET_CODE (scanbody) == SET
11734 && GET_CODE (SET_DEST (scanbody)) == PC)
11736 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11737 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11739 arm_ccfsm_state = 2;
11740 succeed = TRUE;
11742 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11743 fail = TRUE;
11745 /* Fail if a conditional return is undesirable (e.g. on a
11746 StrongARM), but still allow this if optimizing for size. */
11747 else if (GET_CODE (scanbody) == RETURN
11748 && !use_return_insn (TRUE, NULL)
11749 && !optimize_size)
11750 fail = TRUE;
11751 else if (GET_CODE (scanbody) == RETURN
11752 && seeking_return)
11754 arm_ccfsm_state = 2;
11755 succeed = TRUE;
11757 else if (GET_CODE (scanbody) == PARALLEL)
11759 switch (get_attr_conds (this_insn))
11761 case CONDS_NOCOND:
11762 break;
11763 default:
11764 fail = TRUE;
11765 break;
11768 else
11769 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11771 break;
11773 case INSN:
11774 /* Instructions using or affecting the condition codes make it
11775 fail. */
11776 scanbody = PATTERN (this_insn);
11777 if (!(GET_CODE (scanbody) == SET
11778 || GET_CODE (scanbody) == PARALLEL)
11779 || get_attr_conds (this_insn) != CONDS_NOCOND)
11780 fail = TRUE;
11782 /* A conditional Cirrus instruction must be followed by
11783 a non-Cirrus instruction. However, since we
11784 conditionalize instructions in this function, and since
11785 by the time we get here we cannot add instructions
11786 (nops), because shorten_branches() has already been
11787 called, we disable the conditionalizing of Cirrus
11788 instructions to be safe. */
11789 if (GET_CODE (scanbody) != USE
11790 && GET_CODE (scanbody) != CLOBBER
11791 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11792 fail = TRUE;
11793 break;
11795 default:
11796 break;
11799 if (succeed)
11801 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11802 arm_target_label = CODE_LABEL_NUMBER (label);
11803 else
11805 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11807 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11809 this_insn = next_nonnote_insn (this_insn);
11810 gcc_assert (!this_insn
11811 || (GET_CODE (this_insn) != BARRIER
11812 && GET_CODE (this_insn) != CODE_LABEL));
11814 if (!this_insn)
11816 /* Oh, dear! We ran off the end... give up. */
11817 recog (PATTERN (insn), insn, NULL);
11818 arm_ccfsm_state = 0;
11819 arm_target_insn = NULL;
11820 return;
11822 arm_target_insn = this_insn;
11824 if (jump_clobbers)
11826 gcc_assert (!reverse);
11827 arm_current_cc =
11828 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11829 0), 0), 1));
11830 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11831 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11832 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11833 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11835 else
11837 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11838 what it was. */
11839 if (!reverse)
11840 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11841 0));
11844 if (reverse || then_not_else)
11845 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11848 /* Restore recog_data (getting the attributes of other insns can
11849 destroy this array, but final.c assumes that it remains intact
11850 across this call; since the insn has been recognized already we
11851 call recog directly). */
11852 recog (PATTERN (insn), insn, NULL);
11856 /* Returns true if REGNO is a valid register
11857 for holding a quantity of type MODE. */
11858 int
11859 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11861 if (GET_MODE_CLASS (mode) == MODE_CC)
11862 return (regno == CC_REGNUM
11863 || (TARGET_HARD_FLOAT && TARGET_VFP
11864 && regno == VFPCC_REGNUM));
11866 if (TARGET_THUMB)
11867 /* For the Thumb we only allow values bigger than SImode in
11868 registers 0 - 6, so that there is always a second low
11869 register available to hold the upper part of the value.
11870 We probably ought to ensure that the register is the
11871 start of an even-numbered register pair. */
11872 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11874 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
11875 && IS_CIRRUS_REGNUM (regno))
11876 /* We have outlawed SI values in Cirrus registers because they
11877 reside in the lower 32 bits, but SF values reside in the
11878 upper 32 bits. This causes gcc all sorts of grief. We can't
11879 even split the registers into pairs because Cirrus SI values
11880 get sign-extended to 64 bits -- aldyh. */
11881 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11883 if (TARGET_HARD_FLOAT && TARGET_VFP
11884 && IS_VFP_REGNUM (regno))
11886 if (mode == SFmode || mode == SImode)
11887 return TRUE;
11889 /* DFmode values are only valid in even register pairs. */
11890 if (mode == DFmode)
11891 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11892 return FALSE;
11895 if (TARGET_REALLY_IWMMXT)
11897 if (IS_IWMMXT_GR_REGNUM (regno))
11898 return mode == SImode;
11900 if (IS_IWMMXT_REGNUM (regno))
11901 return VALID_IWMMXT_REG_MODE (mode);
11904 /* We allow any value to be stored in the general registers.
11905 Restrict doubleword quantities to even register pairs so that we can
11906 use ldrd. */
11907 if (regno <= LAST_ARM_REGNUM)
11908 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11910 if (regno == FRAME_POINTER_REGNUM
11911 || regno == ARG_POINTER_REGNUM)
11912 /* We only allow integers in the fake hard registers. */
11913 return GET_MODE_CLASS (mode) == MODE_INT;
11915 /* The only registers left are the FPA registers
11916 which we only allow to hold FP values. */
11917 return (TARGET_HARD_FLOAT && TARGET_FPA
11918 && GET_MODE_CLASS (mode) == MODE_FLOAT
11919 && regno >= FIRST_FPA_REGNUM
11920 && regno <= LAST_FPA_REGNUM);
11923 int
11924 arm_regno_class (int regno)
11926 if (TARGET_THUMB)
11928 if (regno == STACK_POINTER_REGNUM)
11929 return STACK_REG;
11930 if (regno == CC_REGNUM)
11931 return CC_REG;
11932 if (regno < 8)
11933 return LO_REGS;
11934 return HI_REGS;
11937 if ( regno <= LAST_ARM_REGNUM
11938 || regno == FRAME_POINTER_REGNUM
11939 || regno == ARG_POINTER_REGNUM)
11940 return GENERAL_REGS;
11942 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11943 return NO_REGS;
11945 if (IS_CIRRUS_REGNUM (regno))
11946 return CIRRUS_REGS;
11948 if (IS_VFP_REGNUM (regno))
11949 return VFP_REGS;
11951 if (IS_IWMMXT_REGNUM (regno))
11952 return IWMMXT_REGS;
11954 if (IS_IWMMXT_GR_REGNUM (regno))
11955 return IWMMXT_GR_REGS;
11957 return FPA_REGS;
11960 /* Handle a special case when computing the offset
11961 of an argument from the frame pointer. */
11962 int
11963 arm_debugger_arg_offset (int value, rtx addr)
11965 rtx insn;
11967 /* We are only interested if dbxout_parms() failed to compute the offset. */
11968 if (value != 0)
11969 return 0;
11971 /* We can only cope with the case where the address is held in a register. */
11972 if (GET_CODE (addr) != REG)
11973 return 0;
11975 /* If we are using the frame pointer to point at the argument, then
11976 an offset of 0 is correct. */
11977 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11978 return 0;
11980 /* If we are using the stack pointer to point at the
11981 argument, then an offset of 0 is correct. */
11982 if ((TARGET_THUMB || !frame_pointer_needed)
11983 && REGNO (addr) == SP_REGNUM)
11984 return 0;
11986 /* Oh dear. The argument is pointed to by a register rather
11987 than being held in a register, or being stored at a known
11988 offset from the frame pointer. Since GDB only understands
11989 those two kinds of argument we must translate the address
11990 held in the register into an offset from the frame pointer.
11991 We do this by searching through the insns for the function
11992 looking to see where this register gets its value. If the
11993 register is initialized from the frame pointer plus an offset
11994 then we are in luck and we can continue, otherwise we give up.
11996 This code is exercised by producing debugging information
11997 for a function with arguments like this:
11999 double func (double a, double b, int c, double d) {return d;}
12001 Without this code the stab for parameter 'd' will be set to
12002 an offset of 0 from the frame pointer, rather than 8. */
12004 /* The if() statement says:
12006 If the insn is a normal instruction
12007 and if the insn is setting the value in a register
12008 and if the register being set is the register holding the address of the argument
12009 and if the address is computed by an addition
12010 that involves adding to a register
12011 which is the frame pointer
12012 a constant integer
12014 then... */
12016 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12018 if ( GET_CODE (insn) == INSN
12019 && GET_CODE (PATTERN (insn)) == SET
12020 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
12021 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
12022 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
12023 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
12024 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
12027 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
12029 break;
12033 if (value == 0)
12035 debug_rtx (addr);
12036 warning (0, "unable to compute real location of stacked parameter");
12037 value = 8; /* XXX magic hack */
12040 return value;
12043 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
12044 do \
12046 if ((MASK) & insn_flags) \
12047 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
12048 BUILT_IN_MD, NULL, NULL_TREE); \
12050 while (0)
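/* For illustration, an invocation further down such as

     def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void,
                   ARM_BUILTIN_WZERO);

   registers the builtin only when FL_IWMMXT is set in insn_flags,
   expanding to the guarded call

     lang_hooks.builtin_function ("__builtin_arm_wzero", di_ftype_void,
                                  ARM_BUILTIN_WZERO, BUILT_IN_MD,
                                  NULL, NULL_TREE);  */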
12052 struct builtin_description
12054 const unsigned int mask;
12055 const enum insn_code icode;
12056 const char * const name;
12057 const enum arm_builtins code;
12058 const enum rtx_code comparison;
12059 const unsigned int flag;
12062 static const struct builtin_description bdesc_2arg[] =
12064 #define IWMMXT_BUILTIN(code, string, builtin) \
12065 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
12066 ARM_BUILTIN_##builtin, 0, 0 },
12068 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
12069 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
12070 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
12071 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
12072 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
12073 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
12074 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
12075 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
12076 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
12077 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
12078 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
12079 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
12080 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
12081 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
12082 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
12083 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
12084 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
12085 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
12086 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
12087 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
12088 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
12089 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
12090 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
12091 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
12092 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
12093 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
12094 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
12095 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
12096 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
12097 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
12098 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
12099 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
12100 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
12101 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
12102 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
12103 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
12104 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
12105 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
12106 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
12107 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
12108 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
12109 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
12110 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
12111 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
12112 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
12113 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
12114 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
12115 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
12116 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
12117 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
12118 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
12119 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
12120 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
12121 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
12122 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
12123 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
12124 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
12125 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
12127 #define IWMMXT_BUILTIN2(code, builtin) \
12128 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
12130 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
12131 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
12132 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
12133 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
12134 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
12135 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
12136 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
12137 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
12138 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
12139 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
12140 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
12141 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
12142 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
12143 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
12144 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
12145 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
12146 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
12147 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
12148 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
12149 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
12150 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
12151 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
12152 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
12153 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
12154 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
12155 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
12156 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
12157 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
12158 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
12159 IWMMXT_BUILTIN2 (rordi3, WRORDI)
12160 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
12161 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
12164 static const struct builtin_description bdesc_1arg[] =
12166 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
12167 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
12168 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
12169 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
12170 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
12171 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
12172 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
12173 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
12174 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
12175 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
12176 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
12177 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
12178 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
12179 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
12180 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
12181 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
12182 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
12183 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
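/* A minimal user-level sketch (illustrative only, assuming a compiler
   configured for iWMMXt, e.g. -mcpu=iwmmxt, and a hypothetical v2si
   typedef) of how the entries in these tables are reached; the
   "waddw" entry in bdesc_2arg, for instance, is matched by
   arm_expand_builtin and expanded through arm_expand_binop_builtin:

     typedef int v2si __attribute__ ((vector_size (8)));

     v2si
     add_pairs (v2si a, v2si b)
     {
       return __builtin_arm_waddw (a, b);
     }
*/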
12186 /* Set up all the iWMMXt builtins. This is
12187 not called if TARGET_IWMMXT is zero. */
12189 static void
12190 arm_init_iwmmxt_builtins (void)
12192 const struct builtin_description * d;
12193 size_t i;
12194 tree endlink = void_list_node;
12196 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
12197 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
12198 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
12200 tree int_ftype_int
12201 = build_function_type (integer_type_node,
12202 tree_cons (NULL_TREE, integer_type_node, endlink));
12203 tree v8qi_ftype_v8qi_v8qi_int
12204 = build_function_type (V8QI_type_node,
12205 tree_cons (NULL_TREE, V8QI_type_node,
12206 tree_cons (NULL_TREE, V8QI_type_node,
12207 tree_cons (NULL_TREE,
12208 integer_type_node,
12209 endlink))));
12210 tree v4hi_ftype_v4hi_int
12211 = build_function_type (V4HI_type_node,
12212 tree_cons (NULL_TREE, V4HI_type_node,
12213 tree_cons (NULL_TREE, integer_type_node,
12214 endlink)));
12215 tree v2si_ftype_v2si_int
12216 = build_function_type (V2SI_type_node,
12217 tree_cons (NULL_TREE, V2SI_type_node,
12218 tree_cons (NULL_TREE, integer_type_node,
12219 endlink)));
12220 tree v2si_ftype_di_di
12221 = build_function_type (V2SI_type_node,
12222 tree_cons (NULL_TREE, long_long_integer_type_node,
12223 tree_cons (NULL_TREE, long_long_integer_type_node,
12224 endlink)));
12225 tree di_ftype_di_int
12226 = build_function_type (long_long_integer_type_node,
12227 tree_cons (NULL_TREE, long_long_integer_type_node,
12228 tree_cons (NULL_TREE, integer_type_node,
12229 endlink)));
12230 tree di_ftype_di_int_int
12231 = build_function_type (long_long_integer_type_node,
12232 tree_cons (NULL_TREE, long_long_integer_type_node,
12233 tree_cons (NULL_TREE, integer_type_node,
12234 tree_cons (NULL_TREE,
12235 integer_type_node,
12236 endlink))));
12237 tree int_ftype_v8qi
12238 = build_function_type (integer_type_node,
12239 tree_cons (NULL_TREE, V8QI_type_node,
12240 endlink));
12241 tree int_ftype_v4hi
12242 = build_function_type (integer_type_node,
12243 tree_cons (NULL_TREE, V4HI_type_node,
12244 endlink));
12245 tree int_ftype_v2si
12246 = build_function_type (integer_type_node,
12247 tree_cons (NULL_TREE, V2SI_type_node,
12248 endlink));
12249 tree int_ftype_v8qi_int
12250 = build_function_type (integer_type_node,
12251 tree_cons (NULL_TREE, V8QI_type_node,
12252 tree_cons (NULL_TREE, integer_type_node,
12253 endlink)));
12254 tree int_ftype_v4hi_int
12255 = build_function_type (integer_type_node,
12256 tree_cons (NULL_TREE, V4HI_type_node,
12257 tree_cons (NULL_TREE, integer_type_node,
12258 endlink)));
12259 tree int_ftype_v2si_int
12260 = build_function_type (integer_type_node,
12261 tree_cons (NULL_TREE, V2SI_type_node,
12262 tree_cons (NULL_TREE, integer_type_node,
12263 endlink)));
12264 tree v8qi_ftype_v8qi_int_int
12265 = build_function_type (V8QI_type_node,
12266 tree_cons (NULL_TREE, V8QI_type_node,
12267 tree_cons (NULL_TREE, integer_type_node,
12268 tree_cons (NULL_TREE,
12269 integer_type_node,
12270 endlink))));
12271 tree v4hi_ftype_v4hi_int_int
12272 = build_function_type (V4HI_type_node,
12273 tree_cons (NULL_TREE, V4HI_type_node,
12274 tree_cons (NULL_TREE, integer_type_node,
12275 tree_cons (NULL_TREE,
12276 integer_type_node,
12277 endlink))));
12278 tree v2si_ftype_v2si_int_int
12279 = build_function_type (V2SI_type_node,
12280 tree_cons (NULL_TREE, V2SI_type_node,
12281 tree_cons (NULL_TREE, integer_type_node,
12282 tree_cons (NULL_TREE,
12283 integer_type_node,
12284 endlink))));
12285 /* Miscellaneous. */
12286 tree v8qi_ftype_v4hi_v4hi
12287 = build_function_type (V8QI_type_node,
12288 tree_cons (NULL_TREE, V4HI_type_node,
12289 tree_cons (NULL_TREE, V4HI_type_node,
12290 endlink)));
12291 tree v4hi_ftype_v2si_v2si
12292 = build_function_type (V4HI_type_node,
12293 tree_cons (NULL_TREE, V2SI_type_node,
12294 tree_cons (NULL_TREE, V2SI_type_node,
12295 endlink)));
12296 tree v2si_ftype_v4hi_v4hi
12297 = build_function_type (V2SI_type_node,
12298 tree_cons (NULL_TREE, V4HI_type_node,
12299 tree_cons (NULL_TREE, V4HI_type_node,
12300 endlink)));
12301 tree v2si_ftype_v8qi_v8qi
12302 = build_function_type (V2SI_type_node,
12303 tree_cons (NULL_TREE, V8QI_type_node,
12304 tree_cons (NULL_TREE, V8QI_type_node,
12305 endlink)));
12306 tree v4hi_ftype_v4hi_di
12307 = build_function_type (V4HI_type_node,
12308 tree_cons (NULL_TREE, V4HI_type_node,
12309 tree_cons (NULL_TREE,
12310 long_long_integer_type_node,
12311 endlink)));
12312 tree v2si_ftype_v2si_di
12313 = build_function_type (V2SI_type_node,
12314 tree_cons (NULL_TREE, V2SI_type_node,
12315 tree_cons (NULL_TREE,
12316 long_long_integer_type_node,
12317 endlink)));
12318 tree void_ftype_int_int
12319 = build_function_type (void_type_node,
12320 tree_cons (NULL_TREE, integer_type_node,
12321 tree_cons (NULL_TREE, integer_type_node,
12322 endlink)));
12323 tree di_ftype_void
12324 = build_function_type (long_long_unsigned_type_node, endlink);
12325 tree di_ftype_v8qi
12326 = build_function_type (long_long_integer_type_node,
12327 tree_cons (NULL_TREE, V8QI_type_node,
12328 endlink));
12329 tree di_ftype_v4hi
12330 = build_function_type (long_long_integer_type_node,
12331 tree_cons (NULL_TREE, V4HI_type_node,
12332 endlink));
12333 tree di_ftype_v2si
12334 = build_function_type (long_long_integer_type_node,
12335 tree_cons (NULL_TREE, V2SI_type_node,
12336 endlink));
12337 tree v2si_ftype_v4hi
12338 = build_function_type (V2SI_type_node,
12339 tree_cons (NULL_TREE, V4HI_type_node,
12340 endlink));
12341 tree v4hi_ftype_v8qi
12342 = build_function_type (V4HI_type_node,
12343 tree_cons (NULL_TREE, V8QI_type_node,
12344 endlink));
12346 tree di_ftype_di_v4hi_v4hi
12347 = build_function_type (long_long_unsigned_type_node,
12348 tree_cons (NULL_TREE,
12349 long_long_unsigned_type_node,
12350 tree_cons (NULL_TREE, V4HI_type_node,
12351 tree_cons (NULL_TREE,
12352 V4HI_type_node,
12353 endlink))));
12355 tree di_ftype_v4hi_v4hi
12356 = build_function_type (long_long_unsigned_type_node,
12357 tree_cons (NULL_TREE, V4HI_type_node,
12358 tree_cons (NULL_TREE, V4HI_type_node,
12359 endlink)));
12361 /* Normal vector binops. */
12362 tree v8qi_ftype_v8qi_v8qi
12363 = build_function_type (V8QI_type_node,
12364 tree_cons (NULL_TREE, V8QI_type_node,
12365 tree_cons (NULL_TREE, V8QI_type_node,
12366 endlink)));
12367 tree v4hi_ftype_v4hi_v4hi
12368 = build_function_type (V4HI_type_node,
12369 tree_cons (NULL_TREE, V4HI_type_node,
12370 tree_cons (NULL_TREE, V4HI_type_node,
12371 endlink)));
12372 tree v2si_ftype_v2si_v2si
12373 = build_function_type (V2SI_type_node,
12374 tree_cons (NULL_TREE, V2SI_type_node,
12375 tree_cons (NULL_TREE, V2SI_type_node,
12376 endlink)));
12377 tree di_ftype_di_di
12378 = build_function_type (long_long_unsigned_type_node,
12379 tree_cons (NULL_TREE, long_long_unsigned_type_node,
12380 tree_cons (NULL_TREE,
12381 long_long_unsigned_type_node,
12382 endlink)));
12384 /* Add all builtins that are more or less simple operations on two
12385 operands. */
12386 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12388 /* Use one of the operands; the target can have a different mode for
12389 mask-generating compares. */
12390 enum machine_mode mode;
12391 tree type;
12393 if (d->name == 0)
12394 continue;
12396 mode = insn_data[d->icode].operand[1].mode;
12398 switch (mode)
12400 case V8QImode:
12401 type = v8qi_ftype_v8qi_v8qi;
12402 break;
12403 case V4HImode:
12404 type = v4hi_ftype_v4hi_v4hi;
12405 break;
12406 case V2SImode:
12407 type = v2si_ftype_v2si_v2si;
12408 break;
12409 case DImode:
12410 type = di_ftype_di_di;
12411 break;
12413 default:
12414 gcc_unreachable ();
12417 def_mbuiltin (d->mask, d->name, type, d->code);
12420 /* Add the remaining MMX insns with somewhat more complicated types. */
12421 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12422 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12423 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12425 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12426 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12427 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12428 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12429 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12430 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12432 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12433 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12434 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12435 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12436 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12437 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12439 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12440 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12441 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12442 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12443 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12444 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12446 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12447 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12448 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12449 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12450 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12451 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12453 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12455 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12456 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12457 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12458 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12460 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12461 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12462 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12463 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12464 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12465 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12466 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12467 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12468 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12470 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12471 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12472 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12474 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12475 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12476 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12478 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12479 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12480 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12481 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12482 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12483 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12485 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12486 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12487 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12488 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12489 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12490 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12491 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12492 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12493 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12494 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12495 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12496 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12498 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12499 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12500 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12501 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12503 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12504 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12505 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12506 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12507 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12508 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12509 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
12512 static void
12513 arm_init_tls_builtins (void)
12515 tree ftype;
12516 tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
12517 tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
12519 ftype = build_function_type (ptr_type_node, void_list_node);
12520 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
12521 ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
12522 NULL, const_nothrow);
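/* A usage sketch: with this registration in place, user code can read
   the TLS thread pointer directly, e.g.

     void *tp = __builtin_thread_pointer ();

   ARM_BUILTIN_THREAD_POINTER is expanded via arm_load_tp in
   arm_expand_builtin below. */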
12525 static void
12526 arm_init_builtins (void)
12528 arm_init_tls_builtins ();
12530 if (TARGET_REALLY_IWMMXT)
12531 arm_init_iwmmxt_builtins ();
12534 /* Errors in the source file can cause expand_expr to return const0_rtx
12535 where we expect a vector. To avoid crashing, use one of the vector
12536 clear instructions. */
12538 static rtx
12539 safe_vector_operand (rtx x, enum machine_mode mode)
12541 if (x != const0_rtx)
12542 return x;
12543 x = gen_reg_rtx (mode);
12545 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12546 : gen_rtx_SUBREG (DImode, x, 0)));
12547 return x;
12550 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12552 static rtx
12553 arm_expand_binop_builtin (enum insn_code icode,
12554 tree arglist, rtx target)
12556 rtx pat;
12557 tree arg0 = TREE_VALUE (arglist);
12558 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12559 rtx op0 = expand_normal (arg0);
12560 rtx op1 = expand_normal (arg1);
12561 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12562 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12563 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12565 if (VECTOR_MODE_P (mode0))
12566 op0 = safe_vector_operand (op0, mode0);
12567 if (VECTOR_MODE_P (mode1))
12568 op1 = safe_vector_operand (op1, mode1);
12570 if (! target
12571 || GET_MODE (target) != tmode
12572 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12573 target = gen_reg_rtx (tmode);
12575 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12577 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12578 op0 = copy_to_mode_reg (mode0, op0);
12579 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12580 op1 = copy_to_mode_reg (mode1, op1);
12582 pat = GEN_FCN (icode) (target, op0, op1);
12583 if (! pat)
12584 return 0;
12585 emit_insn (pat);
12586 return target;
12589 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12591 static rtx
12592 arm_expand_unop_builtin (enum insn_code icode,
12593 tree arglist, rtx target, int do_load)
12595 rtx pat;
12596 tree arg0 = TREE_VALUE (arglist);
12597 rtx op0 = expand_normal (arg0);
12598 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12599 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12601 if (! target
12602 || GET_MODE (target) != tmode
12603 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12604 target = gen_reg_rtx (tmode);
12605 if (do_load)
12606 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12607 else
12609 if (VECTOR_MODE_P (mode0))
12610 op0 = safe_vector_operand (op0, mode0);
12612 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12613 op0 = copy_to_mode_reg (mode0, op0);
12616 pat = GEN_FCN (icode) (target, op0);
12617 if (! pat)
12618 return 0;
12619 emit_insn (pat);
12620 return target;
12623 /* Expand an expression EXP that calls a built-in function,
12624 with result going to TARGET if that's convenient
12625 (and in mode MODE if that's convenient).
12626 SUBTARGET may be used as the target for computing one of EXP's operands.
12627 IGNORE is nonzero if the value is to be ignored. */
12629 static rtx
12630 arm_expand_builtin (tree exp,
12631 rtx target,
12632 rtx subtarget ATTRIBUTE_UNUSED,
12633 enum machine_mode mode ATTRIBUTE_UNUSED,
12634 int ignore ATTRIBUTE_UNUSED)
12636 const struct builtin_description * d;
12637 enum insn_code icode;
12638 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12639 tree arglist = TREE_OPERAND (exp, 1);
12640 tree arg0;
12641 tree arg1;
12642 tree arg2;
12643 rtx op0;
12644 rtx op1;
12645 rtx op2;
12646 rtx pat;
12647 int fcode = DECL_FUNCTION_CODE (fndecl);
12648 size_t i;
12649 enum machine_mode tmode;
12650 enum machine_mode mode0;
12651 enum machine_mode mode1;
12652 enum machine_mode mode2;
12654 switch (fcode)
12656 case ARM_BUILTIN_TEXTRMSB:
12657 case ARM_BUILTIN_TEXTRMUB:
12658 case ARM_BUILTIN_TEXTRMSH:
12659 case ARM_BUILTIN_TEXTRMUH:
12660 case ARM_BUILTIN_TEXTRMSW:
12661 case ARM_BUILTIN_TEXTRMUW:
12662 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12663 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12664 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12665 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12666 : CODE_FOR_iwmmxt_textrmw);
12668 arg0 = TREE_VALUE (arglist);
12669 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12670 op0 = expand_normal (arg0);
12671 op1 = expand_normal (arg1);
12672 tmode = insn_data[icode].operand[0].mode;
12673 mode0 = insn_data[icode].operand[1].mode;
12674 mode1 = insn_data[icode].operand[2].mode;
12676 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12677 op0 = copy_to_mode_reg (mode0, op0);
12678 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12680 /* @@@ better error message */
12681 error ("selector must be an immediate");
12682 return gen_reg_rtx (tmode);
12684 if (target == 0
12685 || GET_MODE (target) != tmode
12686 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12687 target = gen_reg_rtx (tmode);
12688 pat = GEN_FCN (icode) (target, op0, op1);
12689 if (! pat)
12690 return 0;
12691 emit_insn (pat);
12692 return target;
12694 case ARM_BUILTIN_TINSRB:
12695 case ARM_BUILTIN_TINSRH:
12696 case ARM_BUILTIN_TINSRW:
12697 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12698 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12699 : CODE_FOR_iwmmxt_tinsrw);
12700 arg0 = TREE_VALUE (arglist);
12701 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12702 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12703 op0 = expand_normal (arg0);
12704 op1 = expand_normal (arg1);
12705 op2 = expand_normal (arg2);
12706 tmode = insn_data[icode].operand[0].mode;
12707 mode0 = insn_data[icode].operand[1].mode;
12708 mode1 = insn_data[icode].operand[2].mode;
12709 mode2 = insn_data[icode].operand[3].mode;
12711 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12712 op0 = copy_to_mode_reg (mode0, op0);
12713 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12714 op1 = copy_to_mode_reg (mode1, op1);
12715 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12717 /* @@@ better error message */
12718 error ("selector must be an immediate");
12719 return const0_rtx;
12721 if (target == 0
12722 || GET_MODE (target) != tmode
12723 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12724 target = gen_reg_rtx (tmode);
12725 pat = GEN_FCN (icode) (target, op0, op1, op2);
12726 if (! pat)
12727 return 0;
12728 emit_insn (pat);
12729 return target;
12731 case ARM_BUILTIN_SETWCX:
12732 arg0 = TREE_VALUE (arglist);
12733 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12734 op0 = force_reg (SImode, expand_normal (arg0));
12735 op1 = expand_normal (arg1);
12736 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12737 return 0;
12739 case ARM_BUILTIN_GETWCX:
12740 arg0 = TREE_VALUE (arglist);
12741 op0 = expand_normal (arg0);
12742 target = gen_reg_rtx (SImode);
12743 emit_insn (gen_iwmmxt_tmrc (target, op0));
12744 return target;
12746 case ARM_BUILTIN_WSHUFH:
12747 icode = CODE_FOR_iwmmxt_wshufh;
12748 arg0 = TREE_VALUE (arglist);
12749 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12750 op0 = expand_normal (arg0);
12751 op1 = expand_normal (arg1);
12752 tmode = insn_data[icode].operand[0].mode;
12753 mode1 = insn_data[icode].operand[1].mode;
12754 mode2 = insn_data[icode].operand[2].mode;
12756 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12757 op0 = copy_to_mode_reg (mode1, op0);
12758 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12760 /* @@@ better error message */
12761 error ("mask must be an immediate");
12762 return const0_rtx;
12764 if (target == 0
12765 || GET_MODE (target) != tmode
12766 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12767 target = gen_reg_rtx (tmode);
12768 pat = GEN_FCN (icode) (target, op0, op1);
12769 if (! pat)
12770 return 0;
12771 emit_insn (pat);
12772 return target;
12774 case ARM_BUILTIN_WSADB:
12775 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12776 case ARM_BUILTIN_WSADH:
12777 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12778 case ARM_BUILTIN_WSADBZ:
12779 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12780 case ARM_BUILTIN_WSADHZ:
12781 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12783 /* Several three-argument builtins. */
12784 case ARM_BUILTIN_WMACS:
12785 case ARM_BUILTIN_WMACU:
12786 case ARM_BUILTIN_WALIGN:
12787 case ARM_BUILTIN_TMIA:
12788 case ARM_BUILTIN_TMIAPH:
12789 case ARM_BUILTIN_TMIATT:
12790 case ARM_BUILTIN_TMIATB:
12791 case ARM_BUILTIN_TMIABT:
12792 case ARM_BUILTIN_TMIABB:
12793 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12794 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12795 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12796 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12797 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12798 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12799 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12800 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12801 : CODE_FOR_iwmmxt_walign);
12802 arg0 = TREE_VALUE (arglist);
12803 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12804 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12805 op0 = expand_normal (arg0);
12806 op1 = expand_normal (arg1);
12807 op2 = expand_normal (arg2);
12808 tmode = insn_data[icode].operand[0].mode;
12809 mode0 = insn_data[icode].operand[1].mode;
12810 mode1 = insn_data[icode].operand[2].mode;
12811 mode2 = insn_data[icode].operand[3].mode;
12813 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12814 op0 = copy_to_mode_reg (mode0, op0);
12815 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12816 op1 = copy_to_mode_reg (mode1, op1);
12817 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12818 op2 = copy_to_mode_reg (mode2, op2);
12819 if (target == 0
12820 || GET_MODE (target) != tmode
12821 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12822 target = gen_reg_rtx (tmode);
12823 pat = GEN_FCN (icode) (target, op0, op1, op2);
12824 if (! pat)
12825 return 0;
12826 emit_insn (pat);
12827 return target;
12829 case ARM_BUILTIN_WZERO:
12830 target = gen_reg_rtx (DImode);
12831 emit_insn (gen_iwmmxt_clrdi (target));
12832 return target;
12834 case ARM_BUILTIN_THREAD_POINTER:
12835 return arm_load_tp (target);
12837 default:
12838 break;
12841 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12842 if (d->code == (const enum arm_builtins) fcode)
12843 return arm_expand_binop_builtin (d->icode, arglist, target);
12845 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12846 if (d->code == (const enum arm_builtins) fcode)
12847 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12849 /* @@@ Should really do something sensible here. */
12850 return NULL_RTX;
12853 /* Return the number (counting from 0) of
12854 the least significant set bit in MASK. */
12856 inline static int
12857 number_of_first_bit_set (unsigned mask)
12859 int bit;
12861 for (bit = 0;
12862 (mask & (1 << bit)) == 0;
12863 ++bit)
12864 continue;
12866 return bit;
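/* For example, number_of_first_bit_set (0x14) is 2: bits 0 and 1 of
   binary 10100 are clear and bit 2 is the first one set. The caller
   must guarantee that MASK is nonzero, or the loop above will not
   terminate. */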
12869 /* Emit code to push or pop registers to or from the stack. F is the
12870 assembly file. MASK is the registers to push or pop. PUSH is
12871 nonzero if we should push, and zero if we should pop. For debugging
12872 output, if pushing, adjust CFA_OFFSET by the amount of space added
12873 to the stack. REAL_REGS should have the same number of bits set as
12874 MASK, and will be used instead (in the same order) to describe which
12875 registers were saved - this is used to mark the save slots when we
12876 push high registers after moving them to low registers. */
12877 static void
12878 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12879 unsigned long real_regs)
12881 int regno;
12882 int lo_mask = mask & 0xFF;
12883 int pushed_words = 0;
12885 gcc_assert (mask);
12887 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12889 /* Special case. Do not generate a POP PC statement here; do it in
12890 thumb_exit(). */
12891 thumb_exit (f, -1);
12892 return;
12895 if (ARM_EABI_UNWIND_TABLES && push)
12897 fprintf (f, "\t.save\t{");
12898 for (regno = 0; regno < 15; regno++)
12900 if (real_regs & (1 << regno))
12902 if (real_regs & ((1 << regno) -1))
12903 fprintf (f, ", ");
12904 asm_fprintf (f, "%r", regno);
12907 fprintf (f, "}\n");
12910 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12912 /* Look at the low registers first. */
12913 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12915 if (lo_mask & 1)
12917 asm_fprintf (f, "%r", regno);
12919 if ((lo_mask & ~1) != 0)
12920 fprintf (f, ", ");
12922 pushed_words++;
12926 if (push && (mask & (1 << LR_REGNUM)))
12928 /* Catch pushing the LR. */
12929 if (mask & 0xFF)
12930 fprintf (f, ", ");
12932 asm_fprintf (f, "%r", LR_REGNUM);
12934 pushed_words++;
12936 else if (!push && (mask & (1 << PC_REGNUM)))
12938 /* Catch popping the PC. */
12939 if (TARGET_INTERWORK || TARGET_BACKTRACE
12940 || current_function_calls_eh_return)
12942 /* The PC is never popped directly; instead
12943 it is popped into r3 and then BX is used. */
12944 fprintf (f, "}\n");
12946 thumb_exit (f, -1);
12948 return;
12950 else
12952 if (mask & 0xFF)
12953 fprintf (f, ", ");
12955 asm_fprintf (f, "%r", PC_REGNUM);
12959 fprintf (f, "}\n");
12961 if (push && pushed_words && dwarf2out_do_frame ())
12963 char *l = dwarf2out_cfi_label ();
12964 int pushed_mask = real_regs;
12966 *cfa_offset += pushed_words * 4;
12967 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12969 pushed_words = 0;
12970 pushed_mask = real_regs;
12971 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12973 if (pushed_mask & 1)
12974 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
12979 /* Generate code to return from a thumb function.
12980 If 'reg_containing_return_addr' is -1, then the return address is
12981 actually on the stack, at the stack pointer. */
12982 static void
12983 thumb_exit (FILE *f, int reg_containing_return_addr)
12985 unsigned regs_available_for_popping;
12986 unsigned regs_to_pop;
12987 int pops_needed;
12988 unsigned available;
12989 unsigned required;
12990 int mode;
12991 int size;
12992 int restore_a4 = FALSE;
12994 /* Compute the registers we need to pop. */
12995 regs_to_pop = 0;
12996 pops_needed = 0;
12998 if (reg_containing_return_addr == -1)
13000 regs_to_pop |= 1 << LR_REGNUM;
13001 ++pops_needed;
13004 if (TARGET_BACKTRACE)
13006 /* Restore the (ARM) frame pointer and stack pointer. */
13007 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
13008 pops_needed += 2;
13011 /* If there is nothing to pop then just emit the BX instruction and
13012 return. */
13013 if (pops_needed == 0)
13015 if (current_function_calls_eh_return)
13016 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13018 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13019 return;
13021 /* Otherwise, if we are not supporting interworking, have not created
13022 a backtrace structure, and the function was not entered in ARM mode,
13023 then just pop the return address straight into the PC. */
13024 else if (!TARGET_INTERWORK
13025 && !TARGET_BACKTRACE
13026 && !is_called_in_ARM_mode (current_function_decl)
13027 && !current_function_calls_eh_return)
13029 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
13030 return;
13033 /* Find out how many of the (return) argument registers we can corrupt. */
13034 regs_available_for_popping = 0;
13036 /* If returning via __builtin_eh_return, the bottom three registers
13037 all contain information needed for the return. */
13038 if (current_function_calls_eh_return)
13039 size = 12;
13040 else
13042 /* We can deduce the registers used from the function's
13043 return value. This is more reliable than examining
13044 regs_ever_live[] because that will be set if the register is
13045 ever used in the function, not just if the register is used
13046 to hold a return value. */
13048 if (current_function_return_rtx != 0)
13049 mode = GET_MODE (current_function_return_rtx);
13050 else
13051 mode = DECL_MODE (DECL_RESULT (current_function_decl));
13053 size = GET_MODE_SIZE (mode);
13055 if (size == 0)
13057 /* In a void function we can use any argument register.
13058 In a function that returns a structure on the stack
13059 we can use the second and third argument registers. */
13060 if (mode == VOIDmode)
13061 regs_available_for_popping =
13062 (1 << ARG_REGISTER (1))
13063 | (1 << ARG_REGISTER (2))
13064 | (1 << ARG_REGISTER (3));
13065 else
13066 regs_available_for_popping =
13067 (1 << ARG_REGISTER (2))
13068 | (1 << ARG_REGISTER (3));
13070 else if (size <= 4)
13071 regs_available_for_popping =
13072 (1 << ARG_REGISTER (2))
13073 | (1 << ARG_REGISTER (3));
13074 else if (size <= 8)
13075 regs_available_for_popping =
13076 (1 << ARG_REGISTER (3));
13079 /* Match registers to be popped with registers into which we pop them. */
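/* The expression (x & -x) isolates the lowest set bit of x, so each
   iteration of the loop below retires one register from each mask;
   e.g. for available == 0xC the first pass computes 0xC & -0xC == 0x4
   and leaves available == 0x8. */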
13080 for (available = regs_available_for_popping,
13081 required = regs_to_pop;
13082 required != 0 && available != 0;
13083 available &= ~(available & - available),
13084 required &= ~(required & - required))
13085 -- pops_needed;
13087 /* If we have any popping registers left over, remove them. */
13088 if (available > 0)
13089 regs_available_for_popping &= ~available;
13091 /* Otherwise if we need another popping register we can use
13092 the fourth argument register. */
13093 else if (pops_needed)
13095 /* If we have not found any free argument registers and
13096 reg a4 contains the return address, we must move it. */
13097 if (regs_available_for_popping == 0
13098 && reg_containing_return_addr == LAST_ARG_REGNUM)
13100 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13101 reg_containing_return_addr = LR_REGNUM;
13103 else if (size > 12)
13105 /* Register a4 is being used to hold part of the return value,
13106 but we have dire need of a free, low register. */
13107 restore_a4 = TRUE;
13109 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
13112 if (reg_containing_return_addr != LAST_ARG_REGNUM)
13114 /* The fourth argument register is available. */
13115 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
13117 --pops_needed;
13121 /* Pop as many registers as we can. */
13122 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13123 regs_available_for_popping);
13125 /* Process the registers we popped. */
13126 if (reg_containing_return_addr == -1)
13128 /* The return address was popped into the lowest numbered register. */
13129 regs_to_pop &= ~(1 << LR_REGNUM);
13131 reg_containing_return_addr =
13132 number_of_first_bit_set (regs_available_for_popping);
13134 /* Remove this register from the mask of available registers, so that
13135 the return address will not be corrupted by further pops. */
13136 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
13139 /* If we popped other registers then handle them here. */
13140 if (regs_available_for_popping)
13142 int frame_pointer;
13144 /* Work out which register currently contains the frame pointer. */
13145 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
13147 /* Move it into the correct place. */
13148 asm_fprintf (f, "\tmov\t%r, %r\n",
13149 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
13151 /* (Temporarily) remove it from the mask of popped registers. */
13152 regs_available_for_popping &= ~(1 << frame_pointer);
13153 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
13155 if (regs_available_for_popping)
13157 int stack_pointer;
13159 /* We popped the stack pointer as well;
13160 find the register that contains it. */
13161 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
13163 /* Move it into the stack register. */
13164 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
13166 /* At this point we have popped all necessary registers, so
13167 do not worry about restoring regs_available_for_popping
13168 to its correct value:
13170 assert (pops_needed == 0)
13171 assert (regs_available_for_popping == (1 << frame_pointer))
13172 assert (regs_to_pop == (1 << STACK_POINTER)) */
13174 else
13176 /* Since we have just moved the popped value into the frame
13177 pointer, the popping register is available for reuse, and
13178 we know that we still have the stack pointer left to pop. */
13179 regs_available_for_popping |= (1 << frame_pointer);
13183 /* If we still have registers left on the stack, but we no longer have
13184 any registers into which we can pop them, then we must move the return
13185 address into the link register and make available the register that
13186 contained it. */
13187 if (regs_available_for_popping == 0 && pops_needed > 0)
13189 regs_available_for_popping |= 1 << reg_containing_return_addr;
13191 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
13192 reg_containing_return_addr);
13194 reg_containing_return_addr = LR_REGNUM;
13197 /* If we have registers left on the stack then pop some more.
13198 We know that at most we will want to pop FP and SP. */
13199 if (pops_needed > 0)
13201 int popped_into;
13202 int move_to;
13204 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13205 regs_available_for_popping);
13207 /* We have popped either FP or SP.
13208 Move whichever one it is into the correct register. */
13209 popped_into = number_of_first_bit_set (regs_available_for_popping);
13210 move_to = number_of_first_bit_set (regs_to_pop);
13212 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
13214 regs_to_pop &= ~(1 << move_to);
13216 --pops_needed;
13219 /* If we still have not popped everything then we must have only
13220 had one register available to us and we are now popping the SP. */
13221 if (pops_needed > 0)
13223 int popped_into;
13225 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13226 regs_available_for_popping);
13228 popped_into = number_of_first_bit_set (regs_available_for_popping);
13230 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
13232 /* assert (regs_to_pop == (1 << STACK_POINTER))
13233 assert (pops_needed == 1) */
13237 /* If necessary restore the a4 register. */
13238 if (restore_a4)
13240 if (reg_containing_return_addr != LR_REGNUM)
13242 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13243 reg_containing_return_addr = LR_REGNUM;
13246 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13249 if (current_function_calls_eh_return)
13250 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13252 /* Return to caller. */
13253 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13257 void
13258 thumb_final_prescan_insn (rtx insn)
13260 if (flag_print_asm_name)
13261 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
13262 INSN_ADDRESSES (INSN_UID (insn)));
13265 int
13266 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
13268 unsigned HOST_WIDE_INT mask = 0xff;
13269 int i;
13271 if (val == 0) /* XXX */
13272 return 0;
13274 for (i = 0; i < 25; i++)
13275 if ((val & (mask << i)) == val)
13276 return 1;
13278 return 0;
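/* For example, 0x00ff0000 is accepted (0xff shifted left by 16),
   while 0x00ff00ff is rejected: no single 8-bit window covers both
   set bytes. */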
13281 /* Returns nonzero if the current function contains,
13282 or might contain, a far jump. */
13283 static int
13284 thumb_far_jump_used_p (void)
13286 rtx insn;
13288 /* This test is only important for leaf functions. */
13289 /* assert (!leaf_function_p ()); */
13291 /* If we have already decided that far jumps may be used,
13292 do not bother checking again, and always return true even if
13293 it turns out that they are not being used. Once we have made
13294 the decision that far jumps are present (and that hence the link
13295 register will be pushed onto the stack) we cannot go back on it. */
13296 if (cfun->machine->far_jump_used)
13297 return 1;
13299 /* If this function is not being called from the prologue/epilogue
13300 generation code then it must be being called from the
13301 INITIAL_ELIMINATION_OFFSET macro. */
13302 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
13304 /* In this case we know that we are being asked about the elimination
13305 of the arg pointer register. If that register is not being used,
13306 then there are no arguments on the stack, and we do not have to
13307 worry that a far jump might force the prologue to push the link
13308 register, changing the stack offsets. In this case we can just
13309 return false, since the presence of far jumps in the function will
13310 not affect stack offsets.
13312 If the arg pointer is live (or if it was live, but has now been
13313 eliminated and so set to dead) then we do have to test to see if
13314 the function might contain a far jump. This test can lead to some
13315 false negatives, since before reload is completed, the length of
13316 branch instructions is not known, so gcc defaults to returning their
13317 longest length, which in turn sets the far jump attribute to true.
13319 A false negative will not result in bad code being generated, but it
13320 will result in a needless push and pop of the link register. We
13321 hope that this does not occur too often.
13323 If we need doubleword stack alignment this could affect the other
13324 elimination offsets so we can't risk getting it wrong. */
13325 if (regs_ever_live [ARG_POINTER_REGNUM])
13326 cfun->machine->arg_pointer_live = 1;
13327 else if (!cfun->machine->arg_pointer_live)
13328 return 0;
13331 /* Check to see if the function contains a branch
13332 insn with the far jump attribute set. */
13333 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13335 if (GET_CODE (insn) == JUMP_INSN
13336 /* Ignore tablejump patterns. */
13337 && GET_CODE (PATTERN (insn)) != ADDR_VEC
13338 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
13339 && get_attr_far_jump (insn) == FAR_JUMP_YES
13342 /* Record the fact that we have decided that
13343 the function does use far jumps. */
13344 cfun->machine->far_jump_used = 1;
13345 return 1;
13349 return 0;
13352 /* Return nonzero if FUNC must be entered in ARM mode. */
13353 int
13354 is_called_in_ARM_mode (tree func)
13356 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
13358 /* Ignore the problem of functions whose address is taken. */
13359 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
13360 return TRUE;
13362 #ifdef ARM_PE
13363 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
13364 #else
13365 return FALSE;
13366 #endif
13369 /* The bits which aren't usefully expanded as rtl. */
13370 const char *
13371 thumb_unexpanded_epilogue (void)
13373 int regno;
13374 unsigned long live_regs_mask = 0;
13375 int high_regs_pushed = 0;
13376 int had_to_push_lr;
13377 int size;
13379 if (return_used_this_function)
13380 return "";
13382 if (IS_NAKED (arm_current_func_type ()))
13383 return "";
13385 live_regs_mask = thumb_compute_save_reg_mask ();
13386 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13388 /* We can deduce the registers used from the function's return value.
13389 This is more reliable than examining regs_ever_live[] because that
13390 will be set if the register is ever used in the function, not just if
13391 the register is used to hold a return value. */
13392 size = arm_size_return_regs ();
13394 /* The prologue may have pushed some high registers to use as
13395 work registers, e.g. the testsuite file:
13396 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
13397 compiles to produce:
13398 push {r4, r5, r6, r7, lr}
13399 mov r7, r9
13400 mov r6, r8
13401 push {r6, r7}
13402 as part of the prologue. We have to undo that pushing here. */
13404 if (high_regs_pushed)
13406 unsigned long mask = live_regs_mask & 0xff;
13407 int next_hi_reg;
13409 /* The available low registers depend on the size of the value we are
13410 returning. */
13411 if (size <= 12)
13412 mask |= 1 << 3;
13413 if (size <= 8)
13414 mask |= 1 << 2;
13416 if (mask == 0)
13417 /* Oh dear! We have no low registers into which we can pop
13418 high registers! */
13419 internal_error
13420 ("no low registers available for popping high registers");
13422 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13423 if (live_regs_mask & (1 << next_hi_reg))
13424 break;
13426 while (high_regs_pushed)
13428 /* Find lo register(s) into which the high register(s) can
13429 be popped. */
13430 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13432 if (mask & (1 << regno))
13433 high_regs_pushed--;
13434 if (high_regs_pushed == 0)
13435 break;
13438 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13440 /* Pop the values into the low register(s). */
13441 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13443 /* Move the value(s) into the high registers. */
13444 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13446 if (mask & (1 << regno))
13448 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13449 regno);
13451 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13452 if (live_regs_mask & (1 << next_hi_reg))
13453 break;
13457 live_regs_mask &= ~0x0f00;
13460 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13461 live_regs_mask &= 0xff;
13463 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13465 /* Pop the return address into the PC. */
13466 if (had_to_push_lr)
13467 live_regs_mask |= 1 << PC_REGNUM;
13469 /* Either no argument registers were pushed or a backtrace
13470 structure was created which includes an adjusted stack
13471 pointer, so just pop everything. */
13472 if (live_regs_mask)
13473 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13474 live_regs_mask);
13476 /* We have either just popped the return address into the
13477 PC or it was kept in LR for the entire function. */
13478 if (!had_to_push_lr)
13479 thumb_exit (asm_out_file, LR_REGNUM);
13481 else
13483 /* Pop everything but the return address. */
13484 if (live_regs_mask)
13485 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13486 live_regs_mask);
13488 if (had_to_push_lr)
13490 if (size > 12)
13492 /* We have no free low regs, so save one. */
13493 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13494 LAST_ARG_REGNUM);
13497 /* Get the return address into a temporary register. */
13498 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13499 1 << LAST_ARG_REGNUM);
13501 if (size > 12)
13503 /* Move the return address to lr. */
13504 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13505 LAST_ARG_REGNUM);
13506 /* Restore the low register. */
13507 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13508 IP_REGNUM);
13509 regno = LR_REGNUM;
13511 else
13512 regno = LAST_ARG_REGNUM;
13514 else
13515 regno = LR_REGNUM;
13517 /* Remove the argument registers that were pushed onto the stack. */
13518 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13519 SP_REGNUM, SP_REGNUM,
13520 current_function_pretend_args_size);
13522 thumb_exit (asm_out_file, regno);
13525 return "";
13528 /* Functions to save and restore machine-specific function data. */
13529 static struct machine_function *
13530 arm_init_machine_status (void)
13532 struct machine_function *machine;
13533 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13535 #if ARM_FT_UNKNOWN != 0
13536 machine->func_type = ARM_FT_UNKNOWN;
13537 #endif
13538 return machine;
13541 /* Return an RTX indicating where the return address to the
13542 calling function can be found. */
13543 rtx
13544 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13546 if (count != 0)
13547 return NULL_RTX;
13549 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13552 /* Do anything needed before RTL is emitted for each function. */
13553 void
13554 arm_init_expanders (void)
13556 /* Arrange to initialize and mark the machine per-function status. */
13557 init_machine_status = arm_init_machine_status;
13559 /* This is to stop the combine pass optimizing away the alignment
13560 adjustment of va_arg. */
13561 /* ??? It is claimed that this should not be necessary. */
13562 if (cfun)
13563 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13567 /* Like arm_compute_initial_elimination_offset. Simpler because there
13568 isn't an ABI specified frame pointer for Thumb. Instead, we set it
13569 to point at the base of the local variables after static stack
13570 space for a function has been allocated. */
13572 HOST_WIDE_INT
13573 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13575 arm_stack_offsets *offsets;
13577 offsets = arm_get_frame_offsets ();
13579 switch (from)
13581 case ARG_POINTER_REGNUM:
13582 switch (to)
13584 case STACK_POINTER_REGNUM:
13585 return offsets->outgoing_args - offsets->saved_args;
13587 case FRAME_POINTER_REGNUM:
13588 return offsets->soft_frame - offsets->saved_args;
13590 case ARM_HARD_FRAME_POINTER_REGNUM:
13591 return offsets->saved_regs - offsets->saved_args;
13593 case THUMB_HARD_FRAME_POINTER_REGNUM:
13594 return offsets->locals_base - offsets->saved_args;
13596 default:
13597 gcc_unreachable ();
13599 break;
13601 case FRAME_POINTER_REGNUM:
13602 switch (to)
13604 case STACK_POINTER_REGNUM:
13605 return offsets->outgoing_args - offsets->soft_frame;
13607 case ARM_HARD_FRAME_POINTER_REGNUM:
13608 return offsets->saved_regs - offsets->soft_frame;
13610 case THUMB_HARD_FRAME_POINTER_REGNUM:
13611 return offsets->locals_base - offsets->soft_frame;
13613 default:
13614 gcc_unreachable ();
13616 break;
13618 default:
13619 gcc_unreachable ();
13624 /* Generate the rest of a function's prologue. */
13625 void
13626 thumb_expand_prologue (void)
13628 rtx insn, dwarf;
13630 HOST_WIDE_INT amount;
13631 arm_stack_offsets *offsets;
13632 unsigned long func_type;
13633 int regno;
13634 unsigned long live_regs_mask;
13636 func_type = arm_current_func_type ();
13638 /* Naked functions don't have prologues. */
13639 if (IS_NAKED (func_type))
13640 return;
13642 if (IS_INTERRUPT (func_type))
13644 error ("interrupt Service Routines cannot be coded in Thumb mode");
13645 return;
13648 live_regs_mask = thumb_compute_save_reg_mask ();
13649 /* Load the pic register before setting the frame pointer,
13650 so we can use r7 as a temporary work register. */
13651 if (flag_pic && arm_pic_register != INVALID_REGNUM)
13652 arm_load_pic_register (live_regs_mask);
13654 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
13655 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13656 stack_pointer_rtx);
13658 offsets = arm_get_frame_offsets ();
13659 amount = offsets->outgoing_args - offsets->saved_regs;
13660 if (amount)
13662 if (amount < 512)
13664 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13665 GEN_INT (- amount)));
13666 RTX_FRAME_RELATED_P (insn) = 1;
13668 else
13670 rtx reg;
13672 /* The stack decrement is too big for an immediate value in a single
13673 insn. In theory we could issue multiple subtracts, but after
13674 three of them it becomes more space efficient to place the full
13675 value in the constant pool and load into a register. (Also the
13676 ARM debugger really likes to see only one stack decrement per
13677 function). So instead we look for a scratch register into which
13678 we can load the decrement, and then we subtract this from the
13679 stack pointer. Unfortunately on the Thumb the only available
13680 scratch registers are the argument registers, and we cannot use
13681 these as they may hold arguments to the function. Instead we
13682 attempt to locate a call preserved register which is used by this
13683 function. If we can find one, then we know that it will have
13684 been pushed at the start of the prologue and so we can corrupt
13685 it now. */
13686 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13687 if (live_regs_mask & (1 << regno)
13688 && !(frame_pointer_needed
13689 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13690 break;
13692 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13694 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13696 /* Choose an arbitrary, non-argument low register. */
13697 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13699 /* Save it by copying it into a high, scratch register. */
13700 emit_insn (gen_movsi (spare, reg));
13701 /* Add a USE to stop propagate_one_insn() from barfing. */
13702 emit_insn (gen_prologue_use (spare));
13704 /* Decrement the stack. */
13705 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13706 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13707 stack_pointer_rtx, reg));
13708 RTX_FRAME_RELATED_P (insn) = 1;
13709 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13710 plus_constant (stack_pointer_rtx,
13711 -amount));
13712 RTX_FRAME_RELATED_P (dwarf) = 1;
13713 REG_NOTES (insn)
13714 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13715 REG_NOTES (insn));
13717 /* Restore the low register's original value. */
13718 emit_insn (gen_movsi (reg, spare));
13720 /* Emit a USE of the restored scratch register, so that flow
13721 analysis will not consider the restore redundant. The
13722 register won't be used again in this function and isn't
13723 restored by the epilogue. */
13724 emit_insn (gen_prologue_use (reg));
13726 else
13728 reg = gen_rtx_REG (SImode, regno);
13730 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13732 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13733 stack_pointer_rtx, reg));
13734 RTX_FRAME_RELATED_P (insn) = 1;
13735 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13736 plus_constant (stack_pointer_rtx,
13737 -amount));
13738 RTX_FRAME_RELATED_P (dwarf) = 1;
13739 REG_NOTES (insn)
13740 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13741 REG_NOTES (insn));
13746 if (frame_pointer_needed)
13748 amount = offsets->outgoing_args - offsets->locals_base;
13750 if (amount < 1024)
13751 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13752 stack_pointer_rtx, GEN_INT (amount)));
13753 else
13755 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
13756 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13757 hard_frame_pointer_rtx,
13758 stack_pointer_rtx));
13759 dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
13760 plus_constant (stack_pointer_rtx, amount));
13761 RTX_FRAME_RELATED_P (dwarf) = 1;
13762 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13763 REG_NOTES (insn));
13766 RTX_FRAME_RELATED_P (insn) = 1;
13769 /* If we are profiling, make sure no instructions are scheduled before
13770 the call to mcount. Similarly if the user has requested no
13771 scheduling in the prologue. Similarly if we want non-call exceptions
13772 using the EABI unwinder, to prevent faulting instructions from being
13773 swapped with a stack adjustment. */
13774 if (current_function_profile || !TARGET_SCHED_PROLOG
13775 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
13776 emit_insn (gen_blockage ());
13778 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13779 if (live_regs_mask & 0xff)
13780 cfun->machine->lr_save_eliminated = 0;
13782 /* If the link register is being kept alive, with the return address in it,
13783 then make sure that it does not get reused by the ce2 pass. */
13784 if (cfun->machine->lr_save_eliminated)
13785 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
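/* Illustrative sketch (editor's addition; the register choice is
   hypothetical): for a 1024-byte frame in a function that already
   saves r4, the large-decrement path above emits roughly

	ldr	r4, .Lc		@ .Lc: .word -1024, from the constant pool
	add	sp, sp, r4

   relying on r4 having been pushed at the start of the prologue so
   that its value can be corrupted here.  */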
13789 void
13790 thumb_expand_epilogue (void)
13792 HOST_WIDE_INT amount;
13793 arm_stack_offsets *offsets;
13794 int regno;
13796 /* Naked functions don't have epilogues. */
13797 if (IS_NAKED (arm_current_func_type ()))
13798 return;
13800 offsets = arm_get_frame_offsets ();
13801 amount = offsets->outgoing_args - offsets->saved_regs;
13803 if (frame_pointer_needed)
13805 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13806 amount = offsets->locals_base - offsets->saved_regs;
13809 if (amount)
13811 if (amount < 512)
13812 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13813 GEN_INT (amount)));
13814 else
13816 /* r3 is always free in the epilogue. */
13817 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13819 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13820 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13824 /* Emit a USE (stack_pointer_rtx), so that
13825 the stack adjustment will not be deleted. */
13826 emit_insn (gen_prologue_use (stack_pointer_rtx));
13828 if (current_function_profile || !TARGET_SCHED_PROLOG)
13829 emit_insn (gen_blockage ());
13831 /* Emit a clobber for each insn that will be restored in the epilogue,
13832 so that flow2 will get register lifetimes correct. */
13833 for (regno = 0; regno < 13; regno++)
13834 if (regs_ever_live[regno] && !call_used_regs[regno])
13835 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13837 if (! regs_ever_live[LR_REGNUM])
13838 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13841 static void
13842 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13844 unsigned long live_regs_mask = 0;
13845 unsigned long l_mask;
13846 unsigned high_regs_pushed = 0;
13847 int cfa_offset = 0;
13848 int regno;
13850 if (IS_NAKED (arm_current_func_type ()))
13851 return;
13853 if (is_called_in_ARM_mode (current_function_decl))
13855 const char * name;
13857 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13858 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13859 == SYMBOL_REF);
13860 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13862 /* Generate code sequence to switch us into Thumb mode. */
13863 /* The .code 32 directive has already been emitted by
13864 ASM_DECLARE_FUNCTION_NAME. */
13865 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13866 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13868 /* Generate a label, so that the debugger will notice the
13869 change in instruction sets. This label is also used by
13870 the assembler to bypass the ARM code when this function
13871 is called from a Thumb encoded function elsewhere in the
13872 same file. Hence the definition of STUB_NAME here must
13873 agree with the definition in gas/config/tc-arm.c. */
13875 #define STUB_NAME ".real_start_of"
13877 fprintf (f, "\t.code\t16\n");
13878 #ifdef ARM_PE
13879 if (arm_dllexport_name_p (name))
13880 name = arm_strip_name_encoding (name);
13881 #endif
13882 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13883 fprintf (f, "\t.thumb_func\n");
13884 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13887 if (current_function_pretend_args_size)
13889 /* Output unwind directive for the stack adjustment. */
13890 if (ARM_EABI_UNWIND_TABLES)
13891 fprintf (f, "\t.pad #%d\n",
13892 current_function_pretend_args_size);
13894 if (cfun->machine->uses_anonymous_args)
13896 int num_pushes;
13898 fprintf (f, "\tpush\t{");
13900 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13902 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13903 regno <= LAST_ARG_REGNUM;
13904 regno++)
13905 asm_fprintf (f, "%r%s", regno,
13906 regno == LAST_ARG_REGNUM ? "" : ", ");
13908 fprintf (f, "}\n");
13910 else
13911 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13912 SP_REGNUM, SP_REGNUM,
13913 current_function_pretend_args_size);
13915 /* We don't need to record the stores for unwinding (would it
13916 help the debugger any if we did?), but record the change in
13917 the stack pointer. */
13918 if (dwarf2out_do_frame ())
13920 char *l = dwarf2out_cfi_label ();
13922 cfa_offset = cfa_offset + current_function_pretend_args_size;
13923 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13927 /* Get the registers we are going to push. */
13928 live_regs_mask = thumb_compute_save_reg_mask ();
13929 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13930 l_mask = live_regs_mask & 0x40ff;
13931 /* Then count how many other high registers will need to be pushed. */
13932 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13934 if (TARGET_BACKTRACE)
13936 unsigned offset;
13937 unsigned work_register;
13939 /* We have been asked to create a stack backtrace structure.
13940 The code looks like this:
13942 0 .align 2
13943 0 func:
13944 0 sub SP, #16 Reserve space for 4 registers.
13945 2 push {R7} Push low registers.
13946 4 add R7, SP, #20 Get the stack pointer before the push.
13947 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13948 8 mov R7, PC Get hold of the start of this code plus 12.
13949 10 str R7, [SP, #16] Store it.
13950 12 mov R7, FP Get hold of the current frame pointer.
13951 14 str R7, [SP, #4] Store it.
13952 16 mov R7, LR Get hold of the current return address.
13953 18 str R7, [SP, #12] Store it.
13954 20 add R7, SP, #16 Point at the start of the backtrace structure.
13955 22 mov FP, R7 Put this value into the frame pointer. */
13957 work_register = thumb_find_work_register (live_regs_mask);
13959 if (ARM_EABI_UNWIND_TABLES)
13960 asm_fprintf (f, "\t.pad #16\n");
13962 asm_fprintf
13963 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13964 SP_REGNUM, SP_REGNUM);
13966 if (dwarf2out_do_frame ())
13968 char *l = dwarf2out_cfi_label ();
13970 cfa_offset = cfa_offset + 16;
13971 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13974 if (l_mask)
13976 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13977 offset = bit_count (l_mask) * UNITS_PER_WORD;
13979 else
13980 offset = 0;
13982 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13983 offset + 16 + current_function_pretend_args_size);
13985 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13986 offset + 4);
13988 /* Make sure that the instruction fetching the PC is in the right place
13989 to calculate "start of backtrace creation code + 12". */
13990 if (l_mask)
13992 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13993 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13994 offset + 12);
13995 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13996 ARM_HARD_FRAME_POINTER_REGNUM);
13997 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13998 offset);
14000 else
14002 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14003 ARM_HARD_FRAME_POINTER_REGNUM);
14004 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14005 offset);
14006 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14007 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14008 offset + 12);
14011 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
14012 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14013 offset + 8);
14014 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14015 offset + 12);
14016 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
14017 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
14019 /* Optimization: If we are not pushing any low registers but we are going
14020 to push some high registers then delay our first push. This will just
14021 be a push of LR and we can combine it with the push of the first high
14022 register. */
14023 else if ((l_mask & 0xff) != 0
14024 || (high_regs_pushed == 0 && l_mask))
14025 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14027 if (high_regs_pushed)
14029 unsigned pushable_regs;
14030 unsigned next_hi_reg;
14032 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
14033 if (live_regs_mask & (1 << next_hi_reg))
14034 break;
14036 pushable_regs = l_mask & 0xff;
14038 if (pushable_regs == 0)
14039 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
14041 while (high_regs_pushed > 0)
14043 unsigned long real_regs_mask = 0;
14045 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
14047 if (pushable_regs & (1 << regno))
14049 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
14051 high_regs_pushed --;
14052 real_regs_mask |= (1 << next_hi_reg);
14054 if (high_regs_pushed)
14056 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
14057 next_hi_reg --)
14058 if (live_regs_mask & (1 << next_hi_reg))
14059 break;
14061 else
14063 pushable_regs &= ~((1 << regno) - 1);
14064 break;
14069 /* If we had to find a work register and we have not yet
14070 saved the LR then add it to the list of regs to push. */
14071 if (l_mask == (1 << LR_REGNUM))
14073 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
14074 1, &cfa_offset,
14075 real_regs_mask | (1 << LR_REGNUM));
14076 l_mask = 0;
14078 else
14079 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
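/* Illustrative sketch of the high-register spill above (editor's
   addition; registers hypothetical): to save r8 and r9 when r4 and r5
   are the pushable low registers, the loop emits

	mov	r5, r9
	mov	r4, r8
	push	{r4, r5}

   where real_regs_mask tells thumb_pushpop that the pushed words
   really hold r8 and r9, not the low registers used as staging
   temporaries.  */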
14084 /* Handle the case of a double word load into a low register from
14085 a computed memory address. The computed address may involve a
14086 register which is overwritten by the load. */
14087 const char *
14088 thumb_load_double_from_address (rtx *operands)
14090 rtx addr;
14091 rtx base;
14092 rtx offset;
14093 rtx arg1;
14094 rtx arg2;
14096 gcc_assert (GET_CODE (operands[0]) == REG);
14097 gcc_assert (GET_CODE (operands[1]) == MEM);
14099 /* Get the memory address. */
14100 addr = XEXP (operands[1], 0);
14102 /* Work out how the memory address is computed. */
14103 switch (GET_CODE (addr))
14105 case REG:
14106 operands[2] = adjust_address (operands[1], SImode, 4);
14108 if (REGNO (operands[0]) == REGNO (addr))
14110 output_asm_insn ("ldr\t%H0, %2", operands);
14111 output_asm_insn ("ldr\t%0, %1", operands);
14113 else
14115 output_asm_insn ("ldr\t%0, %1", operands);
14116 output_asm_insn ("ldr\t%H0, %2", operands);
14118 break;
14120 case CONST:
14121 /* Compute <address> + 4 for the high order load. */
14122 operands[2] = adjust_address (operands[1], SImode, 4);
14124 output_asm_insn ("ldr\t%0, %1", operands);
14125 output_asm_insn ("ldr\t%H0, %2", operands);
14126 break;
14128 case PLUS:
14129 arg1 = XEXP (addr, 0);
14130 arg2 = XEXP (addr, 1);
14132 if (CONSTANT_P (arg1))
14133 base = arg2, offset = arg1;
14134 else
14135 base = arg1, offset = arg2;
14137 gcc_assert (GET_CODE (base) == REG);
14139 /* Catch the case of <address> = <reg> + <reg> */
14140 if (GET_CODE (offset) == REG)
14142 int reg_offset = REGNO (offset);
14143 int reg_base = REGNO (base);
14144 int reg_dest = REGNO (operands[0]);
14146 /* Add the base and offset registers together into the
14147 higher destination register. */
14148 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
14149 reg_dest + 1, reg_base, reg_offset);
14151 /* Load the lower destination register from the address in
14152 the higher destination register. */
14153 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
14154 reg_dest, reg_dest + 1);
14156 /* Load the higher destination register from its own address
14157 plus 4. */
14158 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
14159 reg_dest + 1, reg_dest + 1);
14161 else
14163 /* Compute <address> + 4 for the high order load. */
14164 operands[2] = adjust_address (operands[1], SImode, 4);
14166 /* If the computed address is held in the low order register
14167 then load the high order register first, otherwise always
14168 load the low order register first. */
14169 if (REGNO (operands[0]) == REGNO (base))
14171 output_asm_insn ("ldr\t%H0, %2", operands);
14172 output_asm_insn ("ldr\t%0, %1", operands);
14174 else
14176 output_asm_insn ("ldr\t%0, %1", operands);
14177 output_asm_insn ("ldr\t%H0, %2", operands);
14180 break;
14182 case LABEL_REF:
14183 /* With no registers to worry about we can just load the value
14184 directly. */
14185 operands[2] = adjust_address (operands[1], SImode, 4);
14187 output_asm_insn ("ldr\t%H0, %2", operands);
14188 output_asm_insn ("ldr\t%0, %1", operands);
14189 break;
14191 default:
14192 gcc_unreachable ();
14195 return "";
14198 const char *
14199 thumb_output_move_mem_multiple (int n, rtx *operands)
14201 rtx tmp;
14203 switch (n)
14205 case 2:
14206 if (REGNO (operands[4]) > REGNO (operands[5]))
14208 tmp = operands[4];
14209 operands[4] = operands[5];
14210 operands[5] = tmp;
14212 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
14213 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
14214 break;
14216 case 3:
14217 if (REGNO (operands[4]) > REGNO (operands[5]))
14219 tmp = operands[4];
14220 operands[4] = operands[5];
14221 operands[5] = tmp;
14223 if (REGNO (operands[5]) > REGNO (operands[6]))
14225 tmp = operands[5];
14226 operands[5] = operands[6];
14227 operands[6] = tmp;
14229 if (REGNO (operands[4]) > REGNO (operands[5]))
14231 tmp = operands[4];
14232 operands[4] = operands[5];
14233 operands[5] = tmp;
14236 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
14237 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
14238 break;
14240 default:
14241 gcc_unreachable ();
14244 return "";
14247 /* Output a call-via instruction for thumb state. */
14248 const char *
14249 thumb_call_via_reg (rtx reg)
14251 int regno = REGNO (reg);
14252 rtx *labelp;
14254 gcc_assert (regno < LR_REGNUM);
14256 /* If we are in the normal text section we can use a single instance
14257 per compilation unit. If we are doing function sections, then we need
14258 an entry per section, since we can't rely on reachability. */
14259 if (in_section == text_section)
14261 thumb_call_reg_needed = 1;
14263 if (thumb_call_via_label[regno] == NULL)
14264 thumb_call_via_label[regno] = gen_label_rtx ();
14265 labelp = thumb_call_via_label + regno;
14267 else
14269 if (cfun->machine->call_via[regno] == NULL)
14270 cfun->machine->call_via[regno] = gen_label_rtx ();
14271 labelp = cfun->machine->call_via + regno;
14274 output_asm_insn ("bl\t%a0", labelp);
14275 return "";
14278 /* Routines for generating rtl. */
14279 void
14280 thumb_expand_movmemqi (rtx *operands)
14282 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
14283 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
14284 HOST_WIDE_INT len = INTVAL (operands[2]);
14285 HOST_WIDE_INT offset = 0;
14287 while (len >= 12)
14289 emit_insn (gen_movmem12b (out, in, out, in));
14290 len -= 12;
14293 if (len >= 8)
14295 emit_insn (gen_movmem8b (out, in, out, in));
14296 len -= 8;
14299 if (len >= 4)
14301 rtx reg = gen_reg_rtx (SImode);
14302 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
14303 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
14304 len -= 4;
14305 offset += 4;
14308 if (len >= 2)
14310 rtx reg = gen_reg_rtx (HImode);
14311 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
14312 plus_constant (in, offset))));
14313 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
14314 reg));
14315 len -= 2;
14316 offset += 2;
14319 if (len)
14321 rtx reg = gen_reg_rtx (QImode);
14322 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
14323 plus_constant (in, offset))));
14324 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
14325 reg));
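/* Worked example (editor's note): a 19-byte copy expands as one
   12-byte block move, which advances both pointers, followed by a
   word, a halfword and a byte at offsets 0, 4 and 6 from the updated
   pointers: 19 = 12 + 4 + 2 + 1.  */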
14329 void
14330 thumb_reload_out_hi (rtx *operands)
14332 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
14335 /* Handle reading a half-word from memory during reload. */
14336 void
14337 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
14339 gcc_unreachable ();
14342 /* Return the length of a function name prefix
14343 that starts with the character 'c'. */
14344 static int
14345 arm_get_strip_length (int c)
14347 switch (c)
14349 ARM_NAME_ENCODING_LENGTHS
14350 default: return 0;
14354 /* Return a pointer to a function's name with any
14355 and all prefix encodings stripped from it. */
14356 const char *
14357 arm_strip_name_encoding (const char *name)
14359 int skip;
14361 while ((skip = arm_get_strip_length (* name)))
14362 name += skip;
14364 return name;
14367 /* If there is a '*' anywhere in the name's prefix, then
14368 emit the stripped name verbatim, otherwise prepend an
14369 underscore if leading underscores are being used. */
14370 void
14371 arm_asm_output_labelref (FILE *stream, const char *name)
14373 int skip;
14374 int verbatim = 0;
14376 while ((skip = arm_get_strip_length (* name)))
14378 verbatim |= (*name == '*');
14379 name += skip;
14382 if (verbatim)
14383 fputs (name, stream);
14384 else
14385 asm_fprintf (stream, "%U%s", name);
14388 static void
14389 arm_file_end (void)
14391 int regno;
14393 if (! thumb_call_reg_needed)
14394 return;
14396 switch_to_section (text_section);
14397 asm_fprintf (asm_out_file, "\t.code 16\n");
14398 ASM_OUTPUT_ALIGN (asm_out_file, 1);
14400 for (regno = 0; regno < LR_REGNUM; regno++)
14402 rtx label = thumb_call_via_label[regno];
14404 if (label != 0)
14406 targetm.asm_out.internal_label (asm_out_file, "L",
14407 CODE_LABEL_NUMBER (label));
14408 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
14413 rtx aof_pic_label;
14415 #ifdef AOF_ASSEMBLER
14416 /* Special functions only needed when producing AOF syntax assembler. */
14418 struct pic_chain
14420 struct pic_chain * next;
14421 const char * symname;
14424 static struct pic_chain * aof_pic_chain = NULL;
14426 rtx
14427 aof_pic_entry (rtx x)
14429 struct pic_chain ** chainp;
14430 int offset;
14432 if (aof_pic_label == NULL_RTX)
14434 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14437 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14438 offset += 4, chainp = &(*chainp)->next)
14439 if ((*chainp)->symname == XSTR (x, 0))
14440 return plus_constant (aof_pic_label, offset);
14442 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14443 (*chainp)->next = NULL;
14444 (*chainp)->symname = XSTR (x, 0);
14445 return plus_constant (aof_pic_label, offset);
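/* Editor's note: the first three distinct symbols entered into the
   chain are thus addressed as x$adcons, x$adcons+4 and x$adcons+8;
   re-entering a symbol returns its existing slot.  */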
14448 void
14449 aof_dump_pic_table (FILE *f)
14451 struct pic_chain * chain;
14453 if (aof_pic_chain == NULL)
14454 return;
14456 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14457 PIC_OFFSET_TABLE_REGNUM,
14458 PIC_OFFSET_TABLE_REGNUM);
14459 fputs ("|x$adcons|\n", f);
14461 for (chain = aof_pic_chain; chain; chain = chain->next)
14463 fputs ("\tDCD\t", f);
14464 assemble_name (f, chain->symname);
14465 fputs ("\n", f);
14469 int arm_text_section_count = 1;
14471 /* A get_unnamed_section callback for switching to the text section. */
14473 static void
14474 aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14476 fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
14477 arm_text_section_count++);
14478 if (flag_pic)
14479 fprintf (asm_out_file, ", PIC, REENTRANT");
14480 fprintf (asm_out_file, "\n");
14483 static int arm_data_section_count = 1;
14485 /* A get_unnamed_section callback for switching to the data section. */
14487 static void
14488 aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14490 fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
14491 arm_data_section_count++);
14494 /* Implement TARGET_ASM_INIT_SECTIONS.
14496 AOF Assembler syntax is a nightmare when it comes to areas, since once
14497 we change from one area to another, we can't go back again. Instead,
14498 we must create a new area with the same attributes and add the new output
14499 to that. Unfortunately, there is nothing we can do here to guarantee that
14500 two areas with the same attributes will be linked adjacently in the
14501 resulting executable, so we have to be careful not to do pc-relative
14502 addressing across such boundaries. */
14504 static void
14505 aof_asm_init_sections (void)
14507 text_section = get_unnamed_section (SECTION_CODE,
14508 aof_output_text_section_asm_op, NULL);
14509 data_section = get_unnamed_section (SECTION_WRITE,
14510 aof_output_data_section_asm_op, NULL);
14511 readonly_data_section = text_section;
14514 void
14515 zero_init_section (void)
14517 static int zero_init_count = 1;
14519 fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
14520 in_section = NULL;
14523 /* The AOF assembler is religiously strict about declarations of
14524 imported and exported symbols, so that it is impossible to declare
14525 a function as imported near the beginning of the file, and then to
14526 export it later on. It is, however, possible to delay the decision
14527 until all the functions in the file have been compiled. To get
14528 around this, we maintain a list of the imports and exports, and
14529 delete from it any that are subsequently defined. At the end of
14530 compilation we spit the remainder of the list out before the END
14531 directive. */
14533 struct import
14535 struct import * next;
14536 const char * name;
14539 static struct import * imports_list = NULL;
14541 void
14542 aof_add_import (const char *name)
14544 struct import * new;
14546 for (new = imports_list; new; new = new->next)
14547 if (new->name == name)
14548 return;
14550 new = (struct import *) xmalloc (sizeof (struct import));
14551 new->next = imports_list;
14552 imports_list = new;
14553 new->name = name;
14556 void
14557 aof_delete_import (const char *name)
14559 struct import ** old;
14561 for (old = &imports_list; *old; old = & (*old)->next)
14563 if ((*old)->name == name)
14565 *old = (*old)->next;
14566 return;
14571 int arm_main_function = 0;
14573 static void
14574 aof_dump_imports (FILE *f)
14576 /* The AOF assembler needs this to cause the startup code to be extracted
14577 from the library. Bringing in __main causes the whole thing to work
14578 automagically. */
14579 if (arm_main_function)
14581 switch_to_section (text_section);
14582 fputs ("\tIMPORT __main\n", f);
14583 fputs ("\tDCD __main\n", f);
14586 /* Now dump the remaining imports. */
14587 while (imports_list)
14589 fprintf (f, "\tIMPORT\t");
14590 assemble_name (f, imports_list->name);
14591 fputc ('\n', f);
14592 imports_list = imports_list->next;
14596 static void
14597 aof_globalize_label (FILE *stream, const char *name)
14599 default_globalize_label (stream, name);
14600 if (! strcmp (name, "main"))
14601 arm_main_function = 1;
14604 static void
14605 aof_file_start (void)
14607 fputs ("__r0\tRN\t0\n", asm_out_file);
14608 fputs ("__a1\tRN\t0\n", asm_out_file);
14609 fputs ("__a2\tRN\t1\n", asm_out_file);
14610 fputs ("__a3\tRN\t2\n", asm_out_file);
14611 fputs ("__a4\tRN\t3\n", asm_out_file);
14612 fputs ("__v1\tRN\t4\n", asm_out_file);
14613 fputs ("__v2\tRN\t5\n", asm_out_file);
14614 fputs ("__v3\tRN\t6\n", asm_out_file);
14615 fputs ("__v4\tRN\t7\n", asm_out_file);
14616 fputs ("__v5\tRN\t8\n", asm_out_file);
14617 fputs ("__v6\tRN\t9\n", asm_out_file);
14618 fputs ("__sl\tRN\t10\n", asm_out_file);
14619 fputs ("__fp\tRN\t11\n", asm_out_file);
14620 fputs ("__ip\tRN\t12\n", asm_out_file);
14621 fputs ("__sp\tRN\t13\n", asm_out_file);
14622 fputs ("__lr\tRN\t14\n", asm_out_file);
14623 fputs ("__pc\tRN\t15\n", asm_out_file);
14624 fputs ("__f0\tFN\t0\n", asm_out_file);
14625 fputs ("__f1\tFN\t1\n", asm_out_file);
14626 fputs ("__f2\tFN\t2\n", asm_out_file);
14627 fputs ("__f3\tFN\t3\n", asm_out_file);
14628 fputs ("__f4\tFN\t4\n", asm_out_file);
14629 fputs ("__f5\tFN\t5\n", asm_out_file);
14630 fputs ("__f6\tFN\t6\n", asm_out_file);
14631 fputs ("__f7\tFN\t7\n", asm_out_file);
14632 switch_to_section (text_section);
14635 static void
14636 aof_file_end (void)
14638 if (flag_pic)
14639 aof_dump_pic_table (asm_out_file);
14640 arm_file_end ();
14641 aof_dump_imports (asm_out_file);
14642 fputs ("\tEND\n", asm_out_file);
14644 #endif /* AOF_ASSEMBLER */
14646 #ifndef ARM_PE
14647 /* Symbols in the text segment can be accessed without indirecting via the
14648 constant pool; it may take an extra binary operation, but this is still
14649 faster than indirecting via memory. Don't do this when not optimizing,
14650 since we won't be calculating all of the offsets necessary to do this
14651 simplification. */
14653 static void
14654 arm_encode_section_info (tree decl, rtx rtl, int first)
14656 /* This doesn't work with AOF syntax, since the string table may be in
14657 a different AREA. */
14658 #ifndef AOF_ASSEMBLER
14659 if (optimize > 0 && TREE_CONSTANT (decl))
14660 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14661 #endif
14663 /* If we are referencing a function that is weak then encode a long call
14664 flag in the function name, otherwise if the function is static or
14665 known to be defined in this file then encode a short call flag.
14666 if (first && DECL_P (decl))
14668 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14669 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14670 else if (! TREE_PUBLIC (decl))
14671 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14674 default_encode_section_info (decl, rtl, first);
14676 #endif /* !ARM_PE */
14678 static void
14679 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14681 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14682 && !strcmp (prefix, "L"))
14684 arm_ccfsm_state = 0;
14685 arm_target_insn = NULL;
14687 default_internal_label (stream, prefix, labelno);
14690 /* Output code to add DELTA to the first argument, and then jump
14691 to FUNCTION. Used for C++ multiple inheritance. */
14692 static void
14693 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14694 HOST_WIDE_INT delta,
14695 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14696 tree function)
14698 static int thunk_label = 0;
14699 char label[256];
14700 char labelpc[256];
14701 int mi_delta = delta;
14702 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14703 int shift = 0;
14704 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14705 ? 1 : 0);
14706 if (mi_delta < 0)
14707 mi_delta = - mi_delta;
14708 if (TARGET_THUMB)
14710 int labelno = thunk_label++;
14711 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14712 fputs ("\tldr\tr12, ", file);
14713 assemble_name (file, label);
14714 fputc ('\n', file);
14715 if (flag_pic)
14717 /* If we are generating PIC, the ldr instruction above loads
14718 "(target - 7) - .LTHUNKPCn" into r12. The pc reads as
14719 the address of the add + 8, so we have:
14721 r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
14722 = target + 1.
14724 Note that we have "+ 1" because some versions of GNU ld
14725 don't set the low bit of the result for R_ARM_REL32
14726 relocations against thumb function symbols. */
14727 ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
14728 assemble_name (file, labelpc);
14729 fputs (":\n", file);
14730 fputs ("\tadd\tr12, pc, r12\n", file);
14733 while (mi_delta != 0)
14735 if ((mi_delta & (3 << shift)) == 0)
14736 shift += 2;
14737 else
14739 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14740 mi_op, this_regno, this_regno,
14741 mi_delta & (0xff << shift));
14742 mi_delta &= ~(0xff << shift);
14743 shift += 8;
14746 if (TARGET_THUMB)
14748 fprintf (file, "\tbx\tr12\n");
14749 ASM_OUTPUT_ALIGN (file, 2);
14750 assemble_name (file, label);
14751 fputs (":\n", file);
14752 if (flag_pic)
14754 /* Output ".word .LTHUNKn-7-.LTHUNKPCn". */
14755 rtx tem = XEXP (DECL_RTL (function), 0);
14756 tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (-7));
14757 tem = gen_rtx_MINUS (GET_MODE (tem),
14758 tem,
14759 gen_rtx_SYMBOL_REF (Pmode,
14760 ggc_strdup (labelpc)));
14761 assemble_integer (tem, 4, BITS_PER_WORD, 1);
14763 else
14764 /* Output ".word .LTHUNKn". */
14765 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14767 else
14769 fputs ("\tb\t", file);
14770 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14771 if (NEED_PLT_RELOC)
14772 fputs ("(PLT)", file);
14773 fputc ('\n', file);
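/* Worked example of the addition loop above (editor's note): the
   delta is consumed in 8-bit fields aligned to even bit positions, so
   for delta == 0x10004 and a non-aggregate return (`this' in r0) the
   thunk contains two adds, each a valid ARM immediate:

	add	r0, r0, #4
	add	r0, r0, #65536  */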
14777 int
14778 arm_emit_vector_const (FILE *file, rtx x)
14780 int i;
14781 const char * pattern;
14783 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14785 switch (GET_MODE (x))
14787 case V2SImode: pattern = "%08x"; break;
14788 case V4HImode: pattern = "%04x"; break;
14789 case V8QImode: pattern = "%02x"; break;
14790 default: gcc_unreachable ();
14793 fprintf (file, "0x");
14794 for (i = CONST_VECTOR_NUNITS (x); i--;)
14796 rtx element;
14798 element = CONST_VECTOR_ELT (x, i);
14799 fprintf (file, pattern, INTVAL (element));
14802 return 1;
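/* For example (editor's note): a V4HImode vector with elements
   {1, 2, 3, 4} is printed last element first, giving
   0x0004000300020001.  */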
14805 const char *
14806 arm_output_load_gr (rtx *operands)
14808 rtx reg;
14809 rtx offset;
14810 rtx wcgr;
14811 rtx sum;
14813 if (GET_CODE (operands [1]) != MEM
14814 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14815 || GET_CODE (reg = XEXP (sum, 0)) != REG
14816 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14817 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14818 return "wldrw%?\t%0, %1";
14820 /* Fix up an out-of-range load of a GR register. */
14821 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14822 wcgr = operands[0];
14823 operands[0] = reg;
14824 output_asm_insn ("ldr%?\t%0, %1", operands);
14826 operands[0] = wcgr;
14827 operands[1] = reg;
14828 output_asm_insn ("tmcr%?\t%0, %1", operands);
14829 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14831 return "";
14834 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14836 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14837 named arg and all anonymous args onto the stack.
14838 XXX I know the prologue shouldn't be pushing registers, but it is faster
14839 that way. */
14841 static void
14842 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14843 enum machine_mode mode ATTRIBUTE_UNUSED,
14844 tree type ATTRIBUTE_UNUSED,
14845 int *pretend_size,
14846 int second_time ATTRIBUTE_UNUSED)
14848 cfun->machine->uses_anonymous_args = 1;
14849 if (cum->nregs < NUM_ARG_REGS)
14850 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14853 /* Return nonzero if the CONSUMER instruction (a store) does not need
14854 PRODUCER's value to calculate the address. */
14856 int
14857 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14859 rtx value = PATTERN (producer);
14860 rtx addr = PATTERN (consumer);
14862 if (GET_CODE (value) == COND_EXEC)
14863 value = COND_EXEC_CODE (value);
14864 if (GET_CODE (value) == PARALLEL)
14865 value = XVECEXP (value, 0, 0);
14866 value = XEXP (value, 0);
14867 if (GET_CODE (addr) == COND_EXEC)
14868 addr = COND_EXEC_CODE (addr);
14869 if (GET_CODE (addr) == PARALLEL)
14870 addr = XVECEXP (addr, 0, 0);
14871 addr = XEXP (addr, 0);
14873 return !reg_overlap_mentioned_p (value, addr);
14876 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14877 have an early register shift value or amount dependency on the
14878 result of PRODUCER. */
14880 int
14881 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14883 rtx value = PATTERN (producer);
14884 rtx op = PATTERN (consumer);
14885 rtx early_op;
14887 if (GET_CODE (value) == COND_EXEC)
14888 value = COND_EXEC_CODE (value);
14889 if (GET_CODE (value) == PARALLEL)
14890 value = XVECEXP (value, 0, 0);
14891 value = XEXP (value, 0);
14892 if (GET_CODE (op) == COND_EXEC)
14893 op = COND_EXEC_CODE (op);
14894 if (GET_CODE (op) == PARALLEL)
14895 op = XVECEXP (op, 0, 0);
14896 op = XEXP (op, 1);
14898 early_op = XEXP (op, 0);
14899 /* This is either an actual independent shift, or a shift applied to
14900 the first operand of another operation. We want the whole shift
14901 operation. */
14902 if (GET_CODE (early_op) == REG)
14903 early_op = op;
14905 return !reg_overlap_mentioned_p (value, early_op);
14908 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14909 have an early register shift value dependency on the result of
14910 PRODUCER. */
14912 int
14913 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14915 rtx value = PATTERN (producer);
14916 rtx op = PATTERN (consumer);
14917 rtx early_op;
14919 if (GET_CODE (value) == COND_EXEC)
14920 value = COND_EXEC_CODE (value);
14921 if (GET_CODE (value) == PARALLEL)
14922 value = XVECEXP (value, 0, 0);
14923 value = XEXP (value, 0);
14924 if (GET_CODE (op) == COND_EXEC)
14925 op = COND_EXEC_CODE (op);
14926 if (GET_CODE (op) == PARALLEL)
14927 op = XVECEXP (op, 0, 0);
14928 op = XEXP (op, 1);
14930 early_op = XEXP (op, 0);
14932 /* This is either an actual independent shift, or a shift applied to
14933 the first operand of another operation. We want the value being
14934 shifted, in either case. */
14935 if (GET_CODE (early_op) != REG)
14936 early_op = XEXP (early_op, 0);
14938 return !reg_overlap_mentioned_p (value, early_op);
14941 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14942 have an early register mult dependency on the result of
14943 PRODUCER. */
14945 int
14946 arm_no_early_mul_dep (rtx producer, rtx consumer)
14948 rtx value = PATTERN (producer);
14949 rtx op = PATTERN (consumer);
14951 if (GET_CODE (value) == COND_EXEC)
14952 value = COND_EXEC_CODE (value);
14953 if (GET_CODE (value) == PARALLEL)
14954 value = XVECEXP (value, 0, 0);
14955 value = XEXP (value, 0);
14956 if (GET_CODE (op) == COND_EXEC)
14957 op = COND_EXEC_CODE (op);
14958 if (GET_CODE (op) == PARALLEL)
14959 op = XVECEXP (op, 0, 0);
14960 op = XEXP (op, 1);
14962 return (GET_CODE (op) == PLUS
14963 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14967 /* We can't rely on the caller doing the proper promotion when
14968 using APCS or ATPCS. */
14970 static bool
14971 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14973 return !TARGET_AAPCS_BASED;
14977 /* AAPCS based ABIs use short enums by default. */
14979 static bool
14980 arm_default_short_enums (void)
14982 return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
14986 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14988 static bool
14989 arm_align_anon_bitfield (void)
14991 return TARGET_AAPCS_BASED;
14995 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14997 static tree
14998 arm_cxx_guard_type (void)
15000 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
15004 /* The EABI says test the least significant bit of a guard variable. */
15006 static bool
15007 arm_cxx_guard_mask_bit (void)
15009 return TARGET_AAPCS_BASED;
15013 /* The EABI specifies that all array cookies are 8 bytes long. */
15015 static tree
15016 arm_get_cookie_size (tree type)
15018 tree size;
15020 if (!TARGET_AAPCS_BASED)
15021 return default_cxx_get_cookie_size (type);
15023 size = build_int_cst (sizetype, 8);
15024 return size;
15028 /* The EABI says that array cookies should also contain the element size. */
15030 static bool
15031 arm_cookie_has_size (void)
15033 return TARGET_AAPCS_BASED;
15037 /* The EABI says constructors and destructors should return a pointer to
15038 the object constructed/destroyed. */
15040 static bool
15041 arm_cxx_cdtor_returns_this (void)
15043 return TARGET_AAPCS_BASED;
15046 /* The EABI says that an inline function may never be the key
15047 method. */
15049 static bool
15050 arm_cxx_key_method_may_be_inline (void)
15052 return !TARGET_AAPCS_BASED;
15055 static void
15056 arm_cxx_determine_class_data_visibility (tree decl)
15058 if (!TARGET_AAPCS_BASED)
15059 return;
15061 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
15062 is exported. However, on systems without dynamic vague linkage,
15063 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
15064 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
15065 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
15066 else
15067 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
15068 DECL_VISIBILITY_SPECIFIED (decl) = 1;
15071 static bool
15072 arm_cxx_class_data_always_comdat (void)
15074 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
15075 vague linkage if the class has no key function. */
15076 return !TARGET_AAPCS_BASED;
15080 /* The EABI says __aeabi_atexit should be used to register static
15081 destructors. */
15083 static bool
15084 arm_cxx_use_aeabi_atexit (void)
15086 return TARGET_AAPCS_BASED;
15090 void
15091 arm_set_return_address (rtx source, rtx scratch)
15093 arm_stack_offsets *offsets;
15094 HOST_WIDE_INT delta;
15095 rtx addr;
15096 unsigned long saved_regs;
15098 saved_regs = arm_compute_save_reg_mask ();
15100 if ((saved_regs & (1 << LR_REGNUM)) == 0)
15101 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15102 else
15104 if (frame_pointer_needed)
15105 addr = plus_constant (hard_frame_pointer_rtx, -4);
15106 else
15108 /* LR will be the first saved register. */
15109 offsets = arm_get_frame_offsets ();
15110 delta = offsets->outgoing_args - (offsets->frame + 4);
15113 if (delta >= 4096)
15115 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
15116 GEN_INT (delta & ~4095)));
15117 addr = scratch;
15118 delta &= 4095;
15120 else
15121 addr = stack_pointer_rtx;
15123 addr = plus_constant (addr, delta);
15125 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15130 void
15131 thumb_set_return_address (rtx source, rtx scratch)
15133 arm_stack_offsets *offsets;
15134 HOST_WIDE_INT delta;
15135 int reg;
15136 rtx addr;
15137 unsigned long mask;
15139 emit_insn (gen_rtx_USE (VOIDmode, source));
15141 mask = thumb_compute_save_reg_mask ();
15142 if (mask & (1 << LR_REGNUM))
15144 offsets = arm_get_frame_offsets ();
15146 /* Find the saved regs. */
15147 if (frame_pointer_needed)
15149 delta = offsets->soft_frame - offsets->saved_args;
15150 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
15152 else
15154 delta = offsets->outgoing_args - offsets->saved_args;
15155 reg = SP_REGNUM;
15157 /* Allow for the stack frame. */
15158 if (TARGET_BACKTRACE)
15159 delta -= 16;
15160 /* The link register is always the first saved register. */
15161 delta -= 4;
15163 /* Construct the address. */
15164 addr = gen_rtx_REG (SImode, reg);
15165 if ((reg != SP_REGNUM && delta >= 128)
15166 || delta >= 1024)
15168 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
15169 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
15170 addr = scratch;
15172 else
15173 addr = plus_constant (addr, delta);
15175 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15177 else
15178 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15181 /* Implements target hook vector_mode_supported_p. */
15182 bool
15183 arm_vector_mode_supported_p (enum machine_mode mode)
15185 if ((mode == V2SImode)
15186 || (mode == V4HImode)
15187 || (mode == V8QImode))
15188 return true;
15190 return false;
15193 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
15194 ARM insns and therefore guarantee that the shift count is modulo 256.
15195 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
15196 guarantee no particular behavior for out-of-range counts. */
15198 static unsigned HOST_WIDE_INT
15199 arm_shift_truncation_mask (enum machine_mode mode)
15201 return mode == SImode ? 255 : 0;
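/* For example (editor's note): an SImode shift by 257 behaves exactly
   like a shift by 1, while no such guarantee is made for DImode.  */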
15205 /* Map internal gcc register numbers to DWARF2 register numbers. */
15207 unsigned int
15208 arm_dbx_register_number (unsigned int regno)
15210 if (regno < 16)
15211 return regno;
15213 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
15214 compatibility. The EABI defines them as registers 96-103. */
15215 if (IS_FPA_REGNUM (regno))
15216 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
15218 if (IS_VFP_REGNUM (regno))
15219 return 64 + regno - FIRST_VFP_REGNUM;
15221 if (IS_IWMMXT_GR_REGNUM (regno))
15222 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
15224 if (IS_IWMMXT_REGNUM (regno))
15225 return 112 + regno - FIRST_IWMMXT_REGNUM;
15227 gcc_unreachable ();
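/* For example (editor's note): r0-r15 map to DWARF registers 0-15;
   the first FPA register maps to 96 under AAPCS (16 on legacy
   targets); the first VFP register maps to 64; and the iWMMXt GR and
   data register banks start at 104 and 112 respectively.  */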
15231 #ifdef TARGET_UNWIND_INFO
15232 /* Emit unwind directives for a store-multiple instruction. This should
15233 only ever be generated by the function prologue code, so we expect it
15234 to have a particular form. */
15236 static void
15237 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
15239 int i;
15240 HOST_WIDE_INT offset;
15241 HOST_WIDE_INT nregs;
15242 int reg_size;
15243 unsigned reg;
15244 unsigned lastreg;
15245 rtx e;
15247 /* First insn will adjust the stack pointer. */
15248 e = XVECEXP (p, 0, 0);
15249 if (GET_CODE (e) != SET
15250 || GET_CODE (XEXP (e, 0)) != REG
15251 || REGNO (XEXP (e, 0)) != SP_REGNUM
15252 || GET_CODE (XEXP (e, 1)) != PLUS)
15253 abort ();
15255 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
15256 nregs = XVECLEN (p, 0) - 1;
15258 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
15259 if (reg < 16)
15261 /* The function prologue may also push pc, but not annotate it as it is
15262 never restored. We turn this into a stack pointer adjustment. */
15263 if (nregs * 4 == offset - 4)
15265 fprintf (asm_out_file, "\t.pad #4\n");
15266 offset -= 4;
15268 reg_size = 4;
15270 else if (IS_VFP_REGNUM (reg))
15272 /* VFP register saves (FSTMX format) use an additional word. */
15273 offset -= 4;
15274 reg_size = 8;
15276 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
15278 /* FPA registers are done differently. */
15279 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
15280 return;
15282 else
15283 /* Unknown register type. */
15284 abort ();
15286 /* If the stack increment doesn't match the size of the saved registers,
15287 something has gone horribly wrong. */
15288 if (offset != nregs * reg_size)
15289 abort ();
15291 fprintf (asm_out_file, "\t.save {");
15293 offset = 0;
15294 lastreg = 0;
15295 /* The remaining insns will describe the stores. */
15296 for (i = 1; i <= nregs; i++)
15298 /* Expect (set (mem <addr>) (reg)).
15299 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
15300 e = XVECEXP (p, 0, i);
15301 if (GET_CODE (e) != SET
15302 || GET_CODE (XEXP (e, 0)) != MEM
15303 || GET_CODE (XEXP (e, 1)) != REG)
15304 abort ();
15306 reg = REGNO (XEXP (e, 1));
15307 if (reg < lastreg)
15308 abort ();
15310 if (i != 1)
15311 fprintf (asm_out_file, ", ");
15312 /* We can't use %r for vfp because we need to use the
15313 double precision register names. */
15314 if (IS_VFP_REGNUM (reg))
15315 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
15316 else
15317 asm_fprintf (asm_out_file, "%r", reg);
15319 #ifdef ENABLE_CHECKING
15320 /* Check that the addresses are consecutive. */
15321 e = XEXP (XEXP (e, 0), 0);
15322 if (GET_CODE (e) == PLUS)
15324 offset += reg_size;
15325 if (GET_CODE (XEXP (e, 0)) != REG
15326 || REGNO (XEXP (e, 0)) != SP_REGNUM
15327 || GET_CODE (XEXP (e, 1)) != CONST_INT
15328 || offset != INTVAL (XEXP (e, 1)))
15329 abort ();
15331 else if (i != 1
15332 || GET_CODE (e) != REG
15333 || REGNO (e) != SP_REGNUM)
15334 abort ();
15335 #endif
15337 fprintf (asm_out_file, "}\n");
15340 /* Emit unwind directives for a SET. */
15342 static void
15343 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
15345 rtx e0;
15346 rtx e1;
15348 e0 = XEXP (p, 0);
15349 e1 = XEXP (p, 1);
15350 switch (GET_CODE (e0))
15352 case MEM:
15353 /* Pushing a single register. */
15354 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
15355 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
15356 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
15357 abort ();
15359 asm_fprintf (asm_out_file, "\t.save ");
15360 if (IS_VFP_REGNUM (REGNO (e1)))
15361 asm_fprintf(asm_out_file, "{d%d}\n",
15362 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
15363 else
15364 asm_fprintf(asm_out_file, "{%r}\n", REGNO (e1));
15365 break;
15367 case REG:
15368 if (REGNO (e0) == SP_REGNUM)
15370 /* A stack increment. */
15371 if (GET_CODE (e1) != PLUS
15372 || GET_CODE (XEXP (e1, 0)) != REG
15373 || REGNO (XEXP (e1, 0)) != SP_REGNUM
15374 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15375 abort ();
15377 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
15378 -INTVAL (XEXP (e1, 1)));
15380 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
15382 HOST_WIDE_INT offset;
15383 unsigned reg;
15385 if (GET_CODE (e1) == PLUS)
15387 if (GET_CODE (XEXP (e1, 0)) != REG
15388 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15389 abort ();
15390 reg = REGNO (XEXP (e1, 0));
15391 offset = INTVAL (XEXP (e1, 1));
15392 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
15393 HARD_FRAME_POINTER_REGNUM, reg,
15394 INTVAL (XEXP (e1, 1)));
15396 else if (GET_CODE (e1) == REG)
15398 reg = REGNO (e1);
15399 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
15400 HARD_FRAME_POINTER_REGNUM, reg);
15402 else
15403 abort ();
15405 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
15407 /* Move from sp to reg. */
15408 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
15410 else
15411 abort ();
15412 break;
15414 default:
15415 abort ();
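/* Illustrative annotations for the SET shapes handled above (editor's
   sketch):

	str	fp, [sp, #-4]!		->	.save {fp}
	sub	sp, sp, #16		->	.pad #16
	add	fp, sp, #8		->	.setfp fp, sp, #8
	mov	r4, sp			->	.movsp r4  */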
15420 /* Emit unwind directives for the given insn. */
15422 static void
15423 arm_unwind_emit (FILE * asm_out_file, rtx insn)
15425 rtx pat;
15427 if (!ARM_EABI_UNWIND_TABLES)
15428 return;
15430 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
15431 return;
15433 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
15434 if (pat)
15435 pat = XEXP (pat, 0);
15436 else
15437 pat = PATTERN (insn);
15439 switch (GET_CODE (pat))
15441 case SET:
15442 arm_unwind_emit_set (asm_out_file, pat);
15443 break;
15445 case SEQUENCE:
15446 /* Store multiple. */
15447 arm_unwind_emit_stm (asm_out_file, pat);
15448 break;
15450 default:
15451 abort ();
15456 /* Output a reference from a function exception table to the type_info
15457 object X. The EABI specifies that the symbol should be relocated by
15458 an R_ARM_TARGET2 relocation. */
15460 static bool
15461 arm_output_ttype (rtx x)
15463 fputs ("\t.word\t", asm_out_file);
15464 output_addr_const (asm_out_file, x);
15465 /* Use special relocations for symbol references. */
15466 if (GET_CODE (x) != CONST_INT)
15467 fputs ("(TARGET2)", asm_out_file);
15468 fputc ('\n', asm_out_file);
15470 return TRUE;
15472 #endif /* TARGET_UNWIND_INFO */
15475 /* Output unwind directives for the start/end of a function. */
15477 void
15478 arm_output_fn_unwind (FILE * f, bool prologue)
15480 if (!ARM_EABI_UNWIND_TABLES)
15481 return;
15483 if (prologue)
15484 fputs ("\t.fnstart\n", f);
15485 else
15486 fputs ("\t.fnend\n", f);
15489 static bool
15490 arm_emit_tls_decoration (FILE *fp, rtx x)
15492 enum tls_reloc reloc;
15493 rtx val;
15495 val = XVECEXP (x, 0, 0);
15496 reloc = INTVAL (XVECEXP (x, 0, 1));
15498 output_addr_const (fp, val);
15500 switch (reloc)
15502 case TLS_GD32:
15503 fputs ("(tlsgd)", fp);
15504 break;
15505 case TLS_LDM32:
15506 fputs ("(tlsldm)", fp);
15507 break;
15508 case TLS_LDO32:
15509 fputs ("(tlsldo)", fp);
15510 break;
15511 case TLS_IE32:
15512 fputs ("(gottpoff)", fp);
15513 break;
15514 case TLS_LE32:
15515 fputs ("(tpoff)", fp);
15516 break;
15517 default:
15518 gcc_unreachable ();
15521 switch (reloc)
15523 case TLS_GD32:
15524 case TLS_LDM32:
15525 case TLS_IE32:
15526 fputs (" + (. - ", fp);
15527 output_addr_const (fp, XVECEXP (x, 0, 2));
15528 fputs (" - ", fp);
15529 output_addr_const (fp, XVECEXP (x, 0, 3));
15530 fputc (')', fp);
15531 break;
15532 default:
15533 break;
15536 return TRUE;
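/* Illustrative output (editor's sketch; label names hypothetical):
   for a TLS_GD32 reference to `x' this prints

	x(tlsgd) + (. - .LPIC0 - .LTLS0)

   where the two trailing labels come from operands 2 and 3 of the
   UNSPEC.  */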
15539 bool
15540 arm_output_addr_const_extra (FILE *fp, rtx x)
15542 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
15543 return arm_emit_tls_decoration (fp, x);
15544 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
15546 char label[256];
15547 int labelno = INTVAL (XVECEXP (x, 0, 0));
15549 ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
15550 assemble_name_raw (fp, label);
15552 return TRUE;
15554 else if (GET_CODE (x) == CONST_VECTOR)
15555 return arm_emit_vector_const (fp, x);
15557 return FALSE;
15560 #include "gt-arm.h"