/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
			     HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
					   int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
			       rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
				 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static rtx emit_set_insn (rtx, rtx);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
static void aof_asm_init_sections (void);
#endif
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
				   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
static bool arm_cannot_copy_insn_p (rtx);
static bool arm_tls_symbol_p (rtx x);

/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef TARGET_NARROW_VOLATILE_BITFIELD
#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false

#undef TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE *asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Which thread pointer model to use.  */
enum arm_tp_type target_thread_pointer = TP_AUTO;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */
#define FL_ARCH6K     (1 << 15)       /* Architecture rel 6 K extensions.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   (FL_FOR_ARCH6 | FL_ARCH6K)
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6K
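
/* Editorial illustration, not part of the original source: the FL_FOR_*
   masks nest, so each architecture's mask is the previous revision's mask
   plus the features the new revision adds.  Expanding one example fully:

     FL_FOR_ARCH5TE = FL_FOR_ARCH5E | FL_THUMB
                    = FL_MODE32 | FL_ARCH3M | FL_ARCH4
                      | FL_ARCH5 | FL_ARCH5E | FL_THUMB  */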

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip supports the ARM 6K extensions.  */
int arm_arch6k = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
unsigned arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};


/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};


/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};


struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};


struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};


/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",    ARM_ABI_APCS},
  {"atpcs",       ARM_ABI_ATPCS},
  {"aapcs",       ARM_ABI_AAPCS},
  {"iwmmxt",      ARM_ABI_IWMMXT},
  {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
};

/* Supported TLS relocations.  */

enum tls_reloc {
  TLS_GD32,
  TLS_LDM32,
  TLS_LDO32,
  TLS_IE32,
  TLS_LE32
};

/* Emit an insn that's a simple single-set.  Both the operands must be known
   to be valid.  */
inline static rtx
emit_set_insn (rtx x, rtx y)
{
  return emit_insn (gen_rtx_SET (VOIDmode, x, y));
}

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
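
/* Worked example (editorial, not part of the original source): the loop
   above is Kernighan's population-count idiom; value &= value - 1 clears
   exactly one set bit per iteration.  Tracing value = 0x29 (binary 101001):

     0x29 & 0x28 = 0x28   count = 1
     0x28 & 0x27 = 0x20   count = 2
     0x20 & 0x1f = 0x00   count = 3

   so bit_count (0x29) returns 3, taking one iteration per set bit rather
   than one per bit position.  */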

/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");

  /* For SImode division the ABI provides div-without-mod routines,
     which are faster.  */
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
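
/* Editorial illustration, not part of the original source: with the
   entries above in place, a BPABI-targeted compilation of

       long long quot (long long a, long long b) { return a / b; }

   expands the division into a call to __aeabi_ldivmod, which returns the
   quotient in {r0, r1} and the remainder in {r2, r3}; plain division
   simply ignores the remainder half of the result pair, which is why the
   divmod entry points can stand in for sdiv_optab/udiv_optab here.  */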

/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switch that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & (sought | insn_flags)) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              gcc_assert (best_fit);
              sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking" );
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic && TARGET_SINGLE_PIC_BASE)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* FPA and iWMMXt are incompatible because the insn encodings overlap.
     VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
     will ever exist.  GCC makes no attempt to support this combination.  */
  if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
    sorry ("iWMMXt and hardware floating point");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  if (target_thread_switch)
    {
      if (strcmp (target_thread_switch, "soft") == 0)
        target_thread_pointer = TP_SOFT;
      else if (strcmp (target_thread_switch, "auto") == 0)
        target_thread_pointer = TP_AUTO;
      else if (strcmp (target_thread_switch, "cp15") == 0)
        target_thread_pointer = TP_CP15;
      else
        error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
    }

  /* Use the cp15 method if it is available.  */
  if (target_thread_pointer == TP_AUTO)
    {
      if (arm_arch6k && !TARGET_THUMB)
        target_thread_pointer = TP_CP15;
      else
        target_thread_pointer = TP_SOFT;
    }

  if (TARGET_HARD_TP && TARGET_THUMB)
    error ("can not use -mtp=cp15 with -mthumb");

  /* Override the default structure alignment for AAPCS ABI.  */
  if (TARGET_AAPCS_BASED)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && (TREE_NOTHROW (current_function_decl)
          || !(flag_unwind_tables
               || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ...  */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic
          && arm_pic_register != INVALID_REGNUM
          && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
           && ((i & ~0xc000003f) == 0
               || (i & ~0xf000000f) == 0
               || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}
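
/* Worked examples (editorial, not part of the original source): an ARM
   data-processing immediate is an 8-bit value rotated right by an even
   amount, so 0x0000ff00 (0xff << 8) and 0x000003fc (0xff << 2) are
   accepted, while 0x00000101 is rejected because its set bits span more
   than eight consecutive bit positions.  The lowbit <= 4 cases above
   catch values such as 0xc000003f, where the 8-bit pattern wraps around
   bit 31.  */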

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
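
/* Editorial example (not in the original source): 0xffffff00 (-256) is
   not itself a valid immediate, but for PLUS it is accepted because its
   negation, 256 = 0x100, is encodable, so x + (-256) can be emitted as
   "sub rd, rn, #256".  Similarly for AND it is accepted via the bitwise
   complement, since "and rd, rn, #0xffffff00" can become
   "bic rd, rn, #0xff".  */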
1651 /* Emit a sequence of insns to handle a large constant.
1652 CODE is the code of the operation required, it can be any of SET, PLUS,
1653 IOR, AND, XOR, MINUS;
1654 MODE is the mode in which the operation is being performed;
1655 VAL is the integer to operate on;
1656 SOURCE is the other operand (a register, or a null-pointer for SET);
1657 SUBTARGETS means it is safe to create scratch registers if that will
1658 either produce a simpler sequence, or we will want to cse the values.
1659 Return value is the number of insns emitted. */
1662 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1663 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1665 rtx cond;
1667 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1668 cond = COND_EXEC_TEST (PATTERN (insn));
1669 else
1670 cond = NULL_RTX;
1672 if (subtargets || code == SET
1673 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1674 && REGNO (target) != REGNO (source)))
1676 /* After arm_reorg has been called, we can't fix up expensive
1677 constants by pushing them into memory so we must synthesize
1678 them in-line, regardless of the cost. This is only likely to
1679 be more costly on chips that have load delay slots and we are
1680 compiling without running the scheduler (so no splitting
1681 occurred before the final instruction emission).
1683 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1685 if (!after_arm_reorg
1686 && !cond
1687 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1688 1, 0)
1689 > arm_constant_limit + (code != SET)))
1691 if (code == SET)
1693 /* Currently SET is the only monadic operation for CODE; all
1694 the rest are dyadic. */
1695 emit_set_insn (target, GEN_INT (val));
1696 return 1;
1698 else
1700 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1702 emit_set_insn (temp, GEN_INT (val));
1703 /* For MINUS, the constant is the value subtracted from, since
1704 we never have subtraction of a constant. */
1705 if (code == MINUS)
1706 emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
1707 else
1708 emit_set_insn (target,
1709 gen_rtx_fmt_ee (code, mode, source, temp));
1710 return 2;
1715 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1716 1);
1719 static int
1720 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1722 HOST_WIDE_INT temp1;
1723 int num_insns = 0;
1724 do
1726 int end;
1728 if (i <= 0)
1729 i += 32;
1730 if (remainder & (3 << (i - 2)))
1732 end = i - 8;
1733 if (end < 0)
1734 end += 32;
1735 temp1 = remainder & ((0x0ff << end)
1736 | ((i < end) ? (0xff >> (32 - end)) : 0));
1737 remainder &= ~temp1;
1738 num_insns++;
1739 i -= 6;
1741 i -= 2;
1742 } while (remainder);
1743 return num_insns;
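/* A worked example (illustrative only): counting 0x12345678 visits
   four non-empty 8-bit windows, matching the synthesis

       mov  rD, #0x12000000
       orr  rD, rD, #0x340000
       orr  rD, rD, #0x5600
       orr  rD, rD, #0x78

   so the function returns 4, while 0x0000ff00 costs a single insn.  */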
1746 /* Emit an instruction with the indicated PATTERN. If COND is
1747 non-NULL, conditionalize the execution of the instruction on COND
1748 being true. */
1750 static void
1751 emit_constant_insn (rtx cond, rtx pattern)
1753 if (cond)
1754 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1755 emit_insn (pattern);
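/* For example (illustrative): wrapping (set (reg r0) (const_int 1))
   in a COND_EXEC whose test is (eq (reg cc) (const_int 0)) makes the
   output patterns print the conditionalized form

       moveq   r0, #1

   instead of an unconditional MOV.  */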
1758 /* As above, but extra parameter GENERATE which, if clear, suppresses
1759 RTL generation. */
1761 static int
1762 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1763 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1764 int generate)
1766 int can_invert = 0;
1767 int can_negate = 0;
1768 int can_negate_initial = 0;
1769 int can_shift = 0;
1770 int i;
1771 int num_bits_set = 0;
1772 int set_sign_bit_copies = 0;
1773 int clear_sign_bit_copies = 0;
1774 int clear_zero_bit_copies = 0;
1775 int set_zero_bit_copies = 0;
1776 int insns = 0;
1777 unsigned HOST_WIDE_INT temp1, temp2;
1778 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1780 /* Find out which operations are safe for a given CODE. Also do a quick
1781 check for degenerate cases; these can occur when DImode operations
1782 are split. */
1783 switch (code)
1785 case SET:
1786 can_invert = 1;
1787 can_shift = 1;
1788 can_negate = 1;
1789 break;
1791 case PLUS:
1792 can_negate = 1;
1793 can_negate_initial = 1;
1794 break;
1796 case IOR:
1797 if (remainder == 0xffffffff)
1799 if (generate)
1800 emit_constant_insn (cond,
1801 gen_rtx_SET (VOIDmode, target,
1802 GEN_INT (ARM_SIGN_EXTEND (val))));
1803 return 1;
1805 if (remainder == 0)
1807 if (reload_completed && rtx_equal_p (target, source))
1808 return 0;
1809 if (generate)
1810 emit_constant_insn (cond,
1811 gen_rtx_SET (VOIDmode, target, source));
1812 return 1;
1814 break;
1816 case AND:
1817 if (remainder == 0)
1819 if (generate)
1820 emit_constant_insn (cond,
1821 gen_rtx_SET (VOIDmode, target, const0_rtx));
1822 return 1;
1824 if (remainder == 0xffffffff)
1826 if (reload_completed && rtx_equal_p (target, source))
1827 return 0;
1828 if (generate)
1829 emit_constant_insn (cond,
1830 gen_rtx_SET (VOIDmode, target, source));
1831 return 1;
1833 can_invert = 1;
1834 break;
1836 case XOR:
1837 if (remainder == 0)
1839 if (reload_completed && rtx_equal_p (target, source))
1840 return 0;
1841 if (generate)
1842 emit_constant_insn (cond,
1843 gen_rtx_SET (VOIDmode, target, source));
1844 return 1;
1847 /* We don't know how to handle other cases yet. */
1848 gcc_assert (remainder == 0xffffffff);
1850 if (generate)
1851 emit_constant_insn (cond,
1852 gen_rtx_SET (VOIDmode, target,
1853 gen_rtx_NOT (mode, source)));
1854 return 1;
1856 case MINUS:
1857 /* We treat MINUS as (val - source), since (source - val) is always
1858 passed as (source + (-val)). */
1859 if (remainder == 0)
1861 if (generate)
1862 emit_constant_insn (cond,
1863 gen_rtx_SET (VOIDmode, target,
1864 gen_rtx_NEG (mode, source)));
1865 return 1;
1867 if (const_ok_for_arm (val))
1869 if (generate)
1870 emit_constant_insn (cond,
1871 gen_rtx_SET (VOIDmode, target,
1872 gen_rtx_MINUS (mode, GEN_INT (val),
1873 source)));
1874 return 1;
1876 can_negate = 1;
1878 break;
1880 default:
1881 gcc_unreachable ();
1884 /* If we can do it in one insn get out quickly. */
1885 if (const_ok_for_arm (val)
1886 || (can_negate_initial && const_ok_for_arm (-val))
1887 || (can_invert && const_ok_for_arm (~val)))
1889 if (generate)
1890 emit_constant_insn (cond,
1891 gen_rtx_SET (VOIDmode, target,
1892 (source
1893 ? gen_rtx_fmt_ee (code, mode, source,
1894 GEN_INT (val))
1895 : GEN_INT (val))));
1896 return 1;
1899 /* Calculate a few attributes that may be useful for specific
1900 optimizations. */
1901 for (i = 31; i >= 0; i--)
1903 if ((remainder & (1 << i)) == 0)
1904 clear_sign_bit_copies++;
1905 else
1906 break;
1909 for (i = 31; i >= 0; i--)
1911 if ((remainder & (1 << i)) != 0)
1912 set_sign_bit_copies++;
1913 else
1914 break;
1917 for (i = 0; i <= 31; i++)
1919 if ((remainder & (1 << i)) == 0)
1920 clear_zero_bit_copies++;
1921 else
1922 break;
1925 for (i = 0; i <= 31; i++)
1927 if ((remainder & (1 << i)) != 0)
1928 set_zero_bit_copies++;
1929 else
1930 break;
1933 switch (code)
1935 case SET:
1936 /* See if we can do this by sign-extending a constant that is known
1937 to be negative. This is a good way of doing it, since the shift
1938 may well merge into a subsequent insn. */
1939 if (set_sign_bit_copies > 1)
1941 if (const_ok_for_arm
1942 (temp1 = ARM_SIGN_EXTEND (remainder
1943 << (set_sign_bit_copies - 1))))
1945 if (generate)
1947 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1948 emit_constant_insn (cond,
1949 gen_rtx_SET (VOIDmode, new_src,
1950 GEN_INT (temp1)));
1951 emit_constant_insn (cond,
1952 gen_ashrsi3 (target, new_src,
1953 GEN_INT (set_sign_bit_copies - 1)));
1955 return 2;
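/* A concrete case (illustrative only): val == 0xffffff84 has 25
   leading set bits, so the code above emits

       mov  rT, #0x84000000        @ valid: 0x84 rotated right by 8
       mov  target, rT, asr #24    @ the sign bits flood back in

   giving 0xffffff84 in two instructions.  */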
1957 /* For an inverted constant, we will need to set the low bits;
1958 these will be shifted out of harm's way. */
1959 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1960 if (const_ok_for_arm (~temp1))
1962 if (generate)
1964 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1965 emit_constant_insn (cond,
1966 gen_rtx_SET (VOIDmode, new_src,
1967 GEN_INT (temp1)));
1968 emit_constant_insn (cond,
1969 gen_ashrsi3 (target, new_src,
1970 GEN_INT (set_sign_bit_copies - 1)));
1972 return 2;
1976 /* See if we can calculate the value as the difference between two
1977 valid immediates. */
1978 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1980 int topshift = clear_sign_bit_copies & ~1;
1982 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1983 & (0xff000000 >> topshift));
1985 /* If temp1 is zero, then that means the 9 most significant
1986 bits of remainder were 1 and we've caused it to overflow.
1987 When topshift is 0 we don't need to do anything since we
1988 can borrow from 'bit 32'. */
1989 if (temp1 == 0 && topshift != 0)
1990 temp1 = 0x80000000 >> (topshift - 1);
1992 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
1994 if (const_ok_for_arm (temp2))
1996 if (generate)
1998 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1999 emit_constant_insn (cond,
2000 gen_rtx_SET (VOIDmode, new_src,
2001 GEN_INT (temp1)));
2002 emit_constant_insn (cond,
2003 gen_addsi3 (target, new_src,
2004 GEN_INT (-temp2)));
2007 return 2;
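/* A concrete case (illustrative only): 0x0ffffff1 is not a valid
   immediate, but it is the difference of two that are:

       mov  rT, #0x10000000
       sub  target, rT, #15        @ 0x10000000 - 0xf == 0x0ffffff1

   which is exactly what the temp1/temp2 computation above finds.  */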
2011 /* See if we can generate this by setting the bottom (or the top)
2012 16 bits, and then shifting these into the other half of the
2013 word. We only look for the simplest cases; to do more would cost
2014 too much. Be careful, however, not to generate this when the
2015 alternative would take fewer insns. */
2016 if (val & 0xffff0000)
2018 temp1 = remainder & 0xffff0000;
2019 temp2 = remainder & 0x0000ffff;
2021 /* Overlaps outside this range are best done using other methods. */
2022 for (i = 9; i < 24; i++)
2024 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2025 && !const_ok_for_arm (temp2))
2027 rtx new_src = (subtargets
2028 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2029 : target);
2030 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2031 source, subtargets, generate);
2032 source = new_src;
2033 if (generate)
2034 emit_constant_insn
2035 (cond,
2036 gen_rtx_SET
2037 (VOIDmode, target,
2038 gen_rtx_IOR (mode,
2039 gen_rtx_ASHIFT (mode, source,
2040 GEN_INT (i)),
2041 source)));
2042 return insns + 1;
2046 /* Don't duplicate cases already considered. */
2047 for (i = 17; i < 24; i++)
2049 if (((temp1 | (temp1 >> i)) == remainder)
2050 && !const_ok_for_arm (temp1))
2052 rtx new_src = (subtargets
2053 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2054 : target);
2055 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
2056 source, subtargets, generate);
2057 source = new_src;
2058 if (generate)
2059 emit_constant_insn
2060 (cond,
2061 gen_rtx_SET (VOIDmode, target,
2062 gen_rtx_IOR
2063 (mode,
2064 gen_rtx_LSHIFTRT (mode, source,
2065 GEN_INT (i)),
2066 source)));
2067 return insns + 1;
2071 break;
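/* A concrete case of the halved-constant trick above (illustrative
   only): 0x01010101 is remainder == temp2 | (temp2 << 16) with
   temp2 == 0x0101, so it is synthesized as

       mov  rT, #1
       orr  rT, rT, #0x100
       orr  target, rT, rT, lsl #16

   i.e. two insns for the recursive call plus one IOR-of-shift.  */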
2073 case IOR:
2074 case XOR:
2075 /* If we have IOR or XOR, and the constant can be loaded in a
2076 single instruction, and we can find a temporary to put it in,
2077 then this can be done in two instructions instead of 3-4. */
2078 if (subtargets
2079 /* TARGET can't be NULL if SUBTARGETS is 0. */
2080 || (reload_completed && !reg_mentioned_p (target, source)))
2082 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2084 if (generate)
2086 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2088 emit_constant_insn (cond,
2089 gen_rtx_SET (VOIDmode, sub,
2090 GEN_INT (val)));
2091 emit_constant_insn (cond,
2092 gen_rtx_SET (VOIDmode, target,
2093 gen_rtx_fmt_ee (code, mode,
2094 source, sub)));
2096 return 2;
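/* For example (illustrative): target = source | 0xffffff00 loads the
   constant in one insn, since ~0xffffff00 == 0xff is a valid
   immediate for MVN:

       mvn  rT, #0xff
       orr  target, source, rT  */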
2100 if (code == XOR)
2101 break;
2103 if (set_sign_bit_copies > 8
2104 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2106 if (generate)
2108 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2109 rtx shift = GEN_INT (set_sign_bit_copies);
2111 emit_constant_insn
2112 (cond,
2113 gen_rtx_SET (VOIDmode, sub,
2114 gen_rtx_NOT (mode,
2115 gen_rtx_ASHIFT (mode,
2116 source,
2117 shift))));
2118 emit_constant_insn
2119 (cond,
2120 gen_rtx_SET (VOIDmode, target,
2121 gen_rtx_NOT (mode,
2122 gen_rtx_LSHIFTRT (mode, sub,
2123 shift))));
2125 return 2;
2128 if (set_zero_bit_copies > 8
2129 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2131 if (generate)
2133 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2134 rtx shift = GEN_INT (set_zero_bit_copies);
2136 emit_constant_insn
2137 (cond,
2138 gen_rtx_SET (VOIDmode, sub,
2139 gen_rtx_NOT (mode,
2140 gen_rtx_LSHIFTRT (mode,
2141 source,
2142 shift))));
2143 emit_constant_insn
2144 (cond,
2145 gen_rtx_SET (VOIDmode, target,
2146 gen_rtx_NOT (mode,
2147 gen_rtx_ASHIFT (mode, sub,
2148 shift))));
2150 return 2;
2153 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2155 if (generate)
2157 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2158 emit_constant_insn (cond,
2159 gen_rtx_SET (VOIDmode, sub,
2160 gen_rtx_NOT (mode, source)));
2161 source = sub;
2162 if (subtargets)
2163 sub = gen_reg_rtx (mode);
2164 emit_constant_insn (cond,
2165 gen_rtx_SET (VOIDmode, sub,
2166 gen_rtx_AND (mode, source,
2167 GEN_INT (temp1))));
2168 emit_constant_insn (cond,
2169 gen_rtx_SET (VOIDmode, target,
2170 gen_rtx_NOT (mode, sub)));
2172 return 3;
2174 break;
2176 case AND:
2177 /* See if two shifts will do 2 or more insns' worth of work. */
2178 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2180 HOST_WIDE_INT shift_mask = ((0xffffffff
2181 << (32 - clear_sign_bit_copies))
2182 & 0xffffffff);
2184 if ((remainder | shift_mask) != 0xffffffff)
2186 if (generate)
2188 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2189 insns = arm_gen_constant (AND, mode, cond,
2190 remainder | shift_mask,
2191 new_src, source, subtargets, 1);
2192 source = new_src;
2194 else
2196 rtx targ = subtargets ? NULL_RTX : target;
2197 insns = arm_gen_constant (AND, mode, cond,
2198 remainder | shift_mask,
2199 targ, source, subtargets, 0);
2203 if (generate)
2205 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2206 rtx shift = GEN_INT (clear_sign_bit_copies);
2208 emit_insn (gen_ashlsi3 (new_src, source, shift));
2209 emit_insn (gen_lshrsi3 (target, new_src, shift));
2212 return insns + 2;
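/* For example (illustrative): target = x & 0x0000ffff has sixteen
   clear sign bits, no valid immediate in either direct or inverted
   form, and becomes

       mov  rT, x, lsl #16
       mov  target, rT, lsr #16  */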
2215 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2217 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2219 if ((remainder | shift_mask) != 0xffffffff)
2221 if (generate)
2223 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2225 insns = arm_gen_constant (AND, mode, cond,
2226 remainder | shift_mask,
2227 new_src, source, subtargets, 1);
2228 source = new_src;
2230 else
2232 rtx targ = subtargets ? NULL_RTX : target;
2234 insns = arm_gen_constant (AND, mode, cond,
2235 remainder | shift_mask,
2236 targ, source, subtargets, 0);
2240 if (generate)
2242 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2243 rtx shift = GEN_INT (clear_zero_bit_copies);
2245 emit_insn (gen_lshrsi3 (new_src, source, shift));
2246 emit_insn (gen_ashlsi3 (target, new_src, shift));
2249 return insns + 2;
2252 break;
2254 default:
2255 break;
2258 for (i = 0; i < 32; i++)
2259 if (remainder & (1 << i))
2260 num_bits_set++;
2262 if (code == AND || (can_invert && num_bits_set > 16))
2263 remainder = (~remainder) & 0xffffffff;
2264 else if (code == PLUS && num_bits_set > 16)
2265 remainder = (-remainder) & 0xffffffff;
2266 else
2268 can_invert = 0;
2269 can_negate = 0;
2272 /* Now try to find a way of doing the job in either two or three
2273 instructions.
2274 We start by looking for the largest block of zeros that are aligned on
2275 a 2-bit boundary; we then fill up the temps, wrapping around to the
2276 top of the word when we drop off the bottom.
2277 In the worst case this code should produce no more than four insns. */
2279 int best_start = 0;
2280 int best_consecutive_zeros = 0;
2282 for (i = 0; i < 32; i += 2)
2284 int consecutive_zeros = 0;
2286 if (!(remainder & (3 << i)))
2288 while ((i < 32) && !(remainder & (3 << i)))
2290 consecutive_zeros += 2;
2291 i += 2;
2293 if (consecutive_zeros > best_consecutive_zeros)
2295 best_consecutive_zeros = consecutive_zeros;
2296 best_start = i - consecutive_zeros;
2298 i -= 2;
2302 /* So long as it won't require any more insns to do so, it's
2303 desirable to emit a small constant (in bits 0...9) in the last
2304 insn. This way there is more chance that it can be combined with
2305 a later addressing insn to form a pre-indexed load or store
2306 operation. Consider:
2308 *((volatile int *)0xe0000100) = 1;
2309 *((volatile int *)0xe0000110) = 2;
2311 We want this to wind up as:
2313 mov rA, #0xe0000000
2314 mov rB, #1
2315 str rB, [rA, #0x100]
2316 mov rB, #2
2317 str rB, [rA, #0x110]
2319 rather than having to synthesize both large constants from scratch.
2321 Therefore, we calculate how many insns would be required to emit
2322 the constant starting from `best_start', and also starting from
2323 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2324 yield a shorter sequence, we may as well use zero. */
2325 if (best_start != 0
2326 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2327 && (count_insns_for_constant (remainder, 0) <=
2328 count_insns_for_constant (remainder, best_start)))
2329 best_start = 0;
2331 /* Now start emitting the insns. */
2332 i = best_start;
2333 do
2335 int end;
2337 if (i <= 0)
2338 i += 32;
2339 if (remainder & (3 << (i - 2)))
2341 end = i - 8;
2342 if (end < 0)
2343 end += 32;
2344 temp1 = remainder & ((0x0ff << end)
2345 | ((i < end) ? (0xff >> (32 - end)) : 0));
2346 remainder &= ~temp1;
2348 if (generate)
2350 rtx new_src, temp1_rtx;
2352 if (code == SET || code == MINUS)
2354 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2355 if (can_invert && code != MINUS)
2356 temp1 = ~temp1;
2358 else
2360 if (remainder && subtargets)
2361 new_src = gen_reg_rtx (mode);
2362 else
2363 new_src = target;
2364 if (can_invert)
2365 temp1 = ~temp1;
2366 else if (can_negate)
2367 temp1 = -temp1;
2370 temp1 = trunc_int_for_mode (temp1, mode);
2371 temp1_rtx = GEN_INT (temp1);
2373 if (code == SET)
2374 ;
2375 else if (code == MINUS)
2376 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2377 else
2378 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2380 emit_constant_insn (cond,
2381 gen_rtx_SET (VOIDmode, new_src,
2382 temp1_rtx));
2383 source = new_src;
2386 if (code == SET)
2388 can_invert = 0;
2389 code = PLUS;
2391 else if (code == MINUS)
2392 code = PLUS;
2394 insns++;
2395 i -= 6;
2397 i -= 2;
2399 while (remainder);
2402 return insns;
2405 /* Canonicalize a comparison so that we are more likely to recognize it.
2406 This can be done for a few constant compares, where we can make the
2407 immediate value easier to load. */
2409 enum rtx_code
2410 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2411 rtx * op1)
2413 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2414 unsigned HOST_WIDE_INT maxval;
2415 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2417 switch (code)
2419 case EQ:
2420 case NE:
2421 return code;
2423 case GT:
2424 case LE:
2425 if (i != maxval
2426 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2428 *op1 = GEN_INT (i + 1);
2429 return code == GT ? GE : LT;
2431 break;
2433 case GE:
2434 case LT:
2435 if (i != ~maxval
2436 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2438 *op1 = GEN_INT (i - 1);
2439 return code == GE ? GT : LE;
2441 break;
2443 case GTU:
2444 case LEU:
2445 if (i != ~((unsigned HOST_WIDE_INT) 0)
2446 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2448 *op1 = GEN_INT (i + 1);
2449 return code == GTU ? GEU : LTU;
2451 break;
2453 case GEU:
2454 case LTU:
2455 if (i != 0
2456 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2458 *op1 = GEN_INT (i - 1);
2459 return code == GEU ? GTU : LEU;
2461 break;
2463 default:
2464 gcc_unreachable ();
2467 return code;
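/* A concrete case (illustrative only): 0x3ff is not a valid ARM
   immediate but 0x400 is, so (GT x 0x3ff) is rewritten here as
   (GE x 0x400) and the comparison becomes

       cmp  x, #1024
       bge  ...

   rather than first synthesizing 0x3ff in a scratch register.  */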
2471 /* Define how to find the value returned by a function. */
2473 rtx
2474 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2476 enum machine_mode mode;
2477 int unsignedp ATTRIBUTE_UNUSED;
2478 rtx r ATTRIBUTE_UNUSED;
2480 mode = TYPE_MODE (type);
2481 /* Promote integer types. */
2482 if (INTEGRAL_TYPE_P (type))
2483 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2485 /* Promote small structs returned in a register to full-word size
2486 for big-endian AAPCS. */
2487 if (arm_return_in_msb (type))
2489 HOST_WIDE_INT size = int_size_in_bytes (type);
2490 if (size % UNITS_PER_WORD != 0)
2492 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2493 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2497 return LIBCALL_VALUE(mode);
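/* For illustration (not from the original sources): on a big-endian
   AAPCS target a 6-byte structure is padded up to 8 bytes by the code
   above and returned as a DImode value in r0/r1, so the data sits in
   the most significant end of the register pair.  */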
2500 /* Determine the amount of memory needed to store the possible return
2501 registers of an untyped call. */
2502 int
2503 arm_apply_result_size (void)
2505 int size = 16;
2507 if (TARGET_ARM)
2509 if (TARGET_HARD_FLOAT_ABI)
2511 if (TARGET_FPA)
2512 size += 12;
2513 if (TARGET_MAVERICK)
2514 size += 8;
2516 if (TARGET_IWMMXT_ABI)
2517 size += 8;
2520 return size;
2523 /* Decide whether a type should be returned in memory (true)
2524 or in a register (false). This is called by the macro
2525 RETURN_IN_MEMORY. */
2526 int
2527 arm_return_in_memory (tree type)
2529 HOST_WIDE_INT size;
2531 if (!AGGREGATE_TYPE_P (type) &&
2532 (TREE_CODE (type) != VECTOR_TYPE) &&
2533 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2534 /* All simple types are returned in registers.
2535 For AAPCS, complex types are treated the same as aggregates. */
2536 return 0;
2538 size = int_size_in_bytes (type);
2540 if (arm_abi != ARM_ABI_APCS)
2542 /* ATPCS and later return aggregate types in memory only if they are
2543 larger than a word (or are variable size). */
2544 return (size < 0 || size > UNITS_PER_WORD);
2547 /* To maximize backwards compatibility with previous versions of gcc,
2548 return vectors up to 4 words in registers. */
2549 if (TREE_CODE (type) == VECTOR_TYPE)
2550 return (size < 0 || size > (4 * UNITS_PER_WORD));
2552 /* For the arm-wince targets we choose to be compatible with Microsoft's
2553 ARM and Thumb compilers, which always return aggregates in memory. */
2554 #ifndef ARM_WINCE
2555 /* All structures/unions bigger than one word are returned in memory.
2556 Also catch the case where int_size_in_bytes returns -1. In this case
2557 the aggregate is either huge or of variable size, and in either case
2558 we will want to return it via memory and not in a register. */
2559 if (size < 0 || size > UNITS_PER_WORD)
2560 return 1;
2562 if (TREE_CODE (type) == RECORD_TYPE)
2564 tree field;
2566 /* For a struct the APCS says that we only return in a register
2567 if the type is 'integer like' and every addressable element
2568 has an offset of zero. For practical purposes this means
2569 that the structure can have at most one non bit-field element
2570 and that this element must be the first one in the structure. */
2572 /* Find the first field, ignoring non FIELD_DECL things which will
2573 have been created by C++. */
2574 for (field = TYPE_FIELDS (type);
2575 field && TREE_CODE (field) != FIELD_DECL;
2576 field = TREE_CHAIN (field))
2577 continue;
2579 if (field == NULL)
2580 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2582 /* Check that the first field is valid for returning in a register. */
2584 /* ... Floats are not allowed */
2585 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2586 return 1;
2588 /* ... Aggregates that are not themselves valid for returning in
2589 a register are not allowed. */
2590 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2591 return 1;
2593 /* Now check the remaining fields, if any. Only bitfields are allowed,
2594 since they are not addressable. */
2595 for (field = TREE_CHAIN (field);
2596 field;
2597 field = TREE_CHAIN (field))
2599 if (TREE_CODE (field) != FIELD_DECL)
2600 continue;
2602 if (!DECL_BIT_FIELD_TYPE (field))
2603 return 1;
2606 return 0;
2609 if (TREE_CODE (type) == UNION_TYPE)
2611 tree field;
2613 /* Unions can be returned in registers if every element is
2614 integral, or can be returned in an integer register. */
2615 for (field = TYPE_FIELDS (type);
2616 field;
2617 field = TREE_CHAIN (field))
2619 if (TREE_CODE (field) != FIELD_DECL)
2620 continue;
2622 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2623 return 1;
2625 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2626 return 1;
2629 return 0;
2631 #endif /* not ARM_WINCE */
2633 /* Return all other types in memory. */
2634 return 1;
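/* Illustrative consequences of the APCS rules above:

       struct { int i; }       -- integer-like, returned in r0
       struct { float f; }     -- first field is a float: memory
       struct { int a, b; }    -- larger than a word: memory

   while under ATPCS/AAPCS only the size test applies, so anything over
   UNITS_PER_WORD bytes (or of variable size) is returned via memory.  */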
2637 /* Indicate whether or not words of a double are in big-endian order. */
2639 int
2640 arm_float_words_big_endian (void)
2642 if (TARGET_MAVERICK)
2643 return 0;
2645 /* For FPA, float words are always big-endian. For VFP, float words
2646 follow the memory system mode. */
2648 if (TARGET_FPA)
2650 return 1;
2653 if (TARGET_VFP)
2654 return (TARGET_BIG_END ? 1 : 0);
2656 return 1;
2659 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2660 for a call to a function whose data type is FNTYPE.
2661 For a library call, FNTYPE is NULL. */
2662 void
2663 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2664 rtx libname ATTRIBUTE_UNUSED,
2665 tree fndecl ATTRIBUTE_UNUSED)
2667 /* On the ARM, the offset starts at 0. */
2668 pcum->nregs = 0;
2669 pcum->iwmmxt_nregs = 0;
2670 pcum->can_split = true;
2672 pcum->call_cookie = CALL_NORMAL;
2674 if (TARGET_LONG_CALLS)
2675 pcum->call_cookie = CALL_LONG;
2677 /* Check for long call/short call attributes. The attributes
2678 override any command line option. */
2679 if (fntype)
2681 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2682 pcum->call_cookie = CALL_SHORT;
2683 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2684 pcum->call_cookie = CALL_LONG;
2687 /* Varargs vectors are treated the same as long long.
2688 named_count avoids having to change the way arm handles 'named' arguments. */
2689 pcum->named_count = 0;
2690 pcum->nargs = 0;
2692 if (TARGET_REALLY_IWMMXT && fntype)
2694 tree fn_arg;
2696 for (fn_arg = TYPE_ARG_TYPES (fntype);
2697 fn_arg;
2698 fn_arg = TREE_CHAIN (fn_arg))
2699 pcum->named_count += 1;
2701 if (! pcum->named_count)
2702 pcum->named_count = INT_MAX;
2707 /* Return true if mode/type need doubleword alignment. */
2708 bool
2709 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2711 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2712 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
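/* For example (illustrative): long long and double carry 64-bit
   alignment, which exceeds the 32-bit PARM_BOUNDARY, so this returns
   true for them and the callers below place such arguments in
   even-numbered register pairs (r0/r1 or r2/r3).  */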
2716 /* Determine where to put an argument to a function.
2717 Value is zero to push the argument on the stack,
2718 or a hard register in which to store the argument.
2720 MODE is the argument's machine mode.
2721 TYPE is the data type of the argument (as a tree).
2722 This is null for libcalls where that information may
2723 not be available.
2724 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2725 the preceding args and about the function being called.
2726 NAMED is nonzero if this argument is a named parameter
2727 (otherwise it is an extra parameter matching an ellipsis). */
2729 rtx
2730 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2731 tree type, int named)
2733 int nregs;
2735 /* Varargs vectors are treated the same as long long.
2736 named_count avoids having to change the way arm handles 'named' arguments. */
2737 if (TARGET_IWMMXT_ABI
2738 && arm_vector_mode_supported_p (mode)
2739 && pcum->named_count > pcum->nargs + 1)
2741 if (pcum->iwmmxt_nregs <= 9)
2742 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2743 else
2745 pcum->can_split = false;
2746 return NULL_RTX;
2750 /* Put doubleword aligned quantities in even register pairs. */
2751 if (pcum->nregs & 1
2752 && ARM_DOUBLEWORD_ALIGN
2753 && arm_needs_doubleword_align (mode, type))
2754 pcum->nregs++;
2756 if (mode == VOIDmode)
2757 /* Compute operand 2 of the call insn. */
2758 return GEN_INT (pcum->call_cookie);
2760 /* Only allow splitting an arg between regs and memory if all preceding
2761 args were allocated to regs. For args passed by reference we only count
2762 the reference pointer. */
2763 if (pcum->can_split)
2764 nregs = 1;
2765 else
2766 nregs = ARM_NUM_REGS2 (mode, type);
2768 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2769 return NULL_RTX;
2771 return gen_rtx_REG (mode, pcum->nregs);
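/* A worked example (illustrative only): for f (int a, long long b)
   under the AAPCS, A is passed in r0; when B is laid out, nregs is 1
   (odd) and B needs doubleword alignment, so nregs is bumped to 2 and
   B occupies the even pair r2/r3, leaving r1 unused.  */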
2774 static int
2775 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2776 tree type, bool named ATTRIBUTE_UNUSED)
2778 int nregs = pcum->nregs;
2780 if (arm_vector_mode_supported_p (mode))
2781 return 0;
2783 if (NUM_ARG_REGS > nregs
2784 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2785 && pcum->can_split)
2786 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2788 return 0;
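/* For example (illustrative): a 12-byte structure passed by value
   when r0 and r1 are already taken starts at nregs == 2 and needs
   three words, so (NUM_ARG_REGS - nregs) * UNITS_PER_WORD == 8 bytes
   travel in r2/r3 and the final word goes on the stack.  */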
2791 /* Variable sized types are passed by reference. This is a GCC
2792 extension to the ARM ABI. */
2794 static bool
2795 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2796 enum machine_mode mode ATTRIBUTE_UNUSED,
2797 tree type, bool named ATTRIBUTE_UNUSED)
2799 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2802 /* Encode the current state of the #pragma [no_]long_calls. */
2803 typedef enum
2805 OFF, /* No #pragma [no_]long_calls is in effect. */
2806 LONG, /* #pragma long_calls is in effect. */
2807 SHORT /* #pragma no_long_calls is in effect. */
2808 } arm_pragma_enum;
2810 static arm_pragma_enum arm_pragma_long_calls = OFF;
2812 void
2813 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2815 arm_pragma_long_calls = LONG;
2818 void
2819 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2821 arm_pragma_long_calls = SHORT;
2824 void
2825 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2827 arm_pragma_long_calls = OFF;
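/* Typical use of the pragmas these handlers implement (illustrative):

       #pragma long_calls
       void far_func (void);      -- gets the long_call attribute
       #pragma no_long_calls
       void near_func (void);     -- gets the short_call attribute
       #pragma long_calls_off
                                  -- back to the command-line default  */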
2830 /* Table of machine attributes. */
2831 const struct attribute_spec arm_attribute_table[] =
2833 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2834 /* Function calls made to this symbol must be done indirectly, because
2835 it may lie outside the 26-bit addressing range of a normal function
2836 call. */
2837 { "long_call", 0, 0, false, true, true, NULL },
2838 /* Whereas these functions are always known to reside within the 26-bit
2839 addressing range. */
2840 { "short_call", 0, 0, false, true, true, NULL },
2841 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2842 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2843 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2844 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2845 #ifdef ARM_PE
2846 /* ARM/PE has three new attributes:
2847 interfacearm - ?
2848 dllexport - for exporting a function/variable that will live in a dll
2849 dllimport - for importing a function/variable from a dll
2851 Microsoft allows multiple declspecs in one __declspec, separating
2852 them with spaces. We do NOT support this. Instead, use __declspec
2853 multiple times.
2855 { "dllimport", 0, 0, true, false, false, NULL },
2856 { "dllexport", 0, 0, true, false, false, NULL },
2857 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2858 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2859 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2860 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2861 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2862 #endif
2863 { NULL, 0, 0, false, false, false, NULL }
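/* Typical uses of the attributes registered above (illustrative):

       void far_func (void) __attribute__ ((long_call));
       void isr_handler (void) __attribute__ ((interrupt ("IRQ")));
       void stub (void) __attribute__ ((naked));  */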
2866 /* Handle an attribute requiring a FUNCTION_DECL;
2867 arguments as in struct attribute_spec.handler. */
2868 static tree
2869 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2870 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2872 if (TREE_CODE (*node) != FUNCTION_DECL)
2874 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2875 IDENTIFIER_POINTER (name));
2876 *no_add_attrs = true;
2879 return NULL_TREE;
2882 /* Handle an "interrupt" or "isr" attribute;
2883 arguments as in struct attribute_spec.handler. */
2884 static tree
2885 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2886 bool *no_add_attrs)
2888 if (DECL_P (*node))
2890 if (TREE_CODE (*node) != FUNCTION_DECL)
2892 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2893 IDENTIFIER_POINTER (name));
2894 *no_add_attrs = true;
2896 /* FIXME: the argument, if any, is checked for type attributes;
2897 should it be checked for decl ones? */
2899 else
2901 if (TREE_CODE (*node) == FUNCTION_TYPE
2902 || TREE_CODE (*node) == METHOD_TYPE)
2904 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2906 warning (OPT_Wattributes, "%qs attribute ignored",
2907 IDENTIFIER_POINTER (name));
2908 *no_add_attrs = true;
2911 else if (TREE_CODE (*node) == POINTER_TYPE
2912 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2913 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2914 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2916 *node = build_variant_type_copy (*node);
2917 TREE_TYPE (*node) = build_type_attribute_variant
2918 (TREE_TYPE (*node),
2919 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2920 *no_add_attrs = true;
2922 else
2924 /* Possibly pass this attribute on from the type to a decl. */
2925 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2926 | (int) ATTR_FLAG_FUNCTION_NEXT
2927 | (int) ATTR_FLAG_ARRAY_NEXT))
2929 *no_add_attrs = true;
2930 return tree_cons (name, args, NULL_TREE);
2932 else
2934 warning (OPT_Wattributes, "%qs attribute ignored",
2935 IDENTIFIER_POINTER (name));
2940 return NULL_TREE;
2943 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2944 /* Handle the "notshared" attribute. This attribute is another way of
2945 requesting hidden visibility. ARM's compiler supports
2946 "__declspec(notshared)"; we support the same thing via an
2947 attribute. */
2949 static tree
2950 arm_handle_notshared_attribute (tree *node,
2951 tree name ATTRIBUTE_UNUSED,
2952 tree args ATTRIBUTE_UNUSED,
2953 int flags ATTRIBUTE_UNUSED,
2954 bool *no_add_attrs)
2956 tree decl = TYPE_NAME (*node);
2958 if (decl)
2960 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2961 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2962 *no_add_attrs = false;
2964 return NULL_TREE;
2966 #endif
2968 /* Return 0 if the attributes for two types are incompatible, 1 if they
2969 are compatible, and 2 if they are nearly compatible (which causes a
2970 warning to be generated). */
2971 static int
2972 arm_comp_type_attributes (tree type1, tree type2)
2974 int l1, l2, s1, s2;
2976 /* Check for mismatch of non-default calling convention. */
2977 if (TREE_CODE (type1) != FUNCTION_TYPE)
2978 return 1;
2980 /* Check for mismatched call attributes. */
2981 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2982 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2983 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2984 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2986 /* Only bother to check if an attribute is defined. */
2987 if (l1 | l2 | s1 | s2)
2989 /* If one type has an attribute, the other must have the same attribute. */
2990 if ((l1 != l2) || (s1 != s2))
2991 return 0;
2993 /* Disallow mixed attributes. */
2994 if ((l1 & s2) || (l2 & s1))
2995 return 0;
2998 /* Check for mismatched ISR attribute. */
2999 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
3000 if (! l1)
3001 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
3002 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
3003 if (! l2)
3004 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
3005 if (l1 != l2)
3006 return 0;
3008 return 1;
3011 /* Encode long_call or short_call attribute by prefixing
3012 symbol name in DECL with a special character FLAG. */
3013 void
3014 arm_encode_call_attribute (tree decl, int flag)
3016 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
3017 int len = strlen (str);
3018 char * newstr;
3020 /* Do not allow weak functions to be treated as short call. */
3021 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
3022 return;
3024 newstr = alloca (len + 2);
3025 newstr[0] = flag;
3026 strcpy (newstr + 1, str);
3028 newstr = (char *) ggc_alloc_string (newstr, len + 1);
3029 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
3032 /* Assign default attributes to a newly defined type. This is used to
3033 set short_call/long_call attributes for function types of
3034 functions defined inside corresponding #pragma scopes. */
3035 static void
3036 arm_set_default_type_attributes (tree type)
3038 /* Add __attribute__ ((long_call)) to all functions when inside
3039 #pragma long_calls, or __attribute__ ((short_call)) when inside
3040 #pragma no_long_calls. */
3041 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
3043 tree type_attr_list, attr_name;
3044 type_attr_list = TYPE_ATTRIBUTES (type);
3046 if (arm_pragma_long_calls == LONG)
3047 attr_name = get_identifier ("long_call");
3048 else if (arm_pragma_long_calls == SHORT)
3049 attr_name = get_identifier ("short_call");
3050 else
3051 return;
3053 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
3054 TYPE_ATTRIBUTES (type) = type_attr_list;
3058 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
3059 defined within the current compilation unit. If this cannot be
3060 determined, then 0 is returned. */
3061 static int
3062 current_file_function_operand (rtx sym_ref)
3064 /* This is a bit of a fib. A function will have a short call flag
3065 applied to its name if it has the short call attribute, or it has
3066 already been defined within the current compilation unit. */
3067 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
3068 return 1;
3070 /* The current function is always defined within the current compilation
3071 unit. If it is a weak definition, however, then this may not be the real
3072 definition of the function, and so we have to say no. */
3073 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
3074 && !DECL_WEAK (current_function_decl))
3075 return 1;
3077 /* We cannot make the determination - default to returning 0. */
3078 return 0;
3081 /* Return nonzero if a 32 bit "long_call" should be generated for
3082 this call. We generate a long_call if the function:
3084 a. has an __attribute__ ((long_call))
3085 or b. is within the scope of a #pragma long_calls
3086 or c. the -mlong-calls command line switch has been specified
3087 and either:
3088 1. -ffunction-sections is in effect
3089 or 2. the current function has __attribute__ ((section))
3090 or 3. the target function has __attribute__ ((section))
3092 However we do not generate a long call if the function:
3094 d. has an __attribute__ ((short_call))
3095 or e. is inside the scope of a #pragma no_long_calls
3096 or f. is defined within the current compilation unit.
3098 This function will be called by C fragments contained in the machine
3099 description file. SYM_REF and CALL_COOKIE correspond to the matched
3100 rtl operands. CALL_SYMBOL is used to distinguish between
3101 two different callers of the function. It is set to 1 in the
3102 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3103 and "call_value" patterns. This is because of the difference in the
3104 SYM_REFs passed by these patterns. */
3105 int
3106 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3108 if (!call_symbol)
3110 if (GET_CODE (sym_ref) != MEM)
3111 return 0;
3113 sym_ref = XEXP (sym_ref, 0);
3116 if (GET_CODE (sym_ref) != SYMBOL_REF)
3117 return 0;
3119 if (call_cookie & CALL_SHORT)
3120 return 0;
3122 if (TARGET_LONG_CALLS)
3124 if (flag_function_sections
3125 || DECL_SECTION_NAME (current_function_decl))
3126 /* c.3 is handled by the definition of the
3127 ARM_DECLARE_FUNCTION_SIZE macro. */
3128 return 1;
3131 if (current_file_function_operand (sym_ref))
3132 return 0;
3134 return (call_cookie & CALL_LONG)
3135 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3136 || TARGET_LONG_CALLS;
3139 /* Return nonzero if it is ok to make a tail-call to DECL. */
3140 static bool
3141 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3143 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3145 if (cfun->machine->sibcall_blocked)
3146 return false;
3148 /* Never tailcall something for which we have no decl, or if we
3149 are in Thumb mode. */
3150 if (decl == NULL || TARGET_THUMB)
3151 return false;
3153 /* Get the calling method. */
3154 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3155 call_type = CALL_SHORT;
3156 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3157 call_type = CALL_LONG;
3159 /* Cannot tail-call to long calls, since these are out of range of
3160 a branch instruction. However, if not compiling PIC, we know
3161 we can reach the symbol if it is in this compilation unit. */
3162 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3163 return false;
3165 /* If we are interworking and the function is not declared static
3166 then we can't tail-call it unless we know that it exists in this
3167 compilation unit (since it might be a Thumb routine). */
3168 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3169 return false;
3171 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3172 if (IS_INTERRUPT (arm_current_func_type ()))
3173 return false;
3175 /* Everything else is ok. */
3176 return true;
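/* For example (illustrative): when this returns true for

       int callee (int);
       int caller (int x) { return callee (x + 1); }

   the call is emitted as a sibling call, i.e.

       add  r0, r0, #1
       b    callee

   reusing the caller's return address instead of a BL/return pair.  */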
3180 /* Addressing mode support functions. */
3182 /* Return nonzero if X is a legitimate immediate operand when compiling
3183 for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
3184 int
3185 legitimate_pic_operand_p (rtx x)
3187 if (GET_CODE (x) == SYMBOL_REF
3188 || (GET_CODE (x) == CONST
3189 && GET_CODE (XEXP (x, 0)) == PLUS
3190 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
3191 return 0;
3193 return 1;
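/* For example (illustrative): (const_int 42) and a label difference
   remain legitimate immediates under PIC, whereas (symbol_ref "x")
   or (const (plus (symbol_ref "x") (const_int 4))) are rejected here
   and must be legitimized through the GOT below.  */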
3196 rtx
3197 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3199 if (GET_CODE (orig) == SYMBOL_REF
3200 || GET_CODE (orig) == LABEL_REF)
3202 #ifndef AOF_ASSEMBLER
3203 rtx pic_ref, address;
3204 #endif
3205 rtx insn;
3206 int subregs = 0;
3208 /* If this function doesn't have a pic register, create one now.
3209 A lot of the logic here is made obscure by the fact that this
3210 routine gets called as part of the rtx cost estimation
3211 process. We don't want those calls to affect any assumptions
3212 about the real function; and further, we can't call
3213 entry_of_function() until we start the real expansion
3214 process. */
3215 if (!current_function_uses_pic_offset_table)
3217 gcc_assert (!no_new_pseudos);
3218 if (arm_pic_register != INVALID_REGNUM)
3220 cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
3222 /* Play games to avoid marking the function as needing pic
3223 if we are being called as part of the cost-estimation
3224 process. */
3225 if (!ir_type())
3226 current_function_uses_pic_offset_table = 1;
3228 else
3230 rtx seq;
3232 cfun->machine->pic_reg = gen_reg_rtx (Pmode);
3234 /* Play games to avoid marking the function as needing pic
3235 if we are being called as part of the cost-estimation
3236 process. */
3237 if (!ir_type())
3239 current_function_uses_pic_offset_table = 1;
3240 start_sequence ();
3242 arm_load_pic_register (0UL);
3244 seq = get_insns ();
3245 end_sequence ();
3246 emit_insn_after (seq, entry_of_function ());
3251 if (reg == 0)
3253 gcc_assert (!no_new_pseudos);
3254 reg = gen_reg_rtx (Pmode);
3256 subregs = 1;
3259 #ifdef AOF_ASSEMBLER
3260 /* The AOF assembler can generate relocations for these directly, and
3261 understands that the PIC register has to be added into the offset. */
3262 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3263 #else
3264 if (subregs)
3265 address = gen_reg_rtx (Pmode);
3266 else
3267 address = reg;
3269 if (TARGET_ARM)
3270 emit_insn (gen_pic_load_addr_arm (address, orig));
3271 else
3272 emit_insn (gen_pic_load_addr_thumb (address, orig));
3274 if ((GET_CODE (orig) == LABEL_REF
3275 || (GET_CODE (orig) == SYMBOL_REF &&
3276 SYMBOL_REF_LOCAL_P (orig)))
3277 && NEED_GOT_RELOC)
3278 pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
3279 else
3281 pic_ref = gen_const_mem (Pmode,
3282 gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
3283 address));
3286 insn = emit_move_insn (reg, pic_ref);
3287 #endif
3288 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3289 by the loop optimizer. */
3290 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3291 REG_NOTES (insn));
3292 return reg;
3294 else if (GET_CODE (orig) == CONST)
3296 rtx base, offset;
3298 if (GET_CODE (XEXP (orig, 0)) == PLUS
3299 && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
3300 return orig;
3302 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
3303 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
3304 return orig;
3306 if (reg == 0)
3308 gcc_assert (!no_new_pseudos);
3309 reg = gen_reg_rtx (Pmode);
3312 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3314 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3315 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3316 base == reg ? 0 : reg);
3318 if (GET_CODE (offset) == CONST_INT)
3320 /* The base register doesn't really matter; we only want to
3321 test the index for the appropriate mode. */
3322 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3324 gcc_assert (!no_new_pseudos);
3325 offset = force_reg (Pmode, offset);
3328 if (GET_CODE (offset) == CONST_INT)
3329 return plus_constant (base, INTVAL (offset));
3332 if (GET_MODE_SIZE (mode) > 4
3333 && (GET_MODE_CLASS (mode) == MODE_INT
3334 || TARGET_SOFT_FLOAT))
3336 emit_insn (gen_addsi3 (reg, base, offset));
3337 return reg;
3340 return gen_rtx_PLUS (Pmode, base, offset);
3343 return orig;
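/* A worked example (illustrative only): legitimizing the address
   (const (plus (symbol_ref "x") (const_int 12))) first loads the
   address of "x" through the GOT into a register and then, since 12
   is a valid index, folds the offset back in with plus_constant,
   yielding (plus (reg Rx) (const_int 12)).  */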
3347 /* Find a spare low register to use during the prologue of a function. */
3349 static int
3350 thumb_find_work_register (unsigned long pushed_regs_mask)
3352 int reg;
3354 /* Check the argument registers first as these are call-used. The
3355 register allocation order means that sometimes r3 might be used
3356 but earlier argument registers might not, so check them all. */
3357 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3358 if (!regs_ever_live[reg])
3359 return reg;
3361 /* Before going on to check the call-saved registers we can try a couple
3362 more ways of deducing that r3 is available. The first is when we are
3363 pushing anonymous arguments onto the stack and we have fewer than 4
3364 registers' worth of fixed arguments (*). In this case r3 will be part of
3365 the variable argument list and so we can be sure that it will be
3366 pushed right at the start of the function. Hence it will be available
3367 for the rest of the prologue.
3368 (*): i.e. current_function_pretend_args_size is greater than 0. */
3369 if (cfun->machine->uses_anonymous_args
3370 && current_function_pretend_args_size > 0)
3371 return LAST_ARG_REGNUM;
3373 /* The other case is when we have fixed arguments but fewer than 4 registers'
3374 worth. In this case r3 might be used in the body of the function, but
3375 it is not being used to convey an argument into the function. In theory
3376 we could just check current_function_args_size to see how many bytes are
3377 being passed in argument registers, but it seems that it is unreliable.
3378 Sometimes it will have the value 0 when in fact arguments are being
3379 passed. (See testcase execute/20021111-1.c for an example). So we also
3380 check the args_info.nregs field as well. The problem with this field is
3381 that it makes no allowances for arguments that are passed to the
3382 function but which are not used. Hence we could miss an opportunity
3383 when a function has an unused argument in r3. But it is better to be
3384 safe than to be sorry. */
3385 if (! cfun->machine->uses_anonymous_args
3386 && current_function_args_size >= 0
3387 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3388 && cfun->args_info.nregs < 4)
3389 return LAST_ARG_REGNUM;
3391 /* Otherwise look for a call-saved register that is going to be pushed. */
3392 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3393 if (pushed_regs_mask & (1 << reg))
3394 return reg;
3396 /* Something went wrong - thumb_compute_save_reg_mask()
3397 should have arranged for a suitable register to be pushed. */
3398 gcc_unreachable ();
3401 static GTY(()) int pic_labelno;
3403 /* Generate code to load the PIC register. In Thumb mode a spare low
3404 register is found using the SAVED_REGS mask. */
3406 void
3407 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3409 #ifndef AOF_ASSEMBLER
3410 rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx;
3411 rtx global_offset_table;
3413 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3414 return;
3416 gcc_assert (flag_pic);
3418 /* We use an UNSPEC rather than a LABEL_REF because this label never appears
3419 in the code stream. */
3421 labelno = GEN_INT (pic_labelno++);
3422 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3423 l1 = gen_rtx_CONST (VOIDmode, l1);
3425 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3426 /* On the ARM the PC register contains 'dot + 8' at the time of the
3427 addition; on the Thumb it is 'dot + 4'. */
3428 pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
3429 if (GOT_PCREL)
3430 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3431 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3432 else
3433 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3435 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3437 if (TARGET_ARM)
3439 emit_insn (gen_pic_load_addr_arm (cfun->machine->pic_reg, pic_rtx));
3440 emit_insn (gen_pic_add_dot_plus_eight (cfun->machine->pic_reg,
3441 cfun->machine->pic_reg, labelno));
3443 else
3445 if (arm_pic_register != INVALID_REGNUM
3446 && REGNO (cfun->machine->pic_reg) > LAST_LO_REGNUM)
3448 /* We will have pushed the pic register, so we should always be
3449 able to find a work register. */
3450 pic_tmp = gen_rtx_REG (SImode,
3451 thumb_find_work_register (saved_regs));
3452 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3453 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3455 else
3456 emit_insn (gen_pic_load_addr_thumb (cfun->machine->pic_reg, pic_rtx));
3457 emit_insn (gen_pic_add_dot_plus_four (cfun->machine->pic_reg,
3458 cfun->machine->pic_reg, labelno));
3461 /* Need to emit this whether or not we obey regdecls,
3462 since setjmp/longjmp can cause life info to screw up. */
3463 emit_insn (gen_rtx_USE (VOIDmode, cfun->machine->pic_reg));
3464 #endif /* AOF_ASSEMBLER */
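/* The ARM-state code emitted above looks like this (illustrative;
   register and label names vary):

           ldr     rPIC, .LCn
       .LPIC0:
           add     rPIC, pc, rPIC
       ...
       .LCn:
           .word   _GLOBAL_OFFSET_TABLE_ - (.LPIC0 + 8)

   the +8 matching the note above about what PC reads as on the ARM.  */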
3468 /* Return nonzero if X is valid as an ARM state addressing register. */
3469 static int
3470 arm_address_register_rtx_p (rtx x, int strict_p)
3472 int regno;
3474 if (GET_CODE (x) != REG)
3475 return 0;
3477 regno = REGNO (x);
3479 if (strict_p)
3480 return ARM_REGNO_OK_FOR_BASE_P (regno);
3482 return (regno <= LAST_ARM_REGNUM
3483 || regno >= FIRST_PSEUDO_REGISTER
3484 || regno == FRAME_POINTER_REGNUM
3485 || regno == ARG_POINTER_REGNUM);
3488 /* Return TRUE if this rtx is the difference of a symbol and a label,
3489 and will reduce to a PC-relative relocation in the object file.
3490 Expressions like this can be left alone when generating PIC, rather
3491 than forced through the GOT. */
3492 static int
3493 pcrel_constant_p (rtx x)
3495 if (GET_CODE (x) == MINUS)
3496 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
3498 return FALSE;
3501 /* Return nonzero if X is a valid ARM state address operand. */
3502 int
3503 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3504 int strict_p)
3506 bool use_ldrd;
3507 enum rtx_code code = GET_CODE (x);
3509 if (arm_address_register_rtx_p (x, strict_p))
3510 return 1;
3512 use_ldrd = (TARGET_LDRD
3513 && (mode == DImode
3514 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3516 if (code == POST_INC || code == PRE_DEC
3517 || ((code == PRE_INC || code == POST_DEC)
3518 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3519 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3521 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3522 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3523 && GET_CODE (XEXP (x, 1)) == PLUS
3524 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3526 rtx addend = XEXP (XEXP (x, 1), 1);
3528 /* Don't allow ldrd post-increment by register because it's hard
3529 to fix up invalid register choices. */
3530 if (use_ldrd
3531 && GET_CODE (x) == POST_MODIFY
3532 && GET_CODE (addend) == REG)
3533 return 0;
3535 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3536 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3539 /* After reload constants split into minipools will have addresses
3540 from a LABEL_REF. */
3541 else if (reload_completed
3542 && (code == LABEL_REF
3543 || (code == CONST
3544 && GET_CODE (XEXP (x, 0)) == PLUS
3545 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3546 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3547 return 1;
3549 else if (mode == TImode)
3550 return 0;
3552 else if (code == PLUS)
3554 rtx xop0 = XEXP (x, 0);
3555 rtx xop1 = XEXP (x, 1);
3557 return ((arm_address_register_rtx_p (xop0, strict_p)
3558 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3559 || (arm_address_register_rtx_p (xop1, strict_p)
3560 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3563 #if 0
3564 /* Reload currently can't handle MINUS, so disable this for now */
3565 else if (GET_CODE (x) == MINUS)
3567 rtx xop0 = XEXP (x, 0);
3568 rtx xop1 = XEXP (x, 1);
3570 return (arm_address_register_rtx_p (xop0, strict_p)
3571 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3573 #endif
3575 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3576 && code == SYMBOL_REF
3577 && CONSTANT_POOL_ADDRESS_P (x)
3578 && ! (flag_pic
3579 && symbol_mentioned_p (get_pool_constant (x))
3580 && ! pcrel_constant_p (get_pool_constant (x))))
3581 return 1;
3583 return 0;
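/* Putting the cases above together, ARM-state addresses accepted here
   include (illustrative):

       [r0]                    plain base register
       [r0, #4095]             base plus immediate index
       [r0, r1]                base plus register index
       [r0, r1, lsl #2]        base plus scaled register
       [r0], #4                pre/post increment and decrement forms
       minipool LABEL_REFs     after reload  */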
3586 /* Return nonzero if INDEX is valid for an address index operand in
3587 ARM state. */
3588 static int
3589 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3590 int strict_p)
3592 HOST_WIDE_INT range;
3593 enum rtx_code code = GET_CODE (index);
3595 /* Standard coprocessor addressing modes. */
3596 if (TARGET_HARD_FLOAT
3597 && (TARGET_FPA || TARGET_MAVERICK)
3598 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3599 || (TARGET_MAVERICK && mode == DImode)))
3600 return (code == CONST_INT && INTVAL (index) < 1024
3601 && INTVAL (index) > -1024
3602 && (INTVAL (index) & 3) == 0);
3604 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3605 return (code == CONST_INT
3606 && INTVAL (index) < 1024
3607 && INTVAL (index) > -1024
3608 && (INTVAL (index) & 3) == 0);
3610 if (arm_address_register_rtx_p (index, strict_p)
3611 && (GET_MODE_SIZE (mode) <= 4))
3612 return 1;
3614 if (mode == DImode || mode == DFmode)
3616 if (code == CONST_INT)
3618 HOST_WIDE_INT val = INTVAL (index);
3620 if (TARGET_LDRD)
3621 return val > -256 && val < 256;
3622 else
3623 return val > -4096 && val < 4092;
3626 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3629 if (GET_MODE_SIZE (mode) <= 4
3630 && ! (arm_arch4
3631 && (mode == HImode
3632 || (mode == QImode && outer == SIGN_EXTEND))))
3634 if (code == MULT)
3636 rtx xiop0 = XEXP (index, 0);
3637 rtx xiop1 = XEXP (index, 1);
3639 return ((arm_address_register_rtx_p (xiop0, strict_p)
3640 && power_of_two_operand (xiop1, SImode))
3641 || (arm_address_register_rtx_p (xiop1, strict_p)
3642 && power_of_two_operand (xiop0, SImode)));
3644 else if (code == LSHIFTRT || code == ASHIFTRT
3645 || code == ASHIFT || code == ROTATERT)
3647 rtx op = XEXP (index, 1);
3649 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3650 && GET_CODE (op) == CONST_INT
3651 && INTVAL (op) > 0
3652 && INTVAL (op) <= 31);
3656 /* For ARM v4 we may be doing a sign-extend operation during the
3657 load. */
3658 if (arm_arch4)
3660 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3661 range = 256;
3662 else
3663 range = 4096;
3665 else
3666 range = (mode == HImode) ? 4095 : 4096;
3668 return (code == CONST_INT
3669 && INTVAL (index) < range
3670 && INTVAL (index) > -range);
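/* In instruction terms the ranges above mean (illustrative):

       ldr/str/ldrb   SImode/QImode offsets in -4095..4095
       ldrh (ARMv4)   HImode offsets in -255..255
       ldrd           DImode offsets in -255..255 when TARGET_LDRD

   with coprocessor (FPA/Maverick/iWMMXt) accesses limited to
   word-aligned offsets within -1023..1023.  */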
3673 /* Return nonzero if X is valid as a Thumb state base register. */
3674 static int
3675 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3677 int regno;
3679 if (GET_CODE (x) != REG)
3680 return 0;
3682 regno = REGNO (x);
3684 if (strict_p)
3685 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3687 return (regno <= LAST_LO_REGNUM
3688 || regno > LAST_VIRTUAL_REGISTER
3689 || regno == FRAME_POINTER_REGNUM
3690 || (GET_MODE_SIZE (mode) >= 4
3691 && (regno == STACK_POINTER_REGNUM
3692 || regno >= FIRST_PSEUDO_REGISTER
3693 || x == hard_frame_pointer_rtx
3694 || x == arg_pointer_rtx)));
3697 /* Return nonzero if x is a legitimate index register. This is the case
3698 for any base register that can access a QImode object. */
3699 inline static int
3700 thumb_index_register_rtx_p (rtx x, int strict_p)
3702 return thumb_base_register_rtx_p (x, QImode, strict_p);
3705 /* Return nonzero if x is a legitimate Thumb-state address.
3707 The AP may be eliminated to either the SP or the FP, so we use the
3708 least common denominator, e.g. SImode, and offsets from 0 to 64.
3710 ??? Verify whether the above is the right approach.
3712 ??? Also, the FP may be eliminated to the SP, so perhaps that
3713 needs special handling also.
3715 ??? Look at how the mips16 port solves this problem. It probably uses
3716 better ways to solve some of these problems.
3718 Although it is not incorrect, we don't accept QImode and HImode
3719 addresses based on the frame pointer or arg pointer until the
3720 reload pass starts. This is so that eliminating such addresses
3721 into stack based ones won't produce impossible code. */
3722 int
3723 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3725 /* ??? Not clear if this is right. Experiment. */
3726 if (GET_MODE_SIZE (mode) < 4
3727 && !(reload_in_progress || reload_completed)
3728 && (reg_mentioned_p (frame_pointer_rtx, x)
3729 || reg_mentioned_p (arg_pointer_rtx, x)
3730 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3731 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3732 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3733 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3734 return 0;
3736 /* Accept any base register. SP only in SImode or larger. */
3737 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3738 return 1;
3740 /* This is PC relative data before arm_reorg runs. */
3741 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3742 && GET_CODE (x) == SYMBOL_REF
3743 && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
3744 return 1;
3746 /* This is PC relative data after arm_reorg runs. */
3747 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3748 && (GET_CODE (x) == LABEL_REF
3749 || (GET_CODE (x) == CONST
3750 && GET_CODE (XEXP (x, 0)) == PLUS
3751 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3752 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3753 return 1;
3755 /* Post-inc indexing only supported for SImode and larger. */
3756 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3757 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3758 return 1;
3760 else if (GET_CODE (x) == PLUS)
3762 /* REG+REG address can be any two index registers. */
3763 /* We disallow FRAME+REG addressing since we know that FRAME
3764 will be replaced with STACK, and SP relative addressing only
3765 permits SP+OFFSET. */
3766 if (GET_MODE_SIZE (mode) <= 4
3767 && XEXP (x, 0) != frame_pointer_rtx
3768 && XEXP (x, 1) != frame_pointer_rtx
3769 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3770 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3771 return 1;
3773 /* REG+const has 5-7 bit offset for non-SP registers. */
3774 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3775 || XEXP (x, 0) == arg_pointer_rtx)
3776 && GET_CODE (XEXP (x, 1)) == CONST_INT
3777 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3778 return 1;
3780 /* REG+const has a 10-bit offset for SP, but only SImode and
3781 larger are supported. */
3782 /* ??? Should probably check for DI/DFmode overflow here
3783 just like GO_IF_LEGITIMATE_OFFSET does. */
3784 else if (GET_CODE (XEXP (x, 0)) == REG
3785 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3786 && GET_MODE_SIZE (mode) >= 4
3787 && GET_CODE (XEXP (x, 1)) == CONST_INT
3788 && INTVAL (XEXP (x, 1)) >= 0
3789 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3790 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3791 return 1;
3793 else if (GET_CODE (XEXP (x, 0)) == REG
3794 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3795 && GET_MODE_SIZE (mode) >= 4
3796 && GET_CODE (XEXP (x, 1)) == CONST_INT
3797 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3798 return 1;
3801 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3802 && GET_MODE_SIZE (mode) == 4
3803 && GET_CODE (x) == SYMBOL_REF
3804 && CONSTANT_POOL_ADDRESS_P (x)
3805 && ! (flag_pic
3806 && symbol_mentioned_p (get_pool_constant (x))
3807 && ! pcrel_constant_p (get_pool_constant (x))))
3808 return 1;
3810 return 0;
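/* Worked examples (illustrative, not exhaustive): (mem (reg r3)) is
   accepted for any mode; (plus r2 r3) for accesses of at most 4
   bytes; (plus sp (const_int 1020)) for SImode, since 1020 is
   word-aligned and 1020 + 4 <= 1024; and (plus r7 (const_int 124))
   for SImode via thumb_legitimate_offset_p below.  */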
3813 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3814 instruction of mode MODE. */
3816 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3818 switch (GET_MODE_SIZE (mode))
3820 case 1:
3821 return val >= 0 && val < 32;
3823 case 2:
3824 return val >= 0 && val < 64 && (val & 1) == 0;
3826 default:
3827 return (val >= 0
3828 && (val + GET_MODE_SIZE (mode)) <= 128
3829 && (val & 3) == 0);
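/* Concretely: QImode accepts byte offsets 0..31, HImode even offsets
   0..62, and word or larger modes multiples of 4 up to
   128 - GET_MODE_SIZE (mode), i.e. 0..124 for SImode.  These mirror
   the 5-bit scaled immediate fields of the Thumb ldrb/ldrh/ldr
   encodings.  */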
3833 /* Build the SYMBOL_REF for __tls_get_addr. */
3835 static GTY(()) rtx tls_get_addr_libfunc;
3837 static rtx
3838 get_tls_get_addr (void)
3840 if (!tls_get_addr_libfunc)
3841 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
3842 return tls_get_addr_libfunc;
3845 static rtx
3846 arm_load_tp (rtx target)
3848 if (!target)
3849 target = gen_reg_rtx (SImode);
3851 if (TARGET_HARD_TP)
3853 /* Can return in any reg. */
3854 emit_insn (gen_load_tp_hard (target));
3856 else
3858 /* Always returned in r0. Immediately copy the result into a pseudo,
3859 otherwise other uses of r0 (e.g. setting up function arguments) may
3860 clobber the value. */
3862 rtx tmp;
3864 emit_insn (gen_load_tp_soft ());
3866 tmp = gen_rtx_REG (SImode, 0);
3867 emit_move_insn (target, tmp);
3869 return target;
3872 static rtx
3873 load_tls_operand (rtx x, rtx reg)
3875 rtx tmp;
3877 if (reg == NULL_RTX)
3878 reg = gen_reg_rtx (SImode);
3880 tmp = gen_rtx_CONST (SImode, x);
3882 emit_move_insn (reg, tmp);
3884 return reg;
3887 static rtx
3888 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
3890 rtx insns, label, labelno, sum;
3892 start_sequence ();
3894 labelno = GEN_INT (pic_labelno++);
3895 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3896 label = gen_rtx_CONST (VOIDmode, label);
3898 sum = gen_rtx_UNSPEC (Pmode,
3899 gen_rtvec (4, x, GEN_INT (reloc), label,
3900 GEN_INT (TARGET_ARM ? 8 : 4)),
3901 UNSPEC_TLS);
3902 reg = load_tls_operand (sum, reg);
3904 if (TARGET_ARM)
3905 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
3906 else
3907 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3909 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
3910 Pmode, 1, reg, Pmode);
3912 insns = get_insns ();
3913 end_sequence ();
3915 return insns;
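/* A rough sketch of the sequence built above for the ARM/GD case
   (label names illustrative only; the exact relocation syntax is the
   assembler's concern):

	ldr	r0, .Lpool	@ reloc'd offset of X, biased by .L1+8
   .L1:	add	r0, pc, r0	@ pc reads as .L1 + 8 here
	bl	__tls_get_addr	@ result returned in r0

   The GEN_INT (8 or 4) folded into the UNSPEC accounts for the pc
   read-ahead of the ARM (or Thumb) add instruction.  */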
3919 legitimize_tls_address (rtx x, rtx reg)
3921 rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
3922 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
3924 switch (model)
3926 case TLS_MODEL_GLOBAL_DYNAMIC:
3927 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
3928 dest = gen_reg_rtx (Pmode);
3929 emit_libcall_block (insns, dest, ret, x);
3930 return dest;
3932 case TLS_MODEL_LOCAL_DYNAMIC:
3933 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
3935 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3936 share the LDM result with other LD model accesses. */
3937 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
3938 UNSPEC_TLS);
3939 dest = gen_reg_rtx (Pmode);
3940 emit_libcall_block (insns, dest, ret, eqv);
3942 /* Load the addend. */
3943 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
3944 UNSPEC_TLS);
3945 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
3946 return gen_rtx_PLUS (Pmode, dest, addend);
3948 case TLS_MODEL_INITIAL_EXEC:
3949 labelno = GEN_INT (pic_labelno++);
3950 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3951 label = gen_rtx_CONST (VOIDmode, label);
3952 sum = gen_rtx_UNSPEC (Pmode,
3953 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
3954 GEN_INT (TARGET_ARM ? 8 : 4)),
3955 UNSPEC_TLS);
3956 reg = load_tls_operand (sum, reg);
3958 if (TARGET_ARM)
3959 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
3960 else
3962 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3963 emit_move_insn (reg, gen_const_mem (SImode, reg));
3966 tp = arm_load_tp (NULL_RTX);
3968 return gen_rtx_PLUS (Pmode, tp, reg);
3970 case TLS_MODEL_LOCAL_EXEC:
3971 tp = arm_load_tp (NULL_RTX);
3973 reg = gen_rtx_UNSPEC (Pmode,
3974 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
3975 UNSPEC_TLS);
3976 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
3978 return gen_rtx_PLUS (Pmode, tp, reg);
3980 default:
3981 abort ();
3985 /* Try machine-dependent ways of modifying an illegitimate address
3986 to be legitimate. If we find one, return the new, valid address. */
3988 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3990 if (arm_tls_symbol_p (x))
3991 return legitimize_tls_address (x, NULL_RTX);
3993 if (GET_CODE (x) == PLUS)
3995 rtx xop0 = XEXP (x, 0);
3996 rtx xop1 = XEXP (x, 1);
3998 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3999 xop0 = force_reg (SImode, xop0);
4001 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
4002 xop1 = force_reg (SImode, xop1);
4004 if (ARM_BASE_REGISTER_RTX_P (xop0)
4005 && GET_CODE (xop1) == CONST_INT)
4007 HOST_WIDE_INT n, low_n;
4008 rtx base_reg, val;
4009 n = INTVAL (xop1);
4011 /* VFP addressing modes actually allow greater offsets, but for
4012 now we just stick with the lowest common denominator. */
4013 if (mode == DImode
4014 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
4016 low_n = n & 0x0f;
4017 n &= ~0x0f;
4018 if (low_n > 4)
4020 n += 16;
4021 low_n -= 16;
4024 else
4026 low_n = ((mode) == TImode ? 0
4027 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
4028 n -= low_n;
4031 base_reg = gen_reg_rtx (SImode);
4032 val = force_operand (plus_constant (xop0, n), NULL_RTX);
4033 emit_move_insn (base_reg, val);
4034 x = plus_constant (base_reg, low_n);
4036 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4037 x = gen_rtx_PLUS (SImode, xop0, xop1);
4040 /* XXX We don't allow MINUS any more -- see comment in
4041 arm_legitimate_address_p (). */
4042 else if (GET_CODE (x) == MINUS)
4044 rtx xop0 = XEXP (x, 0);
4045 rtx xop1 = XEXP (x, 1);
4047 if (CONSTANT_P (xop0))
4048 xop0 = force_reg (SImode, xop0);
4050 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
4051 xop1 = force_reg (SImode, xop1);
4053 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4054 x = gen_rtx_MINUS (SImode, xop0, xop1);
4057 /* Make sure to take full advantage of the pre-indexed addressing mode
4058 with absolute addresses, which often allows the base register to
4059 be factorized for multiple adjacent memory references, and might
4060 even allow the minipool to be avoided entirely. */
4061 else if (GET_CODE (x) == CONST_INT && optimize > 0)
4063 unsigned int bits;
4064 HOST_WIDE_INT mask, base, index;
4065 rtx base_reg;
4067 /* ldr and ldrb can use a 12-bit index, ldrsb and the rest can only
4068 use an 8-bit index. So let's use a 12-bit index for SImode only and
4069 hope that arm_gen_constant will enable ldrb to use more bits. */
4070 bits = (mode == SImode) ? 12 : 8;
4071 mask = (1 << bits) - 1;
4072 base = INTVAL (x) & ~mask;
4073 index = INTVAL (x) & mask;
4074 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
4076 /* It'll most probably be more efficient to generate the base
4077 with more bits set and use a negative index instead. */
4078 base |= mask;
4079 index -= mask;
4081 base_reg = force_reg (SImode, GEN_INT (base));
4082 x = plus_constant (base_reg, index);
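/* Illustrative trace (32-bit arithmetic): for an SImode access at
   address 0x12345, bits = 12, so base = 0x12000 and index = 0x345;
   base has only two bits set, so it is kept and the result is
   (plus (reg holding 0x12000) 0x345).  For 0xfffff7ff the base
   0xfffff000 has 20 bits set (more than (32-12)/2), so base becomes
   0xffffffff -- a cheap MVN constant -- and index becomes -0x800.  */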
4085 if (flag_pic)
4087 /* We need to find and carefully transform any SYMBOL and LABEL
4088 references; so go back to the original address expression. */
4089 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4091 if (new_x != orig_x)
4092 x = new_x;
4095 return x;
4099 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4100 to be legitimate. If we find one, return the new, valid address. */
4102 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4104 if (arm_tls_symbol_p (x))
4105 return legitimize_tls_address (x, NULL_RTX);
4107 if (GET_CODE (x) == PLUS
4108 && GET_CODE (XEXP (x, 1)) == CONST_INT
4109 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
4110 || INTVAL (XEXP (x, 1)) < 0))
4112 rtx xop0 = XEXP (x, 0);
4113 rtx xop1 = XEXP (x, 1);
4114 HOST_WIDE_INT offset = INTVAL (xop1);
4116 /* Try to fold the offset into a biasing of the base register and
4117 then offset from that. We only do this when optimizing for space;
4118 forcing the constant into a register can cause too many CSEs. */
4119 if (optimize_size && offset >= 0
4120 && offset < 256 + 31 * GET_MODE_SIZE (mode))
4122 HOST_WIDE_INT delta;
4124 if (offset >= 256)
4125 delta = offset - (256 - GET_MODE_SIZE (mode));
4126 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
4127 delta = 31 * GET_MODE_SIZE (mode);
4128 else
4129 delta = offset & (~31 * GET_MODE_SIZE (mode));
4131 xop0 = force_operand (plus_constant (xop0, offset - delta),
4132 NULL_RTX);
4133 x = plus_constant (xop0, delta);
4135 else if (offset < 0 && offset > -256)
4136 /* Small negative offsets are best done with a subtract before the
4137 dereference; forcing these into a register normally takes two
4138 instructions. */
4139 x = force_operand (x, NULL_RTX);
4140 else
4142 /* For the remaining cases, force the constant into a register. */
4143 xop1 = force_reg (SImode, xop1);
4144 x = gen_rtx_PLUS (SImode, xop0, xop1);
4147 else if (GET_CODE (x) == PLUS
4148 && s_register_operand (XEXP (x, 1), SImode)
4149 && !s_register_operand (XEXP (x, 0), SImode))
4151 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
4153 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
4156 if (flag_pic)
4158 /* We need to find and carefully transform any SYMBOL and LABEL
4159 references; so go back to the original address expression. */
4160 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4162 if (new_x != orig_x)
4163 x = new_x;
4166 return x;
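/* Worked example of the folding above (illustrative): an SImode
   access at (plus r3 300) exceeds the 0..124 word-offset range.
   When optimizing for size we compute delta = 300 - (256 - 4) = 48,
   emit r' = r3 + 252 (a single add of an 8-bit immediate) and return
   (plus r' 48), a legal word offset.  */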
4170 thumb_legitimize_reload_address (rtx *x_p,
4171 enum machine_mode mode,
4172 int opnum, int type,
4173 int ind_levels ATTRIBUTE_UNUSED)
4175 rtx x = *x_p;
4177 if (GET_CODE (x) == PLUS
4178 && GET_MODE_SIZE (mode) < 4
4179 && REG_P (XEXP (x, 0))
4180 && XEXP (x, 0) == stack_pointer_rtx
4181 && GET_CODE (XEXP (x, 1)) == CONST_INT
4182 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4184 rtx orig_x = x;
4186 x = copy_rtx (x);
4187 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4188 Pmode, VOIDmode, 0, 0, opnum, type);
4189 return x;
4192 /* If both registers are hi-regs, then it's better to reload the
4193 entire expression rather than each register individually. That
4194 only requires one reload register rather than two. */
4195 if (GET_CODE (x) == PLUS
4196 && REG_P (XEXP (x, 0))
4197 && REG_P (XEXP (x, 1))
4198 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
4199 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
4201 rtx orig_x = x;
4203 x = copy_rtx (x);
4204 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4205 Pmode, VOIDmode, 0, 0, opnum, type);
4206 return x;
4209 return NULL;
4212 /* Test for various thread-local symbols. */
4214 /* Return TRUE if X is a thread-local symbol. */
4216 static bool
4217 arm_tls_symbol_p (rtx x)
4219 if (! TARGET_HAVE_TLS)
4220 return false;
4222 if (GET_CODE (x) != SYMBOL_REF)
4223 return false;
4225 return SYMBOL_REF_TLS_MODEL (x) != 0;
4228 /* Helper for arm_tls_referenced_p. */
4230 static int
4231 arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4233 if (GET_CODE (*x) == SYMBOL_REF)
4234 return SYMBOL_REF_TLS_MODEL (*x) != 0;
4236 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
4237 TLS offsets, not real symbol references. */
4238 if (GET_CODE (*x) == UNSPEC
4239 && XINT (*x, 1) == UNSPEC_TLS)
4240 return -1;
4242 return 0;
4245 /* Return TRUE if X contains any TLS symbol references. */
4247 bool
4248 arm_tls_referenced_p (rtx x)
4250 if (! TARGET_HAVE_TLS)
4251 return false;
4253 return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
4256 #define REG_OR_SUBREG_REG(X) \
4257 (GET_CODE (X) == REG \
4258 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
4260 #define REG_OR_SUBREG_RTX(X) \
4261 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
4263 #ifndef COSTS_N_INSNS
4264 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
4265 #endif
4266 static inline int
4267 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
4269 enum machine_mode mode = GET_MODE (x);
4271 switch (code)
4273 case ASHIFT:
4274 case ASHIFTRT:
4275 case LSHIFTRT:
4276 case ROTATERT:
4277 case PLUS:
4278 case MINUS:
4279 case COMPARE:
4280 case NEG:
4281 case NOT:
4282 return COSTS_N_INSNS (1);
4284 case MULT:
4285 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4287 int cycles = 0;
4288 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
4290 while (i)
4292 i >>= 2;
4293 cycles++;
4295 return COSTS_N_INSNS (2) + cycles;
4297 return COSTS_N_INSNS (1) + 16;
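/* E.g. a multiply by 100 (0x64) drains in four 2-bit steps
   (0x19, 0x6, 0x1, 0), costing COSTS_N_INSNS (2) + 4; a multiply by
   a non-constant gets the pessimistic COSTS_N_INSNS (1) + 16.  */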
4299 case SET:
4300 return (COSTS_N_INSNS (1)
4301 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
4302 + (GET_CODE (SET_DEST (x)) == MEM)));
4304 case CONST_INT:
4305 if (outer == SET)
4307 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
4308 return 0;
4309 if (thumb_shiftable_const (INTVAL (x)))
4310 return COSTS_N_INSNS (2);
4311 return COSTS_N_INSNS (3);
4313 else if ((outer == PLUS || outer == COMPARE)
4314 && INTVAL (x) < 256 && INTVAL (x) > -256)
4315 return 0;
4316 else if (outer == AND
4317 && INTVAL (x) < 256 && INTVAL (x) >= -256)
4318 return COSTS_N_INSNS (1);
4319 else if (outer == ASHIFT || outer == ASHIFTRT
4320 || outer == LSHIFTRT)
4321 return 0;
4322 return COSTS_N_INSNS (2);
4324 case CONST:
4325 case CONST_DOUBLE:
4326 case LABEL_REF:
4327 case SYMBOL_REF:
4328 return COSTS_N_INSNS (3);
4330 case UDIV:
4331 case UMOD:
4332 case DIV:
4333 case MOD:
4334 return 100;
4336 case TRUNCATE:
4337 return 99;
4339 case AND:
4340 case XOR:
4341 case IOR:
4342 /* XXX guess. */
4343 return 8;
4345 case MEM:
4346 /* XXX another guess. */
4347 /* Memory costs quite a lot for the first word, but subsequent words
4348 load at the equivalent of a single insn each. */
4349 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4350 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4351 ? 4 : 0));
4353 case IF_THEN_ELSE:
4354 /* XXX a guess. */
4355 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4356 return 14;
4357 return 2;
4359 case ZERO_EXTEND:
4360 /* XXX still guessing. */
4361 switch (GET_MODE (XEXP (x, 0)))
4363 case QImode:
4364 return (1 + (mode == DImode ? 4 : 0)
4365 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4367 case HImode:
4368 return (4 + (mode == DImode ? 4 : 0)
4369 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4371 case SImode:
4372 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4374 default:
4375 return 99;
4378 default:
4379 return 99;
4384 /* Worker routine for arm_rtx_costs. */
4385 static inline int
4386 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4388 enum machine_mode mode = GET_MODE (x);
4389 enum rtx_code subcode;
4390 int extra_cost;
4392 switch (code)
4394 case MEM:
4395 /* Memory costs quite a lot for the first word, but subsequent words
4396 load at the equivalent of a single insn each. */
4397 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4398 + (GET_CODE (x) == SYMBOL_REF
4399 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4401 case DIV:
4402 case MOD:
4403 case UDIV:
4404 case UMOD:
4405 return optimize_size ? COSTS_N_INSNS (2) : 100;
4407 case ROTATE:
4408 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4409 return 4;
4410 /* Fall through */
4411 case ROTATERT:
4412 if (mode != SImode)
4413 return 8;
4414 /* Fall through */
4415 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4416 if (mode == DImode)
4417 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4418 + ((GET_CODE (XEXP (x, 0)) == REG
4419 || (GET_CODE (XEXP (x, 0)) == SUBREG
4420 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4421 ? 0 : 8));
4422 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4423 || (GET_CODE (XEXP (x, 0)) == SUBREG
4424 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4425 ? 0 : 4)
4426 + ((GET_CODE (XEXP (x, 1)) == REG
4427 || (GET_CODE (XEXP (x, 1)) == SUBREG
4428 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4429 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4430 ? 0 : 4));
4432 case MINUS:
4433 if (mode == DImode)
4434 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4435 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4436 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4437 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4438 ? 0 : 8));
4440 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4441 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4442 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4443 && arm_const_double_rtx (XEXP (x, 1))))
4444 ? 0 : 8)
4445 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4446 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4447 && arm_const_double_rtx (XEXP (x, 0))))
4448 ? 0 : 8));
4450 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4451 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4452 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4453 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4454 || subcode == ASHIFTRT || subcode == LSHIFTRT
4455 || subcode == ROTATE || subcode == ROTATERT
4456 || (subcode == MULT
4457 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4458 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4459 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4460 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4461 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4462 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4463 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4464 return 1;
4465 /* Fall through */
4467 case PLUS:
4468 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4469 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4470 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4471 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4472 && arm_const_double_rtx (XEXP (x, 1))))
4473 ? 0 : 8));
4475 /* Fall through */
4476 case AND: case XOR: case IOR:
4477 extra_cost = 0;
4479 /* Normally the frame registers will be split into reg+const during
4480 reload, so it is a bad idea to combine them with other instructions,
4481 since then they might not be moved outside of loops. As a compromise
4482 we allow integration with ops that have a constant as their second
4483 operand. */
4484 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4485 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4486 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4487 || (REG_OR_SUBREG_REG (XEXP (x, 0))
4488 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
4489 extra_cost = 4;
4491 if (mode == DImode)
4492 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4493 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4494 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4495 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4496 ? 0 : 8));
4498 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4499 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4500 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4501 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4502 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4503 ? 0 : 4));
4505 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4506 return (1 + extra_cost
4507 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4508 || subcode == LSHIFTRT || subcode == ASHIFTRT
4509 || subcode == ROTATE || subcode == ROTATERT
4510 || (subcode == MULT
4511 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4512 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4513 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4514 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4515 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4516 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4517 ? 0 : 4));
4519 return 8;
4521 case MULT:
4522 /* This should have been handled by the CPU specific routines. */
4523 gcc_unreachable ();
4525 case TRUNCATE:
4526 if (arm_arch3m && mode == SImode
4527 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4528 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4529 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4530 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4531 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4532 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4533 return 8;
4534 return 99;
4536 case NEG:
4537 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4538 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4539 /* Fall through */
4540 case NOT:
4541 if (mode == DImode)
4542 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4544 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4546 case IF_THEN_ELSE:
4547 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4548 return 14;
4549 return 2;
4551 case COMPARE:
4552 return 1;
4554 case ABS:
4555 return 4 + (mode == DImode ? 4 : 0);
4557 case SIGN_EXTEND:
4558 if (GET_MODE (XEXP (x, 0)) == QImode)
4559 return (4 + (mode == DImode ? 4 : 0)
4560 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4561 /* Fall through */
4562 case ZERO_EXTEND:
4563 switch (GET_MODE (XEXP (x, 0)))
4565 case QImode:
4566 return (1 + (mode == DImode ? 4 : 0)
4567 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4569 case HImode:
4570 return (4 + (mode == DImode ? 4 : 0)
4571 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4573 case SImode:
4574 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4576 case V8QImode:
4577 case V4HImode:
4578 case V2SImode:
4579 case V4QImode:
4580 case V2HImode:
4581 return 1;
4583 default:
4584 gcc_unreachable ();
4586 gcc_unreachable ();
4588 case CONST_INT:
4589 if (const_ok_for_arm (INTVAL (x)))
4590 return outer == SET ? 2 : -1;
4591 else if (outer == AND
4592 && const_ok_for_arm (~INTVAL (x)))
4593 return -1;
4594 else if ((outer == COMPARE
4595 || outer == PLUS || outer == MINUS)
4596 && const_ok_for_arm (-INTVAL (x)))
4597 return -1;
4598 else
4599 return 5;
4601 case CONST:
4602 case LABEL_REF:
4603 case SYMBOL_REF:
4604 return 6;
4606 case CONST_DOUBLE:
4607 if (arm_const_double_rtx (x))
4608 return outer == SET ? 2 : -1;
4609 else if ((outer == COMPARE || outer == PLUS)
4610 && neg_const_double_rtx_ok_for_fpa (x))
4611 return -1;
4612 return 7;
4614 default:
4615 return 99;
4619 /* RTX costs when optimizing for size. */
4620 static bool
4621 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4623 enum machine_mode mode = GET_MODE (x);
4625 if (TARGET_THUMB)
4627 /* XXX TBD. For now, use the standard costs. */
4628 *total = thumb_rtx_costs (x, code, outer_code);
4629 return true;
4632 switch (code)
4634 case MEM:
4635 /* A memory access costs 1 insn if the mode is small or the address is
4636 a single register; otherwise it costs one insn per word. */
4637 if (REG_P (XEXP (x, 0)))
4638 *total = COSTS_N_INSNS (1);
4639 else
4640 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4641 return true;
4643 case DIV:
4644 case MOD:
4645 case UDIV:
4646 case UMOD:
4647 /* Needs a libcall, so it costs about this. */
4648 *total = COSTS_N_INSNS (2);
4649 return false;
4651 case ROTATE:
4652 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4654 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4655 return true;
4657 /* Fall through */
4658 case ROTATERT:
4659 case ASHIFT:
4660 case LSHIFTRT:
4661 case ASHIFTRT:
4662 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4664 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4665 return true;
4667 else if (mode == SImode)
4669 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4670 /* Slightly disparage register shifts, but not by much. */
4671 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4672 *total += 1 + rtx_cost (XEXP (x, 1), code);
4673 return true;
4676 /* Needs a libcall. */
4677 *total = COSTS_N_INSNS (2);
4678 return false;
4680 case MINUS:
4681 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4683 *total = COSTS_N_INSNS (1);
4684 return false;
4687 if (mode == SImode)
4689 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4690 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4692 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4693 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4694 || subcode1 == ROTATE || subcode1 == ROTATERT
4695 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4696 || subcode1 == ASHIFTRT)
4698 /* It's just the cost of the two operands. */
4699 *total = 0;
4700 return false;
4703 *total = COSTS_N_INSNS (1);
4704 return false;
4707 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4708 return false;
4710 case PLUS:
4711 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4713 *total = COSTS_N_INSNS (1);
4714 return false;
4717 /* Fall through */
4718 case AND: case XOR: case IOR:
4719 if (mode == SImode)
4721 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4723 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4724 || subcode == LSHIFTRT || subcode == ASHIFTRT
4725 || (code == AND && subcode == NOT))
4727 /* It's just the cost of the two operands. */
4728 *total = 0;
4729 return false;
4733 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4734 return false;
4736 case MULT:
4737 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4738 return false;
4740 case NEG:
4741 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4742 *total = COSTS_N_INSNS (1);
4743 /* Fall through */
4744 case NOT:
4745 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4747 return false;
4749 case IF_THEN_ELSE:
4750 *total = 0;
4751 return false;
4753 case COMPARE:
4754 if (cc_register (XEXP (x, 0), VOIDmode))
4755 *total = 0;
4756 else
4757 *total = COSTS_N_INSNS (1);
4758 return false;
4760 case ABS:
4761 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4762 *total = COSTS_N_INSNS (1);
4763 else
4764 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4765 return false;
4767 case SIGN_EXTEND:
4768 *total = 0;
4769 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4771 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4772 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4774 if (mode == DImode)
4775 *total += COSTS_N_INSNS (1);
4776 return false;
4778 case ZERO_EXTEND:
4779 *total = 0;
4780 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4782 switch (GET_MODE (XEXP (x, 0)))
4784 case QImode:
4785 *total += COSTS_N_INSNS (1);
4786 break;
4788 case HImode:
4789 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4791 case SImode:
4792 break;
4794 default:
4795 *total += COSTS_N_INSNS (2);
4799 if (mode == DImode)
4800 *total += COSTS_N_INSNS (1);
4802 return false;
4804 case CONST_INT:
4805 if (const_ok_for_arm (INTVAL (x)))
4806 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4807 else if (const_ok_for_arm (~INTVAL (x)))
4808 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4809 else if (const_ok_for_arm (-INTVAL (x)))
4811 if (outer_code == COMPARE || outer_code == PLUS
4812 || outer_code == MINUS)
4813 *total = 0;
4814 else
4815 *total = COSTS_N_INSNS (1);
4817 else
4818 *total = COSTS_N_INSNS (2);
4819 return true;
4821 case CONST:
4822 case LABEL_REF:
4823 case SYMBOL_REF:
4824 *total = COSTS_N_INSNS (2);
4825 return true;
4827 case CONST_DOUBLE:
4828 *total = COSTS_N_INSNS (4);
4829 return true;
4831 default:
4832 if (mode != VOIDmode)
4833 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4834 else
4835 *total = COSTS_N_INSNS (4); /* Who knows? */
4836 return false;
4840 /* RTX costs for cores with a slow MUL implementation. */
4842 static bool
4843 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4845 enum machine_mode mode = GET_MODE (x);
4847 if (TARGET_THUMB)
4849 *total = thumb_rtx_costs (x, code, outer_code);
4850 return true;
4853 switch (code)
4855 case MULT:
4856 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4857 || mode == DImode)
4859 *total = 30;
4860 return true;
4863 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4865 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4866 & (unsigned HOST_WIDE_INT) 0xffffffff);
4867 int cost, const_ok = const_ok_for_arm (i);
4868 int j, booth_unit_size;
4870 /* Tune as appropriate. */
4871 cost = const_ok ? 4 : 8;
4872 booth_unit_size = 2;
4873 for (j = 0; i && j < 32; j += booth_unit_size)
4875 i >>= booth_unit_size;
4876 cost += 2;
4879 *total = cost;
4880 return true;
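/* Example: for a multiply by 100, i = 0x64 drains in four 2-bit
   Booth steps, and 100 is a valid ARM immediate, so the loop yields
   *total = 4 + 4*2 = 12.  The fast-multiply routine below uses 8-bit
   steps and would charge 4 + 2 = 6 for the same constant.  */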
4883 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4884 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4885 return true;
4887 default:
4888 *total = arm_rtx_costs_1 (x, code, outer_code);
4889 return true;
4894 /* RTX cost for cores with a fast multiply unit (M variants). */
4896 static bool
4897 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4899 enum machine_mode mode = GET_MODE (x);
4901 if (TARGET_THUMB)
4903 *total = thumb_rtx_costs (x, code, outer_code);
4904 return true;
4907 switch (code)
4909 case MULT:
4910 /* There is no point basing this on the tuning, since it is always the
4911 fast variant if it exists at all. */
4912 if (mode == DImode
4913 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4914 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4915 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4917 *total = 8;
4918 return true;
4922 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4923 || mode == DImode)
4925 *total = 30;
4926 return true;
4929 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4931 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4932 & (unsigned HOST_WIDE_INT) 0xffffffff);
4933 int cost, const_ok = const_ok_for_arm (i);
4934 int j, booth_unit_size;
4936 /* Tune as appropriate. */
4937 cost = const_ok ? 4 : 8;
4938 booth_unit_size = 8;
4939 for (j = 0; i && j < 32; j += booth_unit_size)
4941 i >>= booth_unit_size;
4942 cost += 2;
4945 *total = cost;
4946 return true;
4949 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4950 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4951 return true;
4953 default:
4954 *total = arm_rtx_costs_1 (x, code, outer_code);
4955 return true;
4960 /* RTX cost for XScale CPUs. */
4962 static bool
4963 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4965 enum machine_mode mode = GET_MODE (x);
4967 if (TARGET_THUMB)
4969 *total = thumb_rtx_costs (x, code, outer_code);
4970 return true;
4973 switch (code)
4975 case MULT:
4976 /* There is no point basing this on the tuning, since it is always the
4977 fast variant if it exists at all. */
4978 if (mode == DImode
4979 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4980 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4981 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4983 *total = 8;
4984 return true;
4988 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4989 || mode == DImode)
4991 *total = 30;
4992 return true;
4995 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4997 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4998 & (unsigned HOST_WIDE_INT) 0xffffffff);
4999 int cost, const_ok = const_ok_for_arm (i);
5000 unsigned HOST_WIDE_INT masked_const;
5002 /* The cost will be related to two insns.
5003 First a load of the constant (MOV or LDR), then a multiply. */
5004 cost = 2;
5005 if (! const_ok)
5006 cost += 1; /* LDR is probably more expensive because
5007 of longer result latency. */
5008 masked_const = i & 0xffff8000;
5009 if (masked_const != 0 && masked_const != 0xffff8000)
5011 masked_const = i & 0xf8000000;
5012 if (masked_const == 0 || masked_const == 0xf8000000)
5013 cost += 1;
5014 else
5015 cost += 2;
5017 *total = cost;
5018 return true;
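/* Example: i = 100 has (i & 0xffff8000) == 0, so the multiplier can
   terminate early and only the base cost of 2 is charged.  For
   i = 0x12345678, masked_const is 0x12340000 and i & 0xf8000000 is
   0x10000000, so no early termination applies: cost = 2 + 1 (LDR,
   since the constant is not encodable) + 2 = 5.  */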
5021 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5022 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5023 return true;
5025 case COMPARE:
5026 /* A COMPARE of a MULT is slow on XScale; the muls instruction
5027 will stall until the multiplication is complete. */
5028 if (GET_CODE (XEXP (x, 0)) == MULT)
5029 *total = 4 + rtx_cost (XEXP (x, 0), code);
5030 else
5031 *total = arm_rtx_costs_1 (x, code, outer_code);
5032 return true;
5034 default:
5035 *total = arm_rtx_costs_1 (x, code, outer_code);
5036 return true;
5041 /* RTX costs for 9e (and later) cores. */
5043 static bool
5044 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
5046 enum machine_mode mode = GET_MODE (x);
5047 int nonreg_cost;
5048 int cost;
5050 if (TARGET_THUMB)
5052 switch (code)
5054 case MULT:
5055 *total = COSTS_N_INSNS (3);
5056 return true;
5058 default:
5059 *total = thumb_rtx_costs (x, code, outer_code);
5060 return true;
5064 switch (code)
5066 case MULT:
5067 /* There is no point basing this on the tuning, since it is always the
5068 fast variant if it exists at all. */
5069 if (mode == DImode
5070 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5071 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5072 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5074 *total = 3;
5075 return true;
5079 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5081 *total = 30;
5082 return true;
5084 if (mode == DImode)
5086 cost = 7;
5087 nonreg_cost = 8;
5089 else
5091 cost = 2;
5092 nonreg_cost = 4;
5096 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
5097 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
5098 return true;
5100 default:
5101 *total = arm_rtx_costs_1 (x, code, outer_code);
5102 return true;
5105 /* All address computations that can be done are free, but rtx_cost
5106 returns practically the same value for all of them. So we weight the
5107 different types of address here, most preferred first:
5108 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
5109 static inline int
5110 arm_arm_address_cost (rtx x)
5112 enum rtx_code c = GET_CODE (x);
5114 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
5115 return 0;
5116 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
5117 return 10;
5119 if (c == PLUS || c == MINUS)
5121 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5122 return 2;
5124 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
5125 return 3;
5127 return 4;
5130 return 6;
5133 static inline int
5134 arm_thumb_address_cost (rtx x)
5136 enum rtx_code c = GET_CODE (x);
5138 if (c == REG)
5139 return 1;
5140 if (c == PLUS
5141 && GET_CODE (XEXP (x, 0)) == REG
5142 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5143 return 1;
5145 return 2;
5148 static int
5149 arm_address_cost (rtx x)
5151 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
5154 static int
5155 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
5157 rtx i_pat, d_pat;
5159 /* Some true dependencies can have a higher cost depending
5160 on precisely how certain input operands are used. */
5161 if (arm_tune_xscale
5162 && REG_NOTE_KIND (link) == 0
5163 && recog_memoized (insn) >= 0
5164 && recog_memoized (dep) >= 0)
5166 int shift_opnum = get_attr_shift (insn);
5167 enum attr_type attr_type = get_attr_type (dep);
5169 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5170 operand for INSN. If we have a shifted input operand and the
5171 instruction we depend on is another ALU instruction, then we may
5172 have to account for an additional stall. */
5173 if (shift_opnum != 0
5174 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
5176 rtx shifted_operand;
5177 int opno;
5179 /* Get the shifted operand. */
5180 extract_insn (insn);
5181 shifted_operand = recog_data.operand[shift_opnum];
5183 /* Iterate over all the operands in DEP. If we write an operand
5184 that overlaps with SHIFTED_OPERAND, then we have to increase the
5185 cost of this dependency. */
5186 extract_insn (dep);
5187 preprocess_constraints ();
5188 for (opno = 0; opno < recog_data.n_operands; opno++)
5190 /* We can ignore strict inputs. */
5191 if (recog_data.operand_type[opno] == OP_IN)
5192 continue;
5194 if (reg_overlap_mentioned_p (recog_data.operand[opno],
5195 shifted_operand))
5196 return 2;
5201 /* XXX This is not strictly true for the FPA. */
5202 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
5203 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
5204 return 0;
5206 /* Call insns don't incur a stall, even if they follow a load. */
5207 if (REG_NOTE_KIND (link) == 0
5208 && GET_CODE (insn) == CALL_INSN)
5209 return 1;
5211 if ((i_pat = single_set (insn)) != NULL
5212 && GET_CODE (SET_SRC (i_pat)) == MEM
5213 && (d_pat = single_set (dep)) != NULL
5214 && GET_CODE (SET_DEST (d_pat)) == MEM)
5216 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
5217 /* This is a load after a store; there is no conflict if the load reads
5218 from a cached area. Assume that loads from the stack and from the
5219 constant pool are cached, and that others will miss. This is a
5220 hack. */
5222 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
5223 || reg_mentioned_p (stack_pointer_rtx, src_mem)
5224 || reg_mentioned_p (frame_pointer_rtx, src_mem)
5225 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
5226 return 1;
5229 return cost;
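/* Example of the XScale shift-dependency case above (illustrative):

	mov	r3, r2, asl #2		@ DEP: an ALU-shift insn writing r3
	add	r0, r1, r3, asl #1	@ INSN: r3 is its shifted operand

   Here the add's shifted input is produced by another shifting ALU
   instruction, so the true dependency is charged a cost of 2.  */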
5232 static int fp_consts_inited = 0;
5234 /* Only the first entry (zero) is valid for VFP; all eight are valid for FPA. */
5235 static const char * const strings_fp[8] =
5237 "0", "1", "2", "3",
5238 "4", "5", "0.5", "10"
5241 static REAL_VALUE_TYPE values_fp[8];
5243 static void
5244 init_fp_table (void)
5246 int i;
5247 REAL_VALUE_TYPE r;
5249 if (TARGET_VFP)
5250 fp_consts_inited = 1;
5251 else
5252 fp_consts_inited = 8;
5254 for (i = 0; i < fp_consts_inited; i++)
5256 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
5257 values_fp[i] = r;
5261 /* Return TRUE if rtx X is a valid immediate FP constant. */
5263 arm_const_double_rtx (rtx x)
5265 REAL_VALUE_TYPE r;
5266 int i;
5268 if (!fp_consts_inited)
5269 init_fp_table ();
5271 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5272 if (REAL_VALUE_MINUS_ZERO (r))
5273 return 0;
5275 for (i = 0; i < fp_consts_inited; i++)
5276 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5277 return 1;
5279 return 0;
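/* So, for example, (const_double 10.0) is accepted as an FPA
   immediate, while under VFP the table is truncated to its first
   entry and only +0.0 passes; -0.0 is rejected in either case.  */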
5282 /* Return TRUE if rtx X is a valid immediate FPA constant. */
5284 neg_const_double_rtx_ok_for_fpa (rtx x)
5286 REAL_VALUE_TYPE r;
5287 int i;
5289 if (!fp_consts_inited)
5290 init_fp_table ();
5292 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5293 r = REAL_VALUE_NEGATE (r);
5294 if (REAL_VALUE_MINUS_ZERO (r))
5295 return 0;
5297 for (i = 0; i < 8; i++)
5298 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5299 return 1;
5301 return 0;
5304 /* Predicates for `match_operand' and `match_operator'. */
5306 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5308 cirrus_memory_offset (rtx op)
5310 /* Reject eliminable registers. */
5311 if (! (reload_in_progress || reload_completed)
5312 && ( reg_mentioned_p (frame_pointer_rtx, op)
5313 || reg_mentioned_p (arg_pointer_rtx, op)
5314 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5315 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5316 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5317 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5318 return 0;
5320 if (GET_CODE (op) == MEM)
5322 rtx ind;
5324 ind = XEXP (op, 0);
5326 /* Match: (mem (reg)). */
5327 if (GET_CODE (ind) == REG)
5328 return 1;
5330 /* Match:
5331 (mem (plus (reg)
5332 (const))). */
5333 if (GET_CODE (ind) == PLUS
5334 && GET_CODE (XEXP (ind, 0)) == REG
5335 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5336 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
5337 return 1;
5340 return 0;
5343 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5344 WB is true if writeback address modes are allowed. */
5347 arm_coproc_mem_operand (rtx op, bool wb)
5349 rtx ind;
5351 /* Reject eliminable registers. */
5352 if (! (reload_in_progress || reload_completed)
5353 && ( reg_mentioned_p (frame_pointer_rtx, op)
5354 || reg_mentioned_p (arg_pointer_rtx, op)
5355 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5356 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5357 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5358 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5359 return FALSE;
5361 /* Constants are converted into offsets from labels. */
5362 if (GET_CODE (op) != MEM)
5363 return FALSE;
5365 ind = XEXP (op, 0);
5367 if (reload_completed
5368 && (GET_CODE (ind) == LABEL_REF
5369 || (GET_CODE (ind) == CONST
5370 && GET_CODE (XEXP (ind, 0)) == PLUS
5371 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5372 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5373 return TRUE;
5375 /* Match: (mem (reg)). */
5376 if (GET_CODE (ind) == REG)
5377 return arm_address_register_rtx_p (ind, 0);
5379 /* Autoincrement addressing modes. */
5380 if (wb
5381 && (GET_CODE (ind) == PRE_INC
5382 || GET_CODE (ind) == POST_INC
5383 || GET_CODE (ind) == PRE_DEC
5384 || GET_CODE (ind) == POST_DEC))
5385 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5387 if (wb
5388 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5389 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5390 && GET_CODE (XEXP (ind, 1)) == PLUS
5391 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5392 ind = XEXP (ind, 1);
5394 /* Match:
5395 (plus (reg)
5396 (const)). */
5397 if (GET_CODE (ind) == PLUS
5398 && GET_CODE (XEXP (ind, 0)) == REG
5399 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5400 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5401 && INTVAL (XEXP (ind, 1)) > -1024
5402 && INTVAL (XEXP (ind, 1)) < 1024
5403 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5404 return TRUE;
5406 return FALSE;
5409 /* Return true if X is a register that will be eliminated later on. */
5411 arm_eliminable_register (rtx x)
5413 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5414 || REGNO (x) == ARG_POINTER_REGNUM
5415 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5416 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5419 /* Return GENERAL_REGS if a scratch register is required to reload x
5420 to/from VFP registers. Otherwise return NO_REGS. */
5422 enum reg_class
5423 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5425 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5426 return NO_REGS;
5428 return GENERAL_REGS;
5431 /* Values which must be returned in the most-significant end of the return
5432 register. */
5434 static bool
5435 arm_return_in_msb (tree valtype)
5437 return (TARGET_AAPCS_BASED
5438 && BYTES_BIG_ENDIAN
5439 && (AGGREGATE_TYPE_P (valtype)
5440 || TREE_CODE (valtype) == COMPLEX_TYPE));
5443 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5444 Used by the Cirrus Maverick code which has to work around
5445 a hardware bug triggered by such instructions. */
5446 static bool
5447 arm_memory_load_p (rtx insn)
5449 rtx body, lhs, rhs;
5451 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5452 return false;
5454 body = PATTERN (insn);
5456 if (GET_CODE (body) != SET)
5457 return false;
5459 lhs = XEXP (body, 0);
5460 rhs = XEXP (body, 1);
5462 lhs = REG_OR_SUBREG_RTX (lhs);
5464 /* If the destination is not a general purpose
5465 register we do not have to worry. */
5466 if (GET_CODE (lhs) != REG
5467 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5468 return false;
5470 /* As well as loads from memory we also have to react
5471 to loads of invalid constants which will be turned
5472 into loads from the minipool. */
5473 return (GET_CODE (rhs) == MEM
5474 || GET_CODE (rhs) == SYMBOL_REF
5475 || note_invalid_constants (insn, -1, false));
5478 /* Return TRUE if INSN is a Cirrus instruction. */
5479 static bool
5480 arm_cirrus_insn_p (rtx insn)
5482 enum attr_cirrus attr;
5484 /* get_attr cannot accept USE or CLOBBER. */
5485 if (!insn
5486 || GET_CODE (insn) != INSN
5487 || GET_CODE (PATTERN (insn)) == USE
5488 || GET_CODE (PATTERN (insn)) == CLOBBER)
5489 return 0;
5491 attr = get_attr_cirrus (insn);
5493 return attr != CIRRUS_NOT;
5496 /* Cirrus reorg for invalid instruction combinations. */
5497 static void
5498 cirrus_reorg (rtx first)
5500 enum attr_cirrus attr;
5501 rtx body = PATTERN (first);
5502 rtx t;
5503 int nops;
5505 /* Any branch must be followed by 2 non-Cirrus instructions. */
5506 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5508 nops = 0;
5509 t = next_nonnote_insn (first);
5511 if (arm_cirrus_insn_p (t))
5512 ++ nops;
5514 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5515 ++ nops;
5517 while (nops --)
5518 emit_insn_after (gen_nop (), first);
5520 return;
5523 /* (float (blah)) is in parallel with a clobber. */
5524 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5525 body = XVECEXP (body, 0, 0);
5527 if (GET_CODE (body) == SET)
5529 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5531 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5532 be followed by a non-Cirrus insn. */
5533 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5535 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5536 emit_insn_after (gen_nop (), first);
5538 return;
5540 else if (arm_memory_load_p (first))
5542 unsigned int arm_regno;
5544 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5545 ldr/cfmv64hr combination where the Rd field is the same
5546 in both instructions must be split with a non-Cirrus
5547 insn. Example:
5549 ldr r0, blah
5551 cfmvsr mvf0, r0. */
5553 /* Get Arm register number for ldr insn. */
5554 if (GET_CODE (lhs) == REG)
5555 arm_regno = REGNO (lhs);
5556 else
5558 gcc_assert (GET_CODE (rhs) == REG);
5559 arm_regno = REGNO (rhs);
5562 /* Next insn. */
5563 first = next_nonnote_insn (first);
5565 if (! arm_cirrus_insn_p (first))
5566 return;
5568 body = PATTERN (first);
5570 /* (float (blah)) is in parallel with a clobber. */
5571 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5572 body = XVECEXP (body, 0, 0);
5574 if (GET_CODE (body) == FLOAT)
5575 body = XEXP (body, 0);
5577 if (get_attr_cirrus (first) == CIRRUS_MOVE
5578 && GET_CODE (XEXP (body, 1)) == REG
5579 && arm_regno == REGNO (XEXP (body, 1)))
5580 emit_insn_after (gen_nop (), first);
5582 return;
5586 /* get_attr cannot accept USE or CLOBBER. */
5587 if (!first
5588 || GET_CODE (first) != INSN
5589 || GET_CODE (PATTERN (first)) == USE
5590 || GET_CODE (PATTERN (first)) == CLOBBER)
5591 return;
5593 attr = get_attr_cirrus (first);
5595 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5596 must be followed by a non-coprocessor instruction. */
5597 if (attr == CIRRUS_COMPARE)
5599 nops = 0;
5601 t = next_nonnote_insn (first);
5603 if (arm_cirrus_insn_p (t))
5604 ++ nops;
5606 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5607 ++ nops;
5609 while (nops --)
5610 emit_insn_after (gen_nop (), first);
5612 return;
5616 /* Return TRUE if X references a SYMBOL_REF. */
5618 symbol_mentioned_p (rtx x)
5620 const char * fmt;
5621 int i;
5623 if (GET_CODE (x) == SYMBOL_REF)
5624 return 1;
5626 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
5627 are constant offsets, not symbols. */
5628 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5629 return 0;
5631 fmt = GET_RTX_FORMAT (GET_CODE (x));
5633 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5635 if (fmt[i] == 'E')
5637 int j;
5639 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5640 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5641 return 1;
5643 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5644 return 1;
5647 return 0;
5650 /* Return TRUE if X references a LABEL_REF. */
5652 label_mentioned_p (rtx x)
5654 const char * fmt;
5655 int i;
5657 if (GET_CODE (x) == LABEL_REF)
5658 return 1;
5660 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
5661 instruction, but they are constant offsets, not symbols. */
5662 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5663 return 0;
5665 fmt = GET_RTX_FORMAT (GET_CODE (x));
5666 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5668 if (fmt[i] == 'E')
5670 int j;
5672 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5673 if (label_mentioned_p (XVECEXP (x, i, j)))
5674 return 1;
5676 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5677 return 1;
5680 return 0;
5684 tls_mentioned_p (rtx x)
5686 switch (GET_CODE (x))
5688 case CONST:
5689 return tls_mentioned_p (XEXP (x, 0));
5691 case UNSPEC:
5692 if (XINT (x, 1) == UNSPEC_TLS)
5693 return 1;
5695 default:
5696 return 0;
5700 /* Must not copy a SET whose source operand is PC-relative. */
5702 static bool
5703 arm_cannot_copy_insn_p (rtx insn)
5705 rtx pat = PATTERN (insn);
5707 if (GET_CODE (pat) == PARALLEL
5708 && GET_CODE (XVECEXP (pat, 0, 0)) == SET)
5710 rtx rhs = SET_SRC (XVECEXP (pat, 0, 0));
5712 if (GET_CODE (rhs) == UNSPEC
5713 && XINT (rhs, 1) == UNSPEC_PIC_BASE)
5714 return TRUE;
5716 if (GET_CODE (rhs) == MEM
5717 && GET_CODE (XEXP (rhs, 0)) == UNSPEC
5718 && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
5719 return TRUE;
5722 return FALSE;
5725 enum rtx_code
5726 minmax_code (rtx x)
5728 enum rtx_code code = GET_CODE (x);
5730 switch (code)
5732 case SMAX:
5733 return GE;
5734 case SMIN:
5735 return LE;
5736 case UMIN:
5737 return LEU;
5738 case UMAX:
5739 return GEU;
5740 default:
5741 gcc_unreachable ();
5745 /* Return 1 if memory locations are adjacent. */
5747 adjacent_mem_locations (rtx a, rtx b)
5749 /* We don't guarantee to preserve the order of these memory refs. */
5750 if (volatile_refs_p (a) || volatile_refs_p (b))
5751 return 0;
5753 if ((GET_CODE (XEXP (a, 0)) == REG
5754 || (GET_CODE (XEXP (a, 0)) == PLUS
5755 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5756 && (GET_CODE (XEXP (b, 0)) == REG
5757 || (GET_CODE (XEXP (b, 0)) == PLUS
5758 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5760 HOST_WIDE_INT val0 = 0, val1 = 0;
5761 rtx reg0, reg1;
5762 int val_diff;
5764 if (GET_CODE (XEXP (a, 0)) == PLUS)
5766 reg0 = XEXP (XEXP (a, 0), 0);
5767 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5769 else
5770 reg0 = XEXP (a, 0);
5772 if (GET_CODE (XEXP (b, 0)) == PLUS)
5774 reg1 = XEXP (XEXP (b, 0), 0);
5775 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5777 else
5778 reg1 = XEXP (b, 0);
5780 /* Don't accept any offset that will require multiple
5781 instructions to handle, since this would cause the
5782 arith_adjacentmem pattern to output an overlong sequence. */
5783 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5784 return 0;
5786 /* Don't allow an eliminable register: register elimination can make
5787 the offset too large. */
5788 if (arm_eliminable_register (reg0))
5789 return 0;
5791 val_diff = val1 - val0;
5793 if (arm_ld_sched)
5795 /* If the target has load delay slots, then there's no benefit
5796 to using an ldm instruction unless the offset is zero (or 4,
5797 for ldmib) and we are optimizing for size. */
5798 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5799 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5800 && (val_diff == 4 || val_diff == -4));
5803 return ((REGNO (reg0) == REGNO (reg1))
5804 && (val_diff == 4 || val_diff == -4));
5807 return 0;
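/* For example, (mem (plus r4 4)) and (mem (plus r4 8)) are adjacent
   (val_diff == 4), as is the same pair in the opposite order
   (val_diff == -4).  On an arm_ld_sched core the pair is only
   reported adjacent when optimizing for size and one of the offsets
   is 0 or 4, per the reasoning above.  */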
5811 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5812 HOST_WIDE_INT *load_offset)
5814 int unsorted_regs[4];
5815 HOST_WIDE_INT unsorted_offsets[4];
5816 int order[4];
5817 int base_reg = -1;
5818 int i;
5820 /* Can only handle 2, 3, or 4 insns at present,
5821 though could be easily extended if required. */
5822 gcc_assert (nops >= 2 && nops <= 4);
5824 /* Loop over the operands and check that the memory references are
5825 suitable (i.e. immediate offsets from the same base register). At
5826 the same time, extract the target register, and the memory
5827 offsets. */
5828 for (i = 0; i < nops; i++)
5830 rtx reg;
5831 rtx offset;
5833 /* Convert a subreg of a mem into the mem itself. */
5834 if (GET_CODE (operands[nops + i]) == SUBREG)
5835 operands[nops + i] = alter_subreg (operands + (nops + i));
5837 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5839 /* Don't reorder volatile memory references; it doesn't seem worth
5840 looking for the case where the order is ok anyway. */
5841 if (MEM_VOLATILE_P (operands[nops + i]))
5842 return 0;
5844 offset = const0_rtx;
5846 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5847 || (GET_CODE (reg) == SUBREG
5848 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5849 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5850 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5851 == REG)
5852 || (GET_CODE (reg) == SUBREG
5853 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5854 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5855 == CONST_INT)))
5857 if (i == 0)
5859 base_reg = REGNO (reg);
5860 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5861 ? REGNO (operands[i])
5862 : REGNO (SUBREG_REG (operands[i])));
5863 order[0] = 0;
5865 else
5867 if (base_reg != (int) REGNO (reg))
5868 /* Not addressed from the same base register. */
5869 return 0;
5871 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5872 ? REGNO (operands[i])
5873 : REGNO (SUBREG_REG (operands[i])));
5874 if (unsorted_regs[i] < unsorted_regs[order[0]])
5875 order[0] = i;
5878 /* If it isn't an integer register, or if it overwrites the
5879 base register but isn't the last insn in the list, then
5880 we can't do this. */
5881 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5882 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5883 return 0;
5885 unsorted_offsets[i] = INTVAL (offset);
5887 else
5888 /* Not a suitable memory address. */
5889 return 0;
5892 /* All the useful information has now been extracted from the
5893 operands into unsorted_regs and unsorted_offsets; additionally,
5894 order[0] has been set to the lowest numbered register in the
5895 list. Sort the registers into order, and check that the memory
5896 offsets are ascending and adjacent. */
5898 for (i = 1; i < nops; i++)
5900 int j;
5902 order[i] = order[i - 1];
5903 for (j = 0; j < nops; j++)
5904 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5905 && (order[i] == order[i - 1]
5906 || unsorted_regs[j] < unsorted_regs[order[i]]))
5907 order[i] = j;
5909 /* Have we found a suitable register? If not, one must be used more
5910 than once. */
5911 if (order[i] == order[i - 1])
5912 return 0;
5914 /* Are the memory addresses adjacent and ascending? */
5915 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5916 return 0;
5919 if (base)
5921 *base = base_reg;
5923 for (i = 0; i < nops; i++)
5924 regs[i] = unsorted_regs[order[i]];
5926 *load_offset = unsorted_offsets[order[0]];
5929 if (unsorted_offsets[order[0]] == 0)
5930 return 1; /* ldmia */
5932 if (unsorted_offsets[order[0]] == 4)
5933 return 2; /* ldmib */
5935 if (unsorted_offsets[order[nops - 1]] == 0)
5936 return 3; /* ldmda */
5938 if (unsorted_offsets[order[nops - 1]] == -4)
5939 return 4; /* ldmdb */
5941 /* For ARM8, ARM9 & StrongARM, 2 ldr instructions are faster than an ldm
5942 if the offset isn't small enough. The reason 2 ldrs are faster
5943 is because these ARMs are able to do more than one cache access
5944 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5945 whilst the ARM8 has a double bandwidth cache. This means that
5946 these cores can do both an instruction fetch and a data fetch in
5947 a single cycle, so the trick of calculating the address into a
5948 scratch register (one of the result regs) and then doing a load
5949 multiple actually becomes slower (and no smaller in code size).
5950 That is the transformation
5952 ldr rd1, [rbase + offset]
5953 ldr rd2, [rbase + offset + 4]
5955 to
5957 add rd1, rbase, offset
5958 ldmia rd1, {rd1, rd2}
5960 produces worse code -- '3 cycles + any stalls on rd2' instead of
5961 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5962 access per cycle, the first sequence could never complete in less
5963 than 6 cycles, whereas the ldm sequence would only take 5 and
5964 would make better use of sequential accesses if not hitting the
5965 cache.
5967 We cheat here and test 'arm_ld_sched' which we currently know to
5968 only be true for the ARM8, ARM9 and StrongARM. If this ever
5969 changes, then the test below needs to be reworked. */
5970 if (nops == 2 && arm_ld_sched)
5971 return 0;
5973 /* Can't do it without setting up the offset, only do this if it takes
5974 no more than one insn. */
5975 return (const_ok_for_arm (unsorted_offsets[order[0]])
5976 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
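/* Illustrative sketch, not part of the original sources: the final
   test above defers to const_ok_for_arm, which accepts any value
   representable as an 8-bit constant rotated right by an even amount
   (the classic ARM data-processing immediate).  A minimal standalone
   version of that check, under a hypothetical example_ name, might
   look like this.  */
static int
example_arm_immediate_p (unsigned long val)
{
  int rot;

  val &= 0xffffffffUL;
  for (rot = 0; rot < 32; rot += 2)
    {
      /* Rotating VAL left by ROT undoes a rotate-right of an 8-bit
         immediate; if the result fits in 8 bits, VAL is encodable.  */
      unsigned long undone = (rot == 0) ? val
        : ((val << rot) | (val >> (32 - rot))) & 0xffffffffUL;

      if (undone < 256)
        return 1;
    }
  return 0;
}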
5979 const char *
5980 emit_ldm_seq (rtx *operands, int nops)
5982 int regs[4];
5983 int base_reg;
5984 HOST_WIDE_INT offset;
5985 char buf[100];
5986 int i;
5988 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5990 case 1:
5991 strcpy (buf, "ldm%?ia\t");
5992 break;
5994 case 2:
5995 strcpy (buf, "ldm%?ib\t");
5996 break;
5998 case 3:
5999 strcpy (buf, "ldm%?da\t");
6000 break;
6002 case 4:
6003 strcpy (buf, "ldm%?db\t");
6004 break;
6006 case 5:
6007 if (offset >= 0)
6008 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6009 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6010 (long) offset);
6011 else
6012 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6013 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6014 (long) -offset);
6015 output_asm_insn (buf, operands);
6016 base_reg = regs[0];
6017 strcpy (buf, "ldm%?ia\t");
6018 break;
6020 default:
6021 gcc_unreachable ();
6024 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6025 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6027 for (i = 1; i < nops; i++)
6028 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6029 reg_names[regs[i]]);
6031 strcat (buf, "}\t%@ phole ldm");
6033 output_asm_insn (buf, operands);
6034 return "";
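/* Illustrative sketch, not part of the original sources: with
   regs = {0, 1, 2}, base_reg = 4 and case 1 above, BUF ends up as
   "ldm%?ia\tr4, {r0, r1, r2}\t%@ phole ldm" (assuming an empty
   REGISTER_PREFIX); the %? and %@ escapes are expanded later when the
   insn is output.  A standalone reconstruction of the string
   assembly, under a hypothetical example_ name (<stdio.h> and
   <string.h> arrive via system.h):  */
static void
example_build_ldmia (char *buf, const char *const *names,
                     int base_reg, const int *regs, int nops)
{
  int i;

  strcpy (buf, "ldm%?ia\t");
  sprintf (buf + strlen (buf), "%s, {%s", names[base_reg], names[regs[0]]);
  for (i = 1; i < nops; i++)
    sprintf (buf + strlen (buf), ", %s", names[regs[i]]);
  strcat (buf, "}\t%@ phole ldm");
}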
6037 int
6038 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6039 HOST_WIDE_INT * load_offset)
6041 int unsorted_regs[4];
6042 HOST_WIDE_INT unsorted_offsets[4];
6043 int order[4];
6044 int base_reg = -1;
6045 int i;
6047 /* Can only handle 2, 3, or 4 insns at present, though could be easily
6048 extended if required. */
6049 gcc_assert (nops >= 2 && nops <= 4);
6051 /* Loop over the operands and check that the memory references are
6052 suitable (i.e. immediate offsets from the same base register). At
6053 the same time, extract the target register, and the memory
6054 offsets. */
6055 for (i = 0; i < nops; i++)
6057 rtx reg;
6058 rtx offset;
6060 /* Convert a subreg of a mem into the mem itself. */
6061 if (GET_CODE (operands[nops + i]) == SUBREG)
6062 operands[nops + i] = alter_subreg (operands + (nops + i));
6064 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6066 /* Don't reorder volatile memory references; it doesn't seem worth
6067 looking for the case where the order is ok anyway. */
6068 if (MEM_VOLATILE_P (operands[nops + i]))
6069 return 0;
6071 offset = const0_rtx;
6073 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6074 || (GET_CODE (reg) == SUBREG
6075 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6076 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6077 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6078 == REG)
6079 || (GET_CODE (reg) == SUBREG
6080 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6081 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6082 == CONST_INT)))
6084 if (i == 0)
6086 base_reg = REGNO (reg);
6087 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6088 ? REGNO (operands[i])
6089 : REGNO (SUBREG_REG (operands[i])));
6090 order[0] = 0;
6092 else
6094 if (base_reg != (int) REGNO (reg))
6095 /* Not addressed from the same base register. */
6096 return 0;
6098 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6099 ? REGNO (operands[i])
6100 : REGNO (SUBREG_REG (operands[i])));
6101 if (unsorted_regs[i] < unsorted_regs[order[0]])
6102 order[0] = i;
6105 /* If it isn't an integer register, then we can't do this. */
6106 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
6107 return 0;
6109 unsorted_offsets[i] = INTVAL (offset);
6111 else
6112 /* Not a suitable memory address. */
6113 return 0;
6116 /* All the useful information has now been extracted from the
6117 operands into unsorted_regs and unsorted_offsets; additionally,
6118 order[0] has been set to the lowest numbered register in the
6119 list. Sort the registers into order, and check that the memory
6120 offsets are ascending and adjacent. */
6122 for (i = 1; i < nops; i++)
6124 int j;
6126 order[i] = order[i - 1];
6127 for (j = 0; j < nops; j++)
6128 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6129 && (order[i] == order[i - 1]
6130 || unsorted_regs[j] < unsorted_regs[order[i]]))
6131 order[i] = j;
6133 /* Have we found a suitable register? If not, one must be used more
6134 than once. */
6135 if (order[i] == order[i - 1])
6136 return 0;
6138 /* Is the memory address adjacent and ascending? */
6139 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6140 return 0;
6143 if (base)
6145 *base = base_reg;
6147 for (i = 0; i < nops; i++)
6148 regs[i] = unsorted_regs[order[i]];
6150 *load_offset = unsorted_offsets[order[0]];
6153 if (unsorted_offsets[order[0]] == 0)
6154 return 1; /* stmia */
6156 if (unsorted_offsets[order[0]] == 4)
6157 return 2; /* stmib */
6159 if (unsorted_offsets[order[nops - 1]] == 0)
6160 return 3; /* stmda */
6162 if (unsorted_offsets[order[nops - 1]] == -4)
6163 return 4; /* stmdb */
6165 return 0;
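/* Illustrative note, not part of the original sources: for a
   two-register access with sorted offsets {first, last}, the return
   codes above decode as in this hypothetical table.  */
static const struct { long first, last; int code; } example_stm_kinds[] =
{
  {  0,  4, 1 },   /* stmia: lowest offset is 0.  */
  {  4,  8, 2 },   /* stmib: lowest offset is 4.  */
  { -4,  0, 3 },   /* stmda: highest offset is 0.  */
  { -8, -4, 4 },   /* stmdb: highest offset is -4.  */
};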
6168 const char *
6169 emit_stm_seq (rtx *operands, int nops)
6171 int regs[4];
6172 int base_reg;
6173 HOST_WIDE_INT offset;
6174 char buf[100];
6175 int i;
6177 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6179 case 1:
6180 strcpy (buf, "stm%?ia\t");
6181 break;
6183 case 2:
6184 strcpy (buf, "stm%?ib\t");
6185 break;
6187 case 3:
6188 strcpy (buf, "stm%?da\t");
6189 break;
6191 case 4:
6192 strcpy (buf, "stm%?db\t");
6193 break;
6195 default:
6196 gcc_unreachable ();
6199 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6200 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6202 for (i = 1; i < nops; i++)
6203 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6204 reg_names[regs[i]]);
6206 strcat (buf, "}\t%@ phole stm");
6208 output_asm_insn (buf, operands);
6209 return "";
6212 /* Routines for use in generating RTL. */
6214 rtx
6215 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
6216 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6218 HOST_WIDE_INT offset = *offsetp;
6219 int i = 0, j;
6220 rtx result;
6221 int sign = up ? 1 : -1;
6222 rtx mem, addr;
6224 /* XScale has load-store double instructions, but they have stricter
6225 alignment requirements than load-store multiple, so we cannot
6226 use them.
6228 For XScale ldm requires 2 + NREGS cycles to complete and blocks
6229 the pipeline until completion.
6231 NREGS CYCLES
6232 1 3
6233 2 4
6234 3 5
6235 4 6
6237 An ldr instruction takes 1-3 cycles, but does not block the
6238 pipeline.
6240 NREGS CYCLES
6241 1 1-3
6242 2 2-6
6243 3 3-9
6244 4 4-12
6246 Best case ldr will always win. However, the more ldr instructions
6247 we issue, the less likely we are to be able to schedule them well.
6248 Using ldr instructions also increases code size.
6250 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6251 for counts of 3 or 4 regs. */
6252 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6254 rtx seq;
6256 start_sequence ();
6258 for (i = 0; i < count; i++)
6260 addr = plus_constant (from, i * 4 * sign);
6261 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6262 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
6263 offset += 4 * sign;
6266 if (write_back)
6268 emit_move_insn (from, plus_constant (from, count * 4 * sign));
6269 *offsetp = offset;
6272 seq = get_insns ();
6273 end_sequence ();
6275 return seq;
6278 result = gen_rtx_PARALLEL (VOIDmode,
6279 rtvec_alloc (count + (write_back ? 1 : 0)));
6280 if (write_back)
6282 XVECEXP (result, 0, 0)
6283 = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
6284 i = 1;
6285 count++;
6288 for (j = 0; i < count; i++, j++)
6290 addr = plus_constant (from, j * 4 * sign);
6291 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6292 XVECEXP (result, 0, i)
6293 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
6294 offset += 4 * sign;
6297 if (write_back)
6298 *offsetp = offset;
6300 return result;
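/* Illustrative note, not part of the original sources: for
   base_regno = 0, count = 2, UP and WRITE_BACK set, the PARALLEL
   built above has roughly this shape,

     (parallel
       [(set (reg FROM) (plus (reg FROM) (const_int 8)))
        (set (reg:SI 0) (mem:SI (reg FROM)))
        (set (reg:SI 1) (mem:SI (plus (reg FROM) (const_int 4))))])

   which the load-multiple patterns in arm.md then match.  */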
6303 rtx
6304 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
6305 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6307 HOST_WIDE_INT offset = *offsetp;
6308 int i = 0, j;
6309 rtx result;
6310 int sign = up ? 1 : -1;
6311 rtx mem, addr;
6313 /* See arm_gen_load_multiple for discussion of
6314 the pros/cons of ldm/stm usage for XScale. */
6315 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6317 rtx seq;
6319 start_sequence ();
6321 for (i = 0; i < count; i++)
6323 addr = plus_constant (to, i * 4 * sign);
6324 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6325 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6326 offset += 4 * sign;
6329 if (write_back)
6331 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6332 *offsetp = offset;
6335 seq = get_insns ();
6336 end_sequence ();
6338 return seq;
6341 result = gen_rtx_PARALLEL (VOIDmode,
6342 rtvec_alloc (count + (write_back ? 1 : 0)));
6343 if (write_back)
6345 XVECEXP (result, 0, 0)
6346 = gen_rtx_SET (VOIDmode, to,
6347 plus_constant (to, count * 4 * sign));
6348 i = 1;
6349 count++;
6352 for (j = 0; i < count; i++, j++)
6354 addr = plus_constant (to, j * 4 * sign);
6355 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6356 XVECEXP (result, 0, i)
6357 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6358 offset += 4 * sign;
6361 if (write_back)
6362 *offsetp = offset;
6364 return result;
6367 int
6368 arm_gen_movmemqi (rtx *operands)
6370 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6371 HOST_WIDE_INT srcoffset, dstoffset;
6372 int i;
6373 rtx src, dst, srcbase, dstbase;
6374 rtx part_bytes_reg = NULL;
6375 rtx mem;
6377 if (GET_CODE (operands[2]) != CONST_INT
6378 || GET_CODE (operands[3]) != CONST_INT
6379 || INTVAL (operands[2]) > 64
6380 || INTVAL (operands[3]) & 3)
6381 return 0;
6383 dstbase = operands[0];
6384 srcbase = operands[1];
6386 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
6387 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
6389 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6390 out_words_to_go = INTVAL (operands[2]) / 4;
6391 last_bytes = INTVAL (operands[2]) & 3;
6392 dstoffset = srcoffset = 0;
6394 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6395 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6397 for (i = 0; in_words_to_go >= 2; i+=4)
6399 if (in_words_to_go > 4)
6400 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6401 srcbase, &srcoffset));
6402 else
6403 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6404 FALSE, srcbase, &srcoffset));
6406 if (out_words_to_go)
6408 if (out_words_to_go > 4)
6409 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6410 dstbase, &dstoffset));
6411 else if (out_words_to_go != 1)
6412 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6413 dst, TRUE,
6414 (last_bytes == 0
6415 ? FALSE : TRUE),
6416 dstbase, &dstoffset));
6417 else
6419 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6420 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6421 if (last_bytes != 0)
6423 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6424 dstoffset += 4;
6429 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6430 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6433 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6434 if (out_words_to_go)
6436 rtx sreg;
6438 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6439 sreg = copy_to_reg (mem);
6441 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6442 emit_move_insn (mem, sreg);
6443 in_words_to_go--;
6445 gcc_assert (!in_words_to_go); /* Sanity check */
6448 if (in_words_to_go)
6450 gcc_assert (in_words_to_go > 0);
6452 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6453 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6456 gcc_assert (!last_bytes || part_bytes_reg);
6458 if (BYTES_BIG_ENDIAN && last_bytes)
6460 rtx tmp = gen_reg_rtx (SImode);
6462 /* The bytes we want are in the top end of the word. */
6463 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6464 GEN_INT (8 * (4 - last_bytes))));
6465 part_bytes_reg = tmp;
6467 while (last_bytes)
6469 mem = adjust_automodify_address (dstbase, QImode,
6470 plus_constant (dst, last_bytes - 1),
6471 dstoffset + last_bytes - 1);
6472 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6474 if (--last_bytes)
6476 tmp = gen_reg_rtx (SImode);
6477 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6478 part_bytes_reg = tmp;
6483 else
6485 if (last_bytes > 1)
6487 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6488 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6489 last_bytes -= 2;
6490 if (last_bytes)
6492 rtx tmp = gen_reg_rtx (SImode);
6493 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6494 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6495 part_bytes_reg = tmp;
6496 dstoffset += 2;
6500 if (last_bytes)
6502 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6503 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6507 return 1;
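/* Illustrative sketch, not part of the original sources: the byte
   count in operands[2] is split into whole SImode words plus 0-3
   trailing bytes; e.g. on the little-endian path 14 bytes become
   three word stores plus one HImode store, with no final QImode
   store.  Under a hypothetical example_ name:  */
static void
example_movmem_split (long nbytes, long *words, long *trailing)
{
  *words = nbytes / 4;     /* out_words_to_go above.  */
  *trailing = nbytes & 3;  /* last_bytes above.  */
}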
6510 /* Select a dominance comparison mode if possible for a test of the general
6511 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6512 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6513 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6514 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6515 In all cases OP will be either EQ or NE, but we don't need to know which
6516 here. If we are unable to support a dominance comparison we return
6517 CC mode. This will then fail to match for the RTL expressions that
6518 generate this call. */
6519 enum machine_mode
6520 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6522 enum rtx_code cond1, cond2;
6523 int swapped = 0;
6525 /* Currently we will probably get the wrong result if the individual
6526 comparisons are not simple. This also ensures that it is safe to
6527 reverse a comparison if necessary. */
6528 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6529 != CCmode)
6530 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6531 != CCmode))
6532 return CCmode;
6534 /* The if_then_else variant of this tests the second condition if the
6535 first passes, but is true if the first fails. Reverse the first
6536 condition to get a true "inclusive-or" expression. */
6537 if (cond_or == DOM_CC_NX_OR_Y)
6538 cond1 = reverse_condition (cond1);
6540 /* If the comparisons are not equal, and one doesn't dominate the other,
6541 then we can't do this. */
6542 if (cond1 != cond2
6543 && !comparison_dominates_p (cond1, cond2)
6544 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6545 return CCmode;
6547 if (swapped)
6549 enum rtx_code temp = cond1;
6550 cond1 = cond2;
6551 cond2 = temp;
6554 switch (cond1)
6556 case EQ:
6557 if (cond_or == DOM_CC_X_AND_Y)
6558 return CC_DEQmode;
6560 switch (cond2)
6562 case EQ: return CC_DEQmode;
6563 case LE: return CC_DLEmode;
6564 case LEU: return CC_DLEUmode;
6565 case GE: return CC_DGEmode;
6566 case GEU: return CC_DGEUmode;
6567 default: gcc_unreachable ();
6570 case LT:
6571 if (cond_or == DOM_CC_X_AND_Y)
6572 return CC_DLTmode;
6574 switch (cond2)
6576 case LT:
6577 return CC_DLTmode;
6578 case LE:
6579 return CC_DLEmode;
6580 case NE:
6581 return CC_DNEmode;
6582 default:
6583 gcc_unreachable ();
6586 case GT:
6587 if (cond_or == DOM_CC_X_AND_Y)
6588 return CC_DGTmode;
6590 switch (cond2)
6592 case GT:
6593 return CC_DGTmode;
6594 case GE:
6595 return CC_DGEmode;
6596 case NE:
6597 return CC_DNEmode;
6598 default:
6599 gcc_unreachable ();
6602 case LTU:
6603 if (cond_or == DOM_CC_X_AND_Y)
6604 return CC_DLTUmode;
6606 switch (cond2)
6608 case LTU:
6609 return CC_DLTUmode;
6610 case LEU:
6611 return CC_DLEUmode;
6612 case NE:
6613 return CC_DNEmode;
6614 default:
6615 gcc_unreachable ();
6618 case GTU:
6619 if (cond_or == DOM_CC_X_AND_Y)
6620 return CC_DGTUmode;
6622 switch (cond2)
6624 case GTU:
6625 return CC_DGTUmode;
6626 case GEU:
6627 return CC_DGEUmode;
6628 case NE:
6629 return CC_DNEmode;
6630 default:
6631 gcc_unreachable ();
6634 /* The remaining cases only occur when both comparisons are the
6635 same. */
6636 case NE:
6637 gcc_assert (cond1 == cond2);
6638 return CC_DNEmode;
6640 case LE:
6641 gcc_assert (cond1 == cond2);
6642 return CC_DLEmode;
6644 case GE:
6645 gcc_assert (cond1 == cond2);
6646 return CC_DGEmode;
6648 case LEU:
6649 gcc_assert (cond1 == cond2);
6650 return CC_DLEUmode;
6652 case GEU:
6653 gcc_assert (cond1 == cond2);
6654 return CC_DGEUmode;
6656 default:
6657 gcc_unreachable ();
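/* Illustrative note, not part of the original sources: for a source
   test like (a == b || c == d) both comparisons are EQ and cond_or is
   DOM_CC_X_OR_Y, so the function above returns CC_DEQmode; for
   (a < b && c <= d), LT dominates LE and DOM_CC_X_AND_Y yields
   CC_DLTmode.  */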
6661 enum machine_mode
6662 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6664 /* All floating point compares return CCFP if it is an equality
6665 comparison, and CCFPE otherwise. */
6666 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6668 switch (op)
6670 case EQ:
6671 case NE:
6672 case UNORDERED:
6673 case ORDERED:
6674 case UNLT:
6675 case UNLE:
6676 case UNGT:
6677 case UNGE:
6678 case UNEQ:
6679 case LTGT:
6680 return CCFPmode;
6682 case LT:
6683 case LE:
6684 case GT:
6685 case GE:
6686 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6687 return CCFPmode;
6688 return CCFPEmode;
6690 default:
6691 gcc_unreachable ();
6695 /* A compare with a shifted operand. Because of canonicalization, the
6696 comparison will have to be swapped when we emit the assembler. */
6697 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6698 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6699 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6700 || GET_CODE (x) == ROTATERT))
6701 return CC_SWPmode;
6703 /* This operation is performed swapped, but since we only rely on the Z
6704 flag we don't need an additional mode. */
6705 if (GET_MODE (y) == SImode && REG_P (y)
6706 && GET_CODE (x) == NEG
6707 && (op == EQ || op == NE))
6708 return CC_Zmode;
6710 /* This is a special case that is used by combine to allow a
6711 comparison of a shifted byte load to be split into a zero-extend
6712 followed by a comparison of the shifted integer (only valid for
6713 equalities and unsigned inequalities). */
6714 if (GET_MODE (x) == SImode
6715 && GET_CODE (x) == ASHIFT
6716 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6717 && GET_CODE (XEXP (x, 0)) == SUBREG
6718 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6719 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6720 && (op == EQ || op == NE
6721 || op == GEU || op == GTU || op == LTU || op == LEU)
6722 && GET_CODE (y) == CONST_INT)
6723 return CC_Zmode;
6725 /* A construct for a conditional compare, if the false arm contains
6726 0, then both conditions must be true, otherwise either condition
6727 must be true. Not all conditions are possible, so CCmode is
6728 returned if it can't be done. */
6729 if (GET_CODE (x) == IF_THEN_ELSE
6730 && (XEXP (x, 2) == const0_rtx
6731 || XEXP (x, 2) == const1_rtx)
6732 && COMPARISON_P (XEXP (x, 0))
6733 && COMPARISON_P (XEXP (x, 1)))
6734 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6735 INTVAL (XEXP (x, 2)));
6737 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6738 if (GET_CODE (x) == AND
6739 && COMPARISON_P (XEXP (x, 0))
6740 && COMPARISON_P (XEXP (x, 1)))
6741 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6742 DOM_CC_X_AND_Y);
6744 if (GET_CODE (x) == IOR
6745 && COMPARISON_P (XEXP (x, 0))
6746 && COMPARISON_P (XEXP (x, 1)))
6747 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6748 DOM_CC_X_OR_Y);
6750 /* An operation (on Thumb) where we want to test for a single bit.
6751 This is done by shifting that bit up into the top bit of a
6752 scratch register; we can then branch on the sign bit. */
6753 if (TARGET_THUMB
6754 && GET_MODE (x) == SImode
6755 && (op == EQ || op == NE)
6756 && GET_CODE (x) == ZERO_EXTRACT
6757 && XEXP (x, 1) == const1_rtx)
6758 return CC_Nmode;
6760 /* For an operation that sets the condition codes as a side-effect, the
6761 V flag is not set correctly, so we can only use comparisons where
6762 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6763 instead.) */
6764 if (GET_MODE (x) == SImode
6765 && y == const0_rtx
6766 && (op == EQ || op == NE || op == LT || op == GE)
6767 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6768 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6769 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6770 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6771 || GET_CODE (x) == LSHIFTRT
6772 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6773 || GET_CODE (x) == ROTATERT
6774 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6775 return CC_NOOVmode;
6777 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6778 return CC_Zmode;
6780 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6781 && GET_CODE (x) == PLUS
6782 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6783 return CC_Cmode;
6785 return CCmode;
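/* Illustrative note, not part of the original sources: the CC_Cmode
   case above catches the unsigned overflow idiom

       if (a + b < a)
         ...

   where (plus a b) is compared against one of its own operands with
   LTU; only the carry flag is needed to decide that test.  */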
6788 /* X and Y are two things to compare using CODE. Emit the compare insn and
6789 return the rtx for register 0 in the proper mode. FP means this is a
6790 floating point compare: I don't think that it is needed on the arm. */
6791 rtx
6792 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6794 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6795 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6797 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
6799 return cc_reg;
6802 /* Generate a sequence of insns that will generate the correct return
6803 address mask depending on the physical architecture that the program
6804 is running on. */
6805 rtx
6806 arm_gen_return_addr_mask (void)
6808 rtx reg = gen_reg_rtx (Pmode);
6810 emit_insn (gen_return_addr_mask (reg));
6811 return reg;
6814 void
6815 arm_reload_in_hi (rtx *operands)
6817 rtx ref = operands[1];
6818 rtx base, scratch;
6819 HOST_WIDE_INT offset = 0;
6821 if (GET_CODE (ref) == SUBREG)
6823 offset = SUBREG_BYTE (ref);
6824 ref = SUBREG_REG (ref);
6827 if (GET_CODE (ref) == REG)
6829 /* We have a pseudo which has been spilt onto the stack; there
6830 are two cases here: the first where there is a simple
6831 stack-slot replacement and a second where the stack-slot is
6832 out of range, or is used as a subreg. */
6833 if (reg_equiv_mem[REGNO (ref)])
6835 ref = reg_equiv_mem[REGNO (ref)];
6836 base = find_replacement (&XEXP (ref, 0));
6838 else
6839 /* The slot is out of range, or was dressed up in a SUBREG. */
6840 base = reg_equiv_address[REGNO (ref)];
6842 else
6843 base = find_replacement (&XEXP (ref, 0));
6845 /* Handle the case where the address is too complex to be offset by 1. */
6846 if (GET_CODE (base) == MINUS
6847 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6849 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6851 emit_set_insn (base_plus, base);
6852 base = base_plus;
6854 else if (GET_CODE (base) == PLUS)
6856 /* The addend must be CONST_INT, or we would have dealt with it above. */
6857 HOST_WIDE_INT hi, lo;
6859 offset += INTVAL (XEXP (base, 1));
6860 base = XEXP (base, 0);
6862 /* Rework the address into a legal sequence of insns. */
6863 /* Valid range for lo is -4095 -> 4095 */
6864 lo = (offset >= 0
6865 ? (offset & 0xfff)
6866 : -((-offset) & 0xfff));
6868 /* Corner case: if lo is the max offset, then we would be out of range
6869 once we have added the additional 1 below, so bump the msb into the
6870 pre-loading insn(s). */
6871 if (lo == 4095)
6872 lo &= 0x7ff;
6874 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6875 ^ (HOST_WIDE_INT) 0x80000000)
6876 - (HOST_WIDE_INT) 0x80000000);
6878 gcc_assert (hi + lo == offset);
6880 if (hi != 0)
6882 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6884 /* Get the base address; addsi3 knows how to handle constants
6885 that require more than one insn. */
6886 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6887 base = base_plus;
6888 offset = lo;
6892 /* Operands[2] may overlap operands[0] (though it won't overlap
6893 operands[1]), that's why we asked for a DImode reg -- so we can
6894 use the bit that does not overlap. */
6895 if (REGNO (operands[2]) == REGNO (operands[0]))
6896 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6897 else
6898 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6900 emit_insn (gen_zero_extendqisi2 (scratch,
6901 gen_rtx_MEM (QImode,
6902 plus_constant (base,
6903 offset))));
6904 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6905 gen_rtx_MEM (QImode,
6906 plus_constant (base,
6907 offset + 1))));
6908 if (!BYTES_BIG_ENDIAN)
6909 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6910 gen_rtx_IOR (SImode,
6911 gen_rtx_ASHIFT
6912 (SImode,
6913 gen_rtx_SUBREG (SImode, operands[0], 0),
6914 GEN_INT (8)),
6915 scratch));
6916 else
6917 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6918 gen_rtx_IOR (SImode,
6919 gen_rtx_ASHIFT (SImode, scratch,
6920 GEN_INT (8)),
6921 gen_rtx_SUBREG (SImode, operands[0], 0)));
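/* Illustrative sketch, not part of the original sources: the hi/lo
   split above keeps the low part within the +/-4095 byte-load range
   and moves the remainder into a separate add; e.g. offset 0x1234
   splits into lo = 0x234 and hi = 0x1000.  A standalone version under
   a hypothetical example_ name (the masking in the original also
   canonicalizes HOST_WIDE_INT sign extension):  */
static void
example_split_hi_lo (long offset, long *hi, long *lo)
{
  *lo = offset >= 0 ? (offset & 0xfff) : -((-offset) & 0xfff);

  /* If LO is the maximum offset, the extra +1 for the second byte
     would go out of range, so shrink LO and grow HI.  */
  if (*lo == 4095)
    *lo &= 0x7ff;

  *hi = offset - *lo;
}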
6924 /* Handle storing a half-word to memory during reload by synthesizing as two
6925 byte stores. Take care not to clobber the input values until after we
6926 have moved them somewhere safe. This code assumes that if the DImode
6927 scratch in operands[2] overlaps either the input value or output address
6928 in some way, then that value must die in this insn (we absolutely need
6929 two scratch registers for some corner cases). */
6930 void
6931 arm_reload_out_hi (rtx *operands)
6933 rtx ref = operands[0];
6934 rtx outval = operands[1];
6935 rtx base, scratch;
6936 HOST_WIDE_INT offset = 0;
6938 if (GET_CODE (ref) == SUBREG)
6940 offset = SUBREG_BYTE (ref);
6941 ref = SUBREG_REG (ref);
6944 if (GET_CODE (ref) == REG)
6946 /* We have a pseudo which has been spilt onto the stack; there
6947 are two cases here: the first where there is a simple
6948 stack-slot replacement and a second where the stack-slot is
6949 out of range, or is used as a subreg. */
6950 if (reg_equiv_mem[REGNO (ref)])
6952 ref = reg_equiv_mem[REGNO (ref)];
6953 base = find_replacement (&XEXP (ref, 0));
6955 else
6956 /* The slot is out of range, or was dressed up in a SUBREG. */
6957 base = reg_equiv_address[REGNO (ref)];
6959 else
6960 base = find_replacement (&XEXP (ref, 0));
6962 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6964 /* Handle the case where the address is too complex to be offset by 1. */
6965 if (GET_CODE (base) == MINUS
6966 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6968 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6970 /* Be careful not to destroy OUTVAL. */
6971 if (reg_overlap_mentioned_p (base_plus, outval))
6973 /* Updating base_plus might destroy outval, see if we can
6974 swap the scratch and base_plus. */
6975 if (!reg_overlap_mentioned_p (scratch, outval))
6977 rtx tmp = scratch;
6978 scratch = base_plus;
6979 base_plus = tmp;
6981 else
6983 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6985 /* Be conservative and copy OUTVAL into the scratch now;
6986 this should only be necessary if outval is a subreg
6987 of something larger than a word. */
6988 /* XXX Might this clobber base? I can't see how it can,
6989 since scratch is known to overlap with OUTVAL, and
6990 must be wider than a word. */
6991 emit_insn (gen_movhi (scratch_hi, outval));
6992 outval = scratch_hi;
6996 emit_set_insn (base_plus, base);
6997 base = base_plus;
6999 else if (GET_CODE (base) == PLUS)
7001 /* The addend must be CONST_INT, or we would have dealt with it above. */
7002 HOST_WIDE_INT hi, lo;
7004 offset += INTVAL (XEXP (base, 1));
7005 base = XEXP (base, 0);
7007 /* Rework the address into a legal sequence of insns. */
7008 /* Valid range for lo is -4095 -> 4095 */
7009 lo = (offset >= 0
7010 ? (offset & 0xfff)
7011 : -((-offset) & 0xfff));
7013 /* Corner case: if lo is the max offset, then we would be out of range
7014 once we have added the additional 1 below, so bump the msb into the
7015 pre-loading insn(s). */
7016 if (lo == 4095)
7017 lo &= 0x7ff;
7019 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7020 ^ (HOST_WIDE_INT) 0x80000000)
7021 - (HOST_WIDE_INT) 0x80000000);
7023 gcc_assert (hi + lo == offset);
7025 if (hi != 0)
7027 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7029 /* Be careful not to destroy OUTVAL. */
7030 if (reg_overlap_mentioned_p (base_plus, outval))
7032 /* Updating base_plus might destroy outval, see if we
7033 can swap the scratch and base_plus. */
7034 if (!reg_overlap_mentioned_p (scratch, outval))
7036 rtx tmp = scratch;
7037 scratch = base_plus;
7038 base_plus = tmp;
7040 else
7042 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7044 /* Be conservative and copy outval into scratch now;
7045 this should only be necessary if outval is a
7046 subreg of something larger than a word. */
7047 /* XXX Might this clobber base? I can't see how it
7048 can, since scratch is known to overlap with
7049 outval. */
7050 emit_insn (gen_movhi (scratch_hi, outval));
7051 outval = scratch_hi;
7055 /* Get the base address; addsi3 knows how to handle constants
7056 that require more than one insn. */
7057 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7058 base = base_plus;
7059 offset = lo;
7063 if (BYTES_BIG_ENDIAN)
7065 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7066 plus_constant (base, offset + 1)),
7067 gen_lowpart (QImode, outval)));
7068 emit_insn (gen_lshrsi3 (scratch,
7069 gen_rtx_SUBREG (SImode, outval, 0),
7070 GEN_INT (8)));
7071 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7072 gen_lowpart (QImode, scratch)));
7074 else
7076 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7077 gen_lowpart (QImode, outval)));
7078 emit_insn (gen_lshrsi3 (scratch,
7079 gen_rtx_SUBREG (SImode, outval, 0),
7080 GEN_INT (8)));
7081 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7082 plus_constant (base, offset + 1)),
7083 gen_lowpart (QImode, scratch)));
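/* Illustrative sketch, not part of the original sources: both
   branches above synthesize the halfword store as two byte stores and
   only the order depends on endianness.  The little-endian branch is
   equivalent to this, under a hypothetical example_ name:  */
static void
example_store_hi_le (unsigned char *p, unsigned int val)
{
  p[0] = val & 0xff;        /* gen_lowpart (QImode, outval).  */
  p[1] = (val >> 8) & 0xff; /* scratch = outval >> 8, low byte.  */
}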
7087 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
7088 (padded to the size of a word) should be passed in a register. */
7090 static bool
7091 arm_must_pass_in_stack (enum machine_mode mode, tree type)
7093 if (TARGET_AAPCS_BASED)
7094 return must_pass_in_stack_var_size (mode, type);
7095 else
7096 return must_pass_in_stack_var_size_or_pad (mode, type);
7100 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
7101 Return true if an argument passed on the stack should be padded upwards,
7102 i.e. if the least-significant byte has useful data.
7103 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
7104 aggregate types are placed in the lowest memory address. */
7106 bool
7107 arm_pad_arg_upward (enum machine_mode mode, tree type)
7109 if (!TARGET_AAPCS_BASED)
7110 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
7112 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
7113 return false;
7115 return true;
7119 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
7120 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
7121 byte of the register has useful data, and return the opposite if the
7122 most significant byte does.
7123 For AAPCS, small aggregates and small complex types are always padded
7124 upwards. */
7126 bool
7127 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
7128 tree type, int first ATTRIBUTE_UNUSED)
7130 if (TARGET_AAPCS_BASED
7131 && BYTES_BIG_ENDIAN
7132 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
7133 && int_size_in_bytes (type) <= 4)
7134 return true;
7136 /* Otherwise, use default padding. */
7137 return !BYTES_BIG_ENDIAN;
7141 /* Print a symbolic form of X to the debug file, F. */
7142 static void
7143 arm_print_value (FILE *f, rtx x)
7145 switch (GET_CODE (x))
7147 case CONST_INT:
7148 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
7149 return;
7151 case CONST_DOUBLE:
7152 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
7153 return;
7155 case CONST_VECTOR:
7157 int i;
7159 fprintf (f, "<");
7160 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
7162 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
7163 if (i < (CONST_VECTOR_NUNITS (x) - 1))
7164 fputc (',', f);
7166 fprintf (f, ">");
7168 return;
7170 case CONST_STRING:
7171 fprintf (f, "\"%s\"", XSTR (x, 0));
7172 return;
7174 case SYMBOL_REF:
7175 fprintf (f, "`%s'", XSTR (x, 0));
7176 return;
7178 case LABEL_REF:
7179 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
7180 return;
7182 case CONST:
7183 arm_print_value (f, XEXP (x, 0));
7184 return;
7186 case PLUS:
7187 arm_print_value (f, XEXP (x, 0));
7188 fprintf (f, "+");
7189 arm_print_value (f, XEXP (x, 1));
7190 return;
7192 case PC:
7193 fprintf (f, "pc");
7194 return;
7196 default:
7197 fprintf (f, "????");
7198 return;
7202 /* Routines for manipulation of the constant pool. */
7204 /* Arm instructions cannot load a large constant directly into a
7205 register; they have to come from a pc relative load. The constant
7206 must therefore be placed in the addressable range of the pc
7207 relative load. Depending on the precise pc relative load
7208 instruction the range is somewhere between 256 bytes and 4k. This
7209 means that we often have to dump a constant inside a function, and
7210 generate code to branch around it.
7212 It is important to minimize this, since the branches will slow
7213 things down and make the code larger.
7215 Normally we can hide the table after an existing unconditional
7216 branch so that there is no interruption of the flow, but in the
7217 worst case the code looks like this:
7219 ldr rn, L1
7221 b L2
7222 align
7223 L1: .long value
7227 ldr rn, L3
7229 b L4
7230 align
7231 L3: .long value
7235 We fix this by performing a scan after scheduling, which notices
7236 which instructions need to have their operands fetched from the
7237 constant table and builds the table.
7239 The algorithm starts by building a table of all the constants that
7240 need fixing up and all the natural barriers in the function (places
7241 where a constant table can be dropped without breaking the flow).
7242 For each fixup we note how far the pc-relative replacement will be
7243 able to reach and the offset of the instruction into the function.
7245 Having built the table we then group the fixes together to form
7246 tables that are as large as possible (subject to addressing
7247 constraints) and emit each table of constants after the last
7248 barrier that is within range of all the instructions in the group.
7249 If a group does not contain a barrier, then we forcibly create one
7250 by inserting a jump instruction into the flow. Once the table has
7251 been inserted, the insns are then modified to reference the
7252 relevant entry in the pool.
7254 Possible enhancements to the algorithm (not implemented) are:
7256 1) For some processors and object formats, there may be benefit in
7257 aligning the pools to the start of cache lines; this alignment
7258 would need to be taken into account when calculating addressability
7259 of a pool. */
7261 /* These typedefs are located at the start of this file, so that
7262 they can be used in the prototypes there. This comment is to
7263 remind readers of that fact so that the following structures
7264 can be understood more easily.
7266 typedef struct minipool_node Mnode;
7267 typedef struct minipool_fixup Mfix; */
7269 struct minipool_node
7271 /* Doubly linked chain of entries. */
7272 Mnode * next;
7273 Mnode * prev;
7274 /* The maximum offset into the code that this entry can be placed. While
7275 pushing fixes for forward references, all entries are sorted in order
7276 of increasing max_address. */
7277 HOST_WIDE_INT max_address;
7278 /* Similarly for an entry inserted for a backwards ref. */
7279 HOST_WIDE_INT min_address;
7280 /* The number of fixes referencing this entry. This can become zero
7281 if we "unpush" an entry. In this case we ignore the entry when we
7282 come to emit the code. */
7283 int refcount;
7284 /* The offset from the start of the minipool. */
7285 HOST_WIDE_INT offset;
7286 /* The value in table. */
7287 rtx value;
7288 /* The mode of value. */
7289 enum machine_mode mode;
7290 /* The size of the value. With iWMMXt enabled
7291 sizes > 4 also imply an alignment of 8-bytes. */
7292 int fix_size;
7295 struct minipool_fixup
7297 Mfix * next;
7298 rtx insn;
7299 HOST_WIDE_INT address;
7300 rtx * loc;
7301 enum machine_mode mode;
7302 int fix_size;
7303 rtx value;
7304 Mnode * minipool;
7305 HOST_WIDE_INT forwards;
7306 HOST_WIDE_INT backwards;
7309 /* Fixes less than a word need padding out to a word boundary. */
7310 #define MINIPOOL_FIX_SIZE(mode) \
7311 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
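/* Illustrative note, not part of the original sources: e.g.
   MINIPOOL_FIX_SIZE (HImode) == 4 (padded up to a word) while
   MINIPOOL_FIX_SIZE (DImode) == 8 keeps its natural size.  */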
7313 static Mnode * minipool_vector_head;
7314 static Mnode * minipool_vector_tail;
7315 static rtx minipool_vector_label;
7316 static int minipool_pad;
7318 /* The linked list of all minipool fixes required for this function. */
7319 Mfix * minipool_fix_head;
7320 Mfix * minipool_fix_tail;
7321 /* The fix entry for the current minipool, once it has been placed. */
7322 Mfix * minipool_barrier;
7324 /* Determines if INSN is the start of a jump table. Returns the end
7325 of the TABLE or NULL_RTX. */
7326 static rtx
7327 is_jump_table (rtx insn)
7329 rtx table;
7331 if (GET_CODE (insn) == JUMP_INSN
7332 && JUMP_LABEL (insn) != NULL
7333 && ((table = next_real_insn (JUMP_LABEL (insn)))
7334 == next_real_insn (insn))
7335 && table != NULL
7336 && GET_CODE (table) == JUMP_INSN
7337 && (GET_CODE (PATTERN (table)) == ADDR_VEC
7338 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
7339 return table;
7341 return NULL_RTX;
7344 #ifndef JUMP_TABLES_IN_TEXT_SECTION
7345 #define JUMP_TABLES_IN_TEXT_SECTION 0
7346 #endif
7348 static HOST_WIDE_INT
7349 get_jump_table_size (rtx insn)
7351 /* ADDR_VECs only take room if read-only data goes into the text
7352 section. */
7353 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
7355 rtx body = PATTERN (insn);
7356 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7358 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
7361 return 0;
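/* Illustrative note, not part of the original sources: an SImode
   ADDR_VEC with 8 entries therefore contributes 4 * 8 = 32 bytes when
   jump tables live in the text section, and 0 otherwise.  */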
7364 /* Move a minipool fix MP from its current location to before MAX_MP.
7365 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7366 constraints may need updating. */
7367 static Mnode *
7368 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7369 HOST_WIDE_INT max_address)
7371 /* The code below assumes these are different. */
7372 gcc_assert (mp != max_mp);
7374 if (max_mp == NULL)
7376 if (max_address < mp->max_address)
7377 mp->max_address = max_address;
7379 else
7381 if (max_address > max_mp->max_address - mp->fix_size)
7382 mp->max_address = max_mp->max_address - mp->fix_size;
7383 else
7384 mp->max_address = max_address;
7386 /* Unlink MP from its current position. Since max_mp is non-null,
7387 mp->prev must be non-null. */
7388 mp->prev->next = mp->next;
7389 if (mp->next != NULL)
7390 mp->next->prev = mp->prev;
7391 else
7392 minipool_vector_tail = mp->prev;
7394 /* Re-insert it before MAX_MP. */
7395 mp->next = max_mp;
7396 mp->prev = max_mp->prev;
7397 max_mp->prev = mp;
7399 if (mp->prev != NULL)
7400 mp->prev->next = mp;
7401 else
7402 minipool_vector_head = mp;
7405 /* Save the new entry. */
7406 max_mp = mp;
7408 /* Scan over the preceding entries and adjust their addresses as
7409 required. */
7410 while (mp->prev != NULL
7411 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7413 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7414 mp = mp->prev;
7417 return max_mp;
7420 /* Add a constant to the minipool for a forward reference. Returns the
7421 node added or NULL if the constant will not fit in this pool. */
7422 static Mnode *
7423 add_minipool_forward_ref (Mfix *fix)
7425 /* If set, max_mp is the first pool_entry that has a lower
7426 constraint than the one we are trying to add. */
7427 Mnode * max_mp = NULL;
7428 HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
7429 Mnode * mp;
7431 /* If the minipool starts before the end of FIX->INSN then this FIX
7432 cannot be placed into the current pool. Furthermore, adding the
7433 new constant pool entry may cause the pool to start FIX_SIZE bytes
7434 earlier. */
7435 if (minipool_vector_head &&
7436 (fix->address + get_attr_length (fix->insn)
7437 >= minipool_vector_head->max_address - fix->fix_size))
7438 return NULL;
7440 /* Scan the pool to see if a constant with the same value has
7441 already been added. While we are doing this, also note the
7442 location where we must insert the constant if it doesn't already
7443 exist. */
7444 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7446 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7447 && fix->mode == mp->mode
7448 && (GET_CODE (fix->value) != CODE_LABEL
7449 || (CODE_LABEL_NUMBER (fix->value)
7450 == CODE_LABEL_NUMBER (mp->value)))
7451 && rtx_equal_p (fix->value, mp->value))
7453 /* More than one fix references this entry. */
7454 mp->refcount++;
7455 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7458 /* Note the insertion point if necessary. */
7459 if (max_mp == NULL
7460 && mp->max_address > max_address)
7461 max_mp = mp;
7463 /* If we are inserting an 8-byte aligned quantity and
7464 we have not already found an insertion point, then
7465 make sure that all such 8-byte aligned quantities are
7466 placed at the start of the pool. */
7467 if (ARM_DOUBLEWORD_ALIGN
7468 && max_mp == NULL
7469 && fix->fix_size == 8
7470 && mp->fix_size != 8)
7472 max_mp = mp;
7473 max_address = mp->max_address;
7477 /* The value is not currently in the minipool, so we need to create
7478 a new entry for it. If MAX_MP is NULL, the entry will be put on
7479 the end of the list since the placement is less constrained than
7480 any existing entry. Otherwise, we insert the new fix before
7481 MAX_MP and, if necessary, adjust the constraints on the other
7482 entries. */
7483 mp = XNEW (Mnode);
7484 mp->fix_size = fix->fix_size;
7485 mp->mode = fix->mode;
7486 mp->value = fix->value;
7487 mp->refcount = 1;
7488 /* Not yet required for a backwards ref. */
7489 mp->min_address = -65536;
7491 if (max_mp == NULL)
7493 mp->max_address = max_address;
7494 mp->next = NULL;
7495 mp->prev = minipool_vector_tail;
7497 if (mp->prev == NULL)
7499 minipool_vector_head = mp;
7500 minipool_vector_label = gen_label_rtx ();
7502 else
7503 mp->prev->next = mp;
7505 minipool_vector_tail = mp;
7507 else
7509 if (max_address > max_mp->max_address - mp->fix_size)
7510 mp->max_address = max_mp->max_address - mp->fix_size;
7511 else
7512 mp->max_address = max_address;
7514 mp->next = max_mp;
7515 mp->prev = max_mp->prev;
7516 max_mp->prev = mp;
7517 if (mp->prev != NULL)
7518 mp->prev->next = mp;
7519 else
7520 minipool_vector_head = mp;
7523 /* Save the new entry. */
7524 max_mp = mp;
7526 /* Scan over the preceding entries and adjust their addresses as
7527 required. */
7528 while (mp->prev != NULL
7529 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7531 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7532 mp = mp->prev;
7535 return max_mp;
7538 static Mnode *
7539 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7540 HOST_WIDE_INT min_address)
7542 HOST_WIDE_INT offset;
7544 /* The code below assumes these are different. */
7545 gcc_assert (mp != min_mp);
7547 if (min_mp == NULL)
7549 if (min_address > mp->min_address)
7550 mp->min_address = min_address;
7552 else
7554 /* We will adjust this below if it is too loose. */
7555 mp->min_address = min_address;
7557 /* Unlink MP from its current position. Since min_mp is non-null,
7558 mp->next must be non-null. */
7559 mp->next->prev = mp->prev;
7560 if (mp->prev != NULL)
7561 mp->prev->next = mp->next;
7562 else
7563 minipool_vector_head = mp->next;
7565 /* Reinsert it after MIN_MP. */
7566 mp->prev = min_mp;
7567 mp->next = min_mp->next;
7568 min_mp->next = mp;
7569 if (mp->next != NULL)
7570 mp->next->prev = mp;
7571 else
7572 minipool_vector_tail = mp;
7575 min_mp = mp;
7577 offset = 0;
7578 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7580 mp->offset = offset;
7581 if (mp->refcount > 0)
7582 offset += mp->fix_size;
7584 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7585 mp->next->min_address = mp->min_address + mp->fix_size;
7588 return min_mp;
7591 /* Add a constant to the minipool for a backward reference. Returns the
7592 node added or NULL if the constant will not fit in this pool.
7594 Note that the code for insertion for a backwards reference can be
7595 somewhat confusing because the calculated offsets for each fix do
7596 not take into account the size of the pool (which is still under
7597 construction). */
7598 static Mnode *
7599 add_minipool_backward_ref (Mfix *fix)
7601 /* If set, min_mp is the last pool_entry that has a lower constraint
7602 than the one we are trying to add. */
7603 Mnode *min_mp = NULL;
7604 /* This can be negative, since it is only a constraint. */
7605 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7606 Mnode *mp;
7608 /* If we can't reach the current pool from this insn, or if we can't
7609 insert this entry at the end of the pool without pushing other
7610 fixes out of range, then we don't try. This ensures that we
7611 can't fail later on. */
7612 if (min_address >= minipool_barrier->address
7613 || (minipool_vector_tail->min_address + fix->fix_size
7614 >= minipool_barrier->address))
7615 return NULL;
7617 /* Scan the pool to see if a constant with the same value has
7618 already been added. While we are doing this, also note the
7619 location where we must insert the constant if it doesn't already
7620 exist. */
7621 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7623 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7624 && fix->mode == mp->mode
7625 && (GET_CODE (fix->value) != CODE_LABEL
7626 || (CODE_LABEL_NUMBER (fix->value)
7627 == CODE_LABEL_NUMBER (mp->value)))
7628 && rtx_equal_p (fix->value, mp->value)
7629 /* Check that there is enough slack to move this entry to the
7630 end of the table (this is conservative). */
7631 && (mp->max_address
7632 > (minipool_barrier->address
7633 + minipool_vector_tail->offset
7634 + minipool_vector_tail->fix_size)))
7636 mp->refcount++;
7637 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7640 if (min_mp != NULL)
7641 mp->min_address += fix->fix_size;
7642 else
7644 /* Note the insertion point if necessary. */
7645 if (mp->min_address < min_address)
7647 /* For now, we do not allow the insertion of 8-byte alignment
7648 requiring nodes anywhere but at the start of the pool. */
7649 if (ARM_DOUBLEWORD_ALIGN
7650 && fix->fix_size == 8 && mp->fix_size != 8)
7651 return NULL;
7652 else
7653 min_mp = mp;
7655 else if (mp->max_address
7656 < minipool_barrier->address + mp->offset + fix->fix_size)
7658 /* Inserting before this entry would push the fix beyond
7659 its maximum address (which can happen if we have
7660 re-located a forwards fix); force the new fix to come
7661 after it. */
7662 min_mp = mp;
7663 min_address = mp->min_address + fix->fix_size;
7665 /* If we are inserting an 8-byte aligned quantity and
7666 we have not already found an insertion point, then
7667 make sure that all such 8-byte aligned quantities are
7668 placed at the start of the pool. */
7669 else if (ARM_DOUBLEWORD_ALIGN
7670 && min_mp == NULL
7671 && fix->fix_size == 8
7672 && mp->fix_size < 8)
7674 min_mp = mp;
7675 min_address = mp->min_address + fix->fix_size;
7680 /* We need to create a new entry. */
7681 mp = XNEW (Mnode);
7682 mp->fix_size = fix->fix_size;
7683 mp->mode = fix->mode;
7684 mp->value = fix->value;
7685 mp->refcount = 1;
7686 mp->max_address = minipool_barrier->address + 65536;
7688 mp->min_address = min_address;
7690 if (min_mp == NULL)
7692 mp->prev = NULL;
7693 mp->next = minipool_vector_head;
7695 if (mp->next == NULL)
7697 minipool_vector_tail = mp;
7698 minipool_vector_label = gen_label_rtx ();
7700 else
7701 mp->next->prev = mp;
7703 minipool_vector_head = mp;
7705 else
7707 mp->next = min_mp->next;
7708 mp->prev = min_mp;
7709 min_mp->next = mp;
7711 if (mp->next != NULL)
7712 mp->next->prev = mp;
7713 else
7714 minipool_vector_tail = mp;
7717 /* Save the new entry. */
7718 min_mp = mp;
7720 if (mp->prev)
7721 mp = mp->prev;
7722 else
7723 mp->offset = 0;
7725 /* Scan over the following entries and adjust their offsets. */
7726 while (mp->next != NULL)
7728 if (mp->next->min_address < mp->min_address + mp->fix_size)
7729 mp->next->min_address = mp->min_address + mp->fix_size;
7731 if (mp->refcount)
7732 mp->next->offset = mp->offset + mp->fix_size;
7733 else
7734 mp->next->offset = mp->offset;
7736 mp = mp->next;
7739 return min_mp;
7742 static void
7743 assign_minipool_offsets (Mfix *barrier)
7745 HOST_WIDE_INT offset = 0;
7746 Mnode *mp;
7748 minipool_barrier = barrier;
7750 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7752 mp->offset = offset;
7754 if (mp->refcount > 0)
7755 offset += mp->fix_size;
7759 /* Output the literal table */
7760 static void
7761 dump_minipool (rtx scan)
7763 Mnode * mp;
7764 Mnode * nmp;
7765 int align64 = 0;
7767 if (ARM_DOUBLEWORD_ALIGN)
7768 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7769 if (mp->refcount > 0 && mp->fix_size == 8)
7771 align64 = 1;
7772 break;
7775 if (dump_file)
7776 fprintf (dump_file,
7777 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7778 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7780 scan = emit_label_after (gen_label_rtx (), scan);
7781 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7782 scan = emit_label_after (minipool_vector_label, scan);
7784 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7786 if (mp->refcount > 0)
7788 if (dump_file)
7790 fprintf (dump_file,
7791 ";; Offset %u, min %ld, max %ld ",
7792 (unsigned) mp->offset, (unsigned long) mp->min_address,
7793 (unsigned long) mp->max_address);
7794 arm_print_value (dump_file, mp->value);
7795 fputc ('\n', dump_file);
7798 switch (mp->fix_size)
7800 #ifdef HAVE_consttable_1
7801 case 1:
7802 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7803 break;
7805 #endif
7806 #ifdef HAVE_consttable_2
7807 case 2:
7808 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7809 break;
7811 #endif
7812 #ifdef HAVE_consttable_4
7813 case 4:
7814 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7815 break;
7817 #endif
7818 #ifdef HAVE_consttable_8
7819 case 8:
7820 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7821 break;
7823 #endif
7824 default:
7825 gcc_unreachable ();
7829 nmp = mp->next;
7830 free (mp);
7833 minipool_vector_head = minipool_vector_tail = NULL;
7834 scan = emit_insn_after (gen_consttable_end (), scan);
7835 scan = emit_barrier_after (scan);
7838 /* Return the cost of forcibly inserting a barrier after INSN. */
7839 static int
7840 arm_barrier_cost (rtx insn)
7842 /* Basing the location of the pool on the loop depth is preferable,
7843 but at the moment, the basic block information seems to be
7844 corrupt by this stage of the compilation. */
7845 int base_cost = 50;
7846 rtx next = next_nonnote_insn (insn);
7848 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7849 base_cost -= 20;
7851 switch (GET_CODE (insn))
7853 case CODE_LABEL:
7854 /* It will always be better to place the table before the label, rather
7855 than after it. */
7856 return 50;
7858 case INSN:
7859 case CALL_INSN:
7860 return base_cost;
7862 case JUMP_INSN:
7863 return base_cost - 10;
7865 default:
7866 return base_cost + 10;
7870 /* Find the best place in the insn stream in the range
7871 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7872 Create the barrier by inserting a jump, and add a new fix entry for
7873 it. */
7874 static Mfix *
7875 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7877 HOST_WIDE_INT count = 0;
7878 rtx barrier;
7879 rtx from = fix->insn;
7880 /* The instruction after which we will insert the jump. */
7881 rtx selected = NULL;
7882 int selected_cost;
7883 /* The address at which the jump instruction will be placed. */
7884 HOST_WIDE_INT selected_address;
7885 Mfix * new_fix;
7886 HOST_WIDE_INT max_count = max_address - fix->address;
7887 rtx label = gen_label_rtx ();
7889 selected_cost = arm_barrier_cost (from);
7890 selected_address = fix->address;
7892 while (from && count < max_count)
7894 rtx tmp;
7895 int new_cost;
7897 /* This code shouldn't have been called if there was a natural barrier
7898 within range. */
7899 gcc_assert (GET_CODE (from) != BARRIER);
7901 /* Count the length of this insn. */
7902 count += get_attr_length (from);
7904 /* If there is a jump table, add its length. */
7905 tmp = is_jump_table (from);
7906 if (tmp != NULL)
7908 count += get_jump_table_size (tmp);
7910 /* Jump tables aren't in a basic block, so base the cost on
7911 the dispatch insn. If we select this location, we will
7912 still put the pool after the table. */
7913 new_cost = arm_barrier_cost (from);
7915 if (count < max_count
7916 && (!selected || new_cost <= selected_cost))
7918 selected = tmp;
7919 selected_cost = new_cost;
7920 selected_address = fix->address + count;
7923 /* Continue after the dispatch table. */
7924 from = NEXT_INSN (tmp);
7925 continue;
7928 new_cost = arm_barrier_cost (from);
7930 if (count < max_count
7931 && (!selected || new_cost <= selected_cost))
7933 selected = from;
7934 selected_cost = new_cost;
7935 selected_address = fix->address + count;
7938 from = NEXT_INSN (from);
7941 /* Make sure that we found a place to insert the jump. */
7942 gcc_assert (selected);
7944 /* Create a new JUMP_INSN that branches around a barrier. */
7945 from = emit_jump_insn_after (gen_jump (label), selected);
7946 JUMP_LABEL (from) = label;
7947 barrier = emit_barrier_after (from);
7948 emit_label_after (label, barrier);
7950 /* Create a minipool barrier entry for the new barrier. */
7951 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7952 new_fix->insn = barrier;
7953 new_fix->address = selected_address;
7954 new_fix->next = fix->next;
7955 fix->next = new_fix;
7957 return new_fix;
7960 /* Record that there is a natural barrier in the insn stream at
7961 ADDRESS. */
7962 static void
7963 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7965 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7967 fix->insn = insn;
7968 fix->address = address;
7970 fix->next = NULL;
7971 if (minipool_fix_head != NULL)
7972 minipool_fix_tail->next = fix;
7973 else
7974 minipool_fix_head = fix;
7976 minipool_fix_tail = fix;
7979 /* Record INSN, which will need fixing up to load a value from the
7980 minipool. ADDRESS is the offset of the insn since the start of the
7981 function; LOC is a pointer to the part of the insn which requires
7982 fixing; VALUE is the constant that must be loaded, which is of type
7983 MODE. */
7984 static void
7985 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7986 enum machine_mode mode, rtx value)
7988 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7990 #ifdef AOF_ASSEMBLER
7991 /* PIC symbol references need to be converted into offsets into the
7992 based area. */
7993 /* XXX This shouldn't be done here. */
7994 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7995 value = aof_pic_entry (value);
7996 #endif /* AOF_ASSEMBLER */
7998 fix->insn = insn;
7999 fix->address = address;
8000 fix->loc = loc;
8001 fix->mode = mode;
8002 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
8003 fix->value = value;
8004 fix->forwards = get_attr_pool_range (insn);
8005 fix->backwards = get_attr_neg_pool_range (insn);
8006 fix->minipool = NULL;
8008 /* If an insn doesn't have a range defined for it, then it isn't
8009 expecting to be reworked by this code. Better to stop now than
8010 to generate duff assembly code. */
8011 gcc_assert (fix->forwards || fix->backwards);
8013 /* If an entry requires 8-byte alignment then assume all constant pools
8014 require 4 bytes of padding. Trying to do this later on a per-pool
8015 basis is awkward because existing pool entries have to be modified. */
8016 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
8017 minipool_pad = 4;
8019 if (dump_file)
8021 fprintf (dump_file,
8022 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
8023 GET_MODE_NAME (mode),
8024 INSN_UID (insn), (unsigned long) address,
8025 -1 * (long)fix->backwards, (long)fix->forwards);
8026 arm_print_value (dump_file, fix->value);
8027 fprintf (dump_file, "\n");
8030 /* Add it to the chain of fixes. */
8031 fix->next = NULL;
8033 if (minipool_fix_head != NULL)
8034 minipool_fix_tail->next = fix;
8035 else
8036 minipool_fix_head = fix;
8038 minipool_fix_tail = fix;
8041 /* Return the cost of synthesizing a 64-bit constant VAL inline.
8042 Returns the number of insns needed, or 99 if we don't know how to
8043 do it. */
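/* For example, the DImode value 0x100000001 costs two insns (one
   "mov" per 32-bit half), whereas a value whose halves each need a
   chain of immediate operations costs correspondingly more.  */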
8044 int
8045 arm_const_double_inline_cost (rtx val)
8047 rtx lowpart, highpart;
8048 enum machine_mode mode;
8050 mode = GET_MODE (val);
8052 if (mode == VOIDmode)
8053 mode = DImode;
8055 gcc_assert (GET_MODE_SIZE (mode) == 8);
8057 lowpart = gen_lowpart (SImode, val);
8058 highpart = gen_highpart_mode (SImode, mode, val);
8060 gcc_assert (GET_CODE (lowpart) == CONST_INT);
8061 gcc_assert (GET_CODE (highpart) == CONST_INT);
8063 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
8064 NULL_RTX, NULL_RTX, 0, 0)
8065 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
8066 NULL_RTX, NULL_RTX, 0, 0));
8069 /* Return true if it is worthwhile to split a 64-bit constant into two
8070 32-bit operations. This is the case if optimizing for size, or
8071 if we have load delay slots, or if one 32-bit part can be done with
8072 a single data operation. */
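/* For example, 0xff00000000000000 has a high part of 0xff000000 (a
   valid rotated immediate) and a low part of zero, so two SImode
   moves beat a 64-bit constant pool load.  */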
8073 bool
8074 arm_const_double_by_parts (rtx val)
8076 enum machine_mode mode = GET_MODE (val);
8077 rtx part;
8079 if (optimize_size || arm_ld_sched)
8080 return true;
8082 if (mode == VOIDmode)
8083 mode = DImode;
8085 part = gen_highpart_mode (SImode, mode, val);
8087 gcc_assert (GET_CODE (part) == CONST_INT);
8089 if (const_ok_for_arm (INTVAL (part))
8090 || const_ok_for_arm (~INTVAL (part)))
8091 return true;
8093 part = gen_lowpart (SImode, val);
8095 gcc_assert (GET_CODE (part) == CONST_INT);
8097 if (const_ok_for_arm (INTVAL (part))
8098 || const_ok_for_arm (~INTVAL (part)))
8099 return true;
8101 return false;
8104 /* Scan INSN and note any of its operands that need fixing.
8105 If DO_PUSHES is false we do not actually push any of the fixups
8106 needed. The function returns TRUE if any fixups were needed/pushed.
8107 This is used by arm_memory_load_p() which needs to know about loads
8108 of constants that will be converted into minipool loads. */
8109 static bool
8110 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
8112 bool result = false;
8113 int opno;
8115 extract_insn (insn);
8117 if (!constrain_operands (1))
8118 fatal_insn_not_found (insn);
8120 if (recog_data.n_alternatives == 0)
8121 return false;
8123 /* Fill in recog_op_alt with information about the constraints of
8124 this insn. */
8125 preprocess_constraints ();
8127 for (opno = 0; opno < recog_data.n_operands; opno++)
8129 /* Things we need to fix can only occur in inputs. */
8130 if (recog_data.operand_type[opno] != OP_IN)
8131 continue;
8133 /* If this alternative is a memory reference, then any mention
8134 of constants in this alternative is really to fool reload
8135 into allowing us to accept one there. We need to fix them up
8136 now so that we output the right code. */
8137 if (recog_op_alt[opno][which_alternative].memory_ok)
8139 rtx op = recog_data.operand[opno];
8141 if (CONSTANT_P (op))
8143 if (do_pushes)
8144 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
8145 recog_data.operand_mode[opno], op);
8146 result = true;
8148 else if (GET_CODE (op) == MEM
8149 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
8150 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
8152 if (do_pushes)
8154 rtx cop = avoid_constant_pool_reference (op);
8156 /* Casting the address of something to a mode narrower
8157 than a word can cause avoid_constant_pool_reference()
8158 to return the pool reference itself. That's no good to
8159 us here. Let's just hope that we can use the
8160 constant pool value directly. */
8161 if (op == cop)
8162 cop = get_pool_constant (XEXP (op, 0));
8164 push_minipool_fix (insn, address,
8165 recog_data.operand_loc[opno],
8166 recog_data.operand_mode[opno], cop);
8169 result = true;
8174 return result;
8177 /* GCC puts the pool in the wrong place for ARM, since we can only
8178 load addresses within a limited distance of the pc. We do some
8179 special munging to move the constant pool values to the correct
8180 point in the code. */
8181 static void
8182 arm_reorg (void)
8184 rtx insn;
8185 HOST_WIDE_INT address = 0;
8186 Mfix * fix;
8188 minipool_fix_head = minipool_fix_tail = NULL;
8190 /* The first insn must always be a note, or the code below won't
8191 scan it properly. */
8192 insn = get_insns ();
8193 gcc_assert (GET_CODE (insn) == NOTE);
8194 minipool_pad = 0;
8196 /* Scan all the insns and record the operands that will need fixing. */
8197 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
8199 if (TARGET_CIRRUS_FIX_INVALID_INSNS
8200 && (arm_cirrus_insn_p (insn)
8201 || GET_CODE (insn) == JUMP_INSN
8202 || arm_memory_load_p (insn)))
8203 cirrus_reorg (insn);
8205 if (GET_CODE (insn) == BARRIER)
8206 push_minipool_barrier (insn, address);
8207 else if (INSN_P (insn))
8209 rtx table;
8211 note_invalid_constants (insn, address, true);
8212 address += get_attr_length (insn);
8214 /* If the insn is a vector jump, add the size of the table
8215 and skip the table. */
8216 if ((table = is_jump_table (insn)) != NULL)
8218 address += get_jump_table_size (table);
8219 insn = table;
8224 fix = minipool_fix_head;
8226 /* Now scan the fixups and perform the required changes. */
8227 while (fix)
8229 Mfix * ftmp;
8230 Mfix * fdel;
8231 Mfix * last_added_fix;
8232 Mfix * last_barrier = NULL;
8233 Mfix * this_fix;
8235 /* Skip any further barriers before the next fix. */
8236 while (fix && GET_CODE (fix->insn) == BARRIER)
8237 fix = fix->next;
8239 /* No more fixes. */
8240 if (fix == NULL)
8241 break;
8243 last_added_fix = NULL;
8245 for (ftmp = fix; ftmp; ftmp = ftmp->next)
8247 if (GET_CODE (ftmp->insn) == BARRIER)
8249 if (ftmp->address >= minipool_vector_head->max_address)
8250 break;
8252 last_barrier = ftmp;
8254 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
8255 break;
8257 last_added_fix = ftmp; /* Keep track of the last fix added. */
8260 /* If we found a barrier, drop back to that; any fixes that we
8261 could have reached but come after the barrier will now go in
8262 the next mini-pool. */
8263 if (last_barrier != NULL)
8265 /* Reduce the refcount for those fixes that won't go into this
8266 pool after all. */
8267 for (fdel = last_barrier->next;
8268 fdel && fdel != ftmp;
8269 fdel = fdel->next)
8271 fdel->minipool->refcount--;
8272 fdel->minipool = NULL;
8275 ftmp = last_barrier;
8277 else
8279 /* ftmp is the first fix that we can't fit into this pool, and
8280 there are no natural barriers that we could use. Insert a
8281 new barrier in the code somewhere between the previous
8282 fix and this one, and arrange to jump around it. */
8283 HOST_WIDE_INT max_address;
8285 /* The last item on the list of fixes must be a barrier, so
8286 we can never run off the end of the list of fixes without
8287 last_barrier being set. */
8288 gcc_assert (ftmp);
8290 max_address = minipool_vector_head->max_address;
8291 /* Check that there isn't another fix that is in range that
8292 we couldn't fit into this pool because the pool was
8293 already too large: we need to put the pool before such an
8294 instruction. The pool itself may come just after the
8295 fix because create_fix_barrier also allows space for a
8296 jump instruction. */
8297 if (ftmp->address < max_address)
8298 max_address = ftmp->address + 1;
8300 last_barrier = create_fix_barrier (last_added_fix, max_address);
8303 assign_minipool_offsets (last_barrier);
8305 while (ftmp)
8307 if (GET_CODE (ftmp->insn) != BARRIER
8308 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
8309 == NULL))
8310 break;
8312 ftmp = ftmp->next;
8315 /* Scan over the fixes we have identified for this pool, fixing them
8316 up and adding the constants to the pool itself. */
8317 for (this_fix = fix; this_fix && ftmp != this_fix;
8318 this_fix = this_fix->next)
8319 if (GET_CODE (this_fix->insn) != BARRIER)
8321 rtx addr
8322 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
8323 minipool_vector_label),
8324 this_fix->minipool->offset);
8325 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
8328 dump_minipool (last_barrier->insn);
8329 fix = ftmp;
8332 /* From now on we must synthesize any constants that we can't handle
8333 directly. This can happen if the RTL gets split during final
8334 instruction generation. */
8335 after_arm_reorg = 1;
8337 /* Free the minipool memory. */
8338 obstack_free (&minipool_obstack, minipool_startobj);
8341 /* Routines to output assembly language. */
8343 /* If the rtx holds one of the known FP constants, return its value as a string.
8344 In this way we can ensure that valid double constants are generated even
8345 when cross compiling. */
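/* The eight values checked here are the FPA immediate constants
   0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 0.5 and 10.0 that init_fp_table
   sets up.  */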
8346 const char *
8347 fp_immediate_constant (rtx x)
8349 REAL_VALUE_TYPE r;
8350 int i;
8352 if (!fp_consts_inited)
8353 init_fp_table ();
8355 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8356 for (i = 0; i < 8; i++)
8357 if (REAL_VALUES_EQUAL (r, values_fp[i]))
8358 return strings_fp[i];
8360 gcc_unreachable ();
8363 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
8364 static const char *
8365 fp_const_from_val (REAL_VALUE_TYPE *r)
8367 int i;
8369 if (!fp_consts_inited)
8370 init_fp_table ();
8372 for (i = 0; i < 8; i++)
8373 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
8374 return strings_fp[i];
8376 gcc_unreachable ();
8379 /* Output the operands of a LDM/STM instruction to STREAM.
8380 MASK is the ARM register set mask of which only bits 0-15 are important.
8381 REG is the base register, either the frame pointer or the stack pointer.
8382 INSTR is the possibly suffixed load or store instruction. */
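/* For example,
       print_multi_reg (stream, "ldmfd\t%r!", SP_REGNUM, 0x4030);
   emits "ldmfd sp!, {r4, r5, lr}".  */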
8384 static void
8385 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
8386 unsigned long mask)
8388 unsigned i;
8389 bool not_first = FALSE;
8391 fputc ('\t', stream);
8392 asm_fprintf (stream, instr, reg);
8393 fputs (", {", stream);
8395 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8396 if (mask & (1 << i))
8398 if (not_first)
8399 fprintf (stream, ", ");
8401 asm_fprintf (stream, "%r", i);
8402 not_first = TRUE;
8405 fprintf (stream, "}\n");
8409 /* Output a FLDMX instruction to STREAM.
8410 BASE is the register containing the address.
8411 REG and COUNT specify the register range.
8412 Extra registers may be added to avoid hardware bugs. */
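/* For example, arm_output_fldmx (stream, SP_REGNUM, 0, 2) on a
   pre-ARMv6 core emits "fldmfdx sp!, {d0, d1, d2}": three registers
   rather than the two requested, because of the erratum workaround
   below.  */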
8414 static void
8415 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
8417 int i;
8419 /* Work around the ARM10 VFPr1 bug. */
8420 if (count == 2 && !arm_arch6)
8422 if (reg == 15)
8423 reg--;
8424 count++;
8427 fputc ('\t', stream);
8428 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
8430 for (i = reg; i < reg + count; i++)
8432 if (i > reg)
8433 fputs (", ", stream);
8434 asm_fprintf (stream, "d%d", i);
8436 fputs ("}\n", stream);
8441 /* Output the assembly for a store multiple. */
8443 const char *
8444 vfp_output_fstmx (rtx * operands)
8446 char pattern[100];
8447 int p;
8448 int base;
8449 int i;
8451 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8452 p = strlen (pattern);
8454 gcc_assert (GET_CODE (operands[1]) == REG);
8456 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8457 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8459 p += sprintf (&pattern[p], ", d%d", base + i);
8461 strcpy (&pattern[p], "}");
8463 output_asm_insn (pattern, operands);
8464 return "";
8468 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
8469 number of bytes pushed. */
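/* With COUNT == 2 on a pre-ARMv6 core the erratum workaround below
   bumps the count to three pairs, so the call pushes and reports
   28 bytes (3 * 8 plus the 4-byte FSTMX pad word) instead of 20.  */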
8471 static int
8472 vfp_emit_fstmx (int base_reg, int count)
8474 rtx par;
8475 rtx dwarf;
8476 rtx tmp, reg;
8477 int i;
8479 /* Work around the ARM10 VFPr1 bug: data corruption can occur when exactly two
8480 register pairs are stored by a store multiple insn. We avoid this
8481 by pushing an extra pair. */
8482 if (count == 2 && !arm_arch6)
8484 if (base_reg == LAST_VFP_REGNUM - 3)
8485 base_reg -= 2;
8486 count++;
8489 /* ??? The frame layout is implementation defined. We describe
8490 standard format 1 (equivalent to an FSTMD insn and an unused pad word).
8491 We really need some way of representing the whole block so that the
8492 unwinder can figure it out at runtime. */
8493 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8494 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8496 reg = gen_rtx_REG (DFmode, base_reg);
8497 base_reg += 2;
8499 XVECEXP (par, 0, 0)
8500 = gen_rtx_SET (VOIDmode,
8501 gen_frame_mem (BLKmode,
8502 gen_rtx_PRE_DEC (BLKmode,
8503 stack_pointer_rtx)),
8504 gen_rtx_UNSPEC (BLKmode,
8505 gen_rtvec (1, reg),
8506 UNSPEC_PUSH_MULT));
8508 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8509 plus_constant (stack_pointer_rtx, -(count * 8 + 4)));
8510 RTX_FRAME_RELATED_P (tmp) = 1;
8511 XVECEXP (dwarf, 0, 0) = tmp;
8513 tmp = gen_rtx_SET (VOIDmode,
8514 gen_frame_mem (DFmode, stack_pointer_rtx),
8515 reg);
8516 RTX_FRAME_RELATED_P (tmp) = 1;
8517 XVECEXP (dwarf, 0, 1) = tmp;
8519 for (i = 1; i < count; i++)
8521 reg = gen_rtx_REG (DFmode, base_reg);
8522 base_reg += 2;
8523 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8525 tmp = gen_rtx_SET (VOIDmode,
8526 gen_frame_mem (DFmode,
8527 plus_constant (stack_pointer_rtx,
8528 i * 8)),
8529 reg);
8530 RTX_FRAME_RELATED_P (tmp) = 1;
8531 XVECEXP (dwarf, 0, i + 1) = tmp;
8534 par = emit_insn (par);
8535 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8536 REG_NOTES (par));
8537 RTX_FRAME_RELATED_P (par) = 1;
8539 return count * 8 + 4;
8543 /* Output a 'call' insn. */
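/* On a pre-v4t core without interworking, a call through r2 comes out
   as the two-instruction sequence
       mov     lr, pc
       mov     pc, r2
   while v4t and interworking targets use "bx r2" instead.  */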
8544 const char *
8545 output_call (rtx *operands)
8547 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8549 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8550 if (REGNO (operands[0]) == LR_REGNUM)
8552 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8553 output_asm_insn ("mov%?\t%0, %|lr", operands);
8556 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8558 if (TARGET_INTERWORK || arm_arch4t)
8559 output_asm_insn ("bx%?\t%0", operands);
8560 else
8561 output_asm_insn ("mov%?\t%|pc, %0", operands);
8563 return "";
8566 /* Output a 'call' insn that is a reference in memory. */
8567 const char *
8568 output_call_mem (rtx *operands)
8570 if (TARGET_INTERWORK && !arm_arch5)
8572 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8573 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8574 output_asm_insn ("bx%?\t%|ip", operands);
8576 else if (regno_use_in (LR_REGNUM, operands[0]))
8578 /* LR is used in the memory address. We load the address in the
8579 first instruction. It's safe to use IP as the target of the
8580 load since the call will kill it anyway. */
8581 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8582 if (arm_arch5)
8583 output_asm_insn ("blx%?\t%|ip", operands);
8584 else
8586 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8587 if (arm_arch4t)
8588 output_asm_insn ("bx%?\t%|ip", operands);
8589 else
8590 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8593 else
8595 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8596 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8599 return "";
8603 /* Output a move from arm registers to an fpa register.
8604 OPERANDS[0] is an fpa register.
8605 OPERANDS[1] is the first of the three arm registers holding the value. */
8606 const char *
8607 output_mov_long_double_fpa_from_arm (rtx *operands)
8609 int arm_reg0 = REGNO (operands[1]);
8610 rtx ops[3];
8612 gcc_assert (arm_reg0 != IP_REGNUM);
8614 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8615 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8616 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8618 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8619 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8621 return "";
8624 /* Output a move from an fpa register to arm registers.
8625 OPERANDS[0] is the first of the three arm registers holding the value.
8626 OPERANDS[1] is an fpa register. */
8627 const char *
8628 output_mov_long_double_arm_from_fpa (rtx *operands)
8630 int arm_reg0 = REGNO (operands[0]);
8631 rtx ops[3];
8633 gcc_assert (arm_reg0 != IP_REGNUM);
8635 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8636 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8637 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8639 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8640 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8641 return "";
8644 /* Output a move from arm registers to arm registers of a long double.
8645 OPERANDS[0] is the destination.
8646 OPERANDS[1] is the source. */
8647 const char *
8648 output_mov_long_double_arm_from_arm (rtx *operands)
8650 /* We have to be careful here because the two might overlap. */
8651 int dest_start = REGNO (operands[0]);
8652 int src_start = REGNO (operands[1]);
8653 rtx ops[2];
8654 int i;
8656 if (dest_start < src_start)
8658 for (i = 0; i < 3; i++)
8660 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8661 ops[1] = gen_rtx_REG (SImode, src_start + i);
8662 output_asm_insn ("mov%?\t%0, %1", ops);
8665 else
8667 for (i = 2; i >= 0; i--)
8669 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8670 ops[1] = gen_rtx_REG (SImode, src_start + i);
8671 output_asm_insn ("mov%?\t%0, %1", ops);
8675 return "";
8679 /* Output a move from arm registers to an fpa register.
8680 OPERANDS[0] is an fpa register.
8681 OPERANDS[1] is the first register of an arm register pair. */
8682 const char *
8683 output_mov_double_fpa_from_arm (rtx *operands)
8685 int arm_reg0 = REGNO (operands[1]);
8686 rtx ops[2];
8688 gcc_assert (arm_reg0 != IP_REGNUM);
8690 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8691 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8692 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8693 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8694 return "";
8697 /* Output a move from an fpa register to arm registers.
8698 OPERANDS[0] is the first register of an arm register pair.
8699 OPERANDS[1] is an fpa register. */
8700 const char *
8701 output_mov_double_arm_from_fpa (rtx *operands)
8703 int arm_reg0 = REGNO (operands[0]);
8704 rtx ops[2];
8706 gcc_assert (arm_reg0 != IP_REGNUM);
8708 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8709 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8710 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8711 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8712 return "";
8715 /* Output a move between double words.
8716 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8717 or MEM<-REG, and all MEMs must be offsettable addresses. */
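/* For instance, a DImode load from a plain register address is emitted
   as "ldmia rN, {rD, rD+1}"; targets with LDRD use that instruction
   for the pre/post-indexed cases handled below.  */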
8718 const char *
8719 output_move_double (rtx *operands)
8721 enum rtx_code code0 = GET_CODE (operands[0]);
8722 enum rtx_code code1 = GET_CODE (operands[1]);
8723 rtx otherops[3];
8725 if (code0 == REG)
8727 int reg0 = REGNO (operands[0]);
8729 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8731 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8733 switch (GET_CODE (XEXP (operands[1], 0)))
8735 case REG:
8736 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8737 break;
8739 case PRE_INC:
8740 gcc_assert (TARGET_LDRD);
8741 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8742 break;
8744 case PRE_DEC:
8745 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8746 break;
8748 case POST_INC:
8749 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8750 break;
8752 case POST_DEC:
8753 gcc_assert (TARGET_LDRD);
8754 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8755 break;
8757 case PRE_MODIFY:
8758 case POST_MODIFY:
8759 otherops[0] = operands[0];
8760 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8761 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8763 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8765 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8767 /* Registers overlap so split out the increment. */
8768 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8769 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8771 else
8772 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8774 else
8776 /* We only allow constant increments, so this is safe. */
8777 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8779 break;
8781 case LABEL_REF:
8782 case CONST:
8783 output_asm_insn ("adr%?\t%0, %1", operands);
8784 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8785 break;
8787 default:
8788 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8789 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8791 otherops[0] = operands[0];
8792 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8793 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8795 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8797 if (GET_CODE (otherops[2]) == CONST_INT)
8799 switch ((int) INTVAL (otherops[2]))
8801 case -8:
8802 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8803 return "";
8804 case -4:
8805 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8806 return "";
8807 case 4:
8808 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8809 return "";
8812 if (TARGET_LDRD
8813 && (GET_CODE (otherops[2]) == REG
8814 || (GET_CODE (otherops[2]) == CONST_INT
8815 && INTVAL (otherops[2]) > -256
8816 && INTVAL (otherops[2]) < 256)))
8818 if (reg_overlap_mentioned_p (otherops[0],
8819 otherops[2]))
8821 /* Swap base and index registers over to
8822 avoid a conflict. */
8823 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8824 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8826 /* If both registers conflict, it will usually
8827 have been fixed by a splitter. */
8828 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8830 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8831 output_asm_insn ("ldr%?d\t%0, [%1]",
8832 otherops);
8834 else
8835 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8836 return "";
8839 if (GET_CODE (otherops[2]) == CONST_INT)
8841 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8842 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8843 else
8844 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8846 else
8847 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8849 else
8850 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8852 return "ldm%?ia\t%0, %M0";
8854 else
8856 otherops[1] = adjust_address (operands[1], SImode, 4);
8857 /* Take care of overlapping base/data reg. */
8858 if (reg_mentioned_p (operands[0], operands[1]))
8860 output_asm_insn ("ldr%?\t%0, %1", otherops);
8861 output_asm_insn ("ldr%?\t%0, %1", operands);
8863 else
8865 output_asm_insn ("ldr%?\t%0, %1", operands);
8866 output_asm_insn ("ldr%?\t%0, %1", otherops);
8871 else
8873 /* Constraints should ensure this. */
8874 gcc_assert (code0 == MEM && code1 == REG);
8875 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8877 switch (GET_CODE (XEXP (operands[0], 0)))
8879 case REG:
8880 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8881 break;
8883 case PRE_INC:
8884 gcc_assert (TARGET_LDRD);
8885 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8886 break;
8888 case PRE_DEC:
8889 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8890 break;
8892 case POST_INC:
8893 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8894 break;
8896 case POST_DEC:
8897 gcc_assert (TARGET_LDRD);
8898 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8899 break;
8901 case PRE_MODIFY:
8902 case POST_MODIFY:
8903 otherops[0] = operands[1];
8904 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8905 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8907 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8908 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8909 else
8910 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8911 break;
8913 case PLUS:
8914 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8915 if (GET_CODE (otherops[2]) == CONST_INT)
8917 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8919 case -8:
8920 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8921 return "";
8923 case -4:
8924 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8925 return "";
8927 case 4:
8928 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8929 return "";
8932 if (TARGET_LDRD
8933 && (GET_CODE (otherops[2]) == REG
8934 || (GET_CODE (otherops[2]) == CONST_INT
8935 && INTVAL (otherops[2]) > -256
8936 && INTVAL (otherops[2]) < 256)))
8938 otherops[0] = operands[1];
8939 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8940 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8941 return "";
8943 /* Fall through. */
8945 default:
8946 otherops[0] = adjust_address (operands[0], SImode, 4);
8947 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8948 output_asm_insn ("str%?\t%1, %0", operands);
8949 output_asm_insn ("str%?\t%1, %0", otherops);
8953 return "";
8956 /* Output an ADD r, s, #n where n may be too big for one instruction.
8957 If n is zero and the source and destination registers are the same, output nothing. */
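/* For example, n = 0x10004 is split into two valid immediates and
   emitted as
       add     rD, rS, #4
       add     rD, rD, #65536  */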
8958 const char *
8959 output_add_immediate (rtx *operands)
8961 HOST_WIDE_INT n = INTVAL (operands[2]);
8963 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8965 if (n < 0)
8966 output_multi_immediate (operands,
8967 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8968 -n);
8969 else
8970 output_multi_immediate (operands,
8971 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8975 return "";
8978 /* Output a multiple immediate operation.
8979 OPERANDS is the vector of operands referred to in the output patterns.
8980 INSTR1 is the output pattern to use for the first constant.
8981 INSTR2 is the output pattern to use for subsequent constants.
8982 IMMED_OP is the index of the constant slot in OPERANDS.
8983 N is the constant value. */
8984 static const char *
8985 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8986 int immed_op, HOST_WIDE_INT n)
8988 #if HOST_BITS_PER_WIDE_INT > 32
8989 n &= 0xffffffff;
8990 #endif
8992 if (n == 0)
8994 /* Quick and easy output. */
8995 operands[immed_op] = const0_rtx;
8996 output_asm_insn (instr1, operands);
8998 else
9000 int i;
9001 const char * instr = instr1;
9003 /* Note that n is never zero here (which would give no output). */
9004 for (i = 0; i < 32; i += 2)
9006 if (n & (3 << i))
9008 operands[immed_op] = GEN_INT (n & (255 << i));
9009 output_asm_insn (instr, operands);
9010 instr = instr2;
9011 i += 6;
9016 return "";
9019 /* Return the appropriate ARM instruction for the operation code.
9020 The returned result should not be overwritten. OP is the rtx of the
9021 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
9022 was shifted. */
9023 const char *
9024 arithmetic_instr (rtx op, int shift_first_arg)
9026 switch (GET_CODE (op))
9028 case PLUS:
9029 return "add";
9031 case MINUS:
9032 return shift_first_arg ? "rsb" : "sub";
9034 case IOR:
9035 return "orr";
9037 case XOR:
9038 return "eor";
9040 case AND:
9041 return "and";
9043 default:
9044 gcc_unreachable ();
9048 /* Ensure valid constant shifts and return the appropriate shift mnemonic
9049 for the operation code. The returned result should not be overwritten.
9050 OP is the rtx of the shift.
9051 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
9052 constant shift amount otherwise. */
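/* For example, (ashiftrt (reg) (const_int 5)) yields "asr" with
   *AMOUNTP set to 5, and (mult (reg) (const_int 8)) is treated as a
   shift, yielding "asl" with *AMOUNTP set to 3.  */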
9053 static const char *
9054 shift_op (rtx op, HOST_WIDE_INT *amountp)
9056 const char * mnem;
9057 enum rtx_code code = GET_CODE (op);
9059 switch (GET_CODE (XEXP (op, 1)))
9061 case REG:
9062 case SUBREG:
9063 *amountp = -1;
9064 break;
9066 case CONST_INT:
9067 *amountp = INTVAL (XEXP (op, 1));
9068 break;
9070 default:
9071 gcc_unreachable ();
9074 switch (code)
9076 case ASHIFT:
9077 mnem = "asl";
9078 break;
9080 case ASHIFTRT:
9081 mnem = "asr";
9082 break;
9084 case LSHIFTRT:
9085 mnem = "lsr";
9086 break;
9088 case ROTATE:
9089 gcc_assert (*amountp != -1);
9090 *amountp = 32 - *amountp;
9092 /* Fall through. */
9094 case ROTATERT:
9095 mnem = "ror";
9096 break;
9098 case MULT:
9099 /* We never have to worry about the amount being other than a
9100 power of 2, since this case can never be reloaded from a reg. */
9101 gcc_assert (*amountp != -1);
9102 *amountp = int_log2 (*amountp);
9103 return "asl";
9105 default:
9106 gcc_unreachable ();
9109 if (*amountp != -1)
9111 /* This is not 100% correct, but follows from the desire to merge
9112 multiplication by a power of 2 with the recognizer for a
9113 shift. >=32 is not a valid shift for "asl", so we must try and
9114 output a shift that produces the correct arithmetical result.
9115 Using lsr #32 is identical except for the fact that the carry bit
9116 is not set correctly if we set the flags; but we never use the
9117 carry bit from such an operation, so we can ignore that. */
9118 if (code == ROTATERT)
9119 /* Rotate is just modulo 32. */
9120 *amountp &= 31;
9121 else if (*amountp != (*amountp & 31))
9123 if (code == ASHIFT)
9124 mnem = "lsr";
9125 *amountp = 32;
9128 /* Shifts of 0 are no-ops. */
9129 if (*amountp == 0)
9130 return NULL;
9133 return mnem;
9136 /* Obtain the shift count from POWER, which should be an exact power of two. */
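/* int_log2 (64), for instance, returns 6.  Note that the loop below
   really finds the lowest set bit, which equals the logarithm only
   for an exact power of two.  */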
9138 static HOST_WIDE_INT
9139 int_log2 (HOST_WIDE_INT power)
9141 HOST_WIDE_INT shift = 0;
9143 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
9145 gcc_assert (shift <= 31);
9146 shift++;
9149 return shift;
9152 /* Output a .ascii pseudo-op, keeping track of lengths. This is
9153 because /bin/as is horribly restrictive. The judgement about
9154 whether or not each character is 'printable' (and can be output as
9155 is) or not (and must be printed with an octal escape) must be made
9156 with reference to the *host* character set -- the situation is
9157 similar to that discussed in the comments above pp_c_char in
9158 c-pretty-print.c. */
9160 #define MAX_ASCII_LEN 51
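/* For example, output_ascii_pseudo_op (stream, "hi\n", 4), where the
   length includes the trailing NUL, emits
       .ascii "hi\012\000"
   with the non-printable bytes escaped in octal.  */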
9162 void
9163 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
9165 int i;
9166 int len_so_far = 0;
9168 fputs ("\t.ascii\t\"", stream);
9170 for (i = 0; i < len; i++)
9172 int c = p[i];
9174 if (len_so_far >= MAX_ASCII_LEN)
9176 fputs ("\"\n\t.ascii\t\"", stream);
9177 len_so_far = 0;
9180 if (ISPRINT (c))
9182 if (c == '\\' || c == '\"')
9184 putc ('\\', stream);
9185 len_so_far++;
9187 putc (c, stream);
9188 len_so_far++;
9190 else
9192 fprintf (stream, "\\%03o", c);
9193 len_so_far += 4;
9197 fputs ("\"\n", stream);
9200 /* Compute the register save mask for registers 0 through 12
9201 inclusive. This code is used by arm_compute_save_reg_mask. */
9203 static unsigned long
9204 arm_compute_save_reg0_reg12_mask (void)
9206 unsigned long func_type = arm_current_func_type ();
9207 unsigned long save_reg_mask = 0;
9208 unsigned int reg;
9210 if (IS_INTERRUPT (func_type))
9212 unsigned int max_reg;
9213 /* Interrupt functions must not corrupt any registers,
9214 even call clobbered ones. If this is a leaf function
9215 we can just examine the registers used by the RTL, but
9216 otherwise we have to assume that whatever function is
9217 called might clobber anything, and so we have to save
9218 all the call-clobbered registers as well. */
9219 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
9220 /* FIQ handlers have registers r8 - r12 banked, so
9221 we only need to check r0 - r7. Normal ISRs only
9222 bank r14 and r15, so we must check up to r12.
9223 r13 is the stack pointer which is always preserved,
9224 so we do not need to consider it here. */
9225 max_reg = 7;
9226 else
9227 max_reg = 12;
9229 for (reg = 0; reg <= max_reg; reg++)
9230 if (regs_ever_live[reg]
9231 || (! current_function_is_leaf && call_used_regs [reg]))
9232 save_reg_mask |= (1 << reg);
9234 /* Also save the pic base register if necessary. */
9235 if (flag_pic
9236 && !TARGET_SINGLE_PIC_BASE
9237 && arm_pic_register != INVALID_REGNUM
9238 && current_function_uses_pic_offset_table)
9239 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9241 else
9243 /* In the normal case we only need to save those registers
9244 which are call saved and which are used by this function. */
9245 for (reg = 0; reg <= 10; reg++)
9246 if (regs_ever_live[reg] && ! call_used_regs [reg])
9247 save_reg_mask |= (1 << reg);
9249 /* Handle the frame pointer as a special case. */
9250 if (! TARGET_APCS_FRAME
9251 && ! frame_pointer_needed
9252 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
9253 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9254 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9256 /* If we aren't loading the PIC register,
9257 don't stack it even though it may be live. */
9258 if (flag_pic
9259 && !TARGET_SINGLE_PIC_BASE
9260 && arm_pic_register != INVALID_REGNUM
9261 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
9262 || current_function_uses_pic_offset_table))
9263 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9266 /* Save registers so the exception handler can modify them. */
9267 if (current_function_calls_eh_return)
9269 unsigned int i;
9271 for (i = 0; ; i++)
9273 reg = EH_RETURN_DATA_REGNO (i);
9274 if (reg == INVALID_REGNUM)
9275 break;
9276 save_reg_mask |= 1 << reg;
9280 return save_reg_mask;
9283 /* Compute a bit mask of which registers need to be
9284 saved on the stack for the current function. */
9286 static unsigned long
9287 arm_compute_save_reg_mask (void)
9289 unsigned int save_reg_mask = 0;
9290 unsigned long func_type = arm_current_func_type ();
9292 if (IS_NAKED (func_type))
9293 /* This should never really happen. */
9294 return 0;
9296 /* If we are creating a stack frame, then we must save the frame pointer,
9297 IP (which will hold the old stack pointer), LR and the PC. */
9298 if (frame_pointer_needed)
9299 save_reg_mask |=
9300 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9301 | (1 << IP_REGNUM)
9302 | (1 << LR_REGNUM)
9303 | (1 << PC_REGNUM);
9305 /* Volatile functions do not return, so there
9306 is no need to save any other registers. */
9307 if (IS_VOLATILE (func_type))
9308 return save_reg_mask;
9310 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9312 /* Decide if we need to save the link register.
9313 Interrupt routines have their own banked link register,
9314 so they never need to save it.
9315 Otherwise if we do not use the link register we do not need to save
9316 it. If we are pushing other registers onto the stack however, we
9317 can save an instruction in the epilogue by pushing the link register
9318 now and then popping it back into the PC. This incurs extra memory
9319 accesses though, so we only do it when optimizing for size, and only
9320 if we know that we will not need a fancy return sequence. */
9321 if (regs_ever_live [LR_REGNUM]
9322 || (save_reg_mask
9323 && optimize_size
9324 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9325 && !current_function_calls_eh_return))
9326 save_reg_mask |= 1 << LR_REGNUM;
9328 if (cfun->machine->lr_save_eliminated)
9329 save_reg_mask &= ~ (1 << LR_REGNUM);
9331 if (TARGET_REALLY_IWMMXT
9332 && ((bit_count (save_reg_mask)
9333 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
9335 unsigned int reg;
9337 /* The total number of registers that are going to be pushed
9338 onto the stack is odd. We need to ensure that the stack
9339 is 64-bit aligned before we start to save iWMMXt registers,
9340 and also before we start to create locals. (A local variable
9341 might be a double or long long which we will load/store using
9342 an iWMMXt instruction). Therefore we need to push another
9343 ARM register, so that the stack will be 64-bit aligned. We
9344 try to avoid using the arg registers (r0 - r3) as they might be
9345 used to pass values in a tail call. */
9346 for (reg = 4; reg <= 12; reg++)
9347 if ((save_reg_mask & (1 << reg)) == 0)
9348 break;
9350 if (reg <= 12)
9351 save_reg_mask |= (1 << reg);
9352 else
9354 cfun->machine->sibcall_blocked = 1;
9355 save_reg_mask |= (1 << 3);
9359 return save_reg_mask;
9363 /* Compute a bit mask of which registers need to be
9364 saved on the stack for the current function. */
9365 static unsigned long
9366 thumb_compute_save_reg_mask (void)
9368 unsigned long mask;
9369 unsigned reg;
9371 mask = 0;
9372 for (reg = 0; reg < 12; reg ++)
9373 if (regs_ever_live[reg] && !call_used_regs[reg])
9374 mask |= 1 << reg;
9376 if (flag_pic
9377 && !TARGET_SINGLE_PIC_BASE
9378 && arm_pic_register != INVALID_REGNUM
9379 && current_function_uses_pic_offset_table)
9380 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9382 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
9383 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
9384 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
9386 /* LR will also be pushed if any lo regs are pushed. */
9387 if (mask & 0xff || thumb_force_lr_save ())
9388 mask |= (1 << LR_REGNUM);
9390 /* Make sure we have a low work register if we need one.
9391 We will need one if we are going to push a high register,
9392 but we are not currently intending to push a low register. */
9393 if ((mask & 0xff) == 0
9394 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9396 /* Use thumb_find_work_register to choose which register
9397 we will use. If the register is live then we will
9398 have to push it. Use LAST_LO_REGNUM as our fallback
9399 choice for the register to select. */
9400 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9402 if (! call_used_regs[reg])
9403 mask |= 1 << reg;
9406 return mask;
9410 /* Return the number of bytes required to save VFP registers. */
9411 static int
9412 arm_get_vfp_saved_size (void)
9414 unsigned int regno;
9415 int count;
9416 int saved;
9418 saved = 0;
9419 /* Space for saved VFP registers. */
9420 if (TARGET_HARD_FLOAT && TARGET_VFP)
9422 count = 0;
9423 for (regno = FIRST_VFP_REGNUM;
9424 regno < LAST_VFP_REGNUM;
9425 regno += 2)
9427 if ((!regs_ever_live[regno] || call_used_regs[regno])
9428 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9430 if (count > 0)
9432 /* Work around the ARM10 VFPr1 bug. */
9433 if (count == 2 && !arm_arch6)
9434 count++;
9435 saved += count * 8 + 4;
9437 count = 0;
9439 else
9440 count++;
9442 if (count > 0)
9444 if (count == 2 && !arm_arch6)
9445 count++;
9446 saved += count * 8 + 4;
9449 return saved;
9453 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9454 everything bar the final return instruction. */
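/* For a normal function that pushed {r4, lr} and needs no special
   exit sequence, the code below builds the single instruction
   "ldmfd sp!, {r4, pc}", folding the return into the register
   restore.  */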
9455 const char *
9456 output_return_instruction (rtx operand, int really_return, int reverse)
9458 char conditional[10];
9459 char instr[100];
9460 unsigned reg;
9461 unsigned long live_regs_mask;
9462 unsigned long func_type;
9463 arm_stack_offsets *offsets;
9465 func_type = arm_current_func_type ();
9467 if (IS_NAKED (func_type))
9468 return "";
9470 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9472 /* If this function was declared non-returning, and we have
9473 found a tail call, then we have to trust that the called
9474 function won't return. */
9475 if (really_return)
9477 rtx ops[2];
9479 /* Otherwise, trap an attempted return by aborting. */
9480 ops[0] = operand;
9481 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9482 : "abort");
9483 assemble_external_libcall (ops[1]);
9484 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9487 return "";
9490 gcc_assert (!current_function_calls_alloca || really_return);
9492 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9494 return_used_this_function = 1;
9496 live_regs_mask = arm_compute_save_reg_mask ();
9498 if (live_regs_mask)
9500 const char * return_reg;
9502 /* If we do not have any special requirements for function exit
9503 (e.g. interworking, or ISR) then we can load the return address
9504 directly into the PC. Otherwise we must load it into LR. */
9505 if (really_return
9506 && ! TARGET_INTERWORK)
9507 return_reg = reg_names[PC_REGNUM];
9508 else
9509 return_reg = reg_names[LR_REGNUM];
9511 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9513 /* There are three possible reasons for the IP register
9514 being saved: 1) a stack frame was created, in which case
9515 IP contains the old stack pointer; 2) an ISR routine
9516 corrupted it; or 3) it was saved to align the stack on
9517 iWMMXt. In case 1, restore IP into SP, otherwise just
9518 restore IP. */
9519 if (frame_pointer_needed)
9521 live_regs_mask &= ~ (1 << IP_REGNUM);
9522 live_regs_mask |= (1 << SP_REGNUM);
9524 else
9525 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9528 /* On some ARM architectures it is faster to use LDR rather than
9529 LDM to load a single register. On other architectures, the
9530 cost is the same. In 26 bit mode, or for exception handlers,
9531 we have to use LDM to load the PC so that the CPSR is also
9532 restored. */
9533 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9534 if (live_regs_mask == (1U << reg))
9535 break;
9537 if (reg <= LAST_ARM_REGNUM
9538 && (reg != LR_REGNUM
9539 || ! really_return
9540 || ! IS_INTERRUPT (func_type)))
9542 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9543 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9545 else
9547 char *p;
9548 int first = 1;
9550 /* Generate the load multiple instruction to restore the
9551 registers. Note we can get here, even if
9552 frame_pointer_needed is true, but only if sp already
9553 points to the base of the saved core registers. */
9554 if (live_regs_mask & (1 << SP_REGNUM))
9556 unsigned HOST_WIDE_INT stack_adjust;
9558 offsets = arm_get_frame_offsets ();
9559 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9560 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9562 if (stack_adjust && arm_arch5)
9563 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9564 else
9566 /* If we can't use ldmib (SA110 bug),
9567 then try to pop r3 instead. */
9568 if (stack_adjust)
9569 live_regs_mask |= 1 << 3;
9570 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9573 else
9574 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9576 p = instr + strlen (instr);
9578 for (reg = 0; reg <= SP_REGNUM; reg++)
9579 if (live_regs_mask & (1 << reg))
9581 int l = strlen (reg_names[reg]);
9583 if (first)
9584 first = 0;
9585 else
9587 memcpy (p, ", ", 2);
9588 p += 2;
9591 memcpy (p, "%|", 2);
9592 memcpy (p + 2, reg_names[reg], l);
9593 p += l + 2;
9596 if (live_regs_mask & (1 << LR_REGNUM))
9598 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9599 /* If returning from an interrupt, restore the CPSR. */
9600 if (IS_INTERRUPT (func_type))
9601 strcat (p, "^");
9603 else
9604 strcpy (p, "}");
9607 output_asm_insn (instr, & operand);
9609 /* See if we need to generate an extra instruction to
9610 perform the actual function return. */
9611 if (really_return
9612 && func_type != ARM_FT_INTERWORKED
9613 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9615 /* The return has already been handled
9616 by loading the LR into the PC. */
9617 really_return = 0;
9621 if (really_return)
9623 switch ((int) ARM_FUNC_TYPE (func_type))
9625 case ARM_FT_ISR:
9626 case ARM_FT_FIQ:
9627 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9628 break;
9630 case ARM_FT_INTERWORKED:
9631 sprintf (instr, "bx%s\t%%|lr", conditional);
9632 break;
9634 case ARM_FT_EXCEPTION:
9635 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9636 break;
9638 default:
9639 /* Use bx if it's available. */
9640 if (arm_arch5 || arm_arch4t)
9641 sprintf (instr, "bx%s\t%%|lr", conditional);
9642 else
9643 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9644 break;
9647 output_asm_insn (instr, & operand);
9650 return "";
9653 /* Write the function name into the code section, directly preceding
9654 the function prologue.
9656 Code will be output similar to this:
9658 .ascii "arm_poke_function_name", 0
9659 .align
9661 .word 0xff000000 + (t1 - t0)
9662 arm_poke_function_name
9663 mov ip, sp
9664 stmfd sp!, {fp, ip, lr, pc}
9665 sub fp, ip, #4
9667 When performing a stack backtrace, code can inspect the value
9668 of 'pc' stored at 'fp' + 0. If the trace function then looks
9669 at location pc - 12 and the top 8 bits are set, then we know
9670 that there is a function name embedded immediately preceding this
9671 location, and that its length is ((pc[-3]) & ~0xff000000).
9673 We assume that pc is declared as a pointer to an unsigned long.
9675 It is of no benefit to output the function name if we are assembling
9676 a leaf function. These function types will not contain a stack
9677 backtrace structure; therefore it is not possible to determine the
9678 function name. */
9679 void
9680 arm_poke_function_name (FILE *stream, const char *name)
9682 unsigned long alignlength;
9683 unsigned long length;
9684 rtx x;
9686 length = strlen (name) + 1;
9687 alignlength = ROUND_UP_WORD (length);
9689 ASM_OUTPUT_ASCII (stream, name, length);
9690 ASM_OUTPUT_ALIGN (stream, 2);
9691 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9692 assemble_aligned_integer (UNITS_PER_WORD, x);
9695 /* Place some comments into the assembler stream
9696 describing the current function. */
9697 static void
9698 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9700 unsigned long func_type;
9702 if (!TARGET_ARM)
9704 thumb_output_function_prologue (f, frame_size);
9705 return;
9708 /* Sanity check. */
9709 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9711 func_type = arm_current_func_type ();
9713 switch ((int) ARM_FUNC_TYPE (func_type))
9715 default:
9716 case ARM_FT_NORMAL:
9717 break;
9718 case ARM_FT_INTERWORKED:
9719 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9720 break;
9721 case ARM_FT_ISR:
9722 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9723 break;
9724 case ARM_FT_FIQ:
9725 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9726 break;
9727 case ARM_FT_EXCEPTION:
9728 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9729 break;
9732 if (IS_NAKED (func_type))
9733 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9735 if (IS_VOLATILE (func_type))
9736 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9738 if (IS_NESTED (func_type))
9739 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9741 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9742 current_function_args_size,
9743 current_function_pretend_args_size, frame_size);
9745 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9746 frame_pointer_needed,
9747 cfun->machine->uses_anonymous_args);
9749 if (cfun->machine->lr_save_eliminated)
9750 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9752 if (current_function_calls_eh_return)
9753 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9755 #ifdef AOF_ASSEMBLER
9756 if (flag_pic)
9757 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9758 #endif
9760 return_used_this_function = 0;
9763 const char *
9764 arm_output_epilogue (rtx sibling)
9766 int reg;
9767 unsigned long saved_regs_mask;
9768 unsigned long func_type;
9769 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9770 frame that is $fp + 4 for a non-variadic function. */
9771 int floats_offset = 0;
9772 rtx operands[3];
9773 FILE * f = asm_out_file;
9774 unsigned int lrm_count = 0;
9775 int really_return = (sibling == NULL);
9776 int start_reg;
9777 arm_stack_offsets *offsets;
9779 /* If we have already generated the return instruction
9780 then it is futile to generate anything else. */
9781 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9782 return "";
9784 func_type = arm_current_func_type ();
9786 if (IS_NAKED (func_type))
9787 /* Naked functions don't have epilogues. */
9788 return "";
9790 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9792 rtx op;
9794 /* A volatile function should never return. Call abort. */
9795 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9796 assemble_external_libcall (op);
9797 output_asm_insn ("bl\t%a0", &op);
9799 return "";
9802 /* If we are throwing an exception, then we really must be doing a
9803 return, so we can't tail-call. */
9804 gcc_assert (!current_function_calls_eh_return || really_return);
9806 offsets = arm_get_frame_offsets ();
9807 saved_regs_mask = arm_compute_save_reg_mask ();
9809 if (TARGET_IWMMXT)
9810 lrm_count = bit_count (saved_regs_mask);
9812 floats_offset = offsets->saved_args;
9813 /* Compute how far away the floats will be. */
9814 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9815 if (saved_regs_mask & (1 << reg))
9816 floats_offset += 4;
9818 if (frame_pointer_needed)
9820 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9821 int vfp_offset = offsets->frame;
9823 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9825 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9826 if (regs_ever_live[reg] && !call_used_regs[reg])
9828 floats_offset += 12;
9829 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9830 reg, FP_REGNUM, floats_offset - vfp_offset);
9833 else
9835 start_reg = LAST_FPA_REGNUM;
9837 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9839 if (regs_ever_live[reg] && !call_used_regs[reg])
9841 floats_offset += 12;
9843 /* We can't unstack more than four registers at once. */
9844 if (start_reg - reg == 3)
9846 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9847 reg, FP_REGNUM, floats_offset - vfp_offset);
9848 start_reg = reg - 1;
9851 else
9853 if (reg != start_reg)
9854 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9855 reg + 1, start_reg - reg,
9856 FP_REGNUM, floats_offset - vfp_offset);
9857 start_reg = reg - 1;
9861 /* Just in case the last register checked also needs unstacking. */
9862 if (reg != start_reg)
9863 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9864 reg + 1, start_reg - reg,
9865 FP_REGNUM, floats_offset - vfp_offset);
9868 if (TARGET_HARD_FLOAT && TARGET_VFP)
9870 int saved_size;
9872 /* The fldmx insn does not have base+offset addressing modes,
9873 so we use IP to hold the address. */
9874 saved_size = arm_get_vfp_saved_size ();
9876 if (saved_size > 0)
9878 floats_offset += saved_size;
9879 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9880 FP_REGNUM, floats_offset - vfp_offset);
9882 start_reg = FIRST_VFP_REGNUM;
9883 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9885 if ((!regs_ever_live[reg] || call_used_regs[reg])
9886 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9888 if (start_reg != reg)
9889 arm_output_fldmx (f, IP_REGNUM,
9890 (start_reg - FIRST_VFP_REGNUM) / 2,
9891 (reg - start_reg) / 2);
9892 start_reg = reg + 2;
9895 if (start_reg != reg)
9896 arm_output_fldmx (f, IP_REGNUM,
9897 (start_reg - FIRST_VFP_REGNUM) / 2,
9898 (reg - start_reg) / 2);
9901 if (TARGET_IWMMXT)
9903 /* The frame pointer is guaranteed to be non-double-word aligned.
9904 This is because it is set to (old_stack_pointer - 4) and the
9905 old_stack_pointer was double word aligned. Thus the offset to
9906 the iWMMXt registers to be loaded must also be non-double-word
9907 sized, so that the resultant address *is* double-word aligned.
9908 We can ignore floats_offset since that was already included in
9909 the live_regs_mask. */
9910 lrm_count += (lrm_count % 2 ? 2 : 1);
9912 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9913 if (regs_ever_live[reg] && !call_used_regs[reg])
9915 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9916 reg, FP_REGNUM, lrm_count * 4);
9917 lrm_count += 2;
9921 /* saved_regs_mask should contain the IP, which at the time of stack
9922 frame generation actually contains the old stack pointer. So a
9923 quick way to unwind the stack is just to pop the IP register directly
9924 into the stack pointer. */
9925 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9926 saved_regs_mask &= ~ (1 << IP_REGNUM);
9927 saved_regs_mask |= (1 << SP_REGNUM);
9929 /* There are two registers left in saved_regs_mask - LR and PC. We
9930 only need to restore the LR register (the return address), but to
9931 save time we can load it directly into the PC, unless we need a
9932 special function exit sequence, or we are not really returning. */
9933 if (really_return
9934 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9935 && !current_function_calls_eh_return)
9936 /* Delete the LR from the register mask, so that the LR on
9937 the stack is loaded into the PC in the register mask. */
9938 saved_regs_mask &= ~ (1 << LR_REGNUM);
9939 else
9940 saved_regs_mask &= ~ (1 << PC_REGNUM);
9942 /* We must use SP as the base register, because SP is one of the
9943 registers being restored. If an interrupt or page fault
9944 happens in the ldm instruction, the SP might or might not
9945 have been restored. That would be bad, as then SP will no
9946 longer indicate the safe area of stack, and we can get stack
9947 corruption. Using SP as the base register means that it will
9948 be reset correctly to the original value, should an interrupt
9949 occur. If the stack pointer already points at the right
9950 place, then omit the subtraction. */
9951 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9952 || current_function_calls_alloca)
9953 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9954 4 * bit_count (saved_regs_mask));
9955 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9957 if (IS_INTERRUPT (func_type))
9958 /* Interrupt handlers will have pushed the
9959 IP onto the stack, so restore it now. */
9960 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9962 else
9964 /* Restore stack pointer if necessary. */
9965 if (offsets->outgoing_args != offsets->saved_regs)
9967 operands[0] = operands[1] = stack_pointer_rtx;
9968 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9969 output_add_immediate (operands);
9972 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9974 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9975 if (regs_ever_live[reg] && !call_used_regs[reg])
9976 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9977 reg, SP_REGNUM);
9979 else
9981 start_reg = FIRST_FPA_REGNUM;
9983 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9985 if (regs_ever_live[reg] && !call_used_regs[reg])
9987 if (reg - start_reg == 3)
9989 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9990 start_reg, SP_REGNUM);
9991 start_reg = reg + 1;
9994 else
9996 if (reg != start_reg)
9997 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9998 start_reg, reg - start_reg,
9999 SP_REGNUM);
10001 start_reg = reg + 1;
10005 /* Just in case the last register checked also needs unstacking. */
10006 if (reg != start_reg)
10007 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10008 start_reg, reg - start_reg, SP_REGNUM);
10011 if (TARGET_HARD_FLOAT && TARGET_VFP)
10013 start_reg = FIRST_VFP_REGNUM;
10014 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10016 if ((!regs_ever_live[reg] || call_used_regs[reg])
10017 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10019 if (start_reg != reg)
10020 arm_output_fldmx (f, SP_REGNUM,
10021 (start_reg - FIRST_VFP_REGNUM) / 2,
10022 (reg - start_reg) / 2);
10023 start_reg = reg + 2;
10026 if (start_reg != reg)
10027 arm_output_fldmx (f, SP_REGNUM,
10028 (start_reg - FIRST_VFP_REGNUM) / 2,
10029 (reg - start_reg) / 2);
10031 if (TARGET_IWMMXT)
10032 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10033 if (regs_ever_live[reg] && !call_used_regs[reg])
10034 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
10036 /* If we can, restore the LR into the PC. */
10037 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
10038 && really_return
10039 && current_function_pretend_args_size == 0
10040 && saved_regs_mask & (1 << LR_REGNUM)
10041 && !current_function_calls_eh_return)
10043 saved_regs_mask &= ~ (1 << LR_REGNUM);
10044 saved_regs_mask |= (1 << PC_REGNUM);
10047 /* Load the registers off the stack. If we only have one register
10048 to load, use the LDR instruction - it is faster. */
10049 if (saved_regs_mask == (1 << LR_REGNUM))
10051 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
10053 else if (saved_regs_mask)
10055 if (saved_regs_mask & (1 << SP_REGNUM))
10056 /* Note - write back to the stack register is not enabled
10057 (i.e. "ldmfd sp!..."). We know that the stack pointer is
10058 in the list of registers and if we add writeback the
10059 instruction becomes UNPREDICTABLE. */
10060 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
10061 else
10062 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
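/* The two cases above correspond to (illustrative):

       ldmfd sp, {..., sp, ...}     @ SP in the list: no writeback
       ldmfd sp!, {r4, r5, pc}      @ SP not in the list: writeback

   since an LDM with writeback whose register list contains the base
   register is architecturally UNPREDICTABLE.  */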
10065 if (current_function_pretend_args_size)
10067 /* Unwind the pre-pushed regs. */
10068 operands[0] = operands[1] = stack_pointer_rtx;
10069 operands[2] = GEN_INT (current_function_pretend_args_size);
10070 output_add_immediate (operands);
10074 /* We may have already restored PC directly from the stack. */
10075 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
10076 return "";
10078 /* Stack adjustment for exception handler. */
10079 if (current_function_calls_eh_return)
10080 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
10081 ARM_EH_STACKADJ_REGNUM);
10083 /* Generate the return instruction. */
10084 switch ((int) ARM_FUNC_TYPE (func_type))
10086 case ARM_FT_ISR:
10087 case ARM_FT_FIQ:
10088 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
10089 break;
10091 case ARM_FT_EXCEPTION:
10092 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10093 break;
10095 case ARM_FT_INTERWORKED:
10096 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10097 break;
10099 default:
10100 if (arm_arch5 || arm_arch4t)
10101 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10102 else
10103 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10104 break;
10107 return "";
10110 static void
10111 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10112 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
10114 arm_stack_offsets *offsets;
10116 if (TARGET_THUMB)
10118 int regno;
10120 /* Emit any call-via-reg trampolines that are needed for v4t support
10121 of call_reg and call_value_reg type insns. */
10122 for (regno = 0; regno < LR_REGNUM; regno++)
10124 rtx label = cfun->machine->call_via[regno];
10126 if (label != NULL)
10128 switch_to_section (function_section (current_function_decl));
10129 targetm.asm_out.internal_label (asm_out_file, "L",
10130 CODE_LABEL_NUMBER (label));
10131 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
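/* Each trampoline is just (illustrative; the label name is
   schematic)

       .Ln:
           bx  rN

   letting v4t Thumb code reach a register-indirect callee with an
   ordinary BL to the label.  */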
10135 /* ??? Probably not safe to set this here, since it assumes that a
10136 function will be emitted as assembly immediately after we generate
10137 RTL for it. This does not happen for inline functions. */
10138 return_used_this_function = 0;
10140 else
10142 /* We need to take into account any stack-frame rounding. */
10143 offsets = arm_get_frame_offsets ();
10145 gcc_assert (!use_return_insn (FALSE, NULL)
10146 || !return_used_this_function
10147 || offsets->saved_regs == offsets->outgoing_args
10148 || frame_pointer_needed);
10150 /* Reset the ARM-specific per-function variables. */
10151 after_arm_reorg = 0;
10155 /* Generate and emit an insn that we will recognize as a push_multi.
10156 Unfortunately, since this insn does not reflect very well the actual
10157 semantics of the operation, we need to annotate the insn for the benefit
10158 of DWARF2 frame unwind information. */
10159 static rtx
10160 emit_multi_reg_push (unsigned long mask)
10162 int num_regs = 0;
10163 int num_dwarf_regs;
10164 int i, j;
10165 rtx par;
10166 rtx dwarf;
10167 int dwarf_par_index;
10168 rtx tmp, reg;
10170 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10171 if (mask & (1 << i))
10172 num_regs++;
10174 gcc_assert (num_regs && num_regs <= 16);
10176 /* We don't record the PC in the dwarf frame information. */
10177 num_dwarf_regs = num_regs;
10178 if (mask & (1 << PC_REGNUM))
10179 num_dwarf_regs--;
10181 /* For the body of the insn we are going to generate an UNSPEC in
10182 parallel with several USEs. This allows the insn to be recognized
10183 by the push_multi pattern in the arm.md file. The insn looks
10184 something like this:
10186 (parallel [
10187 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
10188 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
10189 (use (reg:SI 11 fp))
10190 (use (reg:SI 12 ip))
10191 (use (reg:SI 14 lr))
10192 (use (reg:SI 15 pc))
10195 For the frame note however, we try to be more explicit and actually
10196 show each register being stored into the stack frame, plus a (single)
10197 decrement of the stack pointer. We do it this way in order to be
10198 friendly to the stack unwinding code, which only wants to see a single
10199 stack decrement per instruction. The RTL we generate for the note looks
10200 something like this:
10202 (sequence [
10203 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
10204 (set (mem:SI (reg:SI sp)) (reg:SI r4))
10205 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
10206 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
10207 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
10210 This sequence is used both by the code to support stack unwinding for
10211 exception handlers and the code to generate dwarf2 frame debugging. */
10213 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10214 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10215 dwarf_par_index = 1;
10217 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10219 if (mask & (1 << i))
10221 reg = gen_rtx_REG (SImode, i);
10223 XVECEXP (par, 0, 0)
10224 = gen_rtx_SET (VOIDmode,
10225 gen_frame_mem (BLKmode,
10226 gen_rtx_PRE_DEC (BLKmode,
10227 stack_pointer_rtx)),
10228 gen_rtx_UNSPEC (BLKmode,
10229 gen_rtvec (1, reg),
10230 UNSPEC_PUSH_MULT));
10232 if (i != PC_REGNUM)
10234 tmp = gen_rtx_SET (VOIDmode,
10235 gen_frame_mem (SImode, stack_pointer_rtx),
10236 reg);
10237 RTX_FRAME_RELATED_P (tmp) = 1;
10238 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10239 dwarf_par_index++;
10242 break;
10246 for (j = 1, i++; j < num_regs; i++)
10248 if (mask & (1 << i))
10250 reg = gen_rtx_REG (SImode, i);
10252 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10254 if (i != PC_REGNUM)
10257 tmp = gen_rtx_SET (VOIDmode,
10258 gen_frame_mem (SImode,
10259 plus_constant (stack_pointer_rtx,
10260 4 * j)),
10261 reg);
10262 RTX_FRAME_RELATED_P (tmp) = 1;
10263 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10266 j++;
10270 par = emit_insn (par);
10272 tmp = gen_rtx_SET (VOIDmode,
10273 stack_pointer_rtx,
10274 plus_constant (stack_pointer_rtx, -4 * num_regs));
10275 RTX_FRAME_RELATED_P (tmp) = 1;
10276 XVECEXP (dwarf, 0, 0) = tmp;
10278 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10279 REG_NOTES (par));
10280 return par;
10283 /* Calculate the size of the return value that is passed in registers. */
10284 static int
10285 arm_size_return_regs (void)
10287 enum machine_mode mode;
10289 if (current_function_return_rtx != 0)
10290 mode = GET_MODE (current_function_return_rtx);
10291 else
10292 mode = DECL_MODE (DECL_RESULT (current_function_decl));
10294 return GET_MODE_SIZE (mode);
10297 static rtx
10298 emit_sfm (int base_reg, int count)
10300 rtx par;
10301 rtx dwarf;
10302 rtx tmp, reg;
10303 int i;
10305 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10306 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10308 reg = gen_rtx_REG (XFmode, base_reg++);
10310 XVECEXP (par, 0, 0)
10311 = gen_rtx_SET (VOIDmode,
10312 gen_frame_mem (BLKmode,
10313 gen_rtx_PRE_DEC (BLKmode,
10314 stack_pointer_rtx)),
10315 gen_rtx_UNSPEC (BLKmode,
10316 gen_rtvec (1, reg),
10317 UNSPEC_PUSH_MULT));
10318 tmp = gen_rtx_SET (VOIDmode,
10319 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
10320 RTX_FRAME_RELATED_P (tmp) = 1;
10321 XVECEXP (dwarf, 0, 1) = tmp;
10323 for (i = 1; i < count; i++)
10325 reg = gen_rtx_REG (XFmode, base_reg++);
10326 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10328 tmp = gen_rtx_SET (VOIDmode,
10329 gen_frame_mem (XFmode,
10330 plus_constant (stack_pointer_rtx,
10331 i * 12)),
10332 reg);
10333 RTX_FRAME_RELATED_P (tmp) = 1;
10334 XVECEXP (dwarf, 0, i + 1) = tmp;
10337 tmp = gen_rtx_SET (VOIDmode,
10338 stack_pointer_rtx,
10339 plus_constant (stack_pointer_rtx, -12 * count));
10341 RTX_FRAME_RELATED_P (tmp) = 1;
10342 XVECEXP (dwarf, 0, 0) = tmp;
10344 par = emit_insn (par);
10345 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10346 REG_NOTES (par));
10347 return par;
10351 /* Return true if the current function needs to save/restore LR. */
10353 static bool
10354 thumb_force_lr_save (void)
10356 return !cfun->machine->lr_save_eliminated
10357 && (!leaf_function_p ()
10358 || thumb_far_jump_used_p ()
10359 || regs_ever_live [LR_REGNUM]);
10363 /* Compute the distance from register FROM to register TO.
10364 These can be the arg pointer (26), the soft frame pointer (25),
10365 the stack pointer (13) or the hard frame pointer (11).
10366 In thumb mode r7 is used as the soft frame pointer, if needed.
10367 Typical stack layout looks like this:
10369 old stack pointer -> | |
10370 ----
10371 | | \
10372 | | saved arguments for
10373 | | vararg functions
10374 | | /
10376 hard FP & arg pointer -> | | \
10377 | | stack
10378 | | frame
10379 | | /
10381 | | \
10382 | | call saved
10383 | | registers
10384 soft frame pointer -> | | /
10386 | | \
10387 | | local
10388 | | variables
10389 locals base pointer -> | | /
10391 | | \
10392 | | outgoing
10393 | | arguments
10394 current stack pointer -> | | /
10397 For a given function some or all of these stack components
10398 may not be needed, giving rise to the possibility of
10399 eliminating some of the registers.
10401 The values returned by this function must reflect the behavior
10402 of arm_expand_prologue() and arm_compute_save_reg_mask().
10404 The sign of the number returned reflects the direction of stack
10405 growth, so the values are positive for all eliminations except
10406 from the soft frame pointer to the hard frame pointer.
10408 SFP may point just inside the local variables block to ensure correct
10409 alignment. */
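/* Worked example (hypothetical figures, assuming
   CALLER_INTERWORKING_SLOT_SIZE is 0 and no doubleword padding): an
   ARM function saving {r4, fp, lr} (12 bytes), with 16 bytes of
   locals and 8 bytes of outgoing arguments, gets

       saved_args = 0, frame = 4, saved_regs = 12,
       soft_frame = 12, locals_base = 28, outgoing_args = 36

   from arm_get_frame_offsets () below.  */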
10412 /* Calculate stack offsets. These are used to calculate register elimination
10413 offsets and in prologue/epilogue code. */
10415 static arm_stack_offsets *
10416 arm_get_frame_offsets (void)
10418 struct arm_stack_offsets *offsets;
10419 unsigned long func_type;
10420 int leaf;
10421 int saved;
10422 HOST_WIDE_INT frame_size;
10424 offsets = &cfun->machine->stack_offsets;
10426 /* We need to know if we are a leaf function. Unfortunately, it
10427 is possible to be called after start_sequence has been called,
10428 which causes get_insns to return the insns for the sequence,
10429 not the function, which will cause leaf_function_p to return
10430 the incorrect result. To work around this, we cache the offsets;
10432 we only need to know about leaf functions once reload has completed, and the
10433 frame size cannot be changed after that time, so we can safely
10434 use the cached value. */
10436 if (reload_completed)
10437 return offsets;
10439 /* Initially this is the size of the local variables. It will be translated
10440 into an offset once we have determined the size of preceding data. */
10441 frame_size = ROUND_UP_WORD (get_frame_size ());
10443 leaf = leaf_function_p ();
10445 /* Space for variadic functions. */
10446 offsets->saved_args = current_function_pretend_args_size;
10448 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10450 if (TARGET_ARM)
10452 unsigned int regno;
10454 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10456 /* We know that SP will be doubleword aligned on entry, and we must
10457 preserve that condition at any subroutine call. We also require the
10458 soft frame pointer to be doubleword aligned. */
10460 if (TARGET_REALLY_IWMMXT)
10462 /* Check for the call-saved iWMMXt registers. */
10463 for (regno = FIRST_IWMMXT_REGNUM;
10464 regno <= LAST_IWMMXT_REGNUM;
10465 regno++)
10466 if (regs_ever_live [regno] && ! call_used_regs [regno])
10467 saved += 8;
10470 func_type = arm_current_func_type ();
10471 if (! IS_VOLATILE (func_type))
10473 /* Space for saved FPA registers. */
10474 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10475 if (regs_ever_live[regno] && ! call_used_regs[regno])
10476 saved += 12;
10478 /* Space for saved VFP registers. */
10479 if (TARGET_HARD_FLOAT && TARGET_VFP)
10480 saved += arm_get_vfp_saved_size ();
10483 else /* TARGET_THUMB */
10485 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10486 if (TARGET_BACKTRACE)
10487 saved += 16;
10490 /* Saved registers include the stack frame. */
10491 offsets->saved_regs = offsets->saved_args + saved;
10492 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10493 /* A leaf function does not need any stack alignment if it has nothing
10494 on the stack. */
10495 if (leaf && frame_size == 0)
10497 offsets->outgoing_args = offsets->soft_frame;
10498 return offsets;
10501 /* Ensure SFP has the correct alignment. */
10502 if (ARM_DOUBLEWORD_ALIGN
10503 && (offsets->soft_frame & 7))
10504 offsets->soft_frame += 4;
10506 offsets->locals_base = offsets->soft_frame + frame_size;
10507 offsets->outgoing_args = (offsets->locals_base
10508 + current_function_outgoing_args_size);
10510 if (ARM_DOUBLEWORD_ALIGN)
10512 /* Ensure SP remains doubleword aligned. */
10513 if (offsets->outgoing_args & 7)
10514 offsets->outgoing_args += 4;
10515 gcc_assert (!(offsets->outgoing_args & 7));
10518 return offsets;
10522 /* Calculate the relative offsets for the different stack pointers. Positive
10523 offsets are in the direction of stack growth. */
10525 HOST_WIDE_INT
10526 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10528 arm_stack_offsets *offsets;
10530 offsets = arm_get_frame_offsets ();
10532 /* OK, now we have enough information to compute the distances.
10533 There must be an entry in these switch tables for each pair
10534 of registers in ELIMINABLE_REGS, even if some of the entries
10535 seem to be redundant or useless. */
10536 switch (from)
10538 case ARG_POINTER_REGNUM:
10539 switch (to)
10541 case THUMB_HARD_FRAME_POINTER_REGNUM:
10542 return 0;
10544 case FRAME_POINTER_REGNUM:
10545 /* This is the reverse of the soft frame pointer
10546 to hard frame pointer elimination below. */
10547 return offsets->soft_frame - offsets->saved_args;
10549 case ARM_HARD_FRAME_POINTER_REGNUM:
10550 /* If there is no stack frame then the hard
10551 frame pointer and the arg pointer coincide. */
10552 if (offsets->frame == offsets->saved_regs)
10553 return 0;
10554 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10555 return (frame_pointer_needed
10556 && cfun->static_chain_decl != NULL
10557 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10559 case STACK_POINTER_REGNUM:
10560 /* If nothing has been pushed on the stack at all
10561 then this will return -4. This *is* correct! */
10562 return offsets->outgoing_args - (offsets->saved_args + 4);
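/* E.g. (illustrative): with nothing pushed at all, saved_args and
   outgoing_args are both 0, so this computes 0 - (0 + 4) = -4.  */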
10564 default:
10565 gcc_unreachable ();
10567 gcc_unreachable ();
10569 case FRAME_POINTER_REGNUM:
10570 switch (to)
10572 case THUMB_HARD_FRAME_POINTER_REGNUM:
10573 return 0;
10575 case ARM_HARD_FRAME_POINTER_REGNUM:
10576 /* The hard frame pointer points to the top entry in the
10577 stack frame. The soft frame pointer to the bottom entry
10578 in the stack frame. If there is no stack frame at all,
10579 then they are identical. */
10581 return offsets->frame - offsets->soft_frame;
10583 case STACK_POINTER_REGNUM:
10584 return offsets->outgoing_args - offsets->soft_frame;
10586 default:
10587 gcc_unreachable ();
10589 gcc_unreachable ();
10591 default:
10592 /* You cannot eliminate from the stack pointer.
10593 In theory you could eliminate from the hard frame
10594 pointer to the stack pointer, but this will never
10595 happen, since if a stack frame is not needed the
10596 hard frame pointer will never be used. */
10597 gcc_unreachable ();
10602 /* Generate the prologue instructions for entry into an ARM function. */
10603 void
10604 arm_expand_prologue (void)
10606 int reg;
10607 rtx amount;
10608 rtx insn;
10609 rtx ip_rtx;
10610 unsigned long live_regs_mask;
10611 unsigned long func_type;
10612 int fp_offset = 0;
10613 int saved_pretend_args = 0;
10614 int saved_regs = 0;
10615 unsigned HOST_WIDE_INT args_to_push;
10616 arm_stack_offsets *offsets;
10618 func_type = arm_current_func_type ();
10620 /* Naked functions don't have prologues. */
10621 if (IS_NAKED (func_type))
10622 return;
10624 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10625 args_to_push = current_function_pretend_args_size;
10627 /* Compute which registers we will have to save onto the stack. */
10628 live_regs_mask = arm_compute_save_reg_mask ();
10630 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10632 if (frame_pointer_needed)
10634 if (IS_INTERRUPT (func_type))
10636 /* Interrupt functions must not corrupt any registers.
10637 Creating a frame pointer however, corrupts the IP
10638 register, so we must push it first. */
10639 insn = emit_multi_reg_push (1 << IP_REGNUM);
10641 /* Do not set RTX_FRAME_RELATED_P on this insn.
10642 The dwarf stack unwinding code only wants to see one
10643 stack decrement per function, and this is not it. If
10644 this instruction is labeled as being part of the frame
10645 creation sequence then dwarf2out_frame_debug_expr will
10646 die when it encounters the assignment of IP to FP
10647 later on, since the use of SP here establishes SP as
10648 the CFA register and not IP.
10650 Anyway this instruction is not really part of the stack
10651 frame creation although it is part of the prologue. */
10653 else if (IS_NESTED (func_type))
10655 /* The static chain register is the same as the IP register, which is
10656 used as a scratch register during stack frame creation.
10657 To get around this we need to find somewhere to store IP
10658 whilst the frame is being created. We try the following
10659 places in order:
10661 1. The last argument register.
10662 2. A slot on the stack above the frame. (This only
10663 works if the function is not a varargs function).
10664 3. Register r3, after pushing the argument registers
10665 onto the stack.
10667 Note - we only need to tell the dwarf2 backend about the SP
10668 adjustment in the second variant; the static chain register
10669 doesn't need to be unwound, as it doesn't contain a value
10670 inherited from the caller. */
10672 if (regs_ever_live[3] == 0)
10673 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10674 else if (args_to_push == 0)
10676 rtx dwarf;
10678 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10679 insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
10680 fp_offset = 4;
10682 /* Just tell the dwarf backend that we adjusted SP. */
10683 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10684 plus_constant (stack_pointer_rtx,
10685 -fp_offset));
10686 RTX_FRAME_RELATED_P (insn) = 1;
10687 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10688 dwarf, REG_NOTES (insn));
10690 else
10692 /* Store the args on the stack. */
10693 if (cfun->machine->uses_anonymous_args)
10694 insn = emit_multi_reg_push
10695 ((0xf0 >> (args_to_push / 4)) & 0xf);
10696 else
10697 insn = emit_insn
10698 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10699 GEN_INT (- args_to_push)));
10701 RTX_FRAME_RELATED_P (insn) = 1;
10703 saved_pretend_args = 1;
10704 fp_offset = args_to_push;
10705 args_to_push = 0;
10707 /* Now reuse r3 to preserve IP. */
10708 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10712 insn = emit_set_insn (ip_rtx,
10713 plus_constant (stack_pointer_rtx, fp_offset));
10714 RTX_FRAME_RELATED_P (insn) = 1;
10717 if (args_to_push)
10719 /* Push the argument registers, or reserve space for them. */
10720 if (cfun->machine->uses_anonymous_args)
10721 insn = emit_multi_reg_push
10722 ((0xf0 >> (args_to_push / 4)) & 0xf);
10723 else
10724 insn = emit_insn
10725 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10726 GEN_INT (- args_to_push)));
10727 RTX_FRAME_RELATED_P (insn) = 1;
10730 /* If this is an interrupt service routine, and the link register
10731 is going to be pushed, and we are not creating a stack frame,
10732 (which would involve an extra push of IP and a pop in the epilogue)
10733 subtracting four from LR now will mean that the function return
10734 can be done with a single instruction. */
10735 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10736 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10737 && ! frame_pointer_needed)
10739 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
10741 emit_set_insn (lr, plus_constant (lr, -4));
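/* Sketch of the intended effect (assumed typical output): instead of
   popping LR and then returning with "subs pc, lr, #4", the
   pre-adjusted LR can later be popped straight into the PC, e.g.

       ldmfd sp!, {..., pc}^

   giving a single-instruction exception return.  */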
10744 if (live_regs_mask)
10746 insn = emit_multi_reg_push (live_regs_mask);
10747 saved_regs += bit_count (live_regs_mask) * 4;
10748 RTX_FRAME_RELATED_P (insn) = 1;
10751 if (TARGET_IWMMXT)
10752 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10753 if (regs_ever_live[reg] && ! call_used_regs [reg])
10755 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10756 insn = gen_frame_mem (V2SImode, insn);
10757 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
10758 RTX_FRAME_RELATED_P (insn) = 1;
10759 saved_regs += 8;
10762 if (! IS_VOLATILE (func_type))
10764 int start_reg;
10766 /* Save any floating point call-saved registers used by this
10767 function. */
10768 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10770 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10771 if (regs_ever_live[reg] && !call_used_regs[reg])
10773 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10774 insn = gen_frame_mem (XFmode, insn);
10775 insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
10776 RTX_FRAME_RELATED_P (insn) = 1;
10777 saved_regs += 12;
10780 else
10782 start_reg = LAST_FPA_REGNUM;
10784 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10786 if (regs_ever_live[reg] && !call_used_regs[reg])
10788 if (start_reg - reg == 3)
10790 insn = emit_sfm (reg, 4);
10791 RTX_FRAME_RELATED_P (insn) = 1;
10792 saved_regs += 48;
10793 start_reg = reg - 1;
10796 else
10798 if (start_reg != reg)
10800 insn = emit_sfm (reg + 1, start_reg - reg);
10801 RTX_FRAME_RELATED_P (insn) = 1;
10802 saved_regs += (start_reg - reg) * 12;
10804 start_reg = reg - 1;
10808 if (start_reg != reg)
10810 insn = emit_sfm (reg + 1, start_reg - reg);
10811 saved_regs += (start_reg - reg) * 12;
10812 RTX_FRAME_RELATED_P (insn) = 1;
10815 if (TARGET_HARD_FLOAT && TARGET_VFP)
10817 start_reg = FIRST_VFP_REGNUM;
10819 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10821 if ((!regs_ever_live[reg] || call_used_regs[reg])
10822 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10824 if (start_reg != reg)
10825 saved_regs += vfp_emit_fstmx (start_reg,
10826 (reg - start_reg) / 2);
10827 start_reg = reg + 2;
10830 if (start_reg != reg)
10831 saved_regs += vfp_emit_fstmx (start_reg,
10832 (reg - start_reg) / 2);
10836 if (frame_pointer_needed)
10838 /* Create the new frame pointer. */
10839 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10840 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10841 RTX_FRAME_RELATED_P (insn) = 1;
10843 if (IS_NESTED (func_type))
10845 /* Recover the static chain register. */
10846 if (regs_ever_live [3] == 0
10847 || saved_pretend_args)
10848 insn = gen_rtx_REG (SImode, 3);
10849 else /* if (current_function_pretend_args_size == 0) */
10851 insn = plus_constant (hard_frame_pointer_rtx, 4);
10852 insn = gen_frame_mem (SImode, insn);
10855 emit_set_insn (ip_rtx, insn);
10856 /* Add a USE to stop propagate_one_insn() from barfing. */
10857 emit_insn (gen_prologue_use (ip_rtx));
10861 offsets = arm_get_frame_offsets ();
10862 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10864 /* This add can produce multiple insns for a large constant, so we
10865 need to get tricky. */
10866 rtx last = get_last_insn ();
10868 amount = GEN_INT (offsets->saved_args + saved_regs
10869 - offsets->outgoing_args);
10871 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10872 amount));
10875 last = last ? NEXT_INSN (last) : get_insns ();
10876 RTX_FRAME_RELATED_P (last) = 1;
10878 while (last != insn);
10880 /* If the frame pointer is needed, emit a special barrier that
10881 will prevent the scheduler from moving stores to the frame
10882 before the stack adjustment. */
10883 if (frame_pointer_needed)
10884 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10885 hard_frame_pointer_rtx));
10889 if (flag_pic && arm_pic_register != INVALID_REGNUM)
10890 arm_load_pic_register (0UL);
10892 /* If we are profiling, make sure no instructions are scheduled before
10893 the call to mcount. Similarly if the user has requested no
10894 scheduling in the prolog. Similarly if we want non-call exceptions
10895 using the EABI unwinder, to prevent faulting instructions from being
10896 swapped with a stack adjustment. */
10897 if (current_function_profile || !TARGET_SCHED_PROLOG
10898 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
10899 emit_insn (gen_blockage ());
10901 /* If the link register is being kept alive, with the return address in it,
10902 then make sure that it does not get reused by the ce2 pass. */
10903 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10905 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10906 cfun->machine->lr_save_eliminated = 1;
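/* Illustrative prologue for a simple frame-pointer function (a
   sketch of typical output, not emitted verbatim):

       mov   ip, sp
       stmfd sp!, {r4, fp, ip, lr, pc}
       sub   fp, ip, #4
       sub   sp, sp, #16        @ locals + outgoing arguments

   matching the IP save, multi-register push, frame-pointer creation
   and stack adjustment steps above.  */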
10910 /* If CODE is 'd', then X is a condition operand and the instruction
10911 should only be executed if the condition is true.
10912 If CODE is 'D', then X is a condition operand and the instruction
10913 should only be executed if the condition is false: however, if the mode
10914 of the comparison is CCFPEmode, then always execute the instruction -- we
10915 do this because in these circumstances !GE does not necessarily imply LT;
10916 in these cases the instruction pattern will take care to make sure that
10917 an instruction containing %d will follow, thereby undoing the effects of
10918 doing this instruction unconditionally.
10919 If CODE is 'N' then X is a floating point operand that must be negated
10920 before output.
10921 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10922 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10923 void
10924 arm_print_operand (FILE *stream, rtx x, int code)
10926 switch (code)
10928 case '@':
10929 fputs (ASM_COMMENT_START, stream);
10930 return;
10932 case '_':
10933 fputs (user_label_prefix, stream);
10934 return;
10936 case '|':
10937 fputs (REGISTER_PREFIX, stream);
10938 return;
10940 case '?':
10941 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10943 if (TARGET_THUMB)
10945 output_operand_lossage ("predicated Thumb instruction");
10946 break;
10948 if (current_insn_predicate != NULL)
10950 output_operand_lossage
10951 ("predicated instruction in conditional sequence");
10952 break;
10955 fputs (arm_condition_codes[arm_current_cc], stream);
10957 else if (current_insn_predicate)
10959 enum arm_cond_code code;
10961 if (TARGET_THUMB)
10963 output_operand_lossage ("predicated Thumb instruction");
10964 break;
10967 code = get_arm_condition_code (current_insn_predicate);
10968 fputs (arm_condition_codes[code], stream);
10970 return;
10972 case 'N':
10974 REAL_VALUE_TYPE r;
10975 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10976 r = REAL_VALUE_NEGATE (r);
10977 fprintf (stream, "%s", fp_const_from_val (&r));
10979 return;
10981 case 'B':
10982 if (GET_CODE (x) == CONST_INT)
10984 HOST_WIDE_INT val;
10985 val = ARM_SIGN_EXTEND (~INTVAL (x));
10986 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10988 else
10990 putc ('~', stream);
10991 output_addr_const (stream, x);
10993 return;
10995 case 'i':
10996 fprintf (stream, "%s", arithmetic_instr (x, 1));
10997 return;
10999 /* Truncate Cirrus shift counts. */
11000 case 's':
11001 if (GET_CODE (x) == CONST_INT)
11003 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
11004 return;
11006 arm_print_operand (stream, x, 0);
11007 return;
11009 case 'I':
11010 fprintf (stream, "%s", arithmetic_instr (x, 0));
11011 return;
11013 case 'S':
11015 HOST_WIDE_INT val;
11016 const char *shift;
11018 if (!shift_operator (x, SImode))
11020 output_operand_lossage ("invalid shift operand");
11021 break;
11024 shift = shift_op (x, &val);
11026 if (shift)
11028 fprintf (stream, ", %s ", shift);
11029 if (val == -1)
11030 arm_print_operand (stream, XEXP (x, 1), 0);
11031 else
11032 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
11035 return;
11037 /* An explanation of the 'Q', 'R' and 'H' register operands:
11039 In a pair of registers containing a DI or DF value the 'Q'
11040 operand returns the register number of the register containing
11041 the least significant part of the value. The 'R' operand returns
11042 the register number of the register containing the most
11043 significant part of the value.
11045 The 'H' operand returns the higher of the two register numbers.
11046 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
11047 same as the 'Q' operand, since the most significant part of the
11048 value is held in the lower number register. The reverse is true
11049 on systems where WORDS_BIG_ENDIAN is false.
11051 The purpose of these operands is to distinguish between cases
11052 where the endian-ness of the values is important (for example
11053 when they are added together), and cases where the endian-ness
11054 is irrelevant, but the order of register operations is important.
11055 For example when loading a value from memory into a register
11056 pair, the endian-ness does not matter. Provided that the value
11057 from the lower memory address is put into the lower numbered
11058 register, and the value from the higher address is put into the
11059 higher numbered register, the load will work regardless of whether
11060 the value being loaded is big-wordian or little-wordian. The
11061 order of the two register loads can matter however, if the address
11062 of the memory location is actually held in one of the registers
11063 being overwritten by the load. */
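/* Example (illustrative): for a DI value held in r0/r1 on a target
   where WORDS_BIG_ENDIAN is false, %Q prints r0 (the least
   significant word), %R prints r1 (the most significant word), and
   %H prints r1 (the higher-numbered register) regardless of
   endianness.  */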
11064 case 'Q':
11065 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11067 output_operand_lossage ("invalid operand for code '%c'", code);
11068 return;
11071 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
11072 return;
11074 case 'R':
11075 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11077 output_operand_lossage ("invalid operand for code '%c'", code);
11078 return;
11081 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
11082 return;
11084 case 'H':
11085 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11087 output_operand_lossage ("invalid operand for code '%c'", code);
11088 return;
11091 asm_fprintf (stream, "%r", REGNO (x) + 1);
11092 return;
11094 case 'm':
11095 asm_fprintf (stream, "%r",
11096 GET_CODE (XEXP (x, 0)) == REG
11097 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
11098 return;
11100 case 'M':
11101 asm_fprintf (stream, "{%r-%r}",
11102 REGNO (x),
11103 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
11104 return;
11106 case 'd':
11107 /* CONST_TRUE_RTX means always -- that's the default. */
11108 if (x == const_true_rtx)
11109 return;
11111 if (!COMPARISON_P (x))
11113 output_operand_lossage ("invalid operand for code '%c'", code);
11114 return;
11117 fputs (arm_condition_codes[get_arm_condition_code (x)],
11118 stream);
11119 return;
11121 case 'D':
11122 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11123 want to do that. */
11124 if (x == const_true_rtx)
11126 output_operand_lossage ("instruction never executed");
11127 return;
11129 if (!COMPARISON_P (x))
11131 output_operand_lossage ("invalid operand for code '%c'", code);
11132 return;
11135 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
11136 (get_arm_condition_code (x))],
11137 stream);
11138 return;
11140 /* Cirrus registers can be accessed in a variety of ways:
11141 single floating point (f)
11142 double floating point (d)
11143 32-bit integer (fx)
11144 64-bit integer (dx). */
11145 case 'W': /* Cirrus register in F mode. */
11146 case 'X': /* Cirrus register in D mode. */
11147 case 'Y': /* Cirrus register in FX mode. */
11148 case 'Z': /* Cirrus register in DX mode. */
11149 gcc_assert (GET_CODE (x) == REG
11150 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
11152 fprintf (stream, "mv%s%s",
11153 code == 'W' ? "f"
11154 : code == 'X' ? "d"
11155 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
11157 return;
11159 /* Print cirrus register in the mode specified by the register's mode. */
11160 case 'V':
11162 int mode = GET_MODE (x);
11164 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
11166 output_operand_lossage ("invalid operand for code '%c'", code);
11167 return;
11170 fprintf (stream, "mv%s%s",
11171 mode == DFmode ? "d"
11172 : mode == SImode ? "fx"
11173 : mode == DImode ? "dx"
11174 : "f", reg_names[REGNO (x)] + 2);
11176 return;
11179 case 'U':
11180 if (GET_CODE (x) != REG
11181 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
11182 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
11183 /* Bad value for wCG register number. */
11185 output_operand_lossage ("invalid operand for code '%c'", code);
11186 return;
11189 else
11190 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
11191 return;
11193 /* Print an iWMMXt control register name. */
11194 case 'w':
11195 if (GET_CODE (x) != CONST_INT
11196 || INTVAL (x) < 0
11197 || INTVAL (x) >= 16)
11198 /* Bad value for wC register number. */
11200 output_operand_lossage ("invalid operand for code '%c'", code);
11201 return;
11204 else
11206 static const char * wc_reg_names [16] =
11208 "wCID", "wCon", "wCSSF", "wCASF",
11209 "wC4", "wC5", "wC6", "wC7",
11210 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
11211 "wC12", "wC13", "wC14", "wC15"
11214 fputs (wc_reg_names [INTVAL (x)], stream);
11216 return;
11218 /* Print a VFP double precision register name. */
11219 case 'P':
11221 int mode = GET_MODE (x);
11222 int num;
11224 if (mode != DImode && mode != DFmode)
11226 output_operand_lossage ("invalid operand for code '%c'", code);
11227 return;
11230 if (GET_CODE (x) != REG
11231 || !IS_VFP_REGNUM (REGNO (x)))
11233 output_operand_lossage ("invalid operand for code '%c'", code);
11234 return;
11237 num = REGNO(x) - FIRST_VFP_REGNUM;
11238 if (num & 1)
11240 output_operand_lossage ("invalid operand for code '%c'", code);
11241 return;
11244 fprintf (stream, "d%d", num >> 1);
11246 return;
11248 default:
11249 if (x == 0)
11251 output_operand_lossage ("missing operand");
11252 return;
11255 switch (GET_CODE (x))
11257 case REG:
11258 asm_fprintf (stream, "%r", REGNO (x));
11259 break;
11261 case MEM:
11262 output_memory_reference_mode = GET_MODE (x);
11263 output_address (XEXP (x, 0));
11264 break;
11266 case CONST_DOUBLE:
11267 fprintf (stream, "#%s", fp_immediate_constant (x));
11268 break;
11270 default:
11271 gcc_assert (GET_CODE (x) != NEG);
11272 fputc ('#', stream);
11273 output_addr_const (stream, x);
11274 break;
11279 #ifndef AOF_ASSEMBLER
11280 /* Target hook for assembling integer objects. The ARM version needs to
11281 handle word-sized values specially. */
11282 static bool
11283 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
11285 if (size == UNITS_PER_WORD && aligned_p)
11287 fputs ("\t.word\t", asm_out_file);
11288 output_addr_const (asm_out_file, x);
11290 /* Mark symbols as position independent. We only do this in the
11291 .text segment, not in the .data segment. */
11292 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
11293 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
11295 if (GET_CODE (x) == SYMBOL_REF
11296 && (CONSTANT_POOL_ADDRESS_P (x)
11297 || SYMBOL_REF_LOCAL_P (x)))
11298 fputs ("(GOTOFF)", asm_out_file);
11299 else if (GET_CODE (x) == LABEL_REF)
11300 fputs ("(GOTOFF)", asm_out_file);
11301 else
11302 fputs ("(GOT)", asm_out_file);
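/* E.g. (illustrative): this emits ".word sym(GOTOFF)" for local
   symbols, constant-pool entries and labels, and ".word sym(GOT)"
   for everything else, selecting the matching GOT-relative
   relocation.  */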
11304 fputc ('\n', asm_out_file);
11305 return true;
11308 if (arm_vector_mode_supported_p (GET_MODE (x)))
11310 int i, units;
11312 gcc_assert (GET_CODE (x) == CONST_VECTOR);
11314 units = CONST_VECTOR_NUNITS (x);
11316 switch (GET_MODE (x))
11318 case V2SImode: size = 4; break;
11319 case V4HImode: size = 2; break;
11320 case V8QImode: size = 1; break;
11321 default:
11322 gcc_unreachable ();
11325 for (i = 0; i < units; i++)
11327 rtx elt;
11329 elt = CONST_VECTOR_ELT (x, i);
11330 assemble_integer
11331 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
11334 return true;
11337 return default_assemble_integer (x, size, aligned_p);
11341 /* Add a function to the list of static constructors. */
11343 static void
11344 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
11346 if (!TARGET_AAPCS_BASED)
11348 default_named_section_asm_out_constructor (symbol, priority);
11349 return;
11352 /* Put these in the .init_array section, using a special relocation. */
11353 switch_to_section (ctors_section);
11354 assemble_align (POINTER_SIZE);
11355 fputs ("\t.word\t", asm_out_file);
11356 output_addr_const (asm_out_file, symbol);
11357 fputs ("(target1)\n", asm_out_file);
11359 #endif
11361 /* A finite state machine takes care of noticing whether or not instructions
11362 can be conditionally executed, and thus decrease execution time and code
11363 size by deleting branch instructions. The fsm is controlled by
11364 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
11366 /* The state of the fsm controlling condition codes are:
11367 0: normal, do nothing special
11368 1: make ASM_OUTPUT_OPCODE not output this instruction
11369 2: make ASM_OUTPUT_OPCODE not output this instruction
11370 3: make instructions conditional
11371 4: make instructions conditional
11373 State transitions (state->state by whom under condition):
11374 0 -> 1 final_prescan_insn if the `target' is a label
11375 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
11376 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
11377 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
11378 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
11379 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
11380 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
11381 (the target insn is arm_target_insn).
11383 If the jump clobbers the conditions then we use states 2 and 4.
11385 A similar thing can be done with conditional return insns.
11387 XXX In case the `target' is an unconditional branch, this conditionalising
11388 of the instructions always reduces code size, but not always execution
11389 time. But then, I want to reduce the code size to somewhere near what
11390 /bin/cc produces. */
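/* For example (illustrative only), the fsm lets

       cmp   r0, #0
       beq   .L1
       add   r1, r1, #1
   .L1:

   be emitted instead as

       cmp   r0, #0
       addne r1, r1, #1

   eliminating the branch altogether.  */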
11392 /* Returns the index of the ARM condition code string in
11393 `arm_condition_codes'. COMPARISON should be an rtx like
11394 `(eq (...) (...))'. */
11395 static enum arm_cond_code
11396 get_arm_condition_code (rtx comparison)
11398 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11399 int code;
11400 enum rtx_code comp_code = GET_CODE (comparison);
11402 if (GET_MODE_CLASS (mode) != MODE_CC)
11403 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11404 XEXP (comparison, 1));
11406 switch (mode)
11408 case CC_DNEmode: code = ARM_NE; goto dominance;
11409 case CC_DEQmode: code = ARM_EQ; goto dominance;
11410 case CC_DGEmode: code = ARM_GE; goto dominance;
11411 case CC_DGTmode: code = ARM_GT; goto dominance;
11412 case CC_DLEmode: code = ARM_LE; goto dominance;
11413 case CC_DLTmode: code = ARM_LT; goto dominance;
11414 case CC_DGEUmode: code = ARM_CS; goto dominance;
11415 case CC_DGTUmode: code = ARM_HI; goto dominance;
11416 case CC_DLEUmode: code = ARM_LS; goto dominance;
11417 case CC_DLTUmode: code = ARM_CC;
11419 dominance:
11420 gcc_assert (comp_code == EQ || comp_code == NE);
11422 if (comp_code == EQ)
11423 return ARM_INVERSE_CONDITION_CODE (code);
11424 return code;
11426 case CC_NOOVmode:
11427 switch (comp_code)
11429 case NE: return ARM_NE;
11430 case EQ: return ARM_EQ;
11431 case GE: return ARM_PL;
11432 case LT: return ARM_MI;
11433 default: gcc_unreachable ();
11436 case CC_Zmode:
11437 switch (comp_code)
11439 case NE: return ARM_NE;
11440 case EQ: return ARM_EQ;
11441 default: gcc_unreachable ();
11444 case CC_Nmode:
11445 switch (comp_code)
11447 case NE: return ARM_MI;
11448 case EQ: return ARM_PL;
11449 default: gcc_unreachable ();
11452 case CCFPEmode:
11453 case CCFPmode:
11454 /* These encodings assume that AC=1 in the FPA system control
11455 byte. This allows us to handle all cases except UNEQ and
11456 LTGT. */
11457 switch (comp_code)
11459 case GE: return ARM_GE;
11460 case GT: return ARM_GT;
11461 case LE: return ARM_LS;
11462 case LT: return ARM_MI;
11463 case NE: return ARM_NE;
11464 case EQ: return ARM_EQ;
11465 case ORDERED: return ARM_VC;
11466 case UNORDERED: return ARM_VS;
11467 case UNLT: return ARM_LT;
11468 case UNLE: return ARM_LE;
11469 case UNGT: return ARM_HI;
11470 case UNGE: return ARM_PL;
11471 /* UNEQ and LTGT do not have a representation. */
11472 case UNEQ: /* Fall through. */
11473 case LTGT: /* Fall through. */
11474 default: gcc_unreachable ();
11477 case CC_SWPmode:
11478 switch (comp_code)
11480 case NE: return ARM_NE;
11481 case EQ: return ARM_EQ;
11482 case GE: return ARM_LE;
11483 case GT: return ARM_LT;
11484 case LE: return ARM_GE;
11485 case LT: return ARM_GT;
11486 case GEU: return ARM_LS;
11487 case GTU: return ARM_CC;
11488 case LEU: return ARM_CS;
11489 case LTU: return ARM_HI;
11490 default: gcc_unreachable ();
11493 case CC_Cmode:
11494 switch (comp_code)
11496 case LTU: return ARM_CS;
11497 case GEU: return ARM_CC;
11498 default: gcc_unreachable ();
11501 case CCmode:
11502 switch (comp_code)
11504 case NE: return ARM_NE;
11505 case EQ: return ARM_EQ;
11506 case GE: return ARM_GE;
11507 case GT: return ARM_GT;
11508 case LE: return ARM_LE;
11509 case LT: return ARM_LT;
11510 case GEU: return ARM_CS;
11511 case GTU: return ARM_HI;
11512 case LEU: return ARM_LS;
11513 case LTU: return ARM_CC;
11514 default: gcc_unreachable ();
11517 default: gcc_unreachable ();
11521 void
11522 arm_final_prescan_insn (rtx insn)
11524 /* BODY will hold the body of INSN. */
11525 rtx body = PATTERN (insn);
11527 /* This will be 1 if trying to repeat the trick, and things need to be
11528 reversed if it appears to fail. */
11529 int reverse = 0;
11531 /* JUMP_CLOBBERS being one implies that the condition codes are
11532 clobbered if the branch is taken, even if the rtl suggests otherwise. It also
11533 means that we have to grub around within the jump expression to find
11534 out what the conditions are when the jump isn't taken. */
11535 int jump_clobbers = 0;
11537 /* If we start with a return insn, we only succeed if we find another one. */
11538 int seeking_return = 0;
11540 /* START_INSN will hold the insn from where we start looking. This is the
11541 first insn after the following code_label if REVERSE is true. */
11542 rtx start_insn = insn;
11544 /* If in state 4, check if the target branch is reached, in order to
11545 change back to state 0. */
11546 if (arm_ccfsm_state == 4)
11548 if (insn == arm_target_insn)
11550 arm_target_insn = NULL;
11551 arm_ccfsm_state = 0;
11553 return;
11556 /* If in state 3, it is possible to repeat the trick, if this insn is an
11557 unconditional branch to a label, and immediately following this branch
11558 is the previous target label which is only used once, and the label this
11559 branch jumps to is not too far off. */
11560 if (arm_ccfsm_state == 3)
11562 if (simplejump_p (insn))
11564 start_insn = next_nonnote_insn (start_insn);
11565 if (GET_CODE (start_insn) == BARRIER)
11567 /* XXX Isn't this always a barrier? */
11568 start_insn = next_nonnote_insn (start_insn);
11570 if (GET_CODE (start_insn) == CODE_LABEL
11571 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11572 && LABEL_NUSES (start_insn) == 1)
11573 reverse = TRUE;
11574 else
11575 return;
11577 else if (GET_CODE (body) == RETURN)
11579 start_insn = next_nonnote_insn (start_insn);
11580 if (GET_CODE (start_insn) == BARRIER)
11581 start_insn = next_nonnote_insn (start_insn);
11582 if (GET_CODE (start_insn) == CODE_LABEL
11583 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11584 && LABEL_NUSES (start_insn) == 1)
11586 reverse = TRUE;
11587 seeking_return = 1;
11589 else
11590 return;
11592 else
11593 return;
11596 gcc_assert (!arm_ccfsm_state || reverse);
11597 if (GET_CODE (insn) != JUMP_INSN)
11598 return;
11600 /* This jump might be paralleled with a clobber of the condition codes;
11601 the jump should always come first. */
11602 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11603 body = XVECEXP (body, 0, 0);
11605 if (reverse
11606 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11607 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11609 int insns_skipped;
11610 int fail = FALSE, succeed = FALSE;
11611 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11612 int then_not_else = TRUE;
11613 rtx this_insn = start_insn, label = 0;
11615 /* If the jump cannot be done with one instruction, we cannot
11616 conditionally execute the instruction in the inverse case. */
11617 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11619 jump_clobbers = 1;
11620 return;
11623 /* Register the insn jumped to. */
11624 if (reverse)
11626 if (!seeking_return)
11627 label = XEXP (SET_SRC (body), 0);
11629 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11630 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11631 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11633 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11634 then_not_else = FALSE;
11636 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11637 seeking_return = 1;
11638 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11640 seeking_return = 1;
11641 then_not_else = FALSE;
11643 else
11644 gcc_unreachable ();
11646 /* See how many insns this branch skips, and what kind of insns. If all
11647 insns are okay, and the label or unconditional branch to the same
11648 label is not too far away, succeed. */
11649 for (insns_skipped = 0;
11650 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11652 rtx scanbody;
11654 this_insn = next_nonnote_insn (this_insn);
11655 if (!this_insn)
11656 break;
11658 switch (GET_CODE (this_insn))
11660 case CODE_LABEL:
11661 /* Succeed if it is the target label, otherwise fail since
11662 control falls in from somewhere else. */
11663 if (this_insn == label)
11665 if (jump_clobbers)
11667 arm_ccfsm_state = 2;
11668 this_insn = next_nonnote_insn (this_insn);
11670 else
11671 arm_ccfsm_state = 1;
11672 succeed = TRUE;
11674 else
11675 fail = TRUE;
11676 break;
11678 case BARRIER:
11679 /* Succeed if the following insn is the target label.
11680 Otherwise fail.
11681 If return insns are used then the last insn in a function
11682 will be a barrier. */
11683 this_insn = next_nonnote_insn (this_insn);
11684 if (this_insn && this_insn == label)
11686 if (jump_clobbers)
11688 arm_ccfsm_state = 2;
11689 this_insn = next_nonnote_insn (this_insn);
11691 else
11692 arm_ccfsm_state = 1;
11693 succeed = TRUE;
11695 else
11696 fail = TRUE;
11697 break;
11699 case CALL_INSN:
11700 /* The AAPCS says that conditional calls should not be
11701 used since they make interworking inefficient (the
11702 linker can't transform BL<cond> into BLX). That's
11703 only a problem if the machine has BLX. */
11704 if (arm_arch5)
11706 fail = TRUE;
11707 break;
11710 /* Succeed if the following insn is the target label, or
11711 if the following two insns are a barrier and the
11712 target label. */
11713 this_insn = next_nonnote_insn (this_insn);
11714 if (this_insn && GET_CODE (this_insn) == BARRIER)
11715 this_insn = next_nonnote_insn (this_insn);
11717 if (this_insn && this_insn == label
11718 && insns_skipped < max_insns_skipped)
11720 if (jump_clobbers)
11722 arm_ccfsm_state = 2;
11723 this_insn = next_nonnote_insn (this_insn);
11725 else
11726 arm_ccfsm_state = 1;
11727 succeed = TRUE;
11729 else
11730 fail = TRUE;
11731 break;
11733 case JUMP_INSN:
11734 /* If this is an unconditional branch to the same label, succeed.
11735 If it is to another label, do nothing. If it is conditional,
11736 fail. */
11737 /* XXX Probably, the tests for SET and the PC are
11738 unnecessary. */
11740 scanbody = PATTERN (this_insn);
11741 if (GET_CODE (scanbody) == SET
11742 && GET_CODE (SET_DEST (scanbody)) == PC)
11744 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11745 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11747 arm_ccfsm_state = 2;
11748 succeed = TRUE;
11750 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11751 fail = TRUE;
11753 /* Fail if a conditional return is undesirable (e.g. on a
11754 StrongARM), but still allow this if optimizing for size. */
11755 else if (GET_CODE (scanbody) == RETURN
11756 && !use_return_insn (TRUE, NULL)
11757 && !optimize_size)
11758 fail = TRUE;
11759 else if (GET_CODE (scanbody) == RETURN
11760 && seeking_return)
11762 arm_ccfsm_state = 2;
11763 succeed = TRUE;
11765 else if (GET_CODE (scanbody) == PARALLEL)
11767 switch (get_attr_conds (this_insn))
11769 case CONDS_NOCOND:
11770 break;
11771 default:
11772 fail = TRUE;
11773 break;
11776 else
11777 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11779 break;
11781 case INSN:
11782 /* Instructions using or affecting the condition codes make it
11783 fail. */
11784 scanbody = PATTERN (this_insn);
11785 if (!(GET_CODE (scanbody) == SET
11786 || GET_CODE (scanbody) == PARALLEL)
11787 || get_attr_conds (this_insn) != CONDS_NOCOND)
11788 fail = TRUE;
11790 /* A conditional Cirrus instruction must be followed by
11791 a non-Cirrus instruction. However, we conditionalize
11792 instructions in this function, and by the time we get
11793 here we can no longer insert instructions (nops),
11794 because shorten_branches() has already been called.
11795 So we simply disable conditionalizing Cirrus
11796 instructions, to be safe. */
11797 if (GET_CODE (scanbody) != USE
11798 && GET_CODE (scanbody) != CLOBBER
11799 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11800 fail = TRUE;
11801 break;
11803 default:
11804 break;
11807 if (succeed)
11809 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11810 arm_target_label = CODE_LABEL_NUMBER (label);
11811 else
11813 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11815 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11817 this_insn = next_nonnote_insn (this_insn);
11818 gcc_assert (!this_insn
11819 || (GET_CODE (this_insn) != BARRIER
11820 && GET_CODE (this_insn) != CODE_LABEL));
11822 if (!this_insn)
11824 /* Oh, dear! We ran off the end... give up. */
11825 recog (PATTERN (insn), insn, NULL);
11826 arm_ccfsm_state = 0;
11827 arm_target_insn = NULL;
11828 return;
11830 arm_target_insn = this_insn;
11832 if (jump_clobbers)
11834 gcc_assert (!reverse);
11835 arm_current_cc =
11836 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11837 0), 0), 1));
11838 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11839 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11840 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11841 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11843 else
11845 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11846 what it was. */
11847 if (!reverse)
11848 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11849 0));
11852 if (reverse || then_not_else)
11853 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11856 /* Restore recog_data (getting the attributes of other insns can
11857 destroy this array, but final.c assumes that it remains intact
11858 across this call; since the insn has been recognized already we
11859 call recog direct). */
11860 recog (PATTERN (insn), insn, NULL);
11864 /* Returns true if REGNO is a valid register
11865 for holding a quantity of type MODE. */
11866 int
11867 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11869 if (GET_MODE_CLASS (mode) == MODE_CC)
11870 return (regno == CC_REGNUM
11871 || (TARGET_HARD_FLOAT && TARGET_VFP
11872 && regno == VFPCC_REGNUM));
11874 if (TARGET_THUMB)
11875 /* For the Thumb we only allow values bigger than SImode in
11876 registers 0 - 6, so that there is always a second low
11877 register available to hold the upper part of the value.
11878 We probably ought to ensure that the register is the
11879 start of an even numbered register pair. */
11880 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11882 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
11883 && IS_CIRRUS_REGNUM (regno))
11884 /* We have outlawed SI values in Cirrus registers because they
11885 reside in the lower 32 bits, but SF values reside in the
11886 upper 32 bits. This causes gcc all sorts of grief. We can't
11887 even split the registers into pairs because Cirrus SI values
11888 get sign extended to 64bits-- aldyh. */
11889 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11891 if (TARGET_HARD_FLOAT && TARGET_VFP
11892 && IS_VFP_REGNUM (regno))
11894 if (mode == SFmode || mode == SImode)
11895 return TRUE;
11897 /* DFmode values are only valid in even register pairs. */
11898 if (mode == DFmode)
11899 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11900 return FALSE;
11903 if (TARGET_REALLY_IWMMXT)
11905 if (IS_IWMMXT_GR_REGNUM (regno))
11906 return mode == SImode;
11908 if (IS_IWMMXT_REGNUM (regno))
11909 return VALID_IWMMXT_REG_MODE (mode);
11912 /* We allow any value to be stored in the general registers.
11913 Restrict doubleword quantities to even register pairs so that we can
11914 use ldrd. */
11915 if (regno <= LAST_ARM_REGNUM)
11916 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11918 if (regno == FRAME_POINTER_REGNUM
11919 || regno == ARG_POINTER_REGNUM)
11920 /* We only allow integers in the fake hard registers. */
11921 return GET_MODE_CLASS (mode) == MODE_INT;
11923 /* The only registers left are the FPA registers
11924 which we only allow to hold FP values. */
11925 return (TARGET_HARD_FLOAT && TARGET_FPA
11926 && GET_MODE_CLASS (mode) == MODE_FLOAT
11927 && regno >= FIRST_FPA_REGNUM
11928 && regno <= LAST_FPA_REGNUM);
11931 enum reg_class
11932 arm_regno_class (int regno)
11934 if (TARGET_THUMB)
11936 if (regno == STACK_POINTER_REGNUM)
11937 return STACK_REG;
11938 if (regno == CC_REGNUM)
11939 return CC_REG;
11940 if (regno < 8)
11941 return LO_REGS;
11942 return HI_REGS;
11945 if ( regno <= LAST_ARM_REGNUM
11946 || regno == FRAME_POINTER_REGNUM
11947 || regno == ARG_POINTER_REGNUM)
11948 return GENERAL_REGS;
11950 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11951 return NO_REGS;
11953 if (IS_CIRRUS_REGNUM (regno))
11954 return CIRRUS_REGS;
11956 if (IS_VFP_REGNUM (regno))
11957 return VFP_REGS;
11959 if (IS_IWMMXT_REGNUM (regno))
11960 return IWMMXT_REGS;
11962 if (IS_IWMMXT_GR_REGNUM (regno))
11963 return IWMMXT_GR_REGS;
11965 return FPA_REGS;
11968 /* Handle a special case when computing the offset
11969 of an argument from the frame pointer. */
11970 int
11971 arm_debugger_arg_offset (int value, rtx addr)
11973 rtx insn;
11975 /* We are only interested if dbxout_parms() failed to compute the offset. */
11976 if (value != 0)
11977 return 0;
11979 /* We can only cope with the case where the address is held in a register. */
11980 if (GET_CODE (addr) != REG)
11981 return 0;
11983 /* If we are using the frame pointer to point at the argument, then
11984 an offset of 0 is correct. */
11985 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11986 return 0;
11988 /* If we are using the stack pointer to point at the
11989 argument, then an offset of 0 is correct. */
11990 if ((TARGET_THUMB || !frame_pointer_needed)
11991 && REGNO (addr) == SP_REGNUM)
11992 return 0;
11994 /* Oh dear. The argument is pointed to by a register rather
11995 than being held in a register, or being stored at a known
11996 offset from the frame pointer. Since GDB only understands
11997 those two kinds of argument we must translate the address
11998 held in the register into an offset from the frame pointer.
11999 We do this by searching through the insns for the function
12000 looking to see where this register gets its value. If the
12001 register is initialized from the frame pointer plus an offset
12002 then we are in luck and we can continue, otherwise we give up.
12004 This code is exercised by producing debugging information
12005 for a function with arguments like this:
12007 double func (double a, double b, int c, double d) {return d;}
12009 Without this code the stab for parameter 'd' will be set to
12010 an offset of 0 from the frame pointer, rather than 8. */
12012 /* The if() statement says:
12014 If the insn is a normal instruction
12015 and if the insn is setting the value in a register
12016 and if the register being set is the register holding the address of the argument
12017 and if the address is computed by an addition
12018 that involves adding to a register
12019 which is the frame pointer
12020 a constant integer
12022 then... */
12024 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12026 if ( GET_CODE (insn) == INSN
12027 && GET_CODE (PATTERN (insn)) == SET
12028 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
12029 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
12030 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
12031 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
12032 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
12035 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
12037 break;
12041 if (value == 0)
12043 debug_rtx (addr);
12044 warning (0, "unable to compute real location of stacked parameter");
12045 value = 8; /* XXX magic hack */
12048 return value;
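/* As an illustrative sketch (insn and register numbers are
   hypothetical), the search loop above would accept an insn of
   the shape

     (insn 42 41 43 (set (reg:SI 4 r4)
	     (plus:SI (reg:SI 11 fp)
		      (const_int 8))) ...)

   and return 8 as the offset of the argument addressed by r4.  */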
12051 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
12052 do \
12054 if ((MASK) & insn_flags) \
12055 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
12056 BUILT_IN_MD, NULL, NULL_TREE); \
12058 while (0)
12060 struct builtin_description
12062 const unsigned int mask;
12063 const enum insn_code icode;
12064 const char * const name;
12065 const enum arm_builtins code;
12066 const enum rtx_code comparison;
12067 const unsigned int flag;
12070 static const struct builtin_description bdesc_2arg[] =
12072 #define IWMMXT_BUILTIN(code, string, builtin) \
12073 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
12074 ARM_BUILTIN_##builtin, 0, 0 },
12076 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
12077 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
12078 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
12079 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
12080 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
12081 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
12082 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
12083 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
12084 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
12085 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
12086 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
12087 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
12088 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
12089 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
12090 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
12091 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
12092 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
12093 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
12094 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
12095 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
12096 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
12097 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
12098 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
12099 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
12100 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
12101 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
12102 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
12103 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
12104 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
12105 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
12106 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
12107 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
12108 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
12109 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
12110 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
12111 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
12112 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
12113 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
12114 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
12115 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
12116 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
12117 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
12118 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
12119 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
12120 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
12121 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
12122 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
12123 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
12124 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
12125 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
12126 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
12127 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
12128 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
12129 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
12130 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
12131 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
12132 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
12133 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
12135 #define IWMMXT_BUILTIN2(code, builtin) \
12136 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
12138 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
12139 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
12140 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
12141 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
12142 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
12143 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
12144 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
12145 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
12146 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
12147 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
12148 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
12149 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
12150 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
12151 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
12152 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
12153 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
12154 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
12155 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
12156 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
12157 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
12158 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
12159 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
12160 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
12161 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
12162 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
12163 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
12164 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
12165 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
12166 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
12167 IWMMXT_BUILTIN2 (rordi3, WRORDI)
12168 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
12169 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
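/* A user-level sketch of how a two-operand entry in bdesc_2arg is
   reached (the typedef and function below are hypothetical example
   code, compiled with iWMMXt enabled; __builtin_arm_waddb is the
   "waddb" entry above).  */
#if 0
typedef char v8qi __attribute__ ((vector_size (8)));

v8qi
add_bytes (v8qi a, v8qi b)
{
  /* Expands via arm_expand_binop_builtin to the addv8qi3 pattern.  */
  return __builtin_arm_waddb (a, b);
}
#endif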
12172 static const struct builtin_description bdesc_1arg[] =
12174 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
12175 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
12176 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
12177 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
12178 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
12179 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
12180 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
12181 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
12182 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
12183 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
12184 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
12185 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
12186 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
12187 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
12188 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
12189 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
12190 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
12191 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
12194 /* Set up all the iWMMXt builtins. This is
12195 not called if TARGET_IWMMXT is zero. */
12197 static void
12198 arm_init_iwmmxt_builtins (void)
12200 const struct builtin_description * d;
12201 size_t i;
12202 tree endlink = void_list_node;
12204 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
12205 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
12206 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
12208 tree int_ftype_int
12209 = build_function_type (integer_type_node,
12210 tree_cons (NULL_TREE, integer_type_node, endlink));
12211 tree v8qi_ftype_v8qi_v8qi_int
12212 = build_function_type (V8QI_type_node,
12213 tree_cons (NULL_TREE, V8QI_type_node,
12214 tree_cons (NULL_TREE, V8QI_type_node,
12215 tree_cons (NULL_TREE,
12216 integer_type_node,
12217 endlink))));
12218 tree v4hi_ftype_v4hi_int
12219 = build_function_type (V4HI_type_node,
12220 tree_cons (NULL_TREE, V4HI_type_node,
12221 tree_cons (NULL_TREE, integer_type_node,
12222 endlink)));
12223 tree v2si_ftype_v2si_int
12224 = build_function_type (V2SI_type_node,
12225 tree_cons (NULL_TREE, V2SI_type_node,
12226 tree_cons (NULL_TREE, integer_type_node,
12227 endlink)));
12228 tree v2si_ftype_di_di
12229 = build_function_type (V2SI_type_node,
12230 tree_cons (NULL_TREE, long_long_integer_type_node,
12231 tree_cons (NULL_TREE, long_long_integer_type_node,
12232 endlink)));
12233 tree di_ftype_di_int
12234 = build_function_type (long_long_integer_type_node,
12235 tree_cons (NULL_TREE, long_long_integer_type_node,
12236 tree_cons (NULL_TREE, integer_type_node,
12237 endlink)));
12238 tree di_ftype_di_int_int
12239 = build_function_type (long_long_integer_type_node,
12240 tree_cons (NULL_TREE, long_long_integer_type_node,
12241 tree_cons (NULL_TREE, integer_type_node,
12242 tree_cons (NULL_TREE,
12243 integer_type_node,
12244 endlink))));
12245 tree int_ftype_v8qi
12246 = build_function_type (integer_type_node,
12247 tree_cons (NULL_TREE, V8QI_type_node,
12248 endlink));
12249 tree int_ftype_v4hi
12250 = build_function_type (integer_type_node,
12251 tree_cons (NULL_TREE, V4HI_type_node,
12252 endlink));
12253 tree int_ftype_v2si
12254 = build_function_type (integer_type_node,
12255 tree_cons (NULL_TREE, V2SI_type_node,
12256 endlink));
12257 tree int_ftype_v8qi_int
12258 = build_function_type (integer_type_node,
12259 tree_cons (NULL_TREE, V8QI_type_node,
12260 tree_cons (NULL_TREE, integer_type_node,
12261 endlink)));
12262 tree int_ftype_v4hi_int
12263 = build_function_type (integer_type_node,
12264 tree_cons (NULL_TREE, V4HI_type_node,
12265 tree_cons (NULL_TREE, integer_type_node,
12266 endlink)));
12267 tree int_ftype_v2si_int
12268 = build_function_type (integer_type_node,
12269 tree_cons (NULL_TREE, V2SI_type_node,
12270 tree_cons (NULL_TREE, integer_type_node,
12271 endlink)));
12272 tree v8qi_ftype_v8qi_int_int
12273 = build_function_type (V8QI_type_node,
12274 tree_cons (NULL_TREE, V8QI_type_node,
12275 tree_cons (NULL_TREE, integer_type_node,
12276 tree_cons (NULL_TREE,
12277 integer_type_node,
12278 endlink))));
12279 tree v4hi_ftype_v4hi_int_int
12280 = build_function_type (V4HI_type_node,
12281 tree_cons (NULL_TREE, V4HI_type_node,
12282 tree_cons (NULL_TREE, integer_type_node,
12283 tree_cons (NULL_TREE,
12284 integer_type_node,
12285 endlink))));
12286 tree v2si_ftype_v2si_int_int
12287 = build_function_type (V2SI_type_node,
12288 tree_cons (NULL_TREE, V2SI_type_node,
12289 tree_cons (NULL_TREE, integer_type_node,
12290 tree_cons (NULL_TREE,
12291 integer_type_node,
12292 endlink))));
12293 /* Miscellaneous. */
12294 tree v8qi_ftype_v4hi_v4hi
12295 = build_function_type (V8QI_type_node,
12296 tree_cons (NULL_TREE, V4HI_type_node,
12297 tree_cons (NULL_TREE, V4HI_type_node,
12298 endlink)));
12299 tree v4hi_ftype_v2si_v2si
12300 = build_function_type (V4HI_type_node,
12301 tree_cons (NULL_TREE, V2SI_type_node,
12302 tree_cons (NULL_TREE, V2SI_type_node,
12303 endlink)));
12304 tree v2si_ftype_v4hi_v4hi
12305 = build_function_type (V2SI_type_node,
12306 tree_cons (NULL_TREE, V4HI_type_node,
12307 tree_cons (NULL_TREE, V4HI_type_node,
12308 endlink)));
12309 tree v2si_ftype_v8qi_v8qi
12310 = build_function_type (V2SI_type_node,
12311 tree_cons (NULL_TREE, V8QI_type_node,
12312 tree_cons (NULL_TREE, V8QI_type_node,
12313 endlink)));
12314 tree v4hi_ftype_v4hi_di
12315 = build_function_type (V4HI_type_node,
12316 tree_cons (NULL_TREE, V4HI_type_node,
12317 tree_cons (NULL_TREE,
12318 long_long_integer_type_node,
12319 endlink)));
12320 tree v2si_ftype_v2si_di
12321 = build_function_type (V2SI_type_node,
12322 tree_cons (NULL_TREE, V2SI_type_node,
12323 tree_cons (NULL_TREE,
12324 long_long_integer_type_node,
12325 endlink)));
12326 tree void_ftype_int_int
12327 = build_function_type (void_type_node,
12328 tree_cons (NULL_TREE, integer_type_node,
12329 tree_cons (NULL_TREE, integer_type_node,
12330 endlink)));
12331 tree di_ftype_void
12332 = build_function_type (long_long_unsigned_type_node, endlink);
12333 tree di_ftype_v8qi
12334 = build_function_type (long_long_integer_type_node,
12335 tree_cons (NULL_TREE, V8QI_type_node,
12336 endlink));
12337 tree di_ftype_v4hi
12338 = build_function_type (long_long_integer_type_node,
12339 tree_cons (NULL_TREE, V4HI_type_node,
12340 endlink));
12341 tree di_ftype_v2si
12342 = build_function_type (long_long_integer_type_node,
12343 tree_cons (NULL_TREE, V2SI_type_node,
12344 endlink));
12345 tree v2si_ftype_v4hi
12346 = build_function_type (V2SI_type_node,
12347 tree_cons (NULL_TREE, V4HI_type_node,
12348 endlink));
12349 tree v4hi_ftype_v8qi
12350 = build_function_type (V4HI_type_node,
12351 tree_cons (NULL_TREE, V8QI_type_node,
12352 endlink));
12354 tree di_ftype_di_v4hi_v4hi
12355 = build_function_type (long_long_unsigned_type_node,
12356 tree_cons (NULL_TREE,
12357 long_long_unsigned_type_node,
12358 tree_cons (NULL_TREE, V4HI_type_node,
12359 tree_cons (NULL_TREE,
12360 V4HI_type_node,
12361 endlink))));
12363 tree di_ftype_v4hi_v4hi
12364 = build_function_type (long_long_unsigned_type_node,
12365 tree_cons (NULL_TREE, V4HI_type_node,
12366 tree_cons (NULL_TREE, V4HI_type_node,
12367 endlink)));
12369 /* Normal vector binops. */
12370 tree v8qi_ftype_v8qi_v8qi
12371 = build_function_type (V8QI_type_node,
12372 tree_cons (NULL_TREE, V8QI_type_node,
12373 tree_cons (NULL_TREE, V8QI_type_node,
12374 endlink)));
12375 tree v4hi_ftype_v4hi_v4hi
12376 = build_function_type (V4HI_type_node,
12377 tree_cons (NULL_TREE, V4HI_type_node,
12378 tree_cons (NULL_TREE, V4HI_type_node,
12379 endlink)));
12380 tree v2si_ftype_v2si_v2si
12381 = build_function_type (V2SI_type_node,
12382 tree_cons (NULL_TREE, V2SI_type_node,
12383 tree_cons (NULL_TREE, V2SI_type_node,
12384 endlink)));
12385 tree di_ftype_di_di
12386 = build_function_type (long_long_unsigned_type_node,
12387 tree_cons (NULL_TREE, long_long_unsigned_type_node,
12388 tree_cons (NULL_TREE,
12389 long_long_unsigned_type_node,
12390 endlink)));
12392 /* Add all builtins that are more or less simple operations on two
12393 operands. */
12394 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12396 /* Use one of the operands; the target can have a different mode for
12397 mask-generating compares. */
12398 enum machine_mode mode;
12399 tree type;
12401 if (d->name == 0)
12402 continue;
12404 mode = insn_data[d->icode].operand[1].mode;
12406 switch (mode)
12408 case V8QImode:
12409 type = v8qi_ftype_v8qi_v8qi;
12410 break;
12411 case V4HImode:
12412 type = v4hi_ftype_v4hi_v4hi;
12413 break;
12414 case V2SImode:
12415 type = v2si_ftype_v2si_v2si;
12416 break;
12417 case DImode:
12418 type = di_ftype_di_di;
12419 break;
12421 default:
12422 gcc_unreachable ();
12425 def_mbuiltin (d->mask, d->name, type, d->code);
12428 /* Add the remaining MMX insns with somewhat more complicated types. */
12429 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12430 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12431 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12433 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12434 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12435 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12436 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12437 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12438 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12440 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12441 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12442 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12443 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12444 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12445 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12447 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12448 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12449 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12450 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12451 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12452 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12454 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12455 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12456 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12457 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12458 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12459 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12461 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12463 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12464 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12465 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12466 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12468 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12469 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12470 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12471 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12472 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12473 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12474 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12475 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12476 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12478 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12479 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12480 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12482 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12483 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12484 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12486 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12487 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12488 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12489 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12490 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12491 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12493 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12494 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12495 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12496 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12497 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12498 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12499 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12500 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12501 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12502 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12503 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12504 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12506 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12507 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12508 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12509 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12511 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12512 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12513 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12514 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12515 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12516 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12517 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
12520 static void
12521 arm_init_tls_builtins (void)
12523 tree ftype;
12524 tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
12525 tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
12527 ftype = build_function_type (ptr_type_node, void_list_node);
12528 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
12529 ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
12530 NULL, const_nothrow);
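/* User-level sketch: the builtin registered above underlies TLS
   access and can also be called directly (illustrative only).  */
#if 0
void *
current_thread_pointer (void)
{
  return __builtin_thread_pointer ();
}
#endif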
12533 static void
12534 arm_init_builtins (void)
12536 arm_init_tls_builtins ();
12538 if (TARGET_REALLY_IWMMXT)
12539 arm_init_iwmmxt_builtins ();
12542 /* Errors in the source file can cause expand_expr to return const0_rtx
12543 where we expect a vector. To avoid crashing, use one of the vector
12544 clear instructions. */
12546 static rtx
12547 safe_vector_operand (rtx x, enum machine_mode mode)
12549 if (x != const0_rtx)
12550 return x;
12551 x = gen_reg_rtx (mode);
12553 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12554 : gen_rtx_SUBREG (DImode, x, 0)));
12555 return x;
12558 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12560 static rtx
12561 arm_expand_binop_builtin (enum insn_code icode,
12562 tree arglist, rtx target)
12564 rtx pat;
12565 tree arg0 = TREE_VALUE (arglist);
12566 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12567 rtx op0 = expand_normal (arg0);
12568 rtx op1 = expand_normal (arg1);
12569 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12570 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12571 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12573 if (VECTOR_MODE_P (mode0))
12574 op0 = safe_vector_operand (op0, mode0);
12575 if (VECTOR_MODE_P (mode1))
12576 op1 = safe_vector_operand (op1, mode1);
12578 if (! target
12579 || GET_MODE (target) != tmode
12580 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12581 target = gen_reg_rtx (tmode);
12583 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12585 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12586 op0 = copy_to_mode_reg (mode0, op0);
12587 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12588 op1 = copy_to_mode_reg (mode1, op1);
12590 pat = GEN_FCN (icode) (target, op0, op1);
12591 if (! pat)
12592 return 0;
12593 emit_insn (pat);
12594 return target;
12597 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12599 static rtx
12600 arm_expand_unop_builtin (enum insn_code icode,
12601 tree arglist, rtx target, int do_load)
12603 rtx pat;
12604 tree arg0 = TREE_VALUE (arglist);
12605 rtx op0 = expand_normal (arg0);
12606 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12607 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12609 if (! target
12610 || GET_MODE (target) != tmode
12611 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12612 target = gen_reg_rtx (tmode);
12613 if (do_load)
12614 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12615 else
12617 if (VECTOR_MODE_P (mode0))
12618 op0 = safe_vector_operand (op0, mode0);
12620 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12621 op0 = copy_to_mode_reg (mode0, op0);
12624 pat = GEN_FCN (icode) (target, op0);
12625 if (! pat)
12626 return 0;
12627 emit_insn (pat);
12628 return target;
12631 /* Expand an expression EXP that calls a built-in function,
12632 with result going to TARGET if that's convenient
12633 (and in mode MODE if that's convenient).
12634 SUBTARGET may be used as the target for computing one of EXP's operands.
12635 IGNORE is nonzero if the value is to be ignored. */
12637 static rtx
12638 arm_expand_builtin (tree exp,
12639 rtx target,
12640 rtx subtarget ATTRIBUTE_UNUSED,
12641 enum machine_mode mode ATTRIBUTE_UNUSED,
12642 int ignore ATTRIBUTE_UNUSED)
12644 const struct builtin_description * d;
12645 enum insn_code icode;
12646 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12647 tree arglist = TREE_OPERAND (exp, 1);
12648 tree arg0;
12649 tree arg1;
12650 tree arg2;
12651 rtx op0;
12652 rtx op1;
12653 rtx op2;
12654 rtx pat;
12655 int fcode = DECL_FUNCTION_CODE (fndecl);
12656 size_t i;
12657 enum machine_mode tmode;
12658 enum machine_mode mode0;
12659 enum machine_mode mode1;
12660 enum machine_mode mode2;
12662 switch (fcode)
12664 case ARM_BUILTIN_TEXTRMSB:
12665 case ARM_BUILTIN_TEXTRMUB:
12666 case ARM_BUILTIN_TEXTRMSH:
12667 case ARM_BUILTIN_TEXTRMUH:
12668 case ARM_BUILTIN_TEXTRMSW:
12669 case ARM_BUILTIN_TEXTRMUW:
12670 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12671 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12672 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12673 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12674 : CODE_FOR_iwmmxt_textrmw);
12676 arg0 = TREE_VALUE (arglist);
12677 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12678 op0 = expand_normal (arg0);
12679 op1 = expand_normal (arg1);
12680 tmode = insn_data[icode].operand[0].mode;
12681 mode0 = insn_data[icode].operand[1].mode;
12682 mode1 = insn_data[icode].operand[2].mode;
12684 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12685 op0 = copy_to_mode_reg (mode0, op0);
12686 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12688 /* @@@ better error message */
12689 error ("selector must be an immediate");
12690 return gen_reg_rtx (tmode);
12692 if (target == 0
12693 || GET_MODE (target) != tmode
12694 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12695 target = gen_reg_rtx (tmode);
12696 pat = GEN_FCN (icode) (target, op0, op1);
12697 if (! pat)
12698 return 0;
12699 emit_insn (pat);
12700 return target;
12702 case ARM_BUILTIN_TINSRB:
12703 case ARM_BUILTIN_TINSRH:
12704 case ARM_BUILTIN_TINSRW:
12705 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12706 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12707 : CODE_FOR_iwmmxt_tinsrw);
12708 arg0 = TREE_VALUE (arglist);
12709 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12710 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12711 op0 = expand_normal (arg0);
12712 op1 = expand_normal (arg1);
12713 op2 = expand_normal (arg2);
12714 tmode = insn_data[icode].operand[0].mode;
12715 mode0 = insn_data[icode].operand[1].mode;
12716 mode1 = insn_data[icode].operand[2].mode;
12717 mode2 = insn_data[icode].operand[3].mode;
12719 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12720 op0 = copy_to_mode_reg (mode0, op0);
12721 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12722 op1 = copy_to_mode_reg (mode1, op1);
12723 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12725 /* @@@ better error message */
12726 error ("selector must be an immediate");
12727 return const0_rtx;
12729 if (target == 0
12730 || GET_MODE (target) != tmode
12731 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12732 target = gen_reg_rtx (tmode);
12733 pat = GEN_FCN (icode) (target, op0, op1, op2);
12734 if (! pat)
12735 return 0;
12736 emit_insn (pat);
12737 return target;
12739 case ARM_BUILTIN_SETWCX:
12740 arg0 = TREE_VALUE (arglist);
12741 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12742 op0 = force_reg (SImode, expand_normal (arg0));
12743 op1 = expand_normal (arg1);
12744 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12745 return 0;
12747 case ARM_BUILTIN_GETWCX:
12748 arg0 = TREE_VALUE (arglist);
12749 op0 = expand_normal (arg0);
12750 target = gen_reg_rtx (SImode);
12751 emit_insn (gen_iwmmxt_tmrc (target, op0));
12752 return target;
12754 case ARM_BUILTIN_WSHUFH:
12755 icode = CODE_FOR_iwmmxt_wshufh;
12756 arg0 = TREE_VALUE (arglist);
12757 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12758 op0 = expand_normal (arg0);
12759 op1 = expand_normal (arg1);
12760 tmode = insn_data[icode].operand[0].mode;
12761 mode1 = insn_data[icode].operand[1].mode;
12762 mode2 = insn_data[icode].operand[2].mode;
12764 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12765 op0 = copy_to_mode_reg (mode1, op0);
12766 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12768 /* @@@ better error message */
12769 error ("mask must be an immediate");
12770 return const0_rtx;
12772 if (target == 0
12773 || GET_MODE (target) != tmode
12774 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12775 target = gen_reg_rtx (tmode);
12776 pat = GEN_FCN (icode) (target, op0, op1);
12777 if (! pat)
12778 return 0;
12779 emit_insn (pat);
12780 return target;
12782 case ARM_BUILTIN_WSADB:
12783 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12784 case ARM_BUILTIN_WSADH:
12785 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12786 case ARM_BUILTIN_WSADBZ:
12787 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12788 case ARM_BUILTIN_WSADHZ:
12789 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12791 /* Several three-argument builtins. */
12792 case ARM_BUILTIN_WMACS:
12793 case ARM_BUILTIN_WMACU:
12794 case ARM_BUILTIN_WALIGN:
12795 case ARM_BUILTIN_TMIA:
12796 case ARM_BUILTIN_TMIAPH:
12797 case ARM_BUILTIN_TMIATT:
12798 case ARM_BUILTIN_TMIATB:
12799 case ARM_BUILTIN_TMIABT:
12800 case ARM_BUILTIN_TMIABB:
12801 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12802 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12803 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12804 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12805 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12806 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12807 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12808 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12809 : CODE_FOR_iwmmxt_walign);
12810 arg0 = TREE_VALUE (arglist);
12811 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12812 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12813 op0 = expand_normal (arg0);
12814 op1 = expand_normal (arg1);
12815 op2 = expand_normal (arg2);
12816 tmode = insn_data[icode].operand[0].mode;
12817 mode0 = insn_data[icode].operand[1].mode;
12818 mode1 = insn_data[icode].operand[2].mode;
12819 mode2 = insn_data[icode].operand[3].mode;
12821 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12822 op0 = copy_to_mode_reg (mode0, op0);
12823 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12824 op1 = copy_to_mode_reg (mode1, op1);
12825 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12826 op2 = copy_to_mode_reg (mode2, op2);
12827 if (target == 0
12828 || GET_MODE (target) != tmode
12829 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12830 target = gen_reg_rtx (tmode);
12831 pat = GEN_FCN (icode) (target, op0, op1, op2);
12832 if (! pat)
12833 return 0;
12834 emit_insn (pat);
12835 return target;
12837 case ARM_BUILTIN_WZERO:
12838 target = gen_reg_rtx (DImode);
12839 emit_insn (gen_iwmmxt_clrdi (target));
12840 return target;
12842 case ARM_BUILTIN_THREAD_POINTER:
12843 return arm_load_tp (target);
12845 default:
12846 break;
12849 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12850 if (d->code == (const enum arm_builtins) fcode)
12851 return arm_expand_binop_builtin (d->icode, arglist, target);
12853 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12854 if (d->code == (const enum arm_builtins) fcode)
12855 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12857 /* @@@ Should really do something sensible here. */
12858 return NULL_RTX;
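/* An illustrative sketch of the immediate-operand checks above (the
   typedef and functions are hypothetical user code):  */
#if 0
typedef char v8qi __attribute__ ((vector_size (8)));

int
extract_lane_3 (v8qi v)
{
  return __builtin_arm_textrmsb (v, 3);	/* OK: literal selector.  */
}

int
extract_lane (v8qi v, int i)
{
  return __builtin_arm_textrmsb (v, i);	/* Rejected: "selector must
					   be an immediate".  */
}
#endif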
12861 /* Return the number (counting from 0) of
12862 the least significant set bit in MASK. */
12864 inline static int
12865 number_of_first_bit_set (unsigned mask)
12867 int bit;
12869 for (bit = 0;
12870 (mask & (1 << bit)) == 0;
12871 ++bit)
12872 continue;
12874 return bit;
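/* A minimal standalone sketch of the function above; on a non-zero
   MASK it agrees with GCC's __builtin_ctz (illustrative only).  */
#if 0
#include <assert.h>

int
main (void)
{
  /* 0x28 is binary 101000; the least significant set bit is bit 3.  */
  assert (number_of_first_bit_set (0x28) == 3);
  assert (__builtin_ctz (0x28) == 3);
  return 0;
}
#endif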
12877 /* Emit code to push or pop registers to or from the stack. F is the
12878 assembly file. MASK is the registers to push or pop. PUSH is
12879 nonzero if we should push, and zero if we should pop. For debugging
12880 output, if pushing, adjust CFA_OFFSET by the amount of space added
12881 to the stack. REAL_REGS should have the same number of bits set as
12882 MASK, and will be used instead (in the same order) to describe which
12883 registers were saved - this is used to mark the save slots when we
12884 push high registers after moving them to low registers. */
12885 static void
12886 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12887 unsigned long real_regs)
12889 int regno;
12890 int lo_mask = mask & 0xFF;
12891 int pushed_words = 0;
12893 gcc_assert (mask);
12895 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12897 /* Special case. Do not generate a POP PC statement here, do it in
12898 thumb_exit(). */
12899 thumb_exit (f, -1);
12900 return;
12903 if (ARM_EABI_UNWIND_TABLES && push)
12905 fprintf (f, "\t.save\t{");
12906 for (regno = 0; regno < 15; regno++)
12908 if (real_regs & (1 << regno))
12910 if (real_regs & ((1 << regno) -1))
12911 fprintf (f, ", ");
12912 asm_fprintf (f, "%r", regno);
12915 fprintf (f, "}\n");
12918 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12920 /* Look at the low registers first. */
12921 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12923 if (lo_mask & 1)
12925 asm_fprintf (f, "%r", regno);
12927 if ((lo_mask & ~1) != 0)
12928 fprintf (f, ", ");
12930 pushed_words++;
12934 if (push && (mask & (1 << LR_REGNUM)))
12936 /* Catch pushing the LR. */
12937 if (mask & 0xFF)
12938 fprintf (f, ", ");
12940 asm_fprintf (f, "%r", LR_REGNUM);
12942 pushed_words++;
12944 else if (!push && (mask & (1 << PC_REGNUM)))
12946 /* Catch popping the PC. */
12947 if (TARGET_INTERWORK || TARGET_BACKTRACE
12948 || current_function_calls_eh_return)
12950 /* The PC is never popped directly; instead
12951 it is popped into r3 and then BX is used. */
12952 fprintf (f, "}\n");
12954 thumb_exit (f, -1);
12956 return;
12958 else
12960 if (mask & 0xFF)
12961 fprintf (f, ", ");
12963 asm_fprintf (f, "%r", PC_REGNUM);
12967 fprintf (f, "}\n");
12969 if (push && pushed_words && dwarf2out_do_frame ())
12971 char *l = dwarf2out_cfi_label ();
12972 int pushed_mask = real_regs;
12974 *cfa_offset += pushed_words * 4;
12975 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12977 pushed_words = 0;
12978 pushed_mask = real_regs;
12979 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12981 if (pushed_mask & 1)
12982 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
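/* As an illustrative sketch: for a PUSH with MASK covering r4-r7 and
   LR, and EABI unwind tables enabled, the code above emits

	.save	{r4, r5, r6, r7, lr}
	push	{r4, r5, r6, r7, lr}

   and then advances the CFA by 20 bytes in the dwarf2 output.  */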
12987 /* Generate code to return from a thumb function.
12988 If 'reg_containing_return_addr' is -1, then the return address is
12989 actually on the stack, at the stack pointer. */
12990 static void
12991 thumb_exit (FILE *f, int reg_containing_return_addr)
12993 unsigned regs_available_for_popping;
12994 unsigned regs_to_pop;
12995 int pops_needed;
12996 unsigned available;
12997 unsigned required;
12998 int mode;
12999 int size;
13000 int restore_a4 = FALSE;
13002 /* Compute the registers we need to pop. */
13003 regs_to_pop = 0;
13004 pops_needed = 0;
13006 if (reg_containing_return_addr == -1)
13008 regs_to_pop |= 1 << LR_REGNUM;
13009 ++pops_needed;
13012 if (TARGET_BACKTRACE)
13014 /* Restore the (ARM) frame pointer and stack pointer. */
13015 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
13016 pops_needed += 2;
13019 /* If there is nothing to pop then just emit the BX instruction and
13020 return. */
13021 if (pops_needed == 0)
13023 if (current_function_calls_eh_return)
13024 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13026 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13027 return;
13029 /* Otherwise if we are not supporting interworking and we have not created
13030 a backtrace structure and the function was not entered in ARM mode then
13031 just pop the return address straight into the PC. */
13032 else if (!TARGET_INTERWORK
13033 && !TARGET_BACKTRACE
13034 && !is_called_in_ARM_mode (current_function_decl)
13035 && !current_function_calls_eh_return)
13037 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
13038 return;
13041 /* Find out how many of the (return) argument registers we can corrupt. */
13042 regs_available_for_popping = 0;
13044 /* If returning via __builtin_eh_return, the bottom three registers
13045 all contain information needed for the return. */
13046 if (current_function_calls_eh_return)
13047 size = 12;
13048 else
13050 /* We can deduce the registers used from the function's
13051 return value. This is more reliable than examining
13052 regs_ever_live[] because that will be set if the register is
13053 ever used in the function, not just if the register is used
13054 to hold a return value. */
13056 if (current_function_return_rtx != 0)
13057 mode = GET_MODE (current_function_return_rtx);
13058 else
13059 mode = DECL_MODE (DECL_RESULT (current_function_decl));
13061 size = GET_MODE_SIZE (mode);
13063 if (size == 0)
13065 /* In a void function we can use any argument register.
13066 In a function that returns a structure on the stack
13067 we can use the second and third argument registers. */
13068 if (mode == VOIDmode)
13069 regs_available_for_popping =
13070 (1 << ARG_REGISTER (1))
13071 | (1 << ARG_REGISTER (2))
13072 | (1 << ARG_REGISTER (3));
13073 else
13074 regs_available_for_popping =
13075 (1 << ARG_REGISTER (2))
13076 | (1 << ARG_REGISTER (3));
13078 else if (size <= 4)
13079 regs_available_for_popping =
13080 (1 << ARG_REGISTER (2))
13081 | (1 << ARG_REGISTER (3));
13082 else if (size <= 8)
13083 regs_available_for_popping =
13084 (1 << ARG_REGISTER (3));
13087 /* Match registers to be popped with registers into which we pop them. */
13088 for (available = regs_available_for_popping,
13089 required = regs_to_pop;
13090 required != 0 && available != 0;
13091 available &= ~(available & - available),
13092 required &= ~(required & - required))
13093 -- pops_needed;
13095 /* If we have any popping registers left over, remove them. */
13096 if (available > 0)
13097 regs_available_for_popping &= ~available;
13099 /* Otherwise if we need another popping register we can use
13100 the fourth argument register. */
13101 else if (pops_needed)
13103 /* If we have not found any free argument registers and
13104 reg a4 contains the return address, we must move it. */
13105 if (regs_available_for_popping == 0
13106 && reg_containing_return_addr == LAST_ARG_REGNUM)
13108 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13109 reg_containing_return_addr = LR_REGNUM;
13111 else if (size > 12)
13113 /* Register a4 is being used to hold part of the return value,
13114 but we have dire need of a free, low register. */
13115 restore_a4 = TRUE;
13117 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
13120 if (reg_containing_return_addr != LAST_ARG_REGNUM)
13122 /* The fourth argument register is available. */
13123 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
13125 --pops_needed;
13129 /* Pop as many registers as we can. */
13130 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13131 regs_available_for_popping);
13133 /* Process the registers we popped. */
13134 if (reg_containing_return_addr == -1)
13136 /* The return address was popped into the lowest numbered register. */
13137 regs_to_pop &= ~(1 << LR_REGNUM);
13139 reg_containing_return_addr =
13140 number_of_first_bit_set (regs_available_for_popping);
13142 /* Remove this register from the mask of available registers, so that
13143 the return address will not be corrupted by further pops. */
13144 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
13147 /* If we popped other registers then handle them here. */
13148 if (regs_available_for_popping)
13150 int frame_pointer;
13152 /* Work out which register currently contains the frame pointer. */
13153 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
13155 /* Move it into the correct place. */
13156 asm_fprintf (f, "\tmov\t%r, %r\n",
13157 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
13159 /* (Temporarily) remove it from the mask of popped registers. */
13160 regs_available_for_popping &= ~(1 << frame_pointer);
13161 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
13163 if (regs_available_for_popping)
13165 int stack_pointer;
13167 /* We popped the stack pointer as well;
13168 find the register that contains it. */
13169 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
13171 /* Move it into the stack register. */
13172 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
13174 /* At this point we have popped all necessary registers, so
13175 do not worry about restoring regs_available_for_popping
13176 to its correct value:
13178 assert (pops_needed == 0)
13179 assert (regs_available_for_popping == (1 << frame_pointer))
13180 assert (regs_to_pop == (1 << STACK_POINTER)) */
13182 else
13184 /* Since we have just moved the popped value into the frame
13185 pointer, the popping register is available for reuse, and
13186 we know that we still have the stack pointer left to pop. */
13187 regs_available_for_popping |= (1 << frame_pointer);
13191 /* If we still have registers left on the stack, but we no longer have
13192 any registers into which we can pop them, then we must move the return
13193 address into the link register and make available the register that
13194 contained it. */
13195 if (regs_available_for_popping == 0 && pops_needed > 0)
13197 regs_available_for_popping |= 1 << reg_containing_return_addr;
13199 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
13200 reg_containing_return_addr);
13202 reg_containing_return_addr = LR_REGNUM;
13205 /* If we have registers left on the stack then pop some more.
13206 We know that at most we will want to pop FP and SP. */
13207 if (pops_needed > 0)
13209 int popped_into;
13210 int move_to;
13212 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13213 regs_available_for_popping);
13215 /* We have popped either FP or SP.
13216 Move whichever one it is into the correct register. */
13217 popped_into = number_of_first_bit_set (regs_available_for_popping);
13218 move_to = number_of_first_bit_set (regs_to_pop);
13220 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
13222 regs_to_pop &= ~(1 << move_to);
13224 --pops_needed;
13227 /* If we still have not popped everything then we must have only
13228 had one register available to us and we are now popping the SP. */
13229 if (pops_needed > 0)
13231 int popped_into;
13233 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13234 regs_available_for_popping);
13236 popped_into = number_of_first_bit_set (regs_available_for_popping);
13238 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
13239 /*
13240 assert (regs_to_pop == (1 << STACK_POINTER))
13241 assert (pops_needed == 1)
13242 */
13245 /* If necessary restore the a4 register. */
13246 if (restore_a4)
13248 if (reg_containing_return_addr != LR_REGNUM)
13250 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13251 reg_containing_return_addr = LR_REGNUM;
13254 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13257 if (current_function_calls_eh_return)
13258 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13260 /* Return to caller. */
13261 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
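/* An illustrative sketch: for a void function whose return address is
   still on the stack, with interworking enabled but no backtrace
   structure, the logic above pops the return address into the lowest
   free argument register and emits

	pop	{r0}
	bx	r0

   (the register choice is situation-dependent; this is not the only
   possible sequence).  */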
13265 void
13266 thumb_final_prescan_insn (rtx insn)
13268 if (flag_print_asm_name)
13269 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
13270 INSN_ADDRESSES (INSN_UID (insn)));
13274 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
13276 unsigned HOST_WIDE_INT mask = 0xff;
13277 int i;
13279 if (val == 0) /* XXX */
13280 return 0;
13282 for (i = 0; i < 25; i++)
13283 if ((val & (mask << i)) == val)
13284 return 1;
13286 return 0;
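/* A standalone sketch of the predicate above (illustrative only):
   a value is accepted iff it is an 8-bit constant shifted left by
   0..24 bits.  */
#if 0
#include <assert.h>

int
main (void)
{
  assert (thumb_shiftable_const (0x00ff0000) != 0);	/* 0xff << 16 */
  assert (thumb_shiftable_const (0x00000101) == 0);	/* spans 9 bits */
  return 0;
}
#endif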
13289 /* Returns nonzero if the current function contains,
13290 or might contain a far jump. */
13291 static int
13292 thumb_far_jump_used_p (void)
13294 rtx insn;
13296 /* This test is only important for leaf functions. */
13297 /* assert (!leaf_function_p ()); */
13299 /* If we have already decided that far jumps may be used,
13300 do not bother checking again, and always return true even if
13301 it turns out that they are not being used. Once we have made
13302 the decision that far jumps are present (and that hence the link
13303 register will be pushed onto the stack) we cannot go back on it. */
13304 if (cfun->machine->far_jump_used)
13305 return 1;
13307 /* If this function is not being called from the prologue/epilogue
13308 generation code then it must be being called from the
13309 INITIAL_ELIMINATION_OFFSET macro. */
13310 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
13312 /* In this case we know that we are being asked about the elimination
13313 of the arg pointer register. If that register is not being used,
13314 then there are no arguments on the stack, and we do not have to
13315 worry that a far jump might force the prologue to push the link
13316 register, changing the stack offsets. In this case we can just
13317 return false, since the presence of far jumps in the function will
13318 not affect stack offsets.
13320 If the arg pointer is live (or if it was live, but has now been
13321 eliminated and so set to dead) then we do have to test to see if
13322 the function might contain a far jump. This test can lead to some
13323 false negatives, since before reload is completed, the length of
13324 branch instructions is not known, so gcc defaults to returning their
13325 longest length, which in turn sets the far jump attribute to true.
13327 A false negative will not result in bad code being generated, but it
13328 will result in a needless push and pop of the link register. We
13329 hope that this does not occur too often.
13331 If we need doubleword stack alignment this could affect the other
13332 elimination offsets so we can't risk getting it wrong. */
13333 if (regs_ever_live [ARG_POINTER_REGNUM])
13334 cfun->machine->arg_pointer_live = 1;
13335 else if (!cfun->machine->arg_pointer_live)
13336 return 0;
13339 /* Check to see if the function contains a branch
13340 insn with the far jump attribute set. */
13341 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13343 if (GET_CODE (insn) == JUMP_INSN
13344 /* Ignore tablejump patterns. */
13345 && GET_CODE (PATTERN (insn)) != ADDR_VEC
13346 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
13347 && get_attr_far_jump (insn) == FAR_JUMP_YES
13350 /* Record the fact that we have decided that
13351 the function does use far jumps. */
13352 cfun->machine->far_jump_used = 1;
13353 return 1;
13357 return 0;
13360 /* Return nonzero if FUNC must be entered in ARM mode. */
13362 is_called_in_ARM_mode (tree func)
13364 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
13366 /* Ignore the problem about functions whose address is taken. */
13367 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
13368 return TRUE;
13370 #ifdef ARM_PE
13371 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
13372 #else
13373 return FALSE;
13374 #endif
13377 /* The bits which aren't usefully expanded as rtl. */
13378 const char *
13379 thumb_unexpanded_epilogue (void)
13381 int regno;
13382 unsigned long live_regs_mask = 0;
13383 int high_regs_pushed = 0;
13384 int had_to_push_lr;
13385 int size;
13387 if (return_used_this_function)
13388 return "";
13390 if (IS_NAKED (arm_current_func_type ()))
13391 return "";
13393 live_regs_mask = thumb_compute_save_reg_mask ();
13394 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13396 /* We can deduce the registers used from the function's return value.
13397 This is more reliable than examining regs_ever_live[] because that
13398 will be set if the register is ever used in the function, not just if
13399 the register is used to hold a return value. */
13400 size = arm_size_return_regs ();
13402 /* The prologue may have pushed some high registers to use as
13403 work registers, e.g. the testsuite file:
13404 gcc/testsuite/gcc.c-torture/execute/complex-2.c
13405 compiles to produce:
13406 push {r4, r5, r6, r7, lr}
13407 mov r7, r9
13408 mov r6, r8
13409 push {r6, r7}
13410 as part of the prologue. We have to undo that pushing here. */
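/* Continuing the example above (an illustrative sketch, assuming the
   return value leaves r2 and r3 free): the code below would undo that
   pushing with

	pop	{r2, r3}
	mov	r8, r2
	mov	r9, r3

   popping the saved values into free low registers and moving them
   back into the high registers they came from.  */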
13412 if (high_regs_pushed)
13414 unsigned long mask = live_regs_mask & 0xff;
13415 int next_hi_reg;
13417 /* The available low registers depend on the size of the value we are
13418 returning. */
13419 if (size <= 12)
13420 mask |= 1 << 3;
13421 if (size <= 8)
13422 mask |= 1 << 2;
13424 if (mask == 0)
13425 /* Oh dear! We have no low registers into which we can pop
13426 high registers! */
13427 internal_error
13428 ("no low registers available for popping high registers");
13430 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13431 if (live_regs_mask & (1 << next_hi_reg))
13432 break;
13434 while (high_regs_pushed)
13436 /* Find lo register(s) into which the high register(s) can
13437 be popped. */
13438 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13440 if (mask & (1 << regno))
13441 high_regs_pushed--;
13442 if (high_regs_pushed == 0)
13443 break;
13446 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13448 /* Pop the values into the low register(s). */
13449 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13451 /* Move the value(s) into the high registers. */
13452 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13454 if (mask & (1 << regno))
13456 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13457 regno);
13459 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13460 if (live_regs_mask & (1 << next_hi_reg))
13461 break;
13465 live_regs_mask &= ~0x0f00;
13468 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13469 live_regs_mask &= 0xff;
13471 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13473 /* Pop the return address into the PC. */
13474 if (had_to_push_lr)
13475 live_regs_mask |= 1 << PC_REGNUM;
13477 /* Either no argument registers were pushed or a backtrace
13478 structure was created which includes an adjusted stack
13479 pointer, so just pop everything. */
13480 if (live_regs_mask)
13481 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13482 live_regs_mask);
13484 /* We have either just popped the return address into the
13485 PC or it was kept in LR for the entire function. */
13486 if (!had_to_push_lr)
13487 thumb_exit (asm_out_file, LR_REGNUM);
13489 else
13491 /* Pop everything but the return address. */
13492 if (live_regs_mask)
13493 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13494 live_regs_mask);
13496 if (had_to_push_lr)
13498 if (size > 12)
13500 /* We have no free low regs, so save one. */
13501 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13502 LAST_ARG_REGNUM);
13505 /* Get the return address into a temporary register. */
13506 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13507 1 << LAST_ARG_REGNUM);
13509 if (size > 12)
13511 /* Move the return address to lr. */
13512 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13513 LAST_ARG_REGNUM);
13514 /* Restore the low register. */
13515 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13516 IP_REGNUM);
13517 regno = LR_REGNUM;
13519 else
13520 regno = LAST_ARG_REGNUM;
13522 else
13523 regno = LR_REGNUM;
13525 /* Remove the argument registers that were pushed onto the stack. */
13526 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13527 SP_REGNUM, SP_REGNUM,
13528 current_function_pretend_args_size);
13530 thumb_exit (asm_out_file, regno);
13533 return "";
13536 /* Functions to save and restore machine-specific function data. */
13537 static struct machine_function *
13538 arm_init_machine_status (void)
13540 struct machine_function *machine;
13541 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13543 #if ARM_FT_UNKNOWN != 0
13544 machine->func_type = ARM_FT_UNKNOWN;
13545 #endif
13546 return machine;
13549 /* Return an RTX indicating where the return address to the
13550 calling function can be found. */
13552 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13554 if (count != 0)
13555 return NULL_RTX;
13557 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
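/* User-level sketch: this is the hook behind __builtin_return_address
   (illustrative only; only the current frame, count 0, is supported
   here).  */
#if 0
void *
my_return_address (void)
{
  return __builtin_return_address (0);	/* LR's saved value.  */
}
#endif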
13560 /* Do anything needed before RTL is emitted for each function. */
13561 void
13562 arm_init_expanders (void)
13564 /* Arrange to initialize and mark the machine per-function status. */
13565 init_machine_status = arm_init_machine_status;
13567 /* This is to stop the combine pass optimizing away the alignment
13568 adjustment of va_arg. */
13569 /* ??? It is claimed that this should not be necessary. */
13570 if (cfun)
13571 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13575 /* Like arm_compute_initial_elimination_offset. Simpler because there
13576 isn't an ABI specified frame pointer for Thumb. Instead, we set it
13577 to point at the base of the local variables after static stack
13578 space for a function has been allocated. */
13580 HOST_WIDE_INT
13581 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13583 arm_stack_offsets *offsets;
13585 offsets = arm_get_frame_offsets ();
13587 switch (from)
13589 case ARG_POINTER_REGNUM:
13590 switch (to)
13592 case STACK_POINTER_REGNUM:
13593 return offsets->outgoing_args - offsets->saved_args;
13595 case FRAME_POINTER_REGNUM:
13596 return offsets->soft_frame - offsets->saved_args;
13598 case ARM_HARD_FRAME_POINTER_REGNUM:
13599 return offsets->saved_regs - offsets->saved_args;
13601 case THUMB_HARD_FRAME_POINTER_REGNUM:
13602 return offsets->locals_base - offsets->saved_args;
13604 default:
13605 gcc_unreachable ();
13607 break;
13609 case FRAME_POINTER_REGNUM:
13610 switch (to)
13612 case STACK_POINTER_REGNUM:
13613 return offsets->outgoing_args - offsets->soft_frame;
13615 case ARM_HARD_FRAME_POINTER_REGNUM:
13616 return offsets->saved_regs - offsets->soft_frame;
13618 case THUMB_HARD_FRAME_POINTER_REGNUM:
13619 return offsets->locals_base - offsets->soft_frame;
13621 default:
13622 gcc_unreachable ();
13624 break;
13626 default:
13627 gcc_unreachable ();
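/* A worked example with hypothetical frame offsets: if
   saved_args = 0, saved_regs = 16, soft_frame = 24 and
   outgoing_args = 40, then eliminating ARG_POINTER_REGNUM to
   STACK_POINTER_REGNUM yields 40 - 0 = 40, while eliminating
   FRAME_POINTER_REGNUM to STACK_POINTER_REGNUM yields
   40 - 24 = 16.  */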
13632 /* Generate the rest of a function's prologue. */
13633 void
13634 thumb_expand_prologue (void)
13636 rtx insn, dwarf;
13638 HOST_WIDE_INT amount;
13639 arm_stack_offsets *offsets;
13640 unsigned long func_type;
13641 int regno;
13642 unsigned long live_regs_mask;
13644 func_type = arm_current_func_type ();
13646 /* Naked functions don't have prologues. */
13647 if (IS_NAKED (func_type))
13648 return;
13650 if (IS_INTERRUPT (func_type))
13652 error ("interrupt service routines cannot be coded in Thumb mode");
13653 return;
13656 live_regs_mask = thumb_compute_save_reg_mask ();
13657 /* Load the pic register before setting the frame pointer,
13658 so we can use r7 as a temporary work register. */
13659 if (flag_pic && arm_pic_register != INVALID_REGNUM)
13660 arm_load_pic_register (live_regs_mask);
13662 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
13663 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13664 stack_pointer_rtx);
13666 offsets = arm_get_frame_offsets ();
13667 amount = offsets->outgoing_args - offsets->saved_regs;
13668 if (amount)
13670 if (amount < 512)
13672 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13673 GEN_INT (- amount)));
13674 RTX_FRAME_RELATED_P (insn) = 1;
13676 else
13678 rtx reg;
13680 /* The stack decrement is too big for an immediate value in a single
13681 insn. In theory we could issue multiple subtracts, but after
13682 three of them it becomes more space efficient to place the full
13683 value in the constant pool and load it into a register. (Also the
13684 ARM debugger really likes to see only one stack decrement per
13685 function). So instead we look for a scratch register into which
13686 we can load the decrement, and then we subtract this from the
13687 stack pointer. Unfortunately on the Thumb the only available
13688 scratch registers are the argument registers, and we cannot use
13689 these as they may hold arguments to the function. Instead we
13690 attempt to locate a call preserved register which is used by this
13691 function. If we can find one, then we know that it will have
13692 been pushed at the start of the prologue and so we can corrupt
13693 it now. */
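/* A sketch of what this path emits (register choice illustrative):
   with amount == 1024 and r4 live in this function, we get roughly
   ldr r4, .Lpool @ .Lpool holds -1024
   add sp, sp, r4
   r4's old value was pushed at the start of the prologue, so it is
   safe to corrupt here. */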
13694 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13695 if (live_regs_mask & (1 << regno)
13696 && !(frame_pointer_needed
13697 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13698 break;
13700 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13702 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13704 /* Choose an arbitrary, non-argument low register. */
13705 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13707 /* Save it by copying it into a high, scratch register. */
13708 emit_insn (gen_movsi (spare, reg));
13709 /* Add a USE to stop propagate_one_insn() from barfing. */
13710 emit_insn (gen_prologue_use (spare));
13712 /* Decrement the stack. */
13713 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13714 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13715 stack_pointer_rtx, reg));
13716 RTX_FRAME_RELATED_P (insn) = 1;
13717 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13718 plus_constant (stack_pointer_rtx,
13719 -amount));
13720 RTX_FRAME_RELATED_P (dwarf) = 1;
13721 REG_NOTES (insn)
13722 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13723 REG_NOTES (insn));
13725 /* Restore the low register's original value. */
13726 emit_insn (gen_movsi (reg, spare));
13728 /* Emit a USE of the restored scratch register, so that flow
13729 analysis will not consider the restore redundant. The
13730 register won't be used again in this function and isn't
13731 restored by the epilogue. */
13732 emit_insn (gen_prologue_use (reg));
13734 else
13736 reg = gen_rtx_REG (SImode, regno);
13738 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13740 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13741 stack_pointer_rtx, reg));
13742 RTX_FRAME_RELATED_P (insn) = 1;
13743 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13744 plus_constant (stack_pointer_rtx,
13745 -amount));
13746 RTX_FRAME_RELATED_P (dwarf) = 1;
13747 REG_NOTES (insn)
13748 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13749 REG_NOTES (insn));
13754 if (frame_pointer_needed)
13756 amount = offsets->outgoing_args - offsets->locals_base;
13758 if (amount < 1024)
13759 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13760 stack_pointer_rtx, GEN_INT (amount)));
13761 else
13763 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
13764 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13765 hard_frame_pointer_rtx,
13766 stack_pointer_rtx));
13767 dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
13768 plus_constant (stack_pointer_rtx, amount));
13769 RTX_FRAME_RELATED_P (dwarf) = 1;
13770 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13771 REG_NOTES (insn));
13774 RTX_FRAME_RELATED_P (insn) = 1;
13777 /* If we are profiling, make sure no instructions are scheduled before
13778 the call to mcount. Likewise if the user has requested no
13779 scheduling in the prologue, or if we want non-call exceptions
13780 using the EABI unwinder, to prevent faulting instructions from being
13781 swapped with a stack adjustment. */
13782 if (current_function_profile || !TARGET_SCHED_PROLOG
13783 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
13784 emit_insn (gen_blockage ());
13786 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13787 if (live_regs_mask & 0xff)
13788 cfun->machine->lr_save_eliminated = 0;
13790 /* If the link register is being kept alive, with the return address in it,
13791 then make sure that it does not get reused by the ce2 pass. */
13792 if (cfun->machine->lr_save_eliminated)
13793 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13797 void
13798 thumb_expand_epilogue (void)
13800 HOST_WIDE_INT amount;
13801 arm_stack_offsets *offsets;
13802 int regno;
13804 /* Naked functions don't have epilogues. */
13805 if (IS_NAKED (arm_current_func_type ()))
13806 return;
13808 offsets = arm_get_frame_offsets ();
13809 amount = offsets->outgoing_args - offsets->saved_regs;
13811 if (frame_pointer_needed)
13813 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13814 amount = offsets->locals_base - offsets->saved_regs;
13817 if (amount)
13819 if (amount < 512)
13820 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13821 GEN_INT (amount)));
13822 else
13824 /* r3 is always free in the epilogue. */
13825 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13827 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13828 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13832 /* Emit a USE (stack_pointer_rtx), so that
13833 the stack adjustment will not be deleted. */
13834 emit_insn (gen_prologue_use (stack_pointer_rtx));
13836 if (current_function_profile || !TARGET_SCHED_PROLOG)
13837 emit_insn (gen_blockage ());
13839 /* Emit a clobber for each insn that will be restored in the epilogue,
13840 so that flow2 will get register lifetimes correct. */
13841 for (regno = 0; regno < 13; regno++)
13842 if (regs_ever_live[regno] && !call_used_regs[regno])
13843 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13845 if (! regs_ever_live[LR_REGNUM])
13846 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13849 static void
13850 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13852 unsigned long live_regs_mask = 0;
13853 unsigned long l_mask;
13854 unsigned high_regs_pushed = 0;
13855 int cfa_offset = 0;
13856 int regno;
13858 if (IS_NAKED (arm_current_func_type ()))
13859 return;
13861 if (is_called_in_ARM_mode (current_function_decl))
13863 const char * name;
13865 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13866 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13867 == SYMBOL_REF);
13868 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13870 /* Generate code sequence to switch us into Thumb mode. */
13871 /* The .code 32 directive has already been emitted by
13872 ASM_DECLARE_FUNCTION_NAME. */
13873 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13874 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13876 /* Generate a label, so that the debugger will notice the
13877 change in instruction sets. This label is also used by
13878 the assembler to bypass the ARM code when this function
13879 is called from a Thumb encoded function elsewhere in the
13880 same file. Hence the definition of STUB_NAME here must
13881 agree with the definition in gas/config/tc-arm.c. */
13883 #define STUB_NAME ".real_start_of"
13885 fprintf (f, "\t.code\t16\n");
13886 #ifdef ARM_PE
13887 if (arm_dllexport_name_p (name))
13888 name = arm_strip_name_encoding (name);
13889 #endif
13890 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13891 fprintf (f, "\t.thumb_func\n");
13892 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13895 if (current_function_pretend_args_size)
13897 /* Output unwind directive for the stack adjustment. */
13898 if (ARM_EABI_UNWIND_TABLES)
13899 fprintf (f, "\t.pad #%d\n",
13900 current_function_pretend_args_size);
13902 if (cfun->machine->uses_anonymous_args)
13904 int num_pushes;
13906 fprintf (f, "\tpush\t{");
13908 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13910 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13911 regno <= LAST_ARG_REGNUM;
13912 regno++)
13913 asm_fprintf (f, "%r%s", regno,
13914 regno == LAST_ARG_REGNUM ? "" : ", ");
13916 fprintf (f, "}\n");
13918 else
13919 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13920 SP_REGNUM, SP_REGNUM,
13921 current_function_pretend_args_size);
13923 /* We don't need to record the stores for unwinding (would it
13924 help the debugger any if we did?), but record the change in
13925 the stack pointer. */
13926 if (dwarf2out_do_frame ())
13928 char *l = dwarf2out_cfi_label ();
13930 cfa_offset = cfa_offset + current_function_pretend_args_size;
13931 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13935 /* Get the registers we are going to push. */
13936 live_regs_mask = thumb_compute_save_reg_mask ();
13937 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13938 l_mask = live_regs_mask & 0x40ff;
13939 /* Then count how many other high registers will need to be pushed. */
13940 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13942 if (TARGET_BACKTRACE)
13944 unsigned offset;
13945 unsigned work_register;
13947 /* We have been asked to create a stack backtrace structure.
13948 The code looks like this:
13950 0 .align 2
13951 0 func:
13952 0 sub SP, #16 Reserve space for 4 registers.
13953 2 push {R7} Push low registers.
13954 4 add R7, SP, #20 Get the stack pointer before the push.
13955 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13956 8 mov R7, PC Get hold of the start of this code plus 12.
13957 10 str R7, [SP, #16] Store it.
13958 12 mov R7, FP Get hold of the current frame pointer.
13959 14 str R7, [SP, #4] Store it.
13960 16 mov R7, LR Get hold of the current return address.
13961 18 str R7, [SP, #12] Store it.
13962 20 add R7, SP, #16 Point at the start of the backtrace structure.
13963 22 mov FP, R7 Put this value into the frame pointer. */
13965 work_register = thumb_find_work_register (live_regs_mask);
13967 if (ARM_EABI_UNWIND_TABLES)
13968 asm_fprintf (f, "\t.pad #16\n");
13970 asm_fprintf
13971 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13972 SP_REGNUM, SP_REGNUM);
13974 if (dwarf2out_do_frame ())
13976 char *l = dwarf2out_cfi_label ();
13978 cfa_offset = cfa_offset + 16;
13979 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13982 if (l_mask)
13984 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13985 offset = bit_count (l_mask) * UNITS_PER_WORD;
13987 else
13988 offset = 0;
13990 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13991 offset + 16 + current_function_pretend_args_size);
13993 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13994 offset + 4);
13996 /* Make sure that the instruction fetching the PC is in the right place
13997 to calculate "start of backtrace creation code + 12". */
13998 if (l_mask)
14000 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14001 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14002 offset + 12);
14003 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14004 ARM_HARD_FRAME_POINTER_REGNUM);
14005 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14006 offset);
14008 else
14010 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14011 ARM_HARD_FRAME_POINTER_REGNUM);
14012 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14013 offset);
14014 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14015 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14016 offset + 12);
14019 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
14020 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14021 offset + 8);
14022 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14023 offset + 12);
14024 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
14025 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
14027 /* Optimization: If we are not pushing any low registers but we are going
14028 to push some high registers then delay our first push. This will just
14029 be a push of LR and we can combine it with the push of the first high
14030 register. */
14031 else if ((l_mask & 0xff) != 0
14032 || (high_regs_pushed == 0 && l_mask))
14033 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14035 if (high_regs_pushed)
14037 unsigned pushable_regs;
14038 unsigned next_hi_reg;
14040 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
14041 if (live_regs_mask & (1 << next_hi_reg))
14042 break;
14044 pushable_regs = l_mask & 0xff;
14046 if (pushable_regs == 0)
14047 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
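/* The Thumb push instruction cannot encode r8-r12, so each live high
   register is copied into one of the low registers selected above and
   pushed from there, highest numbered register first. */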
14049 while (high_regs_pushed > 0)
14051 unsigned long real_regs_mask = 0;
14053 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
14055 if (pushable_regs & (1 << regno))
14057 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
14059 high_regs_pushed --;
14060 real_regs_mask |= (1 << next_hi_reg);
14062 if (high_regs_pushed)
14064 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
14065 next_hi_reg --)
14066 if (live_regs_mask & (1 << next_hi_reg))
14067 break;
14069 else
14071 pushable_regs &= ~((1 << regno) - 1);
14072 break;
14077 /* If we had to find a work register and we have not yet
14078 saved the LR then add it to the list of regs to push. */
14079 if (l_mask == (1 << LR_REGNUM))
14081 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
14082 1, &cfa_offset,
14083 real_regs_mask | (1 << LR_REGNUM));
14084 l_mask = 0;
14086 else
14087 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
14092 /* Handle the case of a double word load into a low register from
14093 a computed memory address. The computed address may involve a
14094 register which is overwritten by the load. */
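/* For instance (registers illustrative), a double word load into r2/r3
   from the address r0 + r1 emits
   add r3, r0, r1
   ldr r2, [r3, #0]
   ldr r3, [r3, #4]
   computing the address in the high destination register so that the
   first load cannot clobber it. */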
14095 const char *
14096 thumb_load_double_from_address (rtx *operands)
14098 rtx addr;
14099 rtx base;
14100 rtx offset;
14101 rtx arg1;
14102 rtx arg2;
14104 gcc_assert (GET_CODE (operands[0]) == REG);
14105 gcc_assert (GET_CODE (operands[1]) == MEM);
14107 /* Get the memory address. */
14108 addr = XEXP (operands[1], 0);
14110 /* Work out how the memory address is computed. */
14111 switch (GET_CODE (addr))
14113 case REG:
14114 operands[2] = adjust_address (operands[1], SImode, 4);
14116 if (REGNO (operands[0]) == REGNO (addr))
14118 output_asm_insn ("ldr\t%H0, %2", operands);
14119 output_asm_insn ("ldr\t%0, %1", operands);
14121 else
14123 output_asm_insn ("ldr\t%0, %1", operands);
14124 output_asm_insn ("ldr\t%H0, %2", operands);
14126 break;
14128 case CONST:
14129 /* Compute <address> + 4 for the high order load. */
14130 operands[2] = adjust_address (operands[1], SImode, 4);
14132 output_asm_insn ("ldr\t%0, %1", operands);
14133 output_asm_insn ("ldr\t%H0, %2", operands);
14134 break;
14136 case PLUS:
14137 arg1 = XEXP (addr, 0);
14138 arg2 = XEXP (addr, 1);
14140 if (CONSTANT_P (arg1))
14141 base = arg2, offset = arg1;
14142 else
14143 base = arg1, offset = arg2;
14145 gcc_assert (GET_CODE (base) == REG);
14147 /* Catch the case of <address> = <reg> + <reg> */
14148 if (GET_CODE (offset) == REG)
14150 int reg_offset = REGNO (offset);
14151 int reg_base = REGNO (base);
14152 int reg_dest = REGNO (operands[0]);
14154 /* Add the base and offset registers together into the
14155 higher destination register. */
14156 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
14157 reg_dest + 1, reg_base, reg_offset);
14159 /* Load the lower destination register from the address in
14160 the higher destination register. */
14161 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
14162 reg_dest, reg_dest + 1);
14164 /* Load the higher destination register from its own address
14165 plus 4. */
14166 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
14167 reg_dest + 1, reg_dest + 1);
14169 else
14171 /* Compute <address> + 4 for the high order load. */
14172 operands[2] = adjust_address (operands[1], SImode, 4);
14174 /* If the computed address is held in the low order register
14175 then load the high order register first, otherwise always
14176 load the low order register first. */
14177 if (REGNO (operands[0]) == REGNO (base))
14179 output_asm_insn ("ldr\t%H0, %2", operands);
14180 output_asm_insn ("ldr\t%0, %1", operands);
14182 else
14184 output_asm_insn ("ldr\t%0, %1", operands);
14185 output_asm_insn ("ldr\t%H0, %2", operands);
14188 break;
14190 case LABEL_REF:
14191 /* With no registers to worry about we can just load the value
14192 directly. */
14193 operands[2] = adjust_address (operands[1], SImode, 4);
14195 output_asm_insn ("ldr\t%H0, %2", operands);
14196 output_asm_insn ("ldr\t%0, %1", operands);
14197 break;
14199 default:
14200 gcc_unreachable ();
14203 return "";
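/* Output an ldmia/stmia pair that copies N (2 or 3) words. LDM/STM
   register lists must name registers in ascending order, so the
   scratch register operands are sorted with the swaps below first. */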
14206 const char *
14207 thumb_output_move_mem_multiple (int n, rtx *operands)
14209 rtx tmp;
14211 switch (n)
14213 case 2:
14214 if (REGNO (operands[4]) > REGNO (operands[5]))
14216 tmp = operands[4];
14217 operands[4] = operands[5];
14218 operands[5] = tmp;
14220 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
14221 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
14222 break;
14224 case 3:
14225 if (REGNO (operands[4]) > REGNO (operands[5]))
14227 tmp = operands[4];
14228 operands[4] = operands[5];
14229 operands[5] = tmp;
14231 if (REGNO (operands[5]) > REGNO (operands[6]))
14233 tmp = operands[5];
14234 operands[5] = operands[6];
14235 operands[6] = tmp;
14237 if (REGNO (operands[4]) > REGNO (operands[5]))
14239 tmp = operands[4];
14240 operands[4] = operands[5];
14241 operands[5] = tmp;
14244 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
14245 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
14246 break;
14248 default:
14249 gcc_unreachable ();
14252 return "";
14255 /* Output a call-via instruction for thumb state. */
14256 const char *
14257 thumb_call_via_reg (rtx reg)
14259 int regno = REGNO (reg);
14260 rtx *labelp;
14262 gcc_assert (regno < LR_REGNUM);
14264 /* If we are in the normal text section we can use a single instance
14265 per compilation unit. If we are doing function sections, then we need
14266 an entry per section, since we can't rely on reachability. */
14267 if (in_section == text_section)
14269 thumb_call_reg_needed = 1;
14271 if (thumb_call_via_label[regno] == NULL)
14272 thumb_call_via_label[regno] = gen_label_rtx ();
14273 labelp = thumb_call_via_label + regno;
14275 else
14277 if (cfun->machine->call_via[regno] == NULL)
14278 cfun->machine->call_via[regno] = gen_label_rtx ();
14279 labelp = cfun->machine->call_via + regno;
14282 output_asm_insn ("bl\t%a0", labelp);
14283 return "";
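/* The matching trampolines are emitted elsewhere; for the shared
   text-section case, arm_file_end below defines each referenced label
   as a "bx" stub, so an indirect call becomes a "bl" to the stub
   followed by the register branch. */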
14286 /* Routines for generating rtl. */
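/* Expand a small fixed-length memcpy: peel off 12-byte and 8-byte
   ldmia/stmia chunks (the movmem12b/movmem8b patterns advance both
   pointers via write-back), then finish with word, halfword and byte
   moves. */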
14287 void
14288 thumb_expand_movmemqi (rtx *operands)
14290 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
14291 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
14292 HOST_WIDE_INT len = INTVAL (operands[2]);
14293 HOST_WIDE_INT offset = 0;
14295 while (len >= 12)
14297 emit_insn (gen_movmem12b (out, in, out, in));
14298 len -= 12;
14301 if (len >= 8)
14303 emit_insn (gen_movmem8b (out, in, out, in));
14304 len -= 8;
14307 if (len >= 4)
14309 rtx reg = gen_reg_rtx (SImode);
14310 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
14311 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
14312 len -= 4;
14313 offset += 4;
14316 if (len >= 2)
14318 rtx reg = gen_reg_rtx (HImode);
14319 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
14320 plus_constant (in, offset))));
14321 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
14322 reg));
14323 len -= 2;
14324 offset += 2;
14327 if (len)
14329 rtx reg = gen_reg_rtx (QImode);
14330 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
14331 plus_constant (in, offset))));
14332 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
14333 reg));
14337 void
14338 thumb_reload_out_hi (rtx *operands)
14340 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
14343 /* Handle reading a half-word from memory during reload. */
14344 void
14345 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
14347 gcc_unreachable ();
14350 /* Return the length of a function name prefix
14351 that starts with the character 'c'. */
14352 static int
14353 arm_get_strip_length (int c)
14355 switch (c)
14357 ARM_NAME_ENCODING_LENGTHS
14358 default: return 0;
14362 /* Return a pointer to a function's name with any
14363 and all prefix encodings stripped from it. */
14364 const char *
14365 arm_strip_name_encoding (const char *name)
14367 int skip;
14369 while ((skip = arm_get_strip_length (* name)))
14370 name += skip;
14372 return name;
14375 /* If there is a '*' anywhere in the name's prefix, then
14376 emit the stripped name verbatim, otherwise prepend an
14377 underscore if leading underscores are being used. */
14378 void
14379 arm_asm_output_labelref (FILE *stream, const char *name)
14381 int skip;
14382 int verbatim = 0;
14384 while ((skip = arm_get_strip_length (* name)))
14386 verbatim |= (*name == '*');
14387 name += skip;
14390 if (verbatim)
14391 fputs (name, stream);
14392 else
14393 asm_fprintf (stream, "%U%s", name);
14396 static void
14397 arm_file_end (void)
14399 int regno;
14401 if (! thumb_call_reg_needed)
14402 return;
14404 switch_to_section (text_section);
14405 asm_fprintf (asm_out_file, "\t.code 16\n");
14406 ASM_OUTPUT_ALIGN (asm_out_file, 1);
14408 for (regno = 0; regno < LR_REGNUM; regno++)
14410 rtx label = thumb_call_via_label[regno];
14412 if (label != 0)
14414 targetm.asm_out.internal_label (asm_out_file, "L",
14415 CODE_LABEL_NUMBER (label));
14416 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
14421 rtx aof_pic_label;
14423 #ifdef AOF_ASSEMBLER
14424 /* Special functions, only needed when producing AOF-syntax assembly. */
14426 struct pic_chain
14428 struct pic_chain * next;
14429 const char * symname;
14432 static struct pic_chain * aof_pic_chain = NULL;
14435 aof_pic_entry (rtx x)
14437 struct pic_chain ** chainp;
14438 int offset;
14440 if (aof_pic_label == NULL_RTX)
14442 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14445 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14446 offset += 4, chainp = &(*chainp)->next)
14447 if ((*chainp)->symname == XSTR (x, 0))
14448 return plus_constant (aof_pic_label, offset);
14450 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14451 (*chainp)->next = NULL;
14452 (*chainp)->symname = XSTR (x, 0);
14453 return plus_constant (aof_pic_label, offset);
14456 void
14457 aof_dump_pic_table (FILE *f)
14459 struct pic_chain * chain;
14461 if (aof_pic_chain == NULL)
14462 return;
14464 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14465 PIC_OFFSET_TABLE_REGNUM,
14466 PIC_OFFSET_TABLE_REGNUM);
14467 fputs ("|x$adcons|\n", f);
14469 for (chain = aof_pic_chain; chain; chain = chain->next)
14471 fputs ("\tDCD\t", f);
14472 assemble_name (f, chain->symname);
14473 fputs ("\n", f);
14477 int arm_text_section_count = 1;
14479 /* A get_unnamed_section callback for switching to the text section. */
14481 static void
14482 aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14484 fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
14485 arm_text_section_count++);
14486 if (flag_pic)
14487 fprintf (asm_out_file, ", PIC, REENTRANT");
14488 fprintf (asm_out_file, "\n");
14491 static int arm_data_section_count = 1;
14493 /* A get_unnamed_section callback for switching to the data section. */
14495 static void
14496 aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14498 fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
14499 arm_data_section_count++);
14502 /* Implement TARGET_ASM_INIT_SECTIONS.
14504 AOF Assembler syntax is a nightmare when it comes to areas, since once
14505 we change from one area to another, we can't go back again. Instead,
14506 we must create a new area with the same attributes and add the new output
14507 to that. Unfortunately, there is nothing we can do here to guarantee that
14508 two areas with the same attributes will be linked adjacently in the
14509 resulting executable, so we have to be careful not to do pc-relative
14510 addressing across such boundaries. */
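/* Consequently each switch back to code or data opens a fresh area;
   the callbacks above emit, for example,
   AREA |C$$code1|, CODE, READONLY
   ...
   AREA |C$$code2|, CODE, READONLY
   with ", PIC, REENTRANT" appended when compiling PIC. */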
14512 static void
14513 aof_asm_init_sections (void)
14515 text_section = get_unnamed_section (SECTION_CODE,
14516 aof_output_text_section_asm_op, NULL);
14517 data_section = get_unnamed_section (SECTION_WRITE,
14518 aof_output_data_section_asm_op, NULL);
14519 readonly_data_section = text_section;
14522 void
14523 zero_init_section (void)
14525 static int zero_init_count = 1;
14527 fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
14528 in_section = NULL;
14531 /* The AOF assembler is religiously strict about declarations of
14532 imported and exported symbols, so that it is impossible to declare
14533 a function as imported near the beginning of the file, and then to
14534 export it later on. It is, however, possible to delay the decision
14535 until all the functions in the file have been compiled. To get
14536 around this, we maintain a list of the imports and exports, and
14537 delete from it any that are subsequently defined. At the end of
14538 compilation we spit the remainder of the list out before the END
14539 directive. */
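/* Sketch of the expected lifecycle: aof_add_import ("foo") queues an
   IMPORT, a later definition of foo removes it again via
   aof_delete_import ("foo"), and aof_dump_imports finally prints
   "IMPORT foo" only for the names still on the list. */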
14541 struct import
14543 struct import * next;
14544 const char * name;
14547 static struct import * imports_list = NULL;
14549 void
14550 aof_add_import (const char *name)
14552 struct import * new;
14554 for (new = imports_list; new; new = new->next)
14555 if (new->name == name)
14556 return;
14558 new = (struct import *) xmalloc (sizeof (struct import));
14559 new->next = imports_list;
14560 imports_list = new;
14561 new->name = name;
14564 void
14565 aof_delete_import (const char *name)
14567 struct import ** old;
14569 for (old = &imports_list; *old; old = & (*old)->next)
14571 if ((*old)->name == name)
14573 *old = (*old)->next;
14574 return;
14579 int arm_main_function = 0;
14581 static void
14582 aof_dump_imports (FILE *f)
14584 /* The AOF assembler needs this to cause the startup code to be extracted
14585 from the library. Bringing in __main causes the whole thing to work
14586 automagically. */
14587 if (arm_main_function)
14589 switch_to_section (text_section);
14590 fputs ("\tIMPORT __main\n", f);
14591 fputs ("\tDCD __main\n", f);
14594 /* Now dump the remaining imports. */
14595 while (imports_list)
14597 fprintf (f, "\tIMPORT\t");
14598 assemble_name (f, imports_list->name);
14599 fputc ('\n', f);
14600 imports_list = imports_list->next;
14604 static void
14605 aof_globalize_label (FILE *stream, const char *name)
14607 default_globalize_label (stream, name);
14608 if (! strcmp (name, "main"))
14609 arm_main_function = 1;
14612 static void
14613 aof_file_start (void)
14615 fputs ("__r0\tRN\t0\n", asm_out_file);
14616 fputs ("__a1\tRN\t0\n", asm_out_file);
14617 fputs ("__a2\tRN\t1\n", asm_out_file);
14618 fputs ("__a3\tRN\t2\n", asm_out_file);
14619 fputs ("__a4\tRN\t3\n", asm_out_file);
14620 fputs ("__v1\tRN\t4\n", asm_out_file);
14621 fputs ("__v2\tRN\t5\n", asm_out_file);
14622 fputs ("__v3\tRN\t6\n", asm_out_file);
14623 fputs ("__v4\tRN\t7\n", asm_out_file);
14624 fputs ("__v5\tRN\t8\n", asm_out_file);
14625 fputs ("__v6\tRN\t9\n", asm_out_file);
14626 fputs ("__sl\tRN\t10\n", asm_out_file);
14627 fputs ("__fp\tRN\t11\n", asm_out_file);
14628 fputs ("__ip\tRN\t12\n", asm_out_file);
14629 fputs ("__sp\tRN\t13\n", asm_out_file);
14630 fputs ("__lr\tRN\t14\n", asm_out_file);
14631 fputs ("__pc\tRN\t15\n", asm_out_file);
14632 fputs ("__f0\tFN\t0\n", asm_out_file);
14633 fputs ("__f1\tFN\t1\n", asm_out_file);
14634 fputs ("__f2\tFN\t2\n", asm_out_file);
14635 fputs ("__f3\tFN\t3\n", asm_out_file);
14636 fputs ("__f4\tFN\t4\n", asm_out_file);
14637 fputs ("__f5\tFN\t5\n", asm_out_file);
14638 fputs ("__f6\tFN\t6\n", asm_out_file);
14639 fputs ("__f7\tFN\t7\n", asm_out_file);
14640 switch_to_section (text_section);
14643 static void
14644 aof_file_end (void)
14646 if (flag_pic)
14647 aof_dump_pic_table (asm_out_file);
14648 arm_file_end ();
14649 aof_dump_imports (asm_out_file);
14650 fputs ("\tEND\n", asm_out_file);
14652 #endif /* AOF_ASSEMBLER */
14654 #ifndef ARM_PE
14655 /* Symbols in the text segment can be accessed without indirecting via the
14656 constant pool; it may take an extra binary operation, but this is still
14657 faster than indirecting via memory. Don't do this when not optimizing,
14658 since we won't be calculating all of the offsets necessary to do this
14659 simplification. */
14661 static void
14662 arm_encode_section_info (tree decl, rtx rtl, int first)
14664 /* This doesn't work with AOF syntax, since the string table may be in
14665 a different AREA. */
14666 #ifndef AOF_ASSEMBLER
14667 if (optimize > 0 && TREE_CONSTANT (decl))
14668 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14669 #endif
14671 /* If we are referencing a function that is weak then encode a long call
14672 flag in the function name; otherwise, if the function is static or
14673 known to be defined in this file, encode a short call flag. */
14674 if (first && DECL_P (decl))
14676 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14677 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14678 else if (! TREE_PUBLIC (decl))
14679 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14682 default_encode_section_info (decl, rtl, first);
14684 #endif /* !ARM_PE */
14686 static void
14687 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14689 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14690 && !strcmp (prefix, "L"))
14692 arm_ccfsm_state = 0;
14693 arm_target_insn = NULL;
14695 default_internal_label (stream, prefix, labelno);
14698 /* Output code to add DELTA to the first argument, and then jump
14699 to FUNCTION. Used for C++ multiple inheritance. */
14700 static void
14701 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14702 HOST_WIDE_INT delta,
14703 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14704 tree function)
14706 static int thunk_label = 0;
14707 char label[256];
14708 char labelpc[256];
14709 int mi_delta = delta;
14710 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14711 int shift = 0;
14712 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14713 ? 1 : 0);
14714 if (mi_delta < 0)
14715 mi_delta = - mi_delta;
14716 if (TARGET_THUMB)
14718 int labelno = thunk_label++;
14719 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14720 fputs ("\tldr\tr12, ", file);
14721 assemble_name (file, label);
14722 fputc ('\n', file);
14723 if (flag_pic)
14725 /* If we are generating PIC, the ldr instruction below loads
14726 "(target - 7) - .LTHUNKPCn" into r12. The pc reads as
14727 the address of the add + 8, so we have:
14729 r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
14730 = target + 1.
14732 Note that we have "+ 1" because some versions of GNU ld
14733 don't set the low bit of the result for R_ARM_REL32
14734 relocations against thumb function symbols. */
14735 ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
14736 assemble_name (file, labelpc);
14737 fputs (":\n", file);
14738 fputs ("\tadd\tr12, pc, r12\n", file);
14741 while (mi_delta != 0)
14743 if ((mi_delta & (3 << shift)) == 0)
14744 shift += 2;
14745 else
14747 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14748 mi_op, this_regno, this_regno,
14749 mi_delta & (0xff << shift));
14750 mi_delta &= ~(0xff << shift);
14751 shift += 8;
14754 if (TARGET_THUMB)
14756 fprintf (file, "\tbx\tr12\n");
14757 ASM_OUTPUT_ALIGN (file, 2);
14758 assemble_name (file, label);
14759 fputs (":\n", file);
14760 if (flag_pic)
14762 /* Output ".word .LTHUNKn-7-.LTHUNKPCn". */
14763 rtx tem = XEXP (DECL_RTL (function), 0);
14764 tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (-7));
14765 tem = gen_rtx_MINUS (GET_MODE (tem),
14766 tem,
14767 gen_rtx_SYMBOL_REF (Pmode,
14768 ggc_strdup (labelpc)));
14769 assemble_integer (tem, 4, BITS_PER_WORD, 1);
14771 else
14772 /* Output ".word .LTHUNKn". */
14773 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14775 else
14777 fputs ("\tb\t", file);
14778 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14779 if (NEED_PLT_RELOC)
14780 fputs ("(PLT)", file);
14781 fputc ('\n', file);
14786 arm_emit_vector_const (FILE *file, rtx x)
14788 int i;
14789 const char * pattern;
14791 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14793 switch (GET_MODE (x))
14795 case V2SImode: pattern = "%08x"; break;
14796 case V4HImode: pattern = "%04x"; break;
14797 case V8QImode: pattern = "%02x"; break;
14798 default: gcc_unreachable ();
14801 fprintf (file, "0x");
14802 for (i = CONST_VECTOR_NUNITS (x); i--;)
14804 rtx element;
14806 element = CONST_VECTOR_ELT (x, i);
14807 fprintf (file, pattern, INTVAL (element));
14810 return 1;
14813 const char *
14814 arm_output_load_gr (rtx *operands)
14816 rtx reg;
14817 rtx offset;
14818 rtx wcgr;
14819 rtx sum;
14821 if (GET_CODE (operands [1]) != MEM
14822 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14823 || GET_CODE (reg = XEXP (sum, 0)) != REG
14824 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14825 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14826 return "wldrw%?\t%0, %1";
14828 /* Fix up an out-of-range load of a GR register. */
14829 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14830 wcgr = operands[0];
14831 operands[0] = reg;
14832 output_asm_insn ("ldr%?\t%0, %1", operands);
14834 operands[0] = wcgr;
14835 operands[1] = reg;
14836 output_asm_insn ("tmcr%?\t%0, %1", operands);
14837 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14839 return "";
14842 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14844 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14845 named arg and all anonymous args onto the stack.
14846 XXX I know the prologue shouldn't be pushing registers, but it is faster
14847 that way. */
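/* Worked example (assuming the usual argument-register usage): for
   "int f (int a, ...)" the single named argument occupies r0, leaving
   NUM_ARG_REGS - 1 = 3 candidate registers, so *pretend_size becomes
   3 * UNITS_PER_WORD = 12 and the prologue pushes r1-r3 contiguously
   below the stacked anonymous arguments. */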
14849 static void
14850 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14851 enum machine_mode mode ATTRIBUTE_UNUSED,
14852 tree type ATTRIBUTE_UNUSED,
14853 int *pretend_size,
14854 int second_time ATTRIBUTE_UNUSED)
14856 cfun->machine->uses_anonymous_args = 1;
14857 if (cum->nregs < NUM_ARG_REGS)
14858 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14861 /* Return nonzero if the CONSUMER instruction (a store) does not need
14862 PRODUCER's value to calculate the address. */
14865 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14867 rtx value = PATTERN (producer);
14868 rtx addr = PATTERN (consumer);
14870 if (GET_CODE (value) == COND_EXEC)
14871 value = COND_EXEC_CODE (value);
14872 if (GET_CODE (value) == PARALLEL)
14873 value = XVECEXP (value, 0, 0);
14874 value = XEXP (value, 0);
14875 if (GET_CODE (addr) == COND_EXEC)
14876 addr = COND_EXEC_CODE (addr);
14877 if (GET_CODE (addr) == PARALLEL)
14878 addr = XVECEXP (addr, 0, 0);
14879 addr = XEXP (addr, 0);
14881 return !reg_overlap_mentioned_p (value, addr);
14884 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14885 have an early register shift value or amount dependency on the
14886 result of PRODUCER. */
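/* Hypothetical example: if PRODUCER sets r1 and CONSUMER is
   "add r0, r2, r1, lsl #2", then r1 feeds the shifter early and this
   returns 0, keeping the insns apart; "add r0, r1, r2, lsl #2" has no
   early dependency through the shifter and returns nonzero. */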
14889 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14891 rtx value = PATTERN (producer);
14892 rtx op = PATTERN (consumer);
14893 rtx early_op;
14895 if (GET_CODE (value) == COND_EXEC)
14896 value = COND_EXEC_CODE (value);
14897 if (GET_CODE (value) == PARALLEL)
14898 value = XVECEXP (value, 0, 0);
14899 value = XEXP (value, 0);
14900 if (GET_CODE (op) == COND_EXEC)
14901 op = COND_EXEC_CODE (op);
14902 if (GET_CODE (op) == PARALLEL)
14903 op = XVECEXP (op, 0, 0);
14904 op = XEXP (op, 1);
14906 early_op = XEXP (op, 0);
14907 /* This is either an actual independent shift, or a shift applied to
14908 the first operand of another operation. We want the whole shift
14909 operation. */
14910 if (GET_CODE (early_op) == REG)
14911 early_op = op;
14913 return !reg_overlap_mentioned_p (value, early_op);
14916 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14917 have an early register shift value dependency on the result of
14918 PRODUCER. */
14921 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14923 rtx value = PATTERN (producer);
14924 rtx op = PATTERN (consumer);
14925 rtx early_op;
14927 if (GET_CODE (value) == COND_EXEC)
14928 value = COND_EXEC_CODE (value);
14929 if (GET_CODE (value) == PARALLEL)
14930 value = XVECEXP (value, 0, 0);
14931 value = XEXP (value, 0);
14932 if (GET_CODE (op) == COND_EXEC)
14933 op = COND_EXEC_CODE (op);
14934 if (GET_CODE (op) == PARALLEL)
14935 op = XVECEXP (op, 0, 0);
14936 op = XEXP (op, 1);
14938 early_op = XEXP (op, 0);
14940 /* This is either an actual independent shift, or a shift applied to
14941 the first operand of another operation. We want the value being
14942 shifted, in either case. */
14943 if (GET_CODE (early_op) != REG)
14944 early_op = XEXP (early_op, 0);
14946 return !reg_overlap_mentioned_p (value, early_op);
14949 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14950 have an early register mult dependency on the result of
14951 PRODUCER. */
14954 arm_no_early_mul_dep (rtx producer, rtx consumer)
14956 rtx value = PATTERN (producer);
14957 rtx op = PATTERN (consumer);
14959 if (GET_CODE (value) == COND_EXEC)
14960 value = COND_EXEC_CODE (value);
14961 if (GET_CODE (value) == PARALLEL)
14962 value = XVECEXP (value, 0, 0);
14963 value = XEXP (value, 0);
14964 if (GET_CODE (op) == COND_EXEC)
14965 op = COND_EXEC_CODE (op);
14966 if (GET_CODE (op) == PARALLEL)
14967 op = XVECEXP (op, 0, 0);
14968 op = XEXP (op, 1);
14970 return (GET_CODE (op) == PLUS
14971 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14975 /* We can't rely on the caller doing the proper promotion when
14976 using APCS or ATPCS. */
14978 static bool
14979 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14981 return !TARGET_AAPCS_BASED;
14985 /* AAPCS based ABIs use short enums by default. */
14987 static bool
14988 arm_default_short_enums (void)
14990 return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
14994 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14996 static bool
14997 arm_align_anon_bitfield (void)
14999 return TARGET_AAPCS_BASED;
15003 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
15005 static tree
15006 arm_cxx_guard_type (void)
15008 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
15012 /* The EABI says test the least significant bit of a guard variable. */
15014 static bool
15015 arm_cxx_guard_mask_bit (void)
15017 return TARGET_AAPCS_BASED;
15021 /* The EABI specifies that all array cookies are 8 bytes long. */
15023 static tree
15024 arm_get_cookie_size (tree type)
15026 tree size;
15028 if (!TARGET_AAPCS_BASED)
15029 return default_cxx_get_cookie_size (type);
15031 size = build_int_cst (sizetype, 8);
15032 return size;
15036 /* The EABI says that array cookies should also contain the element size. */
15038 static bool
15039 arm_cookie_has_size (void)
15041 return TARGET_AAPCS_BASED;
15045 /* The EABI says constructors and destructors should return a pointer to
15046 the object constructed/destroyed. */
15048 static bool
15049 arm_cxx_cdtor_returns_this (void)
15051 return TARGET_AAPCS_BASED;
15054 /* The EABI says that an inline function may never be the key
15055 method. */
15057 static bool
15058 arm_cxx_key_method_may_be_inline (void)
15060 return !TARGET_AAPCS_BASED;
15063 static void
15064 arm_cxx_determine_class_data_visibility (tree decl)
15066 if (!TARGET_AAPCS_BASED)
15067 return;
15069 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
15070 is exported. However, on systems without dynamic vague linkage,
15071 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
15072 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
15073 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
15074 else
15075 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
15076 DECL_VISIBILITY_SPECIFIED (decl) = 1;
15079 static bool
15080 arm_cxx_class_data_always_comdat (void)
15082 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
15083 vague linkage if the class has no key function. */
15084 return !TARGET_AAPCS_BASED;
15088 /* The EABI says __aeabi_atexit should be used to register static
15089 destructors. */
15091 static bool
15092 arm_cxx_use_aeabi_atexit (void)
15094 return TARGET_AAPCS_BASED;
15098 void
15099 arm_set_return_address (rtx source, rtx scratch)
15101 arm_stack_offsets *offsets;
15102 HOST_WIDE_INT delta;
15103 rtx addr;
15104 unsigned long saved_regs;
15106 saved_regs = arm_compute_save_reg_mask ();
15108 if ((saved_regs & (1 << LR_REGNUM)) == 0)
15109 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15110 else
15112 if (frame_pointer_needed)
15113 addr = plus_constant(hard_frame_pointer_rtx, -4);
15114 else
15116 /* LR will be the first saved register. */
15117 offsets = arm_get_frame_offsets ();
15118 delta = offsets->outgoing_args - (offsets->frame + 4);
15121 if (delta >= 4096)
15123 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
15124 GEN_INT (delta & ~4095)));
15125 addr = scratch;
15126 delta &= 4095;
15128 else
15129 addr = stack_pointer_rtx;
15131 addr = plus_constant (addr, delta);
15133 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15138 void
15139 thumb_set_return_address (rtx source, rtx scratch)
15141 arm_stack_offsets *offsets;
15142 HOST_WIDE_INT delta;
15143 int reg;
15144 rtx addr;
15145 unsigned long mask;
15147 emit_insn (gen_rtx_USE (VOIDmode, source));
15149 mask = thumb_compute_save_reg_mask ();
15150 if (mask & (1 << LR_REGNUM))
15152 offsets = arm_get_frame_offsets ();
15154 /* Find the saved regs. */
15155 if (frame_pointer_needed)
15157 delta = offsets->soft_frame - offsets->saved_args;
15158 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
15160 else
15162 delta = offsets->outgoing_args - offsets->saved_args;
15163 reg = SP_REGNUM;
15165 /* Allow for the stack frame. */
15166 if (TARGET_BACKTRACE)
15167 delta -= 16;
15168 /* The link register is always the first saved register. */
15169 delta -= 4;
15171 /* Construct the address. */
15172 addr = gen_rtx_REG (SImode, reg);
15173 if ((reg != SP_REGNUM && delta >= 128)
15174 || delta >= 1024)
15176 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
15177 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
15178 addr = scratch;
15180 else
15181 addr = plus_constant (addr, delta);
15183 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15185 else
15186 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15189 /* Implements target hook vector_mode_supported_p. */
15190 bool
15191 arm_vector_mode_supported_p (enum machine_mode mode)
15193 if ((mode == V2SImode)
15194 || (mode == V4HImode)
15195 || (mode == V8QImode))
15196 return true;
15198 return false;
15201 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
15202 ARM insns and therefore guarantee that the shift count is modulo 256.
15203 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
15204 guarantee no particular behavior for out-of-range counts. */
15206 static unsigned HOST_WIDE_INT
15207 arm_shift_truncation_mask (enum machine_mode mode)
15209 return mode == SImode ? 255 : 0;
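/* So, for example, the optimizers may delete a redundant masking of an
   SImode shift count with 255, since shift-by-register instructions
   already use only the least significant byte of the count. For
   DImode we return 0: no truncation may be assumed. */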
15213 /* Map internal gcc register numbers to DWARF2 register numbers. */
15215 unsigned int
15216 arm_dbx_register_number (unsigned int regno)
15218 if (regno < 16)
15219 return regno;
15221 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
15222 compatibility. The EABI defines them as registers 96-103. */
15223 if (IS_FPA_REGNUM (regno))
15224 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
15226 if (IS_VFP_REGNUM (regno))
15227 return 64 + regno - FIRST_VFP_REGNUM;
15229 if (IS_IWMMXT_GR_REGNUM (regno))
15230 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
15232 if (IS_IWMMXT_REGNUM (regno))
15233 return 112 + regno - FIRST_IWMMXT_REGNUM;
15235 gcc_unreachable ();
15239 #ifdef TARGET_UNWIND_INFO
15240 /* Emit unwind directives for a store-multiple instruction. This should
15241 only ever be generated by the function prologue code, so we expect it
15242 to have a particular form. */
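/* For example, a prologue store-multiple that saves r4, r5 and lr with
   a single stack decrement is annotated as
   .save {r4, r5, lr}
   and a push that also stores the never-restored PC gets an extra
   ".pad #4" for that word. */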
15244 static void
15245 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
15247 int i;
15248 HOST_WIDE_INT offset;
15249 HOST_WIDE_INT nregs;
15250 int reg_size;
15251 unsigned reg;
15252 unsigned lastreg;
15253 rtx e;
15255 /* First insn will adjust the stack pointer. */
15256 e = XVECEXP (p, 0, 0);
15257 if (GET_CODE (e) != SET
15258 || GET_CODE (XEXP (e, 0)) != REG
15259 || REGNO (XEXP (e, 0)) != SP_REGNUM
15260 || GET_CODE (XEXP (e, 1)) != PLUS)
15261 abort ();
15263 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
15264 nregs = XVECLEN (p, 0) - 1;
15266 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
15267 if (reg < 16)
15269 /* The function prologue may also push pc, but does not annotate it,
15270 as it is never restored. We turn this into a stack pointer adjustment. */
15271 if (nregs * 4 == offset - 4)
15273 fprintf (asm_out_file, "\t.pad #4\n");
15274 offset -= 4;
15276 reg_size = 4;
15278 else if (IS_VFP_REGNUM (reg))
15280 /* VFP register saves (FSTMX format) use an additional word. */
15281 offset -= 4;
15282 reg_size = 8;
15284 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
15286 /* FPA registers are done differently. */
15287 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
15288 return;
15290 else
15291 /* Unknown register type. */
15292 abort ();
15294 /* If the stack increment doesn't match the size of the saved registers,
15295 something has gone horribly wrong. */
15296 if (offset != nregs * reg_size)
15297 abort ();
15299 fprintf (asm_out_file, "\t.save {");
15301 offset = 0;
15302 lastreg = 0;
15303 /* The remaining insns will describe the stores. */
15304 for (i = 1; i <= nregs; i++)
15306 /* Expect (set (mem <addr>) (reg)).
15307 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
15308 e = XVECEXP (p, 0, i);
15309 if (GET_CODE (e) != SET
15310 || GET_CODE (XEXP (e, 0)) != MEM
15311 || GET_CODE (XEXP (e, 1)) != REG)
15312 abort ();
15314 reg = REGNO (XEXP (e, 1));
15315 if (reg < lastreg)
15316 abort ();
15318 if (i != 1)
15319 fprintf (asm_out_file, ", ");
15320 /* We can't use %r for vfp because we need to use the
15321 double precision register names. */
15322 if (IS_VFP_REGNUM (reg))
15323 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
15324 else
15325 asm_fprintf (asm_out_file, "%r", reg);
15327 #ifdef ENABLE_CHECKING
15328 /* Check that the addresses are consecutive. */
15329 e = XEXP (XEXP (e, 0), 0);
15330 if (GET_CODE (e) == PLUS)
15332 offset += reg_size;
15333 if (GET_CODE (XEXP (e, 0)) != REG
15334 || REGNO (XEXP (e, 0)) != SP_REGNUM
15335 || GET_CODE (XEXP (e, 1)) != CONST_INT
15336 || offset != INTVAL (XEXP (e, 1)))
15337 abort ();
15339 else if (i != 1
15340 || GET_CODE (e) != REG
15341 || REGNO (e) != SP_REGNUM)
15342 abort ();
15343 #endif
15345 fprintf (asm_out_file, "}\n");
15348 /* Emit unwind directives for a SET. */
15350 static void
15351 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
15353 rtx e0;
15354 rtx e1;
15356 e0 = XEXP (p, 0);
15357 e1 = XEXP (p, 1);
15358 switch (GET_CODE (e0))
15360 case MEM:
15361 /* Pushing a single register. */
15362 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
15363 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
15364 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
15365 abort ();
15367 asm_fprintf (asm_out_file, "\t.save ");
15368 if (IS_VFP_REGNUM (REGNO (e1)))
15369 asm_fprintf(asm_out_file, "{d%d}\n",
15370 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
15371 else
15372 asm_fprintf(asm_out_file, "{%r}\n", REGNO (e1));
15373 break;
15375 case REG:
15376 if (REGNO (e0) == SP_REGNUM)
15378 /* A stack pointer adjustment. */
15379 if (GET_CODE (e1) != PLUS
15380 || GET_CODE (XEXP (e1, 0)) != REG
15381 || REGNO (XEXP (e1, 0)) != SP_REGNUM
15382 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15383 abort ();
15385 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
15386 -INTVAL (XEXP (e1, 1)));
15388 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
15390 HOST_WIDE_INT offset;
15391 unsigned reg;
15393 if (GET_CODE (e1) == PLUS)
15395 if (GET_CODE (XEXP (e1, 0)) != REG
15396 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15397 abort ();
15398 reg = REGNO (XEXP (e1, 0));
15399 offset = INTVAL (XEXP (e1, 1));
15400 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
15401 HARD_FRAME_POINTER_REGNUM, reg,
15402 INTVAL (XEXP (e1, 1)));
15404 else if (GET_CODE (e1) == REG)
15406 reg = REGNO (e1);
15407 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
15408 HARD_FRAME_POINTER_REGNUM, reg);
15410 else
15411 abort ();
15413 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
15415 /* Move from sp to reg. */
15416 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
15418 else if (GET_CODE (e1) == PLUS
15419 && GET_CODE (XEXP (e1, 0)) == REG
15420 && REGNO (XEXP (e1, 0)) == SP_REGNUM
15421 && GET_CODE (XEXP (e1, 1)) == CONST_INT)
15423 /* Set reg to offset from sp. */
15424 asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
15425 REGNO (e0), (int)INTVAL(XEXP (e1, 1)));
15427 else
15428 abort ();
15429 break;
15431 default:
15432 abort ();
15437 /* Emit unwind directives for the given insn. */
15439 static void
15440 arm_unwind_emit (FILE * asm_out_file, rtx insn)
15442 rtx pat;
15444 if (!ARM_EABI_UNWIND_TABLES)
15445 return;
15447 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
15448 return;
15450 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
15451 if (pat)
15452 pat = XEXP (pat, 0);
15453 else
15454 pat = PATTERN (insn);
15456 switch (GET_CODE (pat))
15458 case SET:
15459 arm_unwind_emit_set (asm_out_file, pat);
15460 break;
15462 case SEQUENCE:
15463 /* Store multiple. */
15464 arm_unwind_emit_stm (asm_out_file, pat);
15465 break;
15467 default:
15468 abort();
15473 /* Output a reference from a function exception table to the type_info
15474 object X. The EABI specifies that the symbol should be relocated by
15475 an R_ARM_TARGET2 relocation. */
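/* For a handler catching "int" this emits (mangled name shown for
   illustration)
   .word _ZTIi(TARGET2)
   whereas integer filter values are emitted without the relocation. */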
15477 static bool
15478 arm_output_ttype (rtx x)
15480 fputs ("\t.word\t", asm_out_file);
15481 output_addr_const (asm_out_file, x);
15482 /* Use special relocations for symbol references. */
15483 if (GET_CODE (x) != CONST_INT)
15484 fputs ("(TARGET2)", asm_out_file);
15485 fputc ('\n', asm_out_file);
15487 return TRUE;
15489 #endif /* TARGET_UNWIND_INFO */
15492 /* Output unwind directives for the start/end of a function. */
15494 void
15495 arm_output_fn_unwind (FILE * f, bool prologue)
15497 if (!ARM_EABI_UNWIND_TABLES)
15498 return;
15500 if (prologue)
15501 fputs ("\t.fnstart\n", f);
15502 else
15503 fputs ("\t.fnend\n", f);
15506 static bool
15507 arm_emit_tls_decoration (FILE *fp, rtx x)
15509 enum tls_reloc reloc;
15510 rtx val;
15512 val = XVECEXP (x, 0, 0);
15513 reloc = INTVAL (XVECEXP (x, 0, 1));
15515 output_addr_const (fp, val);
15517 switch (reloc)
15519 case TLS_GD32:
15520 fputs ("(tlsgd)", fp);
15521 break;
15522 case TLS_LDM32:
15523 fputs ("(tlsldm)", fp);
15524 break;
15525 case TLS_LDO32:
15526 fputs ("(tlsldo)", fp);
15527 break;
15528 case TLS_IE32:
15529 fputs ("(gottpoff)", fp);
15530 break;
15531 case TLS_LE32:
15532 fputs ("(tpoff)", fp);
15533 break;
15534 default:
15535 gcc_unreachable ();
15538 switch (reloc)
15540 case TLS_GD32:
15541 case TLS_LDM32:
15542 case TLS_IE32:
15543 fputs (" + (. - ", fp);
15544 output_addr_const (fp, XVECEXP (x, 0, 2));
15545 fputs (" - ", fp);
15546 output_addr_const (fp, XVECEXP (x, 0, 3));
15547 fputc (')', fp);
15548 break;
15549 default:
15550 break;
15553 return TRUE;
15556 bool
15557 arm_output_addr_const_extra (FILE *fp, rtx x)
15559 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
15560 return arm_emit_tls_decoration (fp, x);
15561 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
15563 char label[256];
15564 int labelno = INTVAL (XVECEXP (x, 0, 0));
15566 ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
15567 assemble_name_raw (fp, label);
15569 return TRUE;
15571 else if (GET_CODE (x) == CONST_VECTOR)
15572 return arm_emit_vector_const (fp, x);
15574 return FALSE;
15577 #include "gt-arm.h"