/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
			     HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
					   int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static rtx is_jump_table (rtx);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
			       rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
				 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static rtx emit_set_insn (rtx, rtx);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
static void aof_asm_init_sections (void);
#endif
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
				   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
static bool arm_cannot_copy_insn_p (rtx);
static bool arm_tls_symbol_p (rtx x);

/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef  TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO  arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO  arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS  arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_NARROW_VOLATILE_BITFIELD
#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef  TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef  TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef  TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef  TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef  TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef  TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

#undef  TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p

#ifdef HAVE_AS_TLS
#undef  TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef  TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE *asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Which thread pointer model to use.  */
enum arm_tp_type target_thread_pointer = TP_AUTO;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */
#define FL_ARCH6K     (1 << 15)       /* Architecture rel 6 K extensions.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2	0
#define FL_FOR_ARCH3	FL_MODE32
#define FL_FOR_ARCH3M	(FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4	(FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T	(FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5	(FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T	(FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E	(FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE	(FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ	FL_FOR_ARCH5TE
#define FL_FOR_ARCH6	(FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J	FL_FOR_ARCH6
#define FL_FOR_ARCH6K	(FL_FOR_ARCH6 | FL_ARCH6K)
#define FL_FOR_ARCH6Z	FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK	FL_FOR_ARCH6K
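
/* For example, expanding the definitions above, FL_FOR_ARCH5TE comes out
   to FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB.  */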

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip supports the ARM 6K extensions.  */
int arm_arch6k = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
unsigned arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string	name		processors  */
  { NULL,	"-mcpu=",	all_cores },
  { NULL,	"-march=",	all_architectures },
  { NULL,	"-mtune=",	all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",	FPUTYPE_FPA},
  {"fpe2",	FPUTYPE_FPA_EMU2},
  {"fpe3",	FPUTYPE_FPA_EMU2},
  {"maverick",	FPUTYPE_MAVERICK},
  {"vfp",	FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,		/* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,	/* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP		/* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"hard",	ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",		ARM_ABI_APCS},
  {"atpcs",		ARM_ABI_ATPCS},
  {"aapcs",		ARM_ABI_AAPCS},
  {"iwmmxt",		ARM_ABI_IWMMXT},
  {"aapcs-linux",	ARM_ABI_AAPCS_LINUX}
};

/* Supported TLS relocations.  */

enum tls_reloc {
  TLS_GD32,
  TLS_LDM32,
  TLS_LDO32,
  TLS_IE32,
  TLS_LE32
};
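
/* These are the standard TLS access models: TLS_GD32 is global dynamic,
   TLS_LDM32/TLS_LDO32 are the module and offset halves of local dynamic,
   TLS_IE32 is initial exec, and TLS_LE32 is local exec.  */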

/* Emit an insn that's a simple single-set.  Both the operands must be
   known to be valid.  */
inline static rtx
emit_set_insn (rtx x, rtx y)
{
  return emit_insn (gen_rtx_SET (VOIDmode, x, y));
}

/* Return the number of bits set in VALUE.  */
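/* This relies on the fact that VALUE & (VALUE - 1) clears the
   least-significant set bit, so the loop below iterates exactly once per
   set bit (e.g. twice for 0x11).  */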
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}

/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
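
  /* (The remainder comes back in the registers that follow the quotient:
     r1 for __aeabi_idivmod, {r2, r3} for __aeabi_ldivmod.)  */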
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");

  /* For SImode division the ABI provides div-without-mod routines,
     which are faster.  */
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}

/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
	{
	  const struct processors * sel;

	  for (sel = ptr->processors; sel->name != NULL; sel++)
	    if (streq (ptr->string, sel->name))
	      {
		/* Set the architecture define.  */
		if (i != ARM_OPT_SET_TUNE)
		  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

		/* Determine the processor core for which we should
		   tune code-generation.  */
		if (/* -mcpu= is a sensible default.  */
		    i == ARM_OPT_SET_CPU
		    /* -mtune= overrides -mcpu= and -march=.  */
		    || i == ARM_OPT_SET_TUNE)
		  arm_tune = (enum processor_type) (sel - ptr->processors);

		/* Remember the CPU associated with this architecture.
		   If no other option is used to set the CPU type,
		   we'll use this to guess the most suitable tuning
		   options.  */
		if (i == ARM_OPT_SET_ARCH)
		  target_arch_cpu = sel->core;

		if (i != ARM_OPT_SET_TUNE)
		  {
		    /* If we have been given an architecture and a processor
		       make sure that they are compatible.  We only generate
		       a warning though, and we prefer the CPU over the
		       architecture.  */
		    if (insn_flags != 0 && (insn_flags ^ sel->flags))
		      warning (0, "switch -mcpu=%s conflicts with -march= switch",
			       ptr->string);

		    insn_flags = sel->flags;
		  }

		break;
	      }

	  if (sel->name == NULL)
	    error ("bad value (%s) for %s switch", ptr->string, ptr->name);
	}
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
	{
#ifdef SUBTARGET_CPU_DEFAULT
	  /* Use the subtarget default CPU if none was specified by
	     configure.  */
	  cpu = SUBTARGET_CPU_DEFAULT;
#endif
	  /* Default to ARM6.  */
	  if (cpu == arm_none)
	    cpu = arm6;
	}
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
	 switch that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
	{
	  sought |= (FL_THUMB | FL_MODE32);

	  /* There are no ARM processors that support both APCS-26 and
	     interworking.  Therefore we force FL_MODE26 to be removed
	     from insn_flags here (if it was set), so that the search
	     below will always be able to find a compatible processor.  */
	  insn_flags &= ~FL_MODE26;
	}

      if (sought != 0 && ((sought & insn_flags) != sought))
	{
	  /* Try to locate a CPU type that supports all of the abilities
	     of the default CPU, plus the extra abilities requested by
	     the user.  */
	  for (sel = all_cores; sel->name != NULL; sel++)
	    if ((sel->flags & sought) == (sought | insn_flags))
	      break;

	  if (sel->name == NULL)
	    {
	      unsigned current_bit_count = 0;
	      const struct processors * best_fit = NULL;

	      /* Ideally we would like to issue an error message here
		 saying that it was not possible to find a CPU compatible
		 with the default CPU, but which also supports the command
		 line options specified by the programmer, and so they
		 ought to use the -mcpu=<name> command line option to
		 override the default CPU type.

		 If we cannot find a cpu that has both the
		 characteristics of the default cpu and the given
		 command line options we scan the array again looking
		 for a best match.  */
	      for (sel = all_cores; sel->name != NULL; sel++)
		if ((sel->flags & sought) == sought)
		  {
		    unsigned count;

		    count = bit_count (sel->flags & insn_flags);

		    if (count >= current_bit_count)
		      {
			best_fit = sel;
			current_bit_count = count;
		      }
		  }

	      gcc_assert (best_fit);
	      sel = best_fit;
	    }

	  insn_flags = sel->flags;
	}
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
	arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking" );
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic && TARGET_SINGLE_PIC_BASE)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
	{
	  if (streq (arm_all_abis[i].name, target_abi_name))
	    {
	      arm_abi = arm_all_abis[i].abi_type;
	      break;
	    }
	}
      if (i == ARRAY_SIZE (arm_all_abis))
	error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
	target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
	target_fpu_name = "fpe3";
      else
	error ("invalid floating point emulation option: -mfpe=%s",
	       target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
	{
	  if (streq (all_fpus[i].name, target_fpu_name))
	    {
	      arm_fpu_arch = all_fpus[i].fpu;
	      arm_fpu_tune = arm_fpu_arch;
	      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
	      break;
	    }
	}
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
	error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
	arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
	arm_fpu_arch = FPUTYPE_MAVERICK;
      else
	arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
	arm_fpu_tune = FPUTYPE_FPA;
      else
	arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
	{
	  if (streq (all_float_abis[i].name, target_float_abi_name))
	    {
	      arm_float_abi = all_float_abis[i].abi_type;
	      break;
	    }
	}
      if (i == ARRAY_SIZE (all_float_abis))
	error ("invalid floating point abi: -mfloat-abi=%s",
	       target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* FPA and iWMMXt are incompatible because the insn encodings overlap.
     VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
     will ever exist.  GCC makes no attempt to support this combination.  */
  if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
    sorry ("iWMMXt and hardware floating point");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  if (target_thread_switch)
    {
      if (strcmp (target_thread_switch, "soft") == 0)
	target_thread_pointer = TP_SOFT;
      else if (strcmp (target_thread_switch, "auto") == 0)
	target_thread_pointer = TP_AUTO;
      else if (strcmp (target_thread_switch, "cp15") == 0)
	target_thread_pointer = TP_CP15;
      else
	error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
    }

  /* Use the cp15 method if it is available.  */
  if (target_thread_pointer == TP_AUTO)
    {
      if (arm_arch6k && !TARGET_THUMB)
	target_thread_pointer = TP_CP15;
      else
	target_thread_pointer = TP_SOFT;
    }

  if (TARGET_HARD_TP && TARGET_THUMB)
    error ("can not use -mtp=cp15 with -mthumb");

  /* Override the default structure alignment for AAPCS ABI.  */
  if (TARGET_AAPCS_BASED)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
	  || (ARM_DOUBLEWORD_ALIGN && size == 64))
	arm_structure_size_boundary = size;
      else
	warning (0, "structure size boundary can only be set to %s",
		 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
	warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
	       || pic_register == HARD_FRAME_POINTER_REGNUM
	       || pic_register == STACK_POINTER_REGNUM
	       || pic_register >= PC_REGNUM)
	error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
	arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
	 are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
	 2 cycles to load a constant, and the load scheduler may well
	 reduce that to 1.  */
      if (arm_ld_sched)
	arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
	 to achieve a good schedule, so it's faster to synthesize
	 constants that can be done in two insns.  */
      if (arm_tune_xscale)
	arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
	 that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
	max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const  arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
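
/* For example, a handler declared as

     void handler (void) __attribute__ ((interrupt ("IRQ")));

   matches the "IRQ" entry above and is compiled as an ARM_FT_ISR
   function.  */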

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && (TREE_NOTHROW (current_function_decl)
	  || !(flag_unwind_tables
	       || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
	 is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
	 the default abi) ...  */
      if (!call_used_regs[3])
	return 0;

      /* ... that it isn't being used for a return value ... */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
	return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
	{
	  gcc_assert (GET_CODE (sibling) == CALL_INSN);

	  if (find_regno_fusage (sibling, USE, 3))
	    return 0;
	}

      /* ... and that there are no call-saved registers in r0-r2
	 (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
	return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
	 conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
	return 0;

      if (flag_pic
	  && arm_pic_register != INVALID_REGNUM
	  && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
	return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
	return 0;

  return 1;
}

/* Return TRUE if int I is a valid immediate ARM constant.  */
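/* Such a constant is an 8-bit value rotated right by an even number of
   bits within a 32-bit word: 0xff, 0xff000000 and 0x000ff000 are all
   representable, whereas 0x101 or 0xff0000ff are not.  */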
int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
	  != ((~(unsigned HOST_WIDE_INT) 0)
	      & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
	   && ((i & ~0xc000003f) == 0
	       || (i & ~0xf000000f) == 0
	       || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:		/* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */
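/* For example, a SET of 0x0000ff00 needs just one insn, while a SET of
   0x00ff00ff would typically be synthesized with two, e.g. "mov rD, #0xff"
   followed by "orr rD, rD, #0xff0000".  */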
1662 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1663 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1665 rtx cond;
1667 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1668 cond = COND_EXEC_TEST (PATTERN (insn));
1669 else
1670 cond = NULL_RTX;
1672 if (subtargets || code == SET
1673 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1674 && REGNO (target) != REGNO (source)))
1676 /* After arm_reorg has been called, we can't fix up expensive
1677 constants by pushing them into memory so we must synthesize
1678 them in-line, regardless of the cost. This is only likely to
1679 be more costly on chips that have load delay slots and we are
1680 compiling without running the scheduler (so no splitting
1681 occurred before the final instruction emission).
1683 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1685 if (!after_arm_reorg
1686 && !cond
1687 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1688 1, 0)
1689 > arm_constant_limit + (code != SET)))
1691 if (code == SET)
1693 /* Currently SET is the only monadic value for CODE; all
1694 the rest are dyadic. */
1695 emit_set_insn (target, GEN_INT (val));
1696 return 1;
1698 else
1700 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1702 emit_set_insn (temp, GEN_INT (val));
1703 /* For MINUS, the constant is the minuend; SOURCE is subtracted
1704 from it, since we never have subtraction of a constant. */
1705 if (code == MINUS)
1706 emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
1707 else
1708 emit_set_insn (target,
1709 gen_rtx_fmt_ee (code, mode, source, temp));
1710 return 2;
1715 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1716 1);
1719 static int
1720 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1722 HOST_WIDE_INT temp1;
1723 int num_insns = 0;
1724 do
1725 {
1726 int end;
1728 if (i <= 0)
1729 i += 32;
1730 if (remainder & (3 << (i - 2)))
1732 end = i - 8;
1733 if (end < 0)
1734 end += 32;
1735 temp1 = remainder & ((0x0ff << end)
1736 | ((i < end) ? (0xff >> (32 - end)) : 0));
1737 remainder &= ~temp1;
1738 num_insns++;
1739 i -= 6;
1741 i -= 2;
1742 } while (remainder);
1743 return num_insns;
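/* Worked example (illustrative): for REMAINDER == 0x00ff00ff and I == 0
   (which immediately wraps to 32), the loop above peels one rotated
   8-bit chunk per iteration -- first 0x00ff0000, then 0x000000ff -- so
   the constant counts as two insns.  */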
1746 /* Emit an instruction with the indicated PATTERN. If COND is
1747 non-NULL, conditionalize the execution of the instruction on COND
1748 being true. */
1750 static void
1751 emit_constant_insn (rtx cond, rtx pattern)
1753 if (cond)
1754 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1755 emit_insn (pattern);
1758 /* As above, but extra parameter GENERATE which, if clear, suppresses
1759 RTL generation. */
1761 static int
1762 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1763 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1764 int generate)
1766 int can_invert = 0;
1767 int can_negate = 0;
1768 int can_negate_initial = 0;
1769 int can_shift = 0;
1770 int i;
1771 int num_bits_set = 0;
1772 int set_sign_bit_copies = 0;
1773 int clear_sign_bit_copies = 0;
1774 int clear_zero_bit_copies = 0;
1775 int set_zero_bit_copies = 0;
1776 int insns = 0;
1777 unsigned HOST_WIDE_INT temp1, temp2;
1778 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1780 /* Find out which operations are safe for a given CODE. Also do a quick
1781 check for degenerate cases; these can occur when DImode operations
1782 are split. */
1783 switch (code)
1785 case SET:
1786 can_invert = 1;
1787 can_shift = 1;
1788 can_negate = 1;
1789 break;
1791 case PLUS:
1792 can_negate = 1;
1793 can_negate_initial = 1;
1794 break;
1796 case IOR:
1797 if (remainder == 0xffffffff)
1799 if (generate)
1800 emit_constant_insn (cond,
1801 gen_rtx_SET (VOIDmode, target,
1802 GEN_INT (ARM_SIGN_EXTEND (val))));
1803 return 1;
1805 if (remainder == 0)
1807 if (reload_completed && rtx_equal_p (target, source))
1808 return 0;
1809 if (generate)
1810 emit_constant_insn (cond,
1811 gen_rtx_SET (VOIDmode, target, source));
1812 return 1;
1814 break;
1816 case AND:
1817 if (remainder == 0)
1819 if (generate)
1820 emit_constant_insn (cond,
1821 gen_rtx_SET (VOIDmode, target, const0_rtx));
1822 return 1;
1824 if (remainder == 0xffffffff)
1826 if (reload_completed && rtx_equal_p (target, source))
1827 return 0;
1828 if (generate)
1829 emit_constant_insn (cond,
1830 gen_rtx_SET (VOIDmode, target, source));
1831 return 1;
1833 can_invert = 1;
1834 break;
1836 case XOR:
1837 if (remainder == 0)
1839 if (reload_completed && rtx_equal_p (target, source))
1840 return 0;
1841 if (generate)
1842 emit_constant_insn (cond,
1843 gen_rtx_SET (VOIDmode, target, source));
1844 return 1;
1847 /* We don't know how to handle other cases yet. */
1848 gcc_assert (remainder == 0xffffffff);
1850 if (generate)
1851 emit_constant_insn (cond,
1852 gen_rtx_SET (VOIDmode, target,
1853 gen_rtx_NOT (mode, source)));
1854 return 1;
1856 case MINUS:
1857 /* We treat MINUS as (val - source), since (source - val) is always
1858 passed as (source + (-val)). */
1859 if (remainder == 0)
1861 if (generate)
1862 emit_constant_insn (cond,
1863 gen_rtx_SET (VOIDmode, target,
1864 gen_rtx_NEG (mode, source)));
1865 return 1;
1867 if (const_ok_for_arm (val))
1869 if (generate)
1870 emit_constant_insn (cond,
1871 gen_rtx_SET (VOIDmode, target,
1872 gen_rtx_MINUS (mode, GEN_INT (val),
1873 source)));
1874 return 1;
1876 can_negate = 1;
1878 break;
1880 default:
1881 gcc_unreachable ();
1884 /* If we can do it in one insn get out quickly. */
1885 if (const_ok_for_arm (val)
1886 || (can_negate_initial && const_ok_for_arm (-val))
1887 || (can_invert && const_ok_for_arm (~val)))
1889 if (generate)
1890 emit_constant_insn (cond,
1891 gen_rtx_SET (VOIDmode, target,
1892 (source
1893 ? gen_rtx_fmt_ee (code, mode, source,
1894 GEN_INT (val))
1895 : GEN_INT (val))));
1896 return 1;
1899 /* Calculate a few attributes that may be useful for specific
1900 optimizations. */
1901 for (i = 31; i >= 0; i--)
1903 if ((remainder & (1 << i)) == 0)
1904 clear_sign_bit_copies++;
1905 else
1906 break;
1909 for (i = 31; i >= 0; i--)
1911 if ((remainder & (1 << i)) != 0)
1912 set_sign_bit_copies++;
1913 else
1914 break;
1917 for (i = 0; i <= 31; i++)
1919 if ((remainder & (1 << i)) == 0)
1920 clear_zero_bit_copies++;
1921 else
1922 break;
1925 for (i = 0; i <= 31; i++)
1927 if ((remainder & (1 << i)) != 0)
1928 set_zero_bit_copies++;
1929 else
1930 break;
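/* Worked example (illustrative): REMAINDER == 0x0ffffff0 yields
   clear_sign_bit_copies == 4, clear_zero_bit_copies == 4 and both
   set_*_bit_copies == 0, since the value begins and ends with runs of
   four zero bits.  */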
1933 switch (code)
1935 case SET:
1936 /* See if we can do this by sign_extending a constant that is known
1937 to be negative. This is a good way of doing it, since the shift
1938 may well merge into a subsequent insn. */
1939 if (set_sign_bit_copies > 1)
1941 if (const_ok_for_arm
1942 (temp1 = ARM_SIGN_EXTEND (remainder
1943 << (set_sign_bit_copies - 1))))
1945 if (generate)
1947 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1948 emit_constant_insn (cond,
1949 gen_rtx_SET (VOIDmode, new_src,
1950 GEN_INT (temp1)));
1951 emit_constant_insn (cond,
1952 gen_ashrsi3 (target, new_src,
1953 GEN_INT (set_sign_bit_copies - 1)));
1955 return 2;
1957 /* For an inverted constant, we will need to set the low bits;
1958 these will be shifted out of harm's way. */
1959 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1960 if (const_ok_for_arm (~temp1))
1962 if (generate)
1964 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1965 emit_constant_insn (cond,
1966 gen_rtx_SET (VOIDmode, new_src,
1967 GEN_INT (temp1)));
1968 emit_constant_insn (cond,
1969 gen_ashrsi3 (target, new_src,
1970 GEN_INT (set_sign_bit_copies - 1)));
1972 return 2;
1976 /* See if we can calculate the value as the difference between two
1977 valid immediates. */
1978 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1980 int topshift = clear_sign_bit_copies & ~1;
1982 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1983 & (0xff000000 >> topshift));
1985 /* If temp1 is zero, then that means the 9 most significant
1986 bits of remainder were all 1 and we've caused it to overflow.
1987 When topshift is 0 we don't need to do anything since we
1988 can borrow from 'bit 32'. */
1989 if (temp1 == 0 && topshift != 0)
1990 temp1 = 0x80000000 >> (topshift - 1);
1992 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
1994 if (const_ok_for_arm (temp2))
1996 if (generate)
1998 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1999 emit_constant_insn (cond,
2000 gen_rtx_SET (VOIDmode, new_src,
2001 GEN_INT (temp1)));
2002 emit_constant_insn (cond,
2003 gen_addsi3 (target, new_src,
2004 GEN_INT (-temp2)));
2007 return 2;
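/* For example (illustrative): 0x00fffff0 is not a valid immediate, but
   it equals 0x01000000 - 0x10 and both of those are, so the code above
   loads 0x01000000 into a scratch and subtracts 0x10 -- two insns in
   total.  */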
2011 /* See if we can generate this by setting the bottom (or the top)
2012 16 bits, and then shifting these into the other half of the
2013 word. We only look for the simplest cases, to do more would cost
2014 too much. Be careful, however, not to generate this when the
2015 alternative would take fewer insns. */
2016 if (val & 0xffff0000)
2018 temp1 = remainder & 0xffff0000;
2019 temp2 = remainder & 0x0000ffff;
2021 /* Overlaps outside this range are best done using other methods. */
2022 for (i = 9; i < 24; i++)
2024 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2025 && !const_ok_for_arm (temp2))
2027 rtx new_src = (subtargets
2028 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2029 : target);
2030 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2031 source, subtargets, generate);
2032 source = new_src;
2033 if (generate)
2034 emit_constant_insn
2035 (cond,
2036 gen_rtx_SET
2037 (VOIDmode, target,
2038 gen_rtx_IOR (mode,
2039 gen_rtx_ASHIFT (mode, source,
2040 GEN_INT (i)),
2041 source)));
2042 return insns + 1;
2046 /* Don't duplicate cases already considered. */
2047 for (i = 17; i < 24; i++)
2049 if (((temp1 | (temp1 >> i)) == remainder)
2050 && !const_ok_for_arm (temp1))
2052 rtx new_src = (subtargets
2053 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2054 : target);
2055 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
2056 source, subtargets, generate);
2057 source = new_src;
2058 if (generate)
2059 emit_constant_insn
2060 (cond,
2061 gen_rtx_SET (VOIDmode, target,
2062 gen_rtx_IOR
2063 (mode,
2064 gen_rtx_LSHIFTRT (mode, source,
2065 GEN_INT (i)),
2066 source)));
2067 return insns + 1;
2071 break;
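/* For example (illustrative): 0x12341234 is caught by the first loop
   above with I == 16; 0x1234 costs two insns on its own, and one more
   ORR of the value with itself shifted left by 16 completes the
   constant in three insns rather than four.  */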
2073 case IOR:
2074 case XOR:
2075 /* If we have IOR or XOR, and the constant can be loaded in a
2076 single instruction, and we can find a temporary to put it in,
2077 then this can be done in two instructions instead of 3-4. */
2078 if (subtargets
2079 /* TARGET can't be NULL if SUBTARGETS is 0. */
2080 || (reload_completed && !reg_mentioned_p (target, source)))
2082 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2084 if (generate)
2086 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2088 emit_constant_insn (cond,
2089 gen_rtx_SET (VOIDmode, sub,
2090 GEN_INT (val)));
2091 emit_constant_insn (cond,
2092 gen_rtx_SET (VOIDmode, target,
2093 gen_rtx_fmt_ee (code, mode,
2094 source, sub)));
2096 return 2;
2100 if (code == XOR)
2101 break;
2103 if (set_sign_bit_copies > 8
2104 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2106 if (generate)
2108 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2109 rtx shift = GEN_INT (set_sign_bit_copies);
2111 emit_constant_insn
2112 (cond,
2113 gen_rtx_SET (VOIDmode, sub,
2114 gen_rtx_NOT (mode,
2115 gen_rtx_ASHIFT (mode,
2116 source,
2117 shift))));
2118 emit_constant_insn
2119 (cond,
2120 gen_rtx_SET (VOIDmode, target,
2121 gen_rtx_NOT (mode,
2122 gen_rtx_LSHIFTRT (mode, sub,
2123 shift))));
2125 return 2;
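/* For example (illustrative): (ior reg 0xfff00000) matches this pattern
   with a shift of 12 and becomes "mvn sub, source, lsl #12;
   mvn target, sub, lsr #12" -- the zeros pulled in by the final logical
   shift turn into the wanted block of set high bits under the outer
   NOT, while the low 20 bits of SOURCE survive the double inversion.  */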
2128 if (set_zero_bit_copies > 8
2129 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2131 if (generate)
2133 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2134 rtx shift = GEN_INT (set_zero_bit_copies);
2136 emit_constant_insn
2137 (cond,
2138 gen_rtx_SET (VOIDmode, sub,
2139 gen_rtx_NOT (mode,
2140 gen_rtx_LSHIFTRT (mode,
2141 source,
2142 shift))));
2143 emit_constant_insn
2144 (cond,
2145 gen_rtx_SET (VOIDmode, target,
2146 gen_rtx_NOT (mode,
2147 gen_rtx_ASHIFT (mode, sub,
2148 shift))));
2150 return 2;
2153 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2155 if (generate)
2157 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2158 emit_constant_insn (cond,
2159 gen_rtx_SET (VOIDmode, sub,
2160 gen_rtx_NOT (mode, source)));
2161 source = sub;
2162 if (subtargets)
2163 sub = gen_reg_rtx (mode);
2164 emit_constant_insn (cond,
2165 gen_rtx_SET (VOIDmode, sub,
2166 gen_rtx_AND (mode, source,
2167 GEN_INT (temp1))));
2168 emit_constant_insn (cond,
2169 gen_rtx_SET (VOIDmode, target,
2170 gen_rtx_NOT (mode, sub)));
2172 return 3;
2174 break;
2176 case AND:
2177 /* See if two shifts will do 2 or more insns' worth of work. */
2178 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2180 HOST_WIDE_INT shift_mask = ((0xffffffff
2181 << (32 - clear_sign_bit_copies))
2182 & 0xffffffff);
2184 if ((remainder | shift_mask) != 0xffffffff)
2186 if (generate)
2188 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2189 insns = arm_gen_constant (AND, mode, cond,
2190 remainder | shift_mask,
2191 new_src, source, subtargets, 1);
2192 source = new_src;
2194 else
2196 rtx targ = subtargets ? NULL_RTX : target;
2197 insns = arm_gen_constant (AND, mode, cond,
2198 remainder | shift_mask,
2199 targ, source, subtargets, 0);
2203 if (generate)
2205 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2206 rtx shift = GEN_INT (clear_sign_bit_copies);
2208 emit_insn (gen_ashlsi3 (new_src, source, shift));
2209 emit_insn (gen_lshrsi3 (target, new_src, shift));
2212 return insns + 2;
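/* For example (illustrative): (and reg 0x0000ffff) has 16 clear
   sign-bit copies, so it becomes "mov tmp, source, lsl #16;
   mov target, tmp, lsr #16" -- two insns, even though the mask itself
   is not a valid immediate.  */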
2215 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2217 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2219 if ((remainder | shift_mask) != 0xffffffff)
2221 if (generate)
2223 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2225 insns = arm_gen_constant (AND, mode, cond,
2226 remainder | shift_mask,
2227 new_src, source, subtargets, 1);
2228 source = new_src;
2230 else
2232 rtx targ = subtargets ? NULL_RTX : target;
2234 insns = arm_gen_constant (AND, mode, cond,
2235 remainder | shift_mask,
2236 targ, source, subtargets, 0);
2240 if (generate)
2242 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2243 rtx shift = GEN_INT (clear_zero_bit_copies);
2245 emit_insn (gen_lshrsi3 (new_src, source, shift));
2246 emit_insn (gen_ashlsi3 (target, new_src, shift));
2249 return insns + 2;
2252 break;
2254 default:
2255 break;
2258 for (i = 0; i < 32; i++)
2259 if (remainder & (1 << i))
2260 num_bits_set++;
2262 if (code == AND || (can_invert && num_bits_set > 16))
2263 remainder = (~remainder) & 0xffffffff;
2264 else if (code == PLUS && num_bits_set > 16)
2265 remainder = (-remainder) & 0xffffffff;
2266 else
2268 can_invert = 0;
2269 can_negate = 0;
2272 /* Now try and find a way of doing the job in either two or three
2273 instructions.
2274 We start by looking for the largest block of zeros that are aligned on
2275 a 2-bit boundary, we then fill up the temps, wrapping around to the
2276 top of the word when we drop off the bottom.
2277 In the worst case this code should produce no more than four insns. */
2279 int best_start = 0;
2280 int best_consecutive_zeros = 0;
2282 for (i = 0; i < 32; i += 2)
2284 int consecutive_zeros = 0;
2286 if (!(remainder & (3 << i)))
2288 while ((i < 32) && !(remainder & (3 << i)))
2290 consecutive_zeros += 2;
2291 i += 2;
2293 if (consecutive_zeros > best_consecutive_zeros)
2295 best_consecutive_zeros = consecutive_zeros;
2296 best_start = i - consecutive_zeros;
2298 i -= 2;
2302 /* So long as it won't require any more insns to do so, it's
2303 desirable to emit a small constant (in bits 0...9) in the last
2304 insn. This way there is more chance that it can be combined with
2305 a later addressing insn to form a pre-indexed load or store
2306 operation. Consider:
2308 *((volatile int *)0xe0000100) = 1;
2309 *((volatile int *)0xe0000110) = 2;
2311 We want this to wind up as:
2313 mov rA, #0xe0000000
2314 mov rB, #1
2315 str rB, [rA, #0x100]
2316 mov rB, #2
2317 str rB, [rA, #0x110]
2319 rather than having to synthesize both large constants from scratch.
2321 Therefore, we calculate how many insns would be required to emit
2322 the constant starting from `best_start', and also starting from
2323 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2324 yield a shorter sequence, we may as well use zero. */
2325 if (best_start != 0
2326 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2327 && (count_insns_for_constant (remainder, 0) <=
2328 count_insns_for_constant (remainder, best_start)))
2329 best_start = 0;
2331 /* Now start emitting the insns. */
2332 i = best_start;
2333 do
2334 {
2335 int end;
2337 if (i <= 0)
2338 i += 32;
2339 if (remainder & (3 << (i - 2)))
2341 end = i - 8;
2342 if (end < 0)
2343 end += 32;
2344 temp1 = remainder & ((0x0ff << end)
2345 | ((i < end) ? (0xff >> (32 - end)) : 0));
2346 remainder &= ~temp1;
2348 if (generate)
2350 rtx new_src, temp1_rtx;
2352 if (code == SET || code == MINUS)
2354 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2355 if (can_invert && code != MINUS)
2356 temp1 = ~temp1;
2358 else
2360 if (remainder && subtargets)
2361 new_src = gen_reg_rtx (mode);
2362 else
2363 new_src = target;
2364 if (can_invert)
2365 temp1 = ~temp1;
2366 else if (can_negate)
2367 temp1 = -temp1;
2370 temp1 = trunc_int_for_mode (temp1, mode);
2371 temp1_rtx = GEN_INT (temp1);
2373 if (code == SET)
2374 ;
2375 else if (code == MINUS)
2376 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2377 else
2378 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2380 emit_constant_insn (cond,
2381 gen_rtx_SET (VOIDmode, new_src,
2382 temp1_rtx));
2383 source = new_src;
2386 if (code == SET)
2388 can_invert = 0;
2389 code = PLUS;
2391 else if (code == MINUS)
2392 code = PLUS;
2394 insns++;
2395 i -= 6;
2397 i -= 2;
2398 }
2399 while (remainder);
2402 return insns;
2405 /* Canonicalize a comparison so that we are more likely to recognize it.
2406 This can be done for a few constant compares, where we can make the
2407 immediate value easier to load. */
2409 enum rtx_code
2410 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2411 rtx * op1)
2413 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2414 unsigned HOST_WIDE_INT maxval;
2415 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2417 switch (code)
2419 case EQ:
2420 case NE:
2421 return code;
2423 case GT:
2424 case LE:
2425 if (i != maxval
2426 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2428 *op1 = GEN_INT (i + 1);
2429 return code == GT ? GE : LT;
2431 break;
2433 case GE:
2434 case LT:
2435 if (i != ~maxval
2436 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2438 *op1 = GEN_INT (i - 1);
2439 return code == GE ? GT : LE;
2441 break;
2443 case GTU:
2444 case LEU:
2445 if (i != ~((unsigned HOST_WIDE_INT) 0)
2446 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2448 *op1 = GEN_INT (i + 1);
2449 return code == GTU ? GEU : LTU;
2451 break;
2453 case GEU:
2454 case LTU:
2455 if (i != 0
2456 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2458 *op1 = GEN_INT (i - 1);
2459 return code == GEU ? GTU : LEU;
2461 break;
2463 default:
2464 gcc_unreachable ();
2467 return code;
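/* For example (illustrative): (gtu x 0xffff) cannot load 0xffff
   directly, but the equivalent (geu x 0x10000) uses a valid immediate,
   so we return GEU and bump *OP1 to 0x10000.  */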
2471 /* Define how to find the value returned by a function. */
2473 rtx
2474 arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2476 enum machine_mode mode;
2477 int unsignedp ATTRIBUTE_UNUSED;
2478 rtx r ATTRIBUTE_UNUSED;
2480 mode = TYPE_MODE (type);
2481 /* Promote integer types. */
2482 if (INTEGRAL_TYPE_P (type))
2483 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2485 /* Promote small structs returned in a register to full-word size
2486 for big-endian AAPCS. */
2487 if (arm_return_in_msb (type))
2489 HOST_WIDE_INT size = int_size_in_bytes (type);
2490 if (size % UNITS_PER_WORD != 0)
2492 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2493 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2497 return LIBCALL_VALUE(mode);
2500 /* Determine the amount of memory needed to store the possible return
2501 registers of an untyped call. */
2502 int
2503 arm_apply_result_size (void)
2505 int size = 16;
2507 if (TARGET_ARM)
2509 if (TARGET_HARD_FLOAT_ABI)
2511 if (TARGET_FPA)
2512 size += 12;
2513 if (TARGET_MAVERICK)
2514 size += 8;
2516 if (TARGET_IWMMXT_ABI)
2517 size += 8;
2520 return size;
2523 /* Decide whether a type should be returned in memory (true)
2524 or in a register (false). This is called by the macro
2525 RETURN_IN_MEMORY. */
2526 int
2527 arm_return_in_memory (tree type)
2529 HOST_WIDE_INT size;
2531 if (!AGGREGATE_TYPE_P (type) &&
2532 (TREE_CODE (type) != VECTOR_TYPE) &&
2533 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2534 /* All simple types are returned in registers.
2535 For AAPCS, complex types are treated the same as aggregates. */
2536 return 0;
2538 size = int_size_in_bytes (type);
2540 if (arm_abi != ARM_ABI_APCS)
2542 /* ATPCS and later return aggregate types in memory only if they are
2543 larger than a word (or are variable size). */
2544 return (size < 0 || size > UNITS_PER_WORD);
2547 /* To maximize backwards compatibility with previous versions of gcc,
2548 return vectors up to 4 words in registers. */
2549 if (TREE_CODE (type) == VECTOR_TYPE)
2550 return (size < 0 || size > (4 * UNITS_PER_WORD));
2552 /* For the arm-wince targets we choose to be compatible with Microsoft's
2553 ARM and Thumb compilers, which always return aggregates in memory. */
2554 #ifndef ARM_WINCE
2555 /* All structures/unions bigger than one word are returned in memory.
2556 Also catch the case where int_size_in_bytes returns -1. In this case
2557 the aggregate is either huge or of variable size, and in either case
2558 we will want to return it via memory and not in a register. */
2559 if (size < 0 || size > UNITS_PER_WORD)
2560 return 1;
2562 if (TREE_CODE (type) == RECORD_TYPE)
2564 tree field;
2566 /* For a struct the APCS says that we only return in a register
2567 if the type is 'integer like' and every addressable element
2568 has an offset of zero. For practical purposes this means
2569 that the structure can have at most one non-bit-field element
2570 and that this element must be the first one in the structure. */
2572 /* Find the first field, ignoring non-FIELD_DECL things which will
2573 have been created by C++. */
2574 for (field = TYPE_FIELDS (type);
2575 field && TREE_CODE (field) != FIELD_DECL;
2576 field = TREE_CHAIN (field))
2577 continue;
2579 if (field == NULL)
2580 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2582 /* Check that the first field is valid for returning in a register. */
2584 /* ... Floats are not allowed */
2585 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2586 return 1;
2588 /* ... Aggregates that are not themselves valid for returning in
2589 a register are not allowed. */
2590 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2591 return 1;
2593 /* Now check the remaining fields, if any. Only bitfields are allowed,
2594 since they are not addressable. */
2595 for (field = TREE_CHAIN (field);
2596 field;
2597 field = TREE_CHAIN (field))
2599 if (TREE_CODE (field) != FIELD_DECL)
2600 continue;
2602 if (!DECL_BIT_FIELD_TYPE (field))
2603 return 1;
2606 return 0;
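/* So, for example, under the APCS rules above "struct { int x; }"
   comes back in a register, while "struct { float f; }" (float first
   field) and "struct { short a; short b; }" (B is an addressable
   non-bit-field) both go via memory.  */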
2609 if (TREE_CODE (type) == UNION_TYPE)
2611 tree field;
2613 /* Unions can be returned in registers if every element is
2614 integral, or can be returned in an integer register. */
2615 for (field = TYPE_FIELDS (type);
2616 field;
2617 field = TREE_CHAIN (field))
2619 if (TREE_CODE (field) != FIELD_DECL)
2620 continue;
2622 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2623 return 1;
2625 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2626 return 1;
2629 return 0;
2631 #endif /* not ARM_WINCE */
2633 /* Return all other types in memory. */
2634 return 1;
2637 /* Indicate whether or not words of a double are in big-endian order. */
2639 int
2640 arm_float_words_big_endian (void)
2642 if (TARGET_MAVERICK)
2643 return 0;
2645 /* For FPA, float words are always big-endian. For VFP, float words
2646 follow the memory system mode. */
2648 if (TARGET_FPA)
2650 return 1;
2653 if (TARGET_VFP)
2654 return (TARGET_BIG_END ? 1 : 0);
2656 return 1;
2659 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2660 for a call to a function whose data type is FNTYPE.
2661 For a library call, FNTYPE is NULL. */
2662 void
2663 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2664 rtx libname ATTRIBUTE_UNUSED,
2665 tree fndecl ATTRIBUTE_UNUSED)
2667 /* On the ARM, the offset starts at 0. */
2668 pcum->nregs = 0;
2669 pcum->iwmmxt_nregs = 0;
2670 pcum->can_split = true;
2672 pcum->call_cookie = CALL_NORMAL;
2674 if (TARGET_LONG_CALLS)
2675 pcum->call_cookie = CALL_LONG;
2677 /* Check for long call/short call attributes. The attributes
2678 override any command line option. */
2679 if (fntype)
2681 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2682 pcum->call_cookie = CALL_SHORT;
2683 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2684 pcum->call_cookie = CALL_LONG;
2687 /* Varargs vectors are treated the same as long long.
2688 named_count avoids having to change the way arm handles 'named'. */
2689 pcum->named_count = 0;
2690 pcum->nargs = 0;
2692 if (TARGET_REALLY_IWMMXT && fntype)
2694 tree fn_arg;
2696 for (fn_arg = TYPE_ARG_TYPES (fntype);
2697 fn_arg;
2698 fn_arg = TREE_CHAIN (fn_arg))
2699 pcum->named_count += 1;
2701 if (! pcum->named_count)
2702 pcum->named_count = INT_MAX;
2707 /* Return true if mode/type need doubleword alignment. */
2708 bool
2709 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2711 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2712 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2716 /* Determine where to put an argument to a function.
2717 Value is zero to push the argument on the stack,
2718 or a hard register in which to store the argument.
2720 MODE is the argument's machine mode.
2721 TYPE is the data type of the argument (as a tree).
2722 This is null for libcalls where that information may
2723 not be available.
2724 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2725 the preceding args and about the function being called.
2726 NAMED is nonzero if this argument is a named parameter
2727 (otherwise it is an extra parameter matching an ellipsis). */
2729 rtx
2730 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2731 tree type, int named)
2733 int nregs;
2735 /* Varargs vectors are treated the same as long long.
2736 named_count avoids having to change the way arm handles 'named'. */
2737 if (TARGET_IWMMXT_ABI
2738 && arm_vector_mode_supported_p (mode)
2739 && pcum->named_count > pcum->nargs + 1)
2741 if (pcum->iwmmxt_nregs <= 9)
2742 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2743 else
2745 pcum->can_split = false;
2746 return NULL_RTX;
2750 /* Put doubleword aligned quantities in even register pairs. */
2751 if (pcum->nregs & 1
2752 && ARM_DOUBLEWORD_ALIGN
2753 && arm_needs_doubleword_align (mode, type))
2754 pcum->nregs++;
2756 if (mode == VOIDmode)
2757 /* Compute operand 2 of the call insn. */
2758 return GEN_INT (pcum->call_cookie);
2760 /* Only allow splitting an arg between regs and memory if all preceding
2761 args were allocated to regs. For args passed by reference we only count
2762 the reference pointer. */
2763 if (pcum->can_split)
2764 nregs = 1;
2765 else
2766 nregs = ARM_NUM_REGS2 (mode, type);
2768 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2769 return NULL_RTX;
2771 return gen_rtx_REG (mode, pcum->nregs);
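/* For example (illustrative, AAPCS): given f (int a, long long b), A
   lands in r0, while B needs doubleword alignment and therefore skips
   r1 and occupies the even pair r2/r3; a further integer argument would
   then have to go on the stack.  */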
2774 static int
2775 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2776 tree type, bool named ATTRIBUTE_UNUSED)
2778 int nregs = pcum->nregs;
2780 if (arm_vector_mode_supported_p (mode))
2781 return 0;
2783 if (NUM_ARG_REGS > nregs
2784 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2785 && pcum->can_split)
2786 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2788 return 0;
2791 /* Variable sized types are passed by reference. This is a GCC
2792 extension to the ARM ABI. */
2794 static bool
2795 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2796 enum machine_mode mode ATTRIBUTE_UNUSED,
2797 tree type, bool named ATTRIBUTE_UNUSED)
2799 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2802 /* Encode the current state of the #pragma [no_]long_calls. */
2803 typedef enum
2805 OFF, /* No #pragma [no_]long_calls is in effect. */
2806 LONG, /* #pragma long_calls is in effect. */
2807 SHORT /* #pragma no_long_calls is in effect. */
2808 } arm_pragma_enum;
2810 static arm_pragma_enum arm_pragma_long_calls = OFF;
2812 void
2813 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2815 arm_pragma_long_calls = LONG;
2818 void
2819 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2821 arm_pragma_long_calls = SHORT;
2824 void
2825 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2827 arm_pragma_long_calls = OFF;
2830 /* Table of machine attributes. */
2831 const struct attribute_spec arm_attribute_table[] =
2833 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2834 /* Function calls made to this symbol must be done indirectly, because
2835 it may lie outside the 26-bit addressing range of a normal function
2836 call. */
2837 { "long_call", 0, 0, false, true, true, NULL },
2838 /* Whereas these functions are always known to reside within the 26-bit
2839 addressing range. */
2840 { "short_call", 0, 0, false, true, true, NULL },
2841 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2842 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2843 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2844 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2845 #ifdef ARM_PE
2846 /* ARM/PE has three new attributes:
2847 interfacearm - ?
2848 dllexport - for exporting a function/variable that will live in a dll
2849 dllimport - for importing a function/variable from a dll
2851 Microsoft allows multiple declspecs in one __declspec, separating
2852 them with spaces. We do NOT support this. Instead, use __declspec
2853 multiple times.
2855 { "dllimport", 0, 0, true, false, false, NULL },
2856 { "dllexport", 0, 0, true, false, false, NULL },
2857 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2858 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2859 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2860 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2861 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2862 #endif
2863 { NULL, 0, 0, false, false, false, NULL }
2866 /* Handle an attribute requiring a FUNCTION_DECL;
2867 arguments as in struct attribute_spec.handler. */
2868 static tree
2869 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2870 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2872 if (TREE_CODE (*node) != FUNCTION_DECL)
2874 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2875 IDENTIFIER_POINTER (name));
2876 *no_add_attrs = true;
2879 return NULL_TREE;
2882 /* Handle an "interrupt" or "isr" attribute;
2883 arguments as in struct attribute_spec.handler. */
2884 static tree
2885 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2886 bool *no_add_attrs)
2888 if (DECL_P (*node))
2890 if (TREE_CODE (*node) != FUNCTION_DECL)
2892 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2893 IDENTIFIER_POINTER (name));
2894 *no_add_attrs = true;
2896 /* FIXME: the argument, if any, is checked for type attributes;
2897 should it be checked for decl ones? */
2899 else
2901 if (TREE_CODE (*node) == FUNCTION_TYPE
2902 || TREE_CODE (*node) == METHOD_TYPE)
2904 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2906 warning (OPT_Wattributes, "%qs attribute ignored",
2907 IDENTIFIER_POINTER (name));
2908 *no_add_attrs = true;
2911 else if (TREE_CODE (*node) == POINTER_TYPE
2912 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2913 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2914 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2916 *node = build_variant_type_copy (*node);
2917 TREE_TYPE (*node) = build_type_attribute_variant
2918 (TREE_TYPE (*node),
2919 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2920 *no_add_attrs = true;
2922 else
2924 /* Possibly pass this attribute on from the type to a decl. */
2925 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2926 | (int) ATTR_FLAG_FUNCTION_NEXT
2927 | (int) ATTR_FLAG_ARRAY_NEXT))
2929 *no_add_attrs = true;
2930 return tree_cons (name, args, NULL_TREE);
2932 else
2934 warning (OPT_Wattributes, "%qs attribute ignored",
2935 IDENTIFIER_POINTER (name));
2940 return NULL_TREE;
2943 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2944 /* Handle the "notshared" attribute. This attribute is another way of
2945 requesting hidden visibility. ARM's compiler supports
2946 "__declspec(notshared)"; we support the same thing via an
2947 attribute. */
2949 static tree
2950 arm_handle_notshared_attribute (tree *node,
2951 tree name ATTRIBUTE_UNUSED,
2952 tree args ATTRIBUTE_UNUSED,
2953 int flags ATTRIBUTE_UNUSED,
2954 bool *no_add_attrs)
2956 tree decl = TYPE_NAME (*node);
2958 if (decl)
2960 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2961 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2962 *no_add_attrs = false;
2964 return NULL_TREE;
2966 #endif
2968 /* Return 0 if the attributes for two types are incompatible, 1 if they
2969 are compatible, and 2 if they are nearly compatible (which causes a
2970 warning to be generated). */
2971 static int
2972 arm_comp_type_attributes (tree type1, tree type2)
2974 int l1, l2, s1, s2;
2976 /* Check for mismatch of non-default calling convention. */
2977 if (TREE_CODE (type1) != FUNCTION_TYPE)
2978 return 1;
2980 /* Check for mismatched call attributes. */
2981 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2982 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2983 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2984 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2986 /* Only bother to check if an attribute is defined. */
2987 if (l1 | l2 | s1 | s2)
2989 /* If one type has an attribute, the other must have the same attribute. */
2990 if ((l1 != l2) || (s1 != s2))
2991 return 0;
2993 /* Disallow mixed attributes. */
2994 if ((l1 & s2) || (l2 & s1))
2995 return 0;
2998 /* Check for mismatched ISR attribute. */
2999 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
3000 if (! l1)
3001 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
3002 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
3003 if (! l2)
3004 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
3005 if (l1 != l2)
3006 return 0;
3008 return 1;
3011 /* Encode long_call or short_call attribute by prefixing
3012 symbol name in DECL with a special character FLAG. */
3013 void
3014 arm_encode_call_attribute (tree decl, int flag)
3016 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
3017 int len = strlen (str);
3018 char * newstr;
3020 /* Do not allow weak functions to be treated as short call. */
3021 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
3022 return;
3024 newstr = alloca (len + 2);
3025 newstr[0] = flag;
3026 strcpy (newstr + 1, str);
3028 newstr = (char *) ggc_alloc_string (newstr, len + 1);
3029 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
3032 /* Assigns default attributes to newly defined type. This is used to
3033 set short_call/long_call attributes for function types of
3034 functions defined inside corresponding #pragma scopes. */
3035 static void
3036 arm_set_default_type_attributes (tree type)
3038 /* Add __attribute__ ((long_call)) to all functions, when
3039 inside #pragma long_calls or __attribute__ ((short_call)),
3040 when inside #pragma no_long_calls. */
3041 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
3043 tree type_attr_list, attr_name;
3044 type_attr_list = TYPE_ATTRIBUTES (type);
3046 if (arm_pragma_long_calls == LONG)
3047 attr_name = get_identifier ("long_call");
3048 else if (arm_pragma_long_calls == SHORT)
3049 attr_name = get_identifier ("short_call");
3050 else
3051 return;
3053 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
3054 TYPE_ATTRIBUTES (type) = type_attr_list;
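/* Usage sketch (illustrative):

   #pragma long_calls
   void far_func (void);     -- given the long_call attribute
   #pragma no_long_calls
   void near_func (void);    -- given the short_call attribute
   #pragma long_calls_off
   void plain_func (void);   -- given neither  */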
3058 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
3059 defined within the current compilation unit. If this cannot be
3060 determined, then 0 is returned. */
3061 static int
3062 current_file_function_operand (rtx sym_ref)
3064 /* This is a bit of a fib. A function will have a short call flag
3065 applied to its name if it has the short call attribute, or it has
3066 already been defined within the current compilation unit. */
3067 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
3068 return 1;
3070 /* The current function is always defined within the current compilation
3071 unit. If it is a weak definition, however, then this may not be the real
3072 definition of the function, and so we have to say no. */
3073 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
3074 && !DECL_WEAK (current_function_decl))
3075 return 1;
3077 /* We cannot make the determination - default to returning 0. */
3078 return 0;
3081 /* Return nonzero if a 32-bit "long_call" should be generated for
3082 this call. We generate a long_call if the function:
3084 a. has an __attribute__((long_call))
3085 or b. is within the scope of a #pragma long_calls
3086 or c. the -mlong-calls command-line switch has been specified,
3087 and either:
3088 1. -ffunction-sections is in effect
3089 or 2. the current function has __attribute__ ((section))
3090 or 3. the target function has __attribute__ ((section))
3092 However we do not generate a long call if the function:
3094 d. has an __attribute__ ((short_call))
3095 or e. is inside the scope of a #pragma no_long_calls
3096 or f. is defined within the current compilation unit.
3098 This function will be called by C fragments contained in the machine
3099 description file. SYM_REF and CALL_COOKIE correspond to the matched
3100 rtl operands. CALL_SYMBOL is used to distinguish between
3101 two different callers of the function. It is set to 1 in the
3102 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3103 and "call_value" patterns. This is because of the difference in the
3104 SYM_REFs passed by these patterns. */
3105 int
3106 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3108 if (!call_symbol)
3110 if (GET_CODE (sym_ref) != MEM)
3111 return 0;
3113 sym_ref = XEXP (sym_ref, 0);
3116 if (GET_CODE (sym_ref) != SYMBOL_REF)
3117 return 0;
3119 if (call_cookie & CALL_SHORT)
3120 return 0;
3122 if (TARGET_LONG_CALLS)
3124 if (flag_function_sections
3125 || DECL_SECTION_NAME (current_function_decl))
3126 /* c.3 is handled by the definition of the
3127 ARM_DECLARE_FUNCTION_SIZE macro. */
3128 return 1;
3131 if (current_file_function_operand (sym_ref))
3132 return 0;
3134 return (call_cookie & CALL_LONG)
3135 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3136 || TARGET_LONG_CALLS;
3139 /* Return nonzero if it is ok to make a tail-call to DECL. */
3140 static bool
3141 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3143 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3145 if (cfun->machine->sibcall_blocked)
3146 return false;
3148 /* Never tailcall something for which we have no decl, or if we
3149 are in Thumb mode. */
3150 if (decl == NULL || TARGET_THUMB)
3151 return false;
3153 /* Get the calling method. */
3154 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3155 call_type = CALL_SHORT;
3156 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3157 call_type = CALL_LONG;
3159 /* Cannot tail-call to long calls, since these are out of range of
3160 a branch instruction. However, if not compiling PIC, we know
3161 we can reach the symbol if it is in this compilation unit. */
3162 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3163 return false;
3165 /* If we are interworking and the function is not declared static
3166 then we can't tail-call it unless we know that it exists in this
3167 compilation unit (since it might be a Thumb routine). */
3168 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3169 return false;
3171 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3172 if (IS_INTERRUPT (arm_current_func_type ()))
3173 return false;
3175 /* Everything else is ok. */
3176 return true;
3180 /* Addressing mode support functions. */
3182 /* Return nonzero if X is a legitimate immediate operand when compiling
3183 for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
3184 int
3185 legitimate_pic_operand_p (rtx x)
3187 if (GET_CODE (x) == SYMBOL_REF
3188 || (GET_CODE (x) == CONST
3189 && GET_CODE (XEXP (x, 0)) == PLUS
3190 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
3191 return 0;
3193 return 1;
3196 rtx
3197 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3199 if (GET_CODE (orig) == SYMBOL_REF
3200 || GET_CODE (orig) == LABEL_REF)
3202 #ifndef AOF_ASSEMBLER
3203 rtx pic_ref, address;
3204 #endif
3205 rtx insn;
3206 int subregs = 0;
3208 /* If this function doesn't have a pic register, create one now.
3209 A lot of the logic here is made obscure by the fact that this
3210 routine gets called as part of the rtx cost estimation
3211 process. We don't want those calls to affect any assumptions
3212 about the real function; and further, we can't call
3213 entry_of_function() until we start the real expansion
3214 process. */
3215 if (!current_function_uses_pic_offset_table)
3217 gcc_assert (!no_new_pseudos);
3218 if (arm_pic_register != INVALID_REGNUM)
3220 cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
3222 /* Play games to avoid marking the function as needing pic
3223 if we are being called as part of the cost-estimation
3224 process. */
3225 if (!ir_type())
3226 current_function_uses_pic_offset_table = 1;
3228 else
3230 rtx seq;
3232 cfun->machine->pic_reg = gen_reg_rtx (Pmode);
3234 /* Play games to avoid marking the function as needing pic
3235 if we are being called as part of the cost-estimation
3236 process. */
3237 if (!ir_type())
3239 current_function_uses_pic_offset_table = 1;
3240 start_sequence ();
3242 arm_load_pic_register (0UL);
3244 seq = get_insns ();
3245 end_sequence ();
3246 emit_insn_after (seq, entry_of_function ());
3251 if (reg == 0)
3253 gcc_assert (!no_new_pseudos);
3254 reg = gen_reg_rtx (Pmode);
3256 subregs = 1;
3259 #ifdef AOF_ASSEMBLER
3260 /* The AOF assembler can generate relocations for these directly, and
3261 understands that the PIC register has to be added into the offset. */
3262 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3263 #else
3264 if (subregs)
3265 address = gen_reg_rtx (Pmode);
3266 else
3267 address = reg;
3269 if (TARGET_ARM)
3270 emit_insn (gen_pic_load_addr_arm (address, orig));
3271 else
3272 emit_insn (gen_pic_load_addr_thumb (address, orig));
3274 if ((GET_CODE (orig) == LABEL_REF
3275 || (GET_CODE (orig) == SYMBOL_REF &&
3276 SYMBOL_REF_LOCAL_P (orig)))
3277 && NEED_GOT_RELOC)
3278 pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
3279 else
3281 pic_ref = gen_const_mem (Pmode,
3282 gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
3283 address));
3286 insn = emit_move_insn (reg, pic_ref);
3287 #endif
3288 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3289 by loop. */
3290 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3291 REG_NOTES (insn));
3292 return reg;
3294 else if (GET_CODE (orig) == CONST)
3296 rtx base, offset;
3298 if (GET_CODE (XEXP (orig, 0)) == PLUS
3299 && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
3300 return orig;
3302 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
3303 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
3304 return orig;
3306 if (reg == 0)
3308 gcc_assert (!no_new_pseudos);
3309 reg = gen_reg_rtx (Pmode);
3312 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3314 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3315 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3316 base == reg ? 0 : reg);
3318 if (GET_CODE (offset) == CONST_INT)
3320 /* The base register doesn't really matter, we only want to
3321 test the index for the appropriate mode. */
3322 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3324 gcc_assert (!no_new_pseudos);
3325 offset = force_reg (Pmode, offset);
3328 if (GET_CODE (offset) == CONST_INT)
3329 return plus_constant (base, INTVAL (offset));
3332 if (GET_MODE_SIZE (mode) > 4
3333 && (GET_MODE_CLASS (mode) == MODE_INT
3334 || TARGET_SOFT_FLOAT))
3336 emit_insn (gen_addsi3 (reg, base, offset));
3337 return reg;
3340 return gen_rtx_PLUS (Pmode, base, offset);
3343 return orig;
3347 /* Find a spare low register to use during the prolog of a function. */
3349 static int
3350 thumb_find_work_register (unsigned long pushed_regs_mask)
3352 int reg;
3354 /* Check the argument registers first as these are call-used. The
3355 register allocation order means that sometimes r3 might be used
3356 but earlier argument registers might not, so check them all. */
3357 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3358 if (!regs_ever_live[reg])
3359 return reg;
3361 /* Before going on to check the call-saved registers we can try a couple
3362 more ways of deducing that r3 is available. The first is when we are
3363 pushing anonymous arguments onto the stack and we have fewer than 4
3364 registers' worth of fixed arguments(*). In this case r3 will be part of
3365 the variable argument list and so we can be sure that it will be
3366 pushed right at the start of the function. Hence it will be available
3367 for the rest of the prologue.
3368 (*): i.e. current_function_pretend_args_size is greater than 0. */
3369 if (cfun->machine->uses_anonymous_args
3370 && current_function_pretend_args_size > 0)
3371 return LAST_ARG_REGNUM;
3373 /* The other case is when we have fixed arguments but fewer than 4 registers'
3374 worth. In this case r3 might be used in the body of the function, but
3375 it is not being used to convey an argument into the function. In theory
3376 we could just check current_function_args_size to see how many bytes are
3377 being passed in argument registers, but it seems that it is unreliable.
3378 Sometimes it will have the value 0 when in fact arguments are being
3379 passed. (See testcase execute/20021111-1.c for an example). So we also
3380 check the args_info.nregs field as well. The problem with this field is
3381 that it makes no allowances for arguments that are passed to the
3382 function but which are not used. Hence we could miss an opportunity
3383 when a function has an unused argument in r3. But it is better to be
3384 safe than sorry. */
3385 if (! cfun->machine->uses_anonymous_args
3386 && current_function_args_size >= 0
3387 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3388 && cfun->args_info.nregs < 4)
3389 return LAST_ARG_REGNUM;
3391 /* Otherwise look for a call-saved register that is going to be pushed. */
3392 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3393 if (pushed_regs_mask & (1 << reg))
3394 return reg;
3396 /* Something went wrong - thumb_compute_save_reg_mask()
3397 should have arranged for a suitable register to be pushed. */
3398 gcc_unreachable ();
3401 static GTY(()) int pic_labelno;
3403 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3404 low register. */
3406 void
3407 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3409 #ifndef AOF_ASSEMBLER
3410 rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx;
3411 rtx global_offset_table;
3413 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3414 return;
3416 gcc_assert (flag_pic);
3418 /* We use an UNSPEC rather than a LABEL_REF because this label never appears
3419 in the code stream. */
3421 labelno = GEN_INT (pic_labelno++);
3422 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3423 l1 = gen_rtx_CONST (VOIDmode, l1);
3425 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3426 /* On the ARM the PC register contains 'dot + 8' at the time of the
3427 addition; on the Thumb it is 'dot + 4'. */
3428 pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
3429 if (GOT_PCREL)
3430 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3431 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3432 else
3433 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3435 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3437 if (TARGET_ARM)
3439 emit_insn (gen_pic_load_addr_arm (cfun->machine->pic_reg, pic_rtx));
3440 emit_insn (gen_pic_add_dot_plus_eight (cfun->machine->pic_reg,
3441 cfun->machine->pic_reg, labelno));
3443 else
3445 if (arm_pic_register != INVALID_REGNUM
3446 && REGNO (cfun->machine->pic_reg) > LAST_LO_REGNUM)
3448 /* We will have pushed the pic register, so we should always be
3449 able to find a work register. */
3450 pic_tmp = gen_rtx_REG (SImode,
3451 thumb_find_work_register (saved_regs));
3452 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3453 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3455 else
3456 emit_insn (gen_pic_load_addr_thumb (cfun->machine->pic_reg, pic_rtx));
3457 emit_insn (gen_pic_add_dot_plus_four (cfun->machine->pic_reg,
3458 cfun->machine->pic_reg, labelno));
3461 /* Need to emit this whether or not we obey regdecls,
3462 since setjmp/longjmp can cause life info to screw up. */
3463 emit_insn (gen_rtx_USE (VOIDmode, cfun->machine->pic_reg));
3464 #endif /* AOF_ASSEMBLER */
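/* In ARM state the net effect is roughly the following sequence
   (illustrative; the real labels are generated from PIC_LABELNO):

   ldr rPIC, .Loffset
   .LPICn:
   add rPIC, pc, rPIC
   ...
   .Loffset:
   .word _GLOBAL_OFFSET_TABLE_ - (.LPICn + 8)

   where the +8 (+4 for Thumb) is the PC bias described above.  */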
3468 /* Return nonzero if X is valid as an ARM state addressing register. */
3469 static int
3470 arm_address_register_rtx_p (rtx x, int strict_p)
3472 int regno;
3474 if (GET_CODE (x) != REG)
3475 return 0;
3477 regno = REGNO (x);
3479 if (strict_p)
3480 return ARM_REGNO_OK_FOR_BASE_P (regno);
3482 return (regno <= LAST_ARM_REGNUM
3483 || regno >= FIRST_PSEUDO_REGISTER
3484 || regno == FRAME_POINTER_REGNUM
3485 || regno == ARG_POINTER_REGNUM);
3488 /* Return TRUE if this rtx is the difference of a symbol and a label,
3489 and will reduce to a PC-relative relocation in the object file.
3490 Expressions like this can be left alone when generating PIC, rather
3491 than forced through the GOT. */
3492 static int
3493 pcrel_constant_p (rtx x)
3495 if (GET_CODE (x) == MINUS)
3496 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
3498 return FALSE;
3501 /* Return nonzero if X is a valid ARM state address operand. */
3502 int
3503 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3504 int strict_p)
3506 bool use_ldrd;
3507 enum rtx_code code = GET_CODE (x);
3509 if (arm_address_register_rtx_p (x, strict_p))
3510 return 1;
3512 use_ldrd = (TARGET_LDRD
3513 && (mode == DImode
3514 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3516 if (code == POST_INC || code == PRE_DEC
3517 || ((code == PRE_INC || code == POST_DEC)
3518 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3519 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3521 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3522 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3523 && GET_CODE (XEXP (x, 1)) == PLUS
3524 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3526 rtx addend = XEXP (XEXP (x, 1), 1);
3528 /* Don't allow ldrd post-increment by register because it's hard
3529 to fix up invalid register choices. */
3530 if (use_ldrd
3531 && GET_CODE (x) == POST_MODIFY
3532 && GET_CODE (addend) == REG)
3533 return 0;
3535 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3536 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3539 /* After reload constants split into minipools will have addresses
3540 from a LABEL_REF. */
3541 else if (reload_completed
3542 && (code == LABEL_REF
3543 || (code == CONST
3544 && GET_CODE (XEXP (x, 0)) == PLUS
3545 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3546 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3547 return 1;
3549 else if (mode == TImode)
3550 return 0;
3552 else if (code == PLUS)
3554 rtx xop0 = XEXP (x, 0);
3555 rtx xop1 = XEXP (x, 1);
3557 return ((arm_address_register_rtx_p (xop0, strict_p)
3558 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3559 || (arm_address_register_rtx_p (xop1, strict_p)
3560 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3563 #if 0
3564 /* Reload currently can't handle MINUS, so disable this for now */
3565 else if (GET_CODE (x) == MINUS)
3567 rtx xop0 = XEXP (x, 0);
3568 rtx xop1 = XEXP (x, 1);
3570 return (arm_address_register_rtx_p (xop0, strict_p)
3571 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3573 #endif
3575 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3576 && code == SYMBOL_REF
3577 && CONSTANT_POOL_ADDRESS_P (x)
3578 && ! (flag_pic
3579 && symbol_mentioned_p (get_pool_constant (x))
3580 && ! pcrel_constant_p (get_pool_constant (x))))
3581 return 1;
3583 return 0;
3586 /* Return nonzero if INDEX is valid for an address index operand in
3587 ARM state. */
3588 static int
3589 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3590 int strict_p)
3592 HOST_WIDE_INT range;
3593 enum rtx_code code = GET_CODE (index);
3595 /* Standard coprocessor addressing modes. */
3596 if (TARGET_HARD_FLOAT
3597 && (TARGET_FPA || TARGET_MAVERICK)
3598 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3599 || (TARGET_MAVERICK && mode == DImode)))
3600 return (code == CONST_INT && INTVAL (index) < 1024
3601 && INTVAL (index) > -1024
3602 && (INTVAL (index) & 3) == 0);
3604 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3605 return (code == CONST_INT
3606 && INTVAL (index) < 1024
3607 && INTVAL (index) > -1024
3608 && (INTVAL (index) & 3) == 0);
3610 if (arm_address_register_rtx_p (index, strict_p)
3611 && (GET_MODE_SIZE (mode) <= 4))
3612 return 1;
3614 if (mode == DImode || mode == DFmode)
3616 if (code == CONST_INT)
3618 HOST_WIDE_INT val = INTVAL (index);
3620 if (TARGET_LDRD)
3621 return val > -256 && val < 256;
3622 else
3623 return val > -4096 && val < 4092;
3626 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3629 if (GET_MODE_SIZE (mode) <= 4
3630 && ! (arm_arch4
3631 && (mode == HImode
3632 || (mode == QImode && outer == SIGN_EXTEND))))
3634 if (code == MULT)
3636 rtx xiop0 = XEXP (index, 0);
3637 rtx xiop1 = XEXP (index, 1);
3639 return ((arm_address_register_rtx_p (xiop0, strict_p)
3640 && power_of_two_operand (xiop1, SImode))
3641 || (arm_address_register_rtx_p (xiop1, strict_p)
3642 && power_of_two_operand (xiop0, SImode)));
3644 else if (code == LSHIFTRT || code == ASHIFTRT
3645 || code == ASHIFT || code == ROTATERT)
3647 rtx op = XEXP (index, 1);
3649 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3650 && GET_CODE (op) == CONST_INT
3651 && INTVAL (op) > 0
3652 && INTVAL (op) <= 31);
3656 /* For ARM v4 we may be doing a sign-extend operation during the
3657 load. */
3658 if (arm_arch4)
3660 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3661 range = 256;
3662 else
3663 range = 4096;
3665 else
3666 range = (mode == HImode) ? 4095 : 4096;
3668 return (code == CONST_INT
3669 && INTVAL (index) < range
3670 && INTVAL (index) > -range);
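/* For example (illustrative): for a SImode load the MULT case above
   accepts (plus r1 (mult r2 4)), i.e. "ldr r0, [r1, r2, lsl #2]", while
   an ARMv4 HImode access is restricted to the +/-255 byte offsets that
   ldrh can encode.  */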
3673 /* Return nonzero if X is valid as a Thumb state base register. */
3674 static int
3675 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3677 int regno;
3679 if (GET_CODE (x) != REG)
3680 return 0;
3682 regno = REGNO (x);
3684 if (strict_p)
3685 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3687 return (regno <= LAST_LO_REGNUM
3688 || regno > LAST_VIRTUAL_REGISTER
3689 || regno == FRAME_POINTER_REGNUM
3690 || (GET_MODE_SIZE (mode) >= 4
3691 && (regno == STACK_POINTER_REGNUM
3692 || regno >= FIRST_PSEUDO_REGISTER
3693 || x == hard_frame_pointer_rtx
3694 || x == arg_pointer_rtx)));
3697 /* Return nonzero if x is a legitimate index register. This is the case
3698 for any base register that can access a QImode object. */
3699 inline static int
3700 thumb_index_register_rtx_p (rtx x, int strict_p)
3702 return thumb_base_register_rtx_p (x, QImode, strict_p);
3705 /* Return nonzero if x is a legitimate Thumb-state address.
3707 The AP may be eliminated to either the SP or the FP, so we use the
3708 least common denominator, e.g. SImode, and offsets from 0 to 64.
3710 ??? Verify whether the above is the right approach.
3712 ??? Also, the FP may be eliminated to the SP, so perhaps that
3713 needs special handling also.
3715 ??? Look at how the mips16 port solves this problem. It probably uses
3716 better ways to solve some of these problems.
3718 Although it is not incorrect, we don't accept QImode and HImode
3719 addresses based on the frame pointer or arg pointer until the
3720 reload pass starts. This is so that eliminating such addresses
3721 into stack based ones won't produce impossible code. */
3722 int
3723 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3725 /* ??? Not clear if this is right. Experiment. */
3726 if (GET_MODE_SIZE (mode) < 4
3727 && !(reload_in_progress || reload_completed)
3728 && (reg_mentioned_p (frame_pointer_rtx, x)
3729 || reg_mentioned_p (arg_pointer_rtx, x)
3730 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3731 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3732 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3733 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3734 return 0;
3736 /* Accept any base register. SP only in SImode or larger. */
3737 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3738 return 1;
3740 /* This is PC relative data before arm_reorg runs. */
3741 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3742 && GET_CODE (x) == SYMBOL_REF
3743 && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
3744 return 1;
3746 /* This is PC relative data after arm_reorg runs. */
3747 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3748 && (GET_CODE (x) == LABEL_REF
3749 || (GET_CODE (x) == CONST
3750 && GET_CODE (XEXP (x, 0)) == PLUS
3751 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3752 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3753 return 1;
3755 /* Post-inc indexing is only supported for SImode and larger. */
3756 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3757 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3758 return 1;
3760 else if (GET_CODE (x) == PLUS)
3762 /* REG+REG address can be any two index registers. */
3763 /* We disallow FRAME+REG addressing since we know that FRAME
3764 will be replaced with STACK, and SP relative addressing only
3765 permits SP+OFFSET. */
3766 if (GET_MODE_SIZE (mode) <= 4
3767 && XEXP (x, 0) != frame_pointer_rtx
3768 && XEXP (x, 1) != frame_pointer_rtx
3769 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3770 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3771 return 1;
3773 /* REG+const has 5-7 bit offset for non-SP registers. */
3774 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3775 || XEXP (x, 0) == arg_pointer_rtx)
3776 && GET_CODE (XEXP (x, 1)) == CONST_INT
3777 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3778 return 1;
3780 /* REG+const has 10 bit offset for SP, but only SImode and
3781 larger is supported. */
3782 /* ??? Should probably check for DI/DFmode overflow here
3783 just like GO_IF_LEGITIMATE_OFFSET does. */
3784 else if (GET_CODE (XEXP (x, 0)) == REG
3785 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3786 && GET_MODE_SIZE (mode) >= 4
3787 && GET_CODE (XEXP (x, 1)) == CONST_INT
3788 && INTVAL (XEXP (x, 1)) >= 0
3789 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3790 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3791 return 1;
3793 else if (GET_CODE (XEXP (x, 0)) == REG
3794 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3795 && GET_MODE_SIZE (mode) >= 4
3796 && GET_CODE (XEXP (x, 1)) == CONST_INT
3797 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3798 return 1;
3801 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3802 && GET_MODE_SIZE (mode) == 4
3803 && GET_CODE (x) == SYMBOL_REF
3804 && CONSTANT_POOL_ADDRESS_P (x)
3805 && ! (flag_pic
3806 && symbol_mentioned_p (get_pool_constant (x))
3807 && ! pcrel_constant_p (get_pool_constant (x))))
3808 return 1;
3810 return 0;
3813 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3814 instruction of mode MODE. */
3816 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3818 switch (GET_MODE_SIZE (mode))
3820 case 1:
3821 return val >= 0 && val < 32;
3823 case 2:
3824 return val >= 0 && val < 64 && (val & 1) == 0;
3826 default:
3827 return (val >= 0
3828 && (val + GET_MODE_SIZE (mode)) <= 128
3829 && (val & 3) == 0);
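These three cases are just the Thumb-1 5-bit scaled immediate: the encoding stores offset/size, so bytes reach 31, halfwords 62 and words 124. A self-contained sketch of the same rule (hypothetical helper, not part of arm.c):

#include <assert.h>

static int
thumb_offset_ok (int size, long val)
{
  switch (size)
    {
    case 1:                     /* ldrb/strb: #0..#31 */
      return val >= 0 && val < 32;
    case 2:                     /* ldrh/strh: even #0..#62 */
      return val >= 0 && val < 64 && (val & 1) == 0;
    default:                    /* ldr/str: word-aligned, last byte < #128 */
      return val >= 0 && val + size <= 128 && (val & 3) == 0;
    }
}

int
main (void)
{
  assert (thumb_offset_ok (1, 31));
  assert (!thumb_offset_ok (2, 63));   /* odd halfword offset */
  assert (thumb_offset_ok (4, 124));
  assert (!thumb_offset_ok (8, 124));  /* DImode would overrun #128 */
  return 0;
}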
3833 /* Build the SYMBOL_REF for __tls_get_addr. */
3835 static GTY(()) rtx tls_get_addr_libfunc;
3837 static rtx
3838 get_tls_get_addr (void)
3840 if (!tls_get_addr_libfunc)
3841 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
3842 return tls_get_addr_libfunc;
3845 static rtx
3846 arm_load_tp (rtx target)
3848 if (!target)
3849 target = gen_reg_rtx (SImode);
3851 if (TARGET_HARD_TP)
3853 /* Can return in any reg. */
3854 emit_insn (gen_load_tp_hard (target));
3856 else
3858 /* Always returned in r0. Immediately copy the result into a pseudo,
3859 otherwise other uses of r0 (e.g. setting up function arguments) may
3860 clobber the value. */
3862 rtx tmp;
3864 emit_insn (gen_load_tp_soft ());
3866 tmp = gen_rtx_REG (SImode, 0);
3867 emit_move_insn (target, tmp);
3869 return target;
3872 static rtx
3873 load_tls_operand (rtx x, rtx reg)
3875 rtx tmp;
3877 if (reg == NULL_RTX)
3878 reg = gen_reg_rtx (SImode);
3880 tmp = gen_rtx_CONST (SImode, x);
3882 emit_move_insn (reg, tmp);
3884 return reg;
3887 static rtx
3888 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
3890 rtx insns, label, labelno, sum;
3892 start_sequence ();
3894 labelno = GEN_INT (pic_labelno++);
3895 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3896 label = gen_rtx_CONST (VOIDmode, label);
3898 sum = gen_rtx_UNSPEC (Pmode,
3899 gen_rtvec (4, x, GEN_INT (reloc), label,
3900 GEN_INT (TARGET_ARM ? 8 : 4)),
3901 UNSPEC_TLS);
3902 reg = load_tls_operand (sum, reg);
3904 if (TARGET_ARM)
3905 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
3906 else
3907 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3909 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
3910 Pmode, 1, reg, Pmode);
3912 insns = get_insns ();
3913 end_sequence ();
3915 return insns;
3919 legitimize_tls_address (rtx x, rtx reg)
3921 rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
3922 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
3924 switch (model)
3926 case TLS_MODEL_GLOBAL_DYNAMIC:
3927 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
3928 dest = gen_reg_rtx (Pmode);
3929 emit_libcall_block (insns, dest, ret, x);
3930 return dest;
3932 case TLS_MODEL_LOCAL_DYNAMIC:
3933 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
3935 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3936 share the LDM result with other LD model accesses. */
3937 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
3938 UNSPEC_TLS);
3939 dest = gen_reg_rtx (Pmode);
3940 emit_libcall_block (insns, dest, ret, eqv);
3942 /* Load the addend. */
3943 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
3944 UNSPEC_TLS);
3945 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
3946 return gen_rtx_PLUS (Pmode, dest, addend);
3948 case TLS_MODEL_INITIAL_EXEC:
3949 labelno = GEN_INT (pic_labelno++);
3950 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3951 label = gen_rtx_CONST (VOIDmode, label);
3952 sum = gen_rtx_UNSPEC (Pmode,
3953 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
3954 GEN_INT (TARGET_ARM ? 8 : 4)),
3955 UNSPEC_TLS);
3956 reg = load_tls_operand (sum, reg);
3958 if (TARGET_ARM)
3959 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
3960 else
3962 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3963 emit_move_insn (reg, gen_const_mem (SImode, reg));
3966 tp = arm_load_tp (NULL_RTX);
3968 return gen_rtx_PLUS (Pmode, tp, reg);
3970 case TLS_MODEL_LOCAL_EXEC:
3971 tp = arm_load_tp (NULL_RTX);
3973 reg = gen_rtx_UNSPEC (Pmode,
3974 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
3975 UNSPEC_TLS);
3976 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
3978 return gen_rtx_PLUS (Pmode, tp, reg);
3980 default:
3981 abort ();
3985 /* Try machine-dependent ways of modifying an illegitimate address
3986 to be legitimate. If we find one, return the new, valid address. */
3988 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3990 if (arm_tls_symbol_p (x))
3991 return legitimize_tls_address (x, NULL_RTX);
3993 if (GET_CODE (x) == PLUS)
3995 rtx xop0 = XEXP (x, 0);
3996 rtx xop1 = XEXP (x, 1);
3998 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3999 xop0 = force_reg (SImode, xop0);
4001 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
4002 xop1 = force_reg (SImode, xop1);
4004 if (ARM_BASE_REGISTER_RTX_P (xop0)
4005 && GET_CODE (xop1) == CONST_INT)
4007 HOST_WIDE_INT n, low_n;
4008 rtx base_reg, val;
4009 n = INTVAL (xop1);
4011 /* VFP addressing modes actually allow greater offsets, but for
4012 now we just stick with the lowest common denominator. */
4013 if (mode == DImode
4014 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
4016 low_n = n & 0x0f;
4017 n &= ~0x0f;
4018 if (low_n > 4)
4020 n += 16;
4021 low_n -= 16;
4024 else
4026 low_n = ((mode) == TImode ? 0
4027 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
4028 n -= low_n;
4031 base_reg = gen_reg_rtx (SImode);
4032 val = force_operand (plus_constant (xop0, n), NULL_RTX);
4033 emit_move_insn (base_reg, val);
4034 x = plus_constant (base_reg, low_n);
4036 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4037 x = gen_rtx_PLUS (SImode, xop0, xop1);
4040 /* XXX We don't allow MINUS any more -- see comment in
4041 arm_legitimate_address_p (). */
4042 else if (GET_CODE (x) == MINUS)
4044 rtx xop0 = XEXP (x, 0);
4045 rtx xop1 = XEXP (x, 1);
4047 if (CONSTANT_P (xop0))
4048 xop0 = force_reg (SImode, xop0);
4050 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
4051 xop1 = force_reg (SImode, xop1);
4053 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4054 x = gen_rtx_MINUS (SImode, xop0, xop1);
4057 /* Make sure to take full advantage of the pre-indexed addressing mode
4058 with absolute addresses which often allows for the base register to
4059 be factorized for multiple adjacent memory references, and it might
4060 even allow for the minipool to be avoided entirely. */
4061 else if (GET_CODE (x) == CONST_INT && optimize > 0)
4063 unsigned int bits;
4064 HOST_WIDE_INT mask, base, index;
4065 rtx base_reg;
4067 /* ldr and ldrb can use a 12 bit index, ldrsb and the rest can only
4068 use an 8 bit index. So let's use a 12 bit index for SImode only and
4069 hope that arm_gen_constant will enable ldrb to use more bits. */
4070 bits = (mode == SImode) ? 12 : 8;
4071 mask = (1 << bits) - 1;
4072 base = INTVAL (x) & ~mask;
4073 index = INTVAL (x) & mask;
4074 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
4076 /* It'll most probably be more efficient to generate the base
4077 with more bits set and use a negative index instead. */
4078 base |= mask;
4079 index -= mask;
4081 base_reg = force_reg (SImode, GEN_INT (base));
4082 x = plus_constant (base_reg, index);
4085 if (flag_pic)
4087 /* We need to find and carefully transform any SYMBOL and LABEL
4088 references; so go back to the original address expression. */
4089 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4091 if (new_x != orig_x)
4092 x = new_x;
4095 return x;
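The CONST_INT branch above splits an absolute address into a materialised base plus a small load-time index; when the truncated base has too many bits set, it flips to base|mask with a negative index so arm_gen_constant has an easier constant to build. A standalone sketch of that arithmetic (hypothetical helper names):

#include <stdio.h>

static unsigned
popcount32 (unsigned long v)
{
  unsigned n;
  for (n = 0, v &= 0xffffffffUL; v; v >>= 1)
    n += v & 1;
  return n;
}

/* bits is 12 for SImode (ldr), 8 for the narrower loads.  */
static void
split_address (long addr, int bits, long *base, long *index)
{
  long mask = (1L << bits) - 1;

  *base = addr & ~mask;
  *index = addr & mask;
  if (popcount32 (*base) > (unsigned) (32 - bits) / 2)
    {
      *base |= mask;    /* denser base constant ...         */
      *index -= mask;   /* ... balanced by a negative index */
    }
}

int
main (void)
{
  long base, index;

  split_address (0x12345, 12, &base, &index);
  printf ("0x12345 -> base 0x%lx, index %ld\n", (unsigned long) base, index);
  /* prints: 0x12345 -> base 0x12000, index 837 */
  return 0;
}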
4099 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4100 to be legitimate. If we find one, return the new, valid address. */
4102 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4104 if (arm_tls_symbol_p (x))
4105 return legitimize_tls_address (x, NULL_RTX);
4107 if (GET_CODE (x) == PLUS
4108 && GET_CODE (XEXP (x, 1)) == CONST_INT
4109 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
4110 || INTVAL (XEXP (x, 1)) < 0))
4112 rtx xop0 = XEXP (x, 0);
4113 rtx xop1 = XEXP (x, 1);
4114 HOST_WIDE_INT offset = INTVAL (xop1);
4116 /* Try and fold the offset into a biasing of the base register and
4117 then offsetting that. We only do this when optimizing for size,
4118 since otherwise it can cause too many CSEs.
4119 if (optimize_size && offset >= 0
4120 && offset < 256 + 31 * GET_MODE_SIZE (mode))
4122 HOST_WIDE_INT delta;
4124 if (offset >= 256)
4125 delta = offset - (256 - GET_MODE_SIZE (mode));
4126 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
4127 delta = 31 * GET_MODE_SIZE (mode);
4128 else
4129 delta = offset & (~31 * GET_MODE_SIZE (mode));
4131 xop0 = force_operand (plus_constant (xop0, offset - delta),
4132 NULL_RTX);
4133 x = plus_constant (xop0, delta);
4135 else if (offset < 0 && offset > -256)
4136 /* Small negative offsets are best done with a subtract before the
4137 dereference, forcing these into a register normally takes two
4138 instructions. */
4139 x = force_operand (x, NULL_RTX);
4140 else
4142 /* For the remaining cases, force the constant into a register. */
4143 xop1 = force_reg (SImode, xop1);
4144 x = gen_rtx_PLUS (SImode, xop0, xop1);
4147 else if (GET_CODE (x) == PLUS
4148 && s_register_operand (XEXP (x, 1), SImode)
4149 && !s_register_operand (XEXP (x, 0), SImode))
4151 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
4153 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
4156 if (flag_pic)
4158 /* We need to find and carefully transform any SYMBOL and LABEL
4159 references; so go back to the original address expression. */
4160 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4162 if (new_x != orig_x)
4163 x = new_x;
4166 return x;
4170 thumb_legitimize_reload_address (rtx *x_p,
4171 enum machine_mode mode,
4172 int opnum, int type,
4173 int ind_levels ATTRIBUTE_UNUSED)
4175 rtx x = *x_p;
4177 if (GET_CODE (x) == PLUS
4178 && GET_MODE_SIZE (mode) < 4
4179 && REG_P (XEXP (x, 0))
4180 && XEXP (x, 0) == stack_pointer_rtx
4181 && GET_CODE (XEXP (x, 1)) == CONST_INT
4182 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4184 rtx orig_x = x;
4186 x = copy_rtx (x);
4187 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4188 Pmode, VOIDmode, 0, 0, opnum, type);
4189 return x;
4192 /* If both registers are hi-regs, then it's better to reload the
4193 entire expression rather than each register individually. That
4194 only requires one reload register rather than two. */
4195 if (GET_CODE (x) == PLUS
4196 && REG_P (XEXP (x, 0))
4197 && REG_P (XEXP (x, 1))
4198 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
4199 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
4201 rtx orig_x = x;
4203 x = copy_rtx (x);
4204 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4205 Pmode, VOIDmode, 0, 0, opnum, type);
4206 return x;
4209 return NULL;
4212 /* Test for various thread-local symbols. */
4214 /* Return TRUE if X is a thread-local symbol. */
4216 static bool
4217 arm_tls_symbol_p (rtx x)
4219 if (! TARGET_HAVE_TLS)
4220 return false;
4222 if (GET_CODE (x) != SYMBOL_REF)
4223 return false;
4225 return SYMBOL_REF_TLS_MODEL (x) != 0;
4228 /* Helper for arm_tls_referenced_p. */
4230 static int
4231 arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4233 if (GET_CODE (*x) == SYMBOL_REF)
4234 return SYMBOL_REF_TLS_MODEL (*x) != 0;
4236 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
4237 TLS offsets, not real symbol references. */
4238 if (GET_CODE (*x) == UNSPEC
4239 && XINT (*x, 1) == UNSPEC_TLS)
4240 return -1;
4242 return 0;
4245 /* Return TRUE if X contains any TLS symbol references. */
4247 bool
4248 arm_tls_referenced_p (rtx x)
4250 if (! TARGET_HAVE_TLS)
4251 return false;
4253 return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
4256 #define REG_OR_SUBREG_REG(X) \
4257 (GET_CODE (X) == REG \
4258 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
4260 #define REG_OR_SUBREG_RTX(X) \
4261 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
4263 #ifndef COSTS_N_INSNS
4264 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
4265 #endif
4266 static inline int
4267 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
4269 enum machine_mode mode = GET_MODE (x);
4271 switch (code)
4273 case ASHIFT:
4274 case ASHIFTRT:
4275 case LSHIFTRT:
4276 case ROTATERT:
4277 case PLUS:
4278 case MINUS:
4279 case COMPARE:
4280 case NEG:
4281 case NOT:
4282 return COSTS_N_INSNS (1);
4284 case MULT:
4285 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4287 int cycles = 0;
4288 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
4290 while (i)
4292 i >>= 2;
4293 cycles++;
4295 return COSTS_N_INSNS (2) + cycles;
4297 return COSTS_N_INSNS (1) + 16;
4299 case SET:
4300 return (COSTS_N_INSNS (1)
4301 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
4302 + (GET_CODE (SET_DEST (x)) == MEM)));
4304 case CONST_INT:
4305 if (outer == SET)
4307 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
4308 return 0;
4309 if (thumb_shiftable_const (INTVAL (x)))
4310 return COSTS_N_INSNS (2);
4311 return COSTS_N_INSNS (3);
4313 else if ((outer == PLUS || outer == COMPARE)
4314 && INTVAL (x) < 256 && INTVAL (x) > -256)
4315 return 0;
4316 else if (outer == AND
4317 && INTVAL (x) < 256 && INTVAL (x) >= -256)
4318 return COSTS_N_INSNS (1);
4319 else if (outer == ASHIFT || outer == ASHIFTRT
4320 || outer == LSHIFTRT)
4321 return 0;
4322 return COSTS_N_INSNS (2);
4324 case CONST:
4325 case CONST_DOUBLE:
4326 case LABEL_REF:
4327 case SYMBOL_REF:
4328 return COSTS_N_INSNS (3);
4330 case UDIV:
4331 case UMOD:
4332 case DIV:
4333 case MOD:
4334 return 100;
4336 case TRUNCATE:
4337 return 99;
4339 case AND:
4340 case XOR:
4341 case IOR:
4342 /* XXX guess. */
4343 return 8;
4345 case MEM:
4346 /* XXX another guess. */
4347 /* Memory costs quite a lot for the first word, but subsequent words
4348 load at the equivalent of a single insn each. */
4349 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4350 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4351 ? 4 : 0));
4353 case IF_THEN_ELSE:
4354 /* XXX a guess. */
4355 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4356 return 14;
4357 return 2;
4359 case ZERO_EXTEND:
4360 /* XXX still guessing. */
4361 switch (GET_MODE (XEXP (x, 0)))
4363 case QImode:
4364 return (1 + (mode == DImode ? 4 : 0)
4365 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4367 case HImode:
4368 return (4 + (mode == DImode ? 4 : 0)
4369 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4371 case SImode:
4372 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4374 default:
4375 return 99;
4378 default:
4379 return 99;
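The MULT case above models the Thumb-1 early-terminating multiplier, which retires roughly two bits of the constant multiplier per cycle; the reported cost is then COSTS_N_INSNS (2) plus that cycle count. A sketch (hypothetical helper):

static int
thumb_mul_cycles (unsigned long i)
{
  int cycles = 0;

  for (i &= 0xffffffffUL; i; i >>= 2)   /* two multiplier bits per cycle */
    cycles++;
  return cycles;   /* 0x0f -> 2, 0xffff -> 8, 0xffffffff -> 16 */
}

int
main (void)
{
  return thumb_mul_cycles (0xffff) == 8 ? 0 : 1;
}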
4384 /* Worker routine for arm_rtx_costs. */
4385 static inline int
4386 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4388 enum machine_mode mode = GET_MODE (x);
4389 enum rtx_code subcode;
4390 int extra_cost;
4392 switch (code)
4394 case MEM:
4395 /* Memory costs quite a lot for the first word, but subsequent words
4396 load at the equivalent of a single insn each. */
4397 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4398 + (GET_CODE (x) == SYMBOL_REF
4399 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4401 case DIV:
4402 case MOD:
4403 case UDIV:
4404 case UMOD:
4405 return optimize_size ? COSTS_N_INSNS (2) : 100;
4407 case ROTATE:
4408 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4409 return 4;
4410 /* Fall through */
4411 case ROTATERT:
4412 if (mode != SImode)
4413 return 8;
4414 /* Fall through */
4415 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4416 if (mode == DImode)
4417 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4418 + ((GET_CODE (XEXP (x, 0)) == REG
4419 || (GET_CODE (XEXP (x, 0)) == SUBREG
4420 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4421 ? 0 : 8));
4422 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4423 || (GET_CODE (XEXP (x, 0)) == SUBREG
4424 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4425 ? 0 : 4)
4426 + ((GET_CODE (XEXP (x, 1)) == REG
4427 || (GET_CODE (XEXP (x, 1)) == SUBREG
4428 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4429 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4430 ? 0 : 4));
4432 case MINUS:
4433 if (mode == DImode)
4434 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4435 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4436 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4437 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4438 ? 0 : 8));
4440 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4441 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4442 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4443 && arm_const_double_rtx (XEXP (x, 1))))
4444 ? 0 : 8)
4445 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4446 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4447 && arm_const_double_rtx (XEXP (x, 0))))
4448 ? 0 : 8));
4450 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4451 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4452 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4453 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4454 || subcode == ASHIFTRT || subcode == LSHIFTRT
4455 || subcode == ROTATE || subcode == ROTATERT
4456 || (subcode == MULT
4457 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4458 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4459 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4460 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4461 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4462 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4463 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4464 return 1;
4465 /* Fall through */
4467 case PLUS:
4468 if (GET_CODE (XEXP (x, 0)) == MULT)
4470 extra_cost = rtx_cost (XEXP (x, 0), code);
4471 if (!REG_OR_SUBREG_REG (XEXP (x, 1)))
4472 extra_cost += 4 * ARM_NUM_REGS (mode);
4473 return extra_cost;
4476 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4477 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4478 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4479 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4480 && arm_const_double_rtx (XEXP (x, 1))))
4481 ? 0 : 8));
4483 /* Fall through */
4484 case AND: case XOR: case IOR:
4485 extra_cost = 0;
4487 /* Normally the frame registers will be split into reg+const during
4488 reload, so it is a bad idea to combine them with other instructions,
4489 since then they might not be moved outside of loops. As a compromise
4490 we allow integration with ops that have a constant as their second
4491 operand. */
4492 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4493 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4494 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4495 || (REG_OR_SUBREG_REG (XEXP (x, 1))
4496 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
4497 extra_cost = 4;
4499 if (mode == DImode)
4500 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4501 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4502 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4503 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4504 ? 0 : 8));
4506 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4507 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4508 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4509 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4510 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4511 ? 0 : 4));
4513 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4514 return (1 + extra_cost
4515 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4516 || subcode == LSHIFTRT || subcode == ASHIFTRT
4517 || subcode == ROTATE || subcode == ROTATERT
4518 || (subcode == MULT
4519 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4520 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4521 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4522 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4523 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4524 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4525 ? 0 : 4));
4527 return 8;
4529 case MULT:
4530 /* This should have been handled by the CPU specific routines. */
4531 gcc_unreachable ();
4533 case TRUNCATE:
4534 if (arm_arch3m && mode == SImode
4535 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4536 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4537 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4538 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4539 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4540 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4541 return 8;
4542 return 99;
4544 case NEG:
4545 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4546 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4547 /* Fall through */
4548 case NOT:
4549 if (mode == DImode)
4550 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4552 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4554 case IF_THEN_ELSE:
4555 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4556 return 14;
4557 return 2;
4559 case COMPARE:
4560 return 1;
4562 case ABS:
4563 return 4 + (mode == DImode ? 4 : 0);
4565 case SIGN_EXTEND:
4566 if (GET_MODE (XEXP (x, 0)) == QImode)
4567 return (4 + (mode == DImode ? 4 : 0)
4568 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4569 /* Fall through */
4570 case ZERO_EXTEND:
4571 switch (GET_MODE (XEXP (x, 0)))
4573 case QImode:
4574 return (1 + (mode == DImode ? 4 : 0)
4575 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4577 case HImode:
4578 return (4 + (mode == DImode ? 4 : 0)
4579 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4581 case SImode:
4582 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4584 case V8QImode:
4585 case V4HImode:
4586 case V2SImode:
4587 case V4QImode:
4588 case V2HImode:
4589 return 1;
4591 default:
4592 gcc_unreachable ();
4594 gcc_unreachable ();
4596 case CONST_INT:
4597 if (const_ok_for_arm (INTVAL (x)))
4598 return outer == SET ? 2 : -1;
4599 else if (outer == AND
4600 && const_ok_for_arm (~INTVAL (x)))
4601 return -1;
4602 else if ((outer == COMPARE
4603 || outer == PLUS || outer == MINUS)
4604 && const_ok_for_arm (-INTVAL (x)))
4605 return -1;
4606 else
4607 return 5;
4609 case CONST:
4610 case LABEL_REF:
4611 case SYMBOL_REF:
4612 return 6;
4614 case CONST_DOUBLE:
4615 if (arm_const_double_rtx (x))
4616 return outer == SET ? 2 : -1;
4617 else if ((outer == COMPARE || outer == PLUS)
4618 && neg_const_double_rtx_ok_for_fpa (x))
4619 return -1;
4620 return 7;
4622 default:
4623 return 99;
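The CONST_INT rules above hinge on which of x, ~x and -x fit an ARM data-processing immediate (an 8-bit value rotated right by an even amount); -1 marks constants that ride along with the surrounding instruction for free, e.g. an AND with an invalid mask becomes BIC with its complement. A standalone sketch of the immediate test (a simplified stand-in for const_ok_for_arm, not the arm.c implementation):

#include <assert.h>

static int
arm_immediate_ok (unsigned long v)
{
  int r;

  v &= 0xffffffffUL;
  for (r = 0; r < 32; r += 2)
    {
      /* Rotate V left by R bits (mod 32) and test for an 8-bit value.  */
      unsigned long rot
        = r ? ((v << r) | (v >> (32 - r))) & 0xffffffffUL : v;
      if (rot <= 0xff)
        return 1;
    }
  return 0;
}

int
main (void)
{
  assert (arm_immediate_ok (0xff000000UL));   /* 0xff ror 8 */
  assert (!arm_immediate_ok (0x00000101UL));  /* needs two instructions */
  /* mov #-2 is invalid, but mvn of its complement (1) is fine.  */
  assert (!arm_immediate_ok (0xfffffffeUL)
          && arm_immediate_ok (~0xfffffffeUL & 0xffffffffUL));
  return 0;
}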
4627 /* RTX costs when optimizing for size. */
4628 static bool
4629 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4631 enum machine_mode mode = GET_MODE (x);
4633 if (TARGET_THUMB)
4635 /* XXX TBD. For now, use the standard costs. */
4636 *total = thumb_rtx_costs (x, code, outer_code);
4637 return true;
4640 switch (code)
4642 case MEM:
4643 /* A memory access costs 1 insn if the mode is small, or the address is
4644 a single register; otherwise it costs one insn per word.
4645 if (REG_P (XEXP (x, 0)))
4646 *total = COSTS_N_INSNS (1);
4647 else
4648 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4649 return true;
4651 case DIV:
4652 case MOD:
4653 case UDIV:
4654 case UMOD:
4655 /* Needs a libcall, so it costs about this. */
4656 *total = COSTS_N_INSNS (2);
4657 return false;
4659 case ROTATE:
4660 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4662 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4663 return true;
4665 /* Fall through */
4666 case ROTATERT:
4667 case ASHIFT:
4668 case LSHIFTRT:
4669 case ASHIFTRT:
4670 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4672 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4673 return true;
4675 else if (mode == SImode)
4677 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4678 /* Slightly disparage register shifts, but not by much. */
4679 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4680 *total += 1 + rtx_cost (XEXP (x, 1), code);
4681 return true;
4684 /* Needs a libcall. */
4685 *total = COSTS_N_INSNS (2);
4686 return false;
4688 case MINUS:
4689 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4691 *total = COSTS_N_INSNS (1);
4692 return false;
4695 if (mode == SImode)
4697 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4698 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4700 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4701 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4702 || subcode1 == ROTATE || subcode1 == ROTATERT
4703 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4704 || subcode1 == ASHIFTRT)
4706 /* It's just the cost of the two operands. */
4707 *total = 0;
4708 return false;
4711 *total = COSTS_N_INSNS (1);
4712 return false;
4715 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4716 return false;
4718 case PLUS:
4719 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4721 *total = COSTS_N_INSNS (1);
4722 return false;
4725 /* Fall through */
4726 case AND: case XOR: case IOR:
4727 if (mode == SImode)
4729 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4731 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4732 || subcode == LSHIFTRT || subcode == ASHIFTRT
4733 || (code == AND && subcode == NOT))
4735 /* It's just the cost of the two operands. */
4736 *total = 0;
4737 return false;
4741 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4742 return false;
4744 case MULT:
4745 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4746 return false;
4748 case NEG:
4749 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4750 *total = COSTS_N_INSNS (1);
4751 /* Fall through */
4752 case NOT:
4753 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4755 return false;
4757 case IF_THEN_ELSE:
4758 *total = 0;
4759 return false;
4761 case COMPARE:
4762 if (cc_register (XEXP (x, 0), VOIDmode))
5763 *total = 0;
4764 else
4765 *total = COSTS_N_INSNS (1);
4766 return false;
4768 case ABS:
4769 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4770 *total = COSTS_N_INSNS (1);
4771 else
4772 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4773 return false;
4775 case SIGN_EXTEND:
4776 *total = 0;
4777 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4779 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4780 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4782 if (mode == DImode)
4783 *total += COSTS_N_INSNS (1);
4784 return false;
4786 case ZERO_EXTEND:
4787 *total = 0;
4788 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4790 switch (GET_MODE (XEXP (x, 0)))
4792 case QImode:
4793 *total += COSTS_N_INSNS (1);
4794 break;
4796 case HImode:
4797 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4799 case SImode:
4800 break;
4802 default:
4803 *total += COSTS_N_INSNS (2);
4807 if (mode == DImode)
4808 *total += COSTS_N_INSNS (1);
4810 return false;
4812 case CONST_INT:
4813 if (const_ok_for_arm (INTVAL (x)))
4814 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4815 else if (const_ok_for_arm (~INTVAL (x)))
4816 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4817 else if (const_ok_for_arm (-INTVAL (x)))
4819 if (outer_code == COMPARE || outer_code == PLUS
4820 || outer_code == MINUS)
4821 *total = 0;
4822 else
4823 *total = COSTS_N_INSNS (1);
4825 else
4826 *total = COSTS_N_INSNS (2);
4827 return true;
4829 case CONST:
4830 case LABEL_REF:
4831 case SYMBOL_REF:
4832 *total = COSTS_N_INSNS (2);
4833 return true;
4835 case CONST_DOUBLE:
4836 *total = COSTS_N_INSNS (4);
4837 return true;
4839 default:
4840 if (mode != VOIDmode)
4841 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4842 else
4843 *total = COSTS_N_INSNS (4); /* Who knows? */
4844 return false;
4848 /* RTX costs for cores with a slow MUL implementation. */
4850 static bool
4851 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4853 enum machine_mode mode = GET_MODE (x);
4855 if (TARGET_THUMB)
4857 *total = thumb_rtx_costs (x, code, outer_code);
4858 return true;
4861 switch (code)
4863 case MULT:
4864 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4865 || mode == DImode)
4867 *total = 30;
4868 return true;
4871 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4873 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4874 & (unsigned HOST_WIDE_INT) 0xffffffff);
4875 int cost, const_ok = const_ok_for_arm (i);
4876 int j, booth_unit_size;
4878 /* Tune as appropriate. */
4879 cost = const_ok ? 4 : 8;
4880 booth_unit_size = 2;
4881 for (j = 0; i && j < 32; j += booth_unit_size)
4883 i >>= booth_unit_size;
4884 cost += 2;
4887 *total = cost;
4888 return true;
4891 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4892 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4893 return true;
4895 default:
4896 *total = arm_rtx_costs_1 (x, code, outer_code);
4897 return true;
4902 /* RTX cost for cores with a fast multiply unit (M variants). */
4904 static bool
4905 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4907 enum machine_mode mode = GET_MODE (x);
4909 if (TARGET_THUMB)
4911 *total = thumb_rtx_costs (x, code, outer_code);
4912 return true;
4915 switch (code)
4917 case MULT:
4918 /* There is no point basing this on the tuning, since it is always the
4919 fast variant if it exists at all. */
4920 if (mode == DImode
4921 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4922 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4923 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4925 *total = 8;
4926 return true;
4930 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4931 || mode == DImode)
4933 *total = 30;
4934 return true;
4937 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4939 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4940 & (unsigned HOST_WIDE_INT) 0xffffffff);
4941 int cost, const_ok = const_ok_for_arm (i);
4942 int j, booth_unit_size;
4944 /* Tune as appropriate. */
4945 cost = const_ok ? 4 : 8;
4946 booth_unit_size = 8;
4947 for (j = 0; i && j < 32; j += booth_unit_size)
4949 i >>= booth_unit_size;
4950 cost += 2;
4953 *total = cost;
4954 return true;
4957 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4958 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4959 return true;
4961 default:
4962 *total = arm_rtx_costs_1 (x, code, outer_code);
4963 return true;
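Both multiply cost routines share this Booth-style loop; the only tunable is booth_unit_size, 2 bits per step for the slow multiplier versus 8 for the fast 'M' variants. A runnable sketch (hypothetical helper):

#include <stdio.h>

static int
mul_const_cost (unsigned long i, int const_ok, int booth_unit_size)
{
  int j, cost = const_ok ? 4 : 8;   /* 8 when the constant needs a load */

  for (j = 0, i &= 0xffffffffUL; i && j < 32; j += booth_unit_size)
    {
      i >>= booth_unit_size;
      cost += 2;
    }
  return cost;
}

int
main (void)
{
  /* Multiplying by 0xff: the slow unit scans four 2-bit chunks,
     the fast unit a single 8-bit chunk.  */
  printf ("slowmul %d, fastmul %d\n",
          mul_const_cost (0xff, 1, 2),    /* 12 */
          mul_const_cost (0xff, 1, 8));   /* 6 */
  return 0;
}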
4968 /* RTX cost for XScale CPUs. */
4970 static bool
4971 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4973 enum machine_mode mode = GET_MODE (x);
4975 if (TARGET_THUMB)
4977 *total = thumb_rtx_costs (x, code, outer_code);
4978 return true;
4981 switch (code)
4983 case MULT:
4984 /* There is no point basing this on the tuning, since it is always the
4985 fast variant if it exists at all. */
4986 if (mode == DImode
4987 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4988 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4989 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4991 *total = 8;
4992 return true;
4996 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4997 || mode == DImode)
4999 *total = 30;
5000 return true;
5003 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5005 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5006 & (unsigned HOST_WIDE_INT) 0xffffffff);
5007 int cost, const_ok = const_ok_for_arm (i);
5008 unsigned HOST_WIDE_INT masked_const;
5010 /* The cost will be related to two insns.
5011 First a load of the constant (MOV or LDR), then a multiply. */
5012 cost = 2;
5013 if (! const_ok)
5014 cost += 1; /* LDR is probably more expensive because
5015 of longer result latency. */
5016 masked_const = i & 0xffff8000;
5017 if (masked_const != 0 && masked_const != 0xffff8000)
5019 masked_const = i & 0xf8000000;
5020 if (masked_const == 0 || masked_const == 0xf8000000)
5021 cost += 1;
5022 else
5023 cost += 2;
5025 *total = cost;
5026 return true;
5029 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5030 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5031 return true;
5033 case COMPARE:
5034 /* A COMPARE of a MULT is slow on XScale; the muls instruction
5035 will stall until the multiplication is complete. */
5036 if (GET_CODE (XEXP (x, 0)) == MULT)
5037 *total = 4 + rtx_cost (XEXP (x, 0), code);
5038 else
5039 *total = arm_rtx_costs_1 (x, code, outer_code);
5040 return true;
5042 default:
5043 *total = arm_rtx_costs_1 (x, code, outer_code);
5044 return true;
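The masked_const tests above encode XScale's early-terminating multiplier: no penalty when bits 15..31 of the constant are uniformly zero or one (a 16-bit signed multiplier), one extra cycle when at least bits 27..31 are uniform, two otherwise. As a standalone sketch (hypothetical helper):

static int
xscale_mul_extra_cycles (unsigned long i)
{
  unsigned long masked = i & 0xffff8000UL;

  if (masked == 0 || masked == 0xffff8000UL)
    return 0;                     /* terminates after 16 bits */
  masked = i & 0xf8000000UL;
  return (masked == 0 || masked == 0xf8000000UL) ? 1 : 2;
}
/* e.g. 0x7fff -> 0, 0x12345 -> 1, 0x87654321 -> 2.  */

int
main (void)
{
  return xscale_mul_extra_cycles (0x12345) == 1 ? 0 : 1;
}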
5049 /* RTX costs for 9e (and later) cores. */
5051 static bool
5052 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
5054 enum machine_mode mode = GET_MODE (x);
5055 int nonreg_cost;
5056 int cost;
5058 if (TARGET_THUMB)
5060 switch (code)
5062 case MULT:
5063 *total = COSTS_N_INSNS (3);
5064 return true;
5066 default:
5067 *total = thumb_rtx_costs (x, code, outer_code);
5068 return true;
5072 switch (code)
5074 case MULT:
5075 /* There is no point basing this on the tuning, since it is always the
5076 fast variant if it exists at all. */
5077 if (mode == DImode
5078 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5079 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5080 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5082 *total = 3;
5083 return true;
5087 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5089 *total = 30;
5090 return true;
5092 if (mode == DImode)
5094 cost = 7;
5095 nonreg_cost = 8;
5097 else
5099 cost = 2;
5100 nonreg_cost = 4;
5104 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
5105 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
5106 return true;
5108 default:
5109 *total = arm_rtx_costs_1 (x, code, outer_code);
5110 return true;
5113 /* All address computations that can be done are free, but rtx cost returns
5114 the same for practically all of them. So we weight the different types
5115 of address here in the order (most pref first):
5116 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
5117 static inline int
5118 arm_arm_address_cost (rtx x)
5120 enum rtx_code c = GET_CODE (x);
5122 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
5123 return 0;
5124 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
5125 return 10;
5127 if (c == PLUS || c == MINUS)
5129 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5130 return 2;
5132 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
5133 return 3;
5135 return 4;
5138 return 6;
5141 static inline int
5142 arm_thumb_address_cost (rtx x)
5144 enum rtx_code c = GET_CODE (x);
5146 if (c == REG)
5147 return 1;
5148 if (c == PLUS
5149 && GET_CODE (XEXP (x, 0)) == REG
5150 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5151 return 1;
5153 return 2;
5156 static int
5157 arm_address_cost (rtx x)
5159 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
5162 static int
5163 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
5165 rtx i_pat, d_pat;
5167 /* Some true dependencies can have a higher cost depending
5168 on precisely how certain input operands are used. */
5169 if (arm_tune_xscale
5170 && REG_NOTE_KIND (link) == 0
5171 && recog_memoized (insn) >= 0
5172 && recog_memoized (dep) >= 0)
5174 int shift_opnum = get_attr_shift (insn);
5175 enum attr_type attr_type = get_attr_type (dep);
5177 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5178 operand for INSN. If we have a shifted input operand and the
5179 instruction we depend on is another ALU instruction, then we may
5180 have to account for an additional stall. */
5181 if (shift_opnum != 0
5182 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
5184 rtx shifted_operand;
5185 int opno;
5187 /* Get the shifted operand. */
5188 extract_insn (insn);
5189 shifted_operand = recog_data.operand[shift_opnum];
5191 /* Iterate over all the operands in DEP. If we write an operand
5192 that overlaps with SHIFTED_OPERAND, then we have to increase the
5193 cost of this dependency. */
5194 extract_insn (dep);
5195 preprocess_constraints ();
5196 for (opno = 0; opno < recog_data.n_operands; opno++)
5198 /* We can ignore strict inputs. */
5199 if (recog_data.operand_type[opno] == OP_IN)
5200 continue;
5202 if (reg_overlap_mentioned_p (recog_data.operand[opno],
5203 shifted_operand))
5204 return 2;
5209 /* XXX This is not strictly true for the FPA. */
5210 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
5211 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
5212 return 0;
5214 /* Call insns don't incur a stall, even if they follow a load. */
5215 if (REG_NOTE_KIND (link) == 0
5216 && GET_CODE (insn) == CALL_INSN)
5217 return 1;
5219 if ((i_pat = single_set (insn)) != NULL
5220 && GET_CODE (SET_SRC (i_pat)) == MEM
5221 && (d_pat = single_set (dep)) != NULL
5222 && GET_CODE (SET_DEST (d_pat)) == MEM)
5224 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
5225 /* This is a load after a store; there is no conflict if the load reads
5226 from a cached area. Assume that loads from the stack, and from the
5227 constant pool are cached, and that others will miss. This is a
5228 hack. */
5230 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
5231 || reg_mentioned_p (stack_pointer_rtx, src_mem)
5232 || reg_mentioned_p (frame_pointer_rtx, src_mem)
5233 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
5234 return 1;
5237 return cost;
5240 static int fp_consts_inited = 0;
5242 /* Only zero is valid for VFP. Other values are also valid for FPA. */
5243 static const char * const strings_fp[8] =
5245 "0", "1", "2", "3",
5246 "4", "5", "0.5", "10"
5249 static REAL_VALUE_TYPE values_fp[8];
5251 static void
5252 init_fp_table (void)
5254 int i;
5255 REAL_VALUE_TYPE r;
5257 if (TARGET_VFP)
5258 fp_consts_inited = 1;
5259 else
5260 fp_consts_inited = 8;
5262 for (i = 0; i < fp_consts_inited; i++)
5264 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
5265 values_fp[i] = r;
5269 /* Return TRUE if rtx X is a valid immediate FP constant. */
5271 arm_const_double_rtx (rtx x)
5273 REAL_VALUE_TYPE r;
5274 int i;
5276 if (!fp_consts_inited)
5277 init_fp_table ();
5279 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5280 if (REAL_VALUE_MINUS_ZERO (r))
5281 return 0;
5283 for (i = 0; i < fp_consts_inited; i++)
5284 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5285 return 1;
5287 return 0;
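values_fp holds the classic FPA immediate set {0, 1, 2, 3, 4, 5, 0.5, 10} (VFP initialises only the single entry 0), with -0.0 explicitly rejected. The same check over plain doubles, as a hypothetical standalone sketch:

#include <assert.h>
#include <math.h>

static const double fpa_immediates[8]
  = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 0.5, 10.0 };

static int
fpa_immediate_ok (double d)
{
  int i;

  if (d == 0.0 && signbit (d))   /* reject -0.0, as the code above does */
    return 0;
  for (i = 0; i < 8; i++)
    if (d == fpa_immediates[i])
      return 1;
  return 0;
}

int
main (void)
{
  assert (fpa_immediate_ok (0.5));
  assert (!fpa_immediate_ok (0.25));  /* must come from the literal pool */
  assert (!fpa_immediate_ok (-0.0));
  return 0;
}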
5290 /* Return TRUE if the negation of rtx X is a valid immediate FPA constant. */
5292 neg_const_double_rtx_ok_for_fpa (rtx x)
5294 REAL_VALUE_TYPE r;
5295 int i;
5297 if (!fp_consts_inited)
5298 init_fp_table ();
5300 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5301 r = REAL_VALUE_NEGATE (r);
5302 if (REAL_VALUE_MINUS_ZERO (r))
5303 return 0;
5305 for (i = 0; i < 8; i++)
5306 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5307 return 1;
5309 return 0;
5312 /* Predicates for `match_operand' and `match_operator'. */
5314 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5316 cirrus_memory_offset (rtx op)
5318 /* Reject eliminable registers. */
5319 if (! (reload_in_progress || reload_completed)
5320 && ( reg_mentioned_p (frame_pointer_rtx, op)
5321 || reg_mentioned_p (arg_pointer_rtx, op)
5322 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5323 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5324 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5325 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5326 return 0;
5328 if (GET_CODE (op) == MEM)
5330 rtx ind;
5332 ind = XEXP (op, 0);
5334 /* Match: (mem (reg)). */
5335 if (GET_CODE (ind) == REG)
5336 return 1;
5338 /* Match:
5339 (mem (plus (reg)
5340 (const))). */
5341 if (GET_CODE (ind) == PLUS
5342 && GET_CODE (XEXP (ind, 0)) == REG
5343 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5344 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
5345 return 1;
5348 return 0;
5351 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5352 WB is true if writeback address modes are allowed. */
5355 arm_coproc_mem_operand (rtx op, bool wb)
5357 rtx ind;
5359 /* Reject eliminable registers. */
5360 if (! (reload_in_progress || reload_completed)
5361 && ( reg_mentioned_p (frame_pointer_rtx, op)
5362 || reg_mentioned_p (arg_pointer_rtx, op)
5363 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5364 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5365 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5366 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5367 return FALSE;
5369 /* Constants are converted into offsets from labels. */
5370 if (GET_CODE (op) != MEM)
5371 return FALSE;
5373 ind = XEXP (op, 0);
5375 if (reload_completed
5376 && (GET_CODE (ind) == LABEL_REF
5377 || (GET_CODE (ind) == CONST
5378 && GET_CODE (XEXP (ind, 0)) == PLUS
5379 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5380 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5381 return TRUE;
5383 /* Match: (mem (reg)). */
5384 if (GET_CODE (ind) == REG)
5385 return arm_address_register_rtx_p (ind, 0);
5387 /* Autoincrement addressing modes. */
5388 if (wb
5389 && (GET_CODE (ind) == PRE_INC
5390 || GET_CODE (ind) == POST_INC
5391 || GET_CODE (ind) == PRE_DEC
5392 || GET_CODE (ind) == POST_DEC))
5393 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5395 if (wb
5396 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5397 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5398 && GET_CODE (XEXP (ind, 1)) == PLUS
5399 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5400 ind = XEXP (ind, 1);
5402 /* Match:
5403 (plus (reg)
5404 (const)). */
5405 if (GET_CODE (ind) == PLUS
5406 && GET_CODE (XEXP (ind, 0)) == REG
5407 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5408 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5409 && INTVAL (XEXP (ind, 1)) > -1024
5410 && INTVAL (XEXP (ind, 1)) < 1024
5411 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5412 return TRUE;
5414 return FALSE;
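The final PLUS pattern above is the VFP/coprocessor load-store offset form: a word-aligned immediate strictly inside ±1024. As a one-line standalone check (hypothetical helper):

static int
coproc_offset_ok (long val)
{
  return val > -1024 && val < 1024 && (val & 3) == 0;
}
/* fldd d0, [r0, #1020] is accepted; #1024 and #2 are not.  */

int
main (void)
{
  return (coproc_offset_ok (1020)
          && !coproc_offset_ok (1024)
          && !coproc_offset_ok (2)) ? 0 : 1;
}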
5417 /* Return true if X is a register that will be eliminated later on. */
5419 arm_eliminable_register (rtx x)
5421 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5422 || REGNO (x) == ARG_POINTER_REGNUM
5423 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5424 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5427 /* Return GENERAL_REGS if a scratch register is required to reload X to/from
5428 VFP registers. Otherwise return NO_REGS. */
5430 enum reg_class
5431 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5433 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5434 return NO_REGS;
5436 return GENERAL_REGS;
5439 /* Values which must be returned in the most-significant end of the return
5440 register. */
5442 static bool
5443 arm_return_in_msb (tree valtype)
5445 return (TARGET_AAPCS_BASED
5446 && BYTES_BIG_ENDIAN
5447 && (AGGREGATE_TYPE_P (valtype)
5448 || TREE_CODE (valtype) == COMPLEX_TYPE));
5451 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5452 Used by the Cirrus Maverick code which has to work around
5453 a hardware bug triggered by such instructions. */
5454 static bool
5455 arm_memory_load_p (rtx insn)
5457 rtx body, lhs, rhs;
5459 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5460 return false;
5462 body = PATTERN (insn);
5464 if (GET_CODE (body) != SET)
5465 return false;
5467 lhs = XEXP (body, 0);
5468 rhs = XEXP (body, 1);
5470 lhs = REG_OR_SUBREG_RTX (lhs);
5472 /* If the destination is not a general purpose
5473 register we do not have to worry. */
5474 if (GET_CODE (lhs) != REG
5475 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5476 return false;
5478 /* As well as loads from memory we also have to react
5479 to loads of invalid constants which will be turned
5480 into loads from the minipool. */
5481 return (GET_CODE (rhs) == MEM
5482 || GET_CODE (rhs) == SYMBOL_REF
5483 || note_invalid_constants (insn, -1, false));
5486 /* Return TRUE if INSN is a Cirrus instruction. */
5487 static bool
5488 arm_cirrus_insn_p (rtx insn)
5490 enum attr_cirrus attr;
5492 /* get_attr cannot accept USE or CLOBBER. */
5493 if (!insn
5494 || GET_CODE (insn) != INSN
5495 || GET_CODE (PATTERN (insn)) == USE
5496 || GET_CODE (PATTERN (insn)) == CLOBBER)
5497 return 0;
5499 attr = get_attr_cirrus (insn);
5501 return attr != CIRRUS_NOT;
5504 /* Cirrus reorg for invalid instruction combinations. */
5505 static void
5506 cirrus_reorg (rtx first)
5508 enum attr_cirrus attr;
5509 rtx body = PATTERN (first);
5510 rtx t;
5511 int nops;
5513 /* Any branch must be followed by 2 non-Cirrus instructions. */
5514 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5516 nops = 0;
5517 t = next_nonnote_insn (first);
5519 if (arm_cirrus_insn_p (t))
5520 ++ nops;
5522 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5523 ++ nops;
5525 while (nops --)
5526 emit_insn_after (gen_nop (), first);
5528 return;
5531 /* (float (blah)) is in parallel with a clobber. */
5532 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5533 body = XVECEXP (body, 0, 0);
5535 if (GET_CODE (body) == SET)
5537 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5539 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5540 be followed by a non-Cirrus insn. */
5541 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5543 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5544 emit_insn_after (gen_nop (), first);
5546 return;
5548 else if (arm_memory_load_p (first))
5550 unsigned int arm_regno;
5552 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5553 ldr/cfmv64hr combination where the Rd field is the same
5554 in both instructions must be split with a non-Cirrus
5555 insn. Example:
5557 ldr r0, blah
5559 cfmvsr mvf0, r0. */
5561 /* Get Arm register number for ldr insn. */
5562 if (GET_CODE (lhs) == REG)
5563 arm_regno = REGNO (lhs);
5564 else
5566 gcc_assert (GET_CODE (rhs) == REG);
5567 arm_regno = REGNO (rhs);
5570 /* Next insn. */
5571 first = next_nonnote_insn (first);
5573 if (! arm_cirrus_insn_p (first))
5574 return;
5576 body = PATTERN (first);
5578 /* (float (blah)) is in parallel with a clobber. */
5579 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5580 body = XVECEXP (body, 0, 0);
5582 if (GET_CODE (body) == FLOAT)
5583 body = XEXP (body, 0);
5585 if (get_attr_cirrus (first) == CIRRUS_MOVE
5586 && GET_CODE (XEXP (body, 1)) == REG
5587 && arm_regno == REGNO (XEXP (body, 1)))
5588 emit_insn_after (gen_nop (), first);
5590 return;
5594 /* get_attr cannot accept USE or CLOBBER. */
5595 if (!first
5596 || GET_CODE (first) != INSN
5597 || GET_CODE (PATTERN (first)) == USE
5598 || GET_CODE (PATTERN (first)) == CLOBBER)
5599 return;
5601 attr = get_attr_cirrus (first);
5603 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5604 must be followed by a non-coprocessor instruction. */
5605 if (attr == CIRRUS_COMPARE)
5607 nops = 0;
5609 t = next_nonnote_insn (first);
5611 if (arm_cirrus_insn_p (t))
5612 ++ nops;
5614 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5615 ++ nops;
5617 while (nops --)
5618 emit_insn_after (gen_nop (), first);
5620 return;
5624 /* Return TRUE if X references a SYMBOL_REF. */
5626 symbol_mentioned_p (rtx x)
5628 const char * fmt;
5629 int i;
5631 if (GET_CODE (x) == SYMBOL_REF)
5632 return 1;
5634 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
5635 are constant offsets, not symbols. */
5636 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5637 return 0;
5639 fmt = GET_RTX_FORMAT (GET_CODE (x));
5641 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5643 if (fmt[i] == 'E')
5645 int j;
5647 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5648 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5649 return 1;
5651 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5652 return 1;
5655 return 0;
5658 /* Return TRUE if X references a LABEL_REF. */
5660 label_mentioned_p (rtx x)
5662 const char * fmt;
5663 int i;
5665 if (GET_CODE (x) == LABEL_REF)
5666 return 1;
5668 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
5669 instruction, but they are constant offsets, not symbols. */
5670 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5671 return 0;
5673 fmt = GET_RTX_FORMAT (GET_CODE (x));
5674 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5676 if (fmt[i] == 'E')
5678 int j;
5680 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5681 if (label_mentioned_p (XVECEXP (x, i, j)))
5682 return 1;
5684 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5685 return 1;
5688 return 0;
5692 tls_mentioned_p (rtx x)
5694 switch (GET_CODE (x))
5696 case CONST:
5697 return tls_mentioned_p (XEXP (x, 0));
5699 case UNSPEC:
5700 if (XINT (x, 1) == UNSPEC_TLS)
5701 return 1;
5703 default:
5704 return 0;
5708 /* Must not copy a SET whose source operand is PC-relative. */
5710 static bool
5711 arm_cannot_copy_insn_p (rtx insn)
5713 rtx pat = PATTERN (insn);
5715 if (GET_CODE (pat) == PARALLEL
5716 && GET_CODE (XVECEXP (pat, 0, 0)) == SET)
5718 rtx rhs = SET_SRC (XVECEXP (pat, 0, 0));
5720 if (GET_CODE (rhs) == UNSPEC
5721 && XINT (rhs, 1) == UNSPEC_PIC_BASE)
5722 return TRUE;
5724 if (GET_CODE (rhs) == MEM
5725 && GET_CODE (XEXP (rhs, 0)) == UNSPEC
5726 && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
5727 return TRUE;
5730 return FALSE;
5733 enum rtx_code
5734 minmax_code (rtx x)
5736 enum rtx_code code = GET_CODE (x);
5738 switch (code)
5740 case SMAX:
5741 return GE;
5742 case SMIN:
5743 return LE;
5744 case UMIN:
5745 return LEU;
5746 case UMAX:
5747 return GEU;
5748 default:
5749 gcc_unreachable ();
5753 /* Return 1 if memory locations are adjacent. */
5755 adjacent_mem_locations (rtx a, rtx b)
5757 /* We don't guarantee to preserve the order of these memory refs. */
5758 if (volatile_refs_p (a) || volatile_refs_p (b))
5759 return 0;
5761 if ((GET_CODE (XEXP (a, 0)) == REG
5762 || (GET_CODE (XEXP (a, 0)) == PLUS
5763 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5764 && (GET_CODE (XEXP (b, 0)) == REG
5765 || (GET_CODE (XEXP (b, 0)) == PLUS
5766 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5768 HOST_WIDE_INT val0 = 0, val1 = 0;
5769 rtx reg0, reg1;
5770 int val_diff;
5772 if (GET_CODE (XEXP (a, 0)) == PLUS)
5774 reg0 = XEXP (XEXP (a, 0), 0);
5775 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5777 else
5778 reg0 = XEXP (a, 0);
5780 if (GET_CODE (XEXP (b, 0)) == PLUS)
5782 reg1 = XEXP (XEXP (b, 0), 0);
5783 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5785 else
5786 reg1 = XEXP (b, 0);
5788 /* Don't accept any offset that will require multiple
5789 instructions to handle, since this would cause the
5790 arith_adjacentmem pattern to output an overlong sequence. */
5791 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5792 return 0;
5794 /* Don't allow an eliminable register: register elimination can make
5795 the offset too large. */
5796 if (arm_eliminable_register (reg0))
5797 return 0;
5799 val_diff = val1 - val0;
5801 if (arm_ld_sched)
5803 /* If the target has load delay slots, then there's no benefit
5804 to using an ldm instruction unless the offset is zero and
5805 we are optimizing for size. */
5806 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5807 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5808 && (val_diff == 4 || val_diff == -4));
5811 return ((REGNO (reg0) == REGNO (reg1))
5812 && (val_diff == 4 || val_diff == -4));
5815 return 0;
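Condensing the checks above: two references pair up for ldm/stm only when they share a base register and sit exactly four bytes apart, and on load-delay-slot cores (arm_ld_sched) only the size-optimised offset-0/4 cases survive. A sketch over plain offsets (hypothetical helper):

#include <assert.h>

static int
adjacent_ok (int reg0, long val0, int reg1, long val1,
             int ld_sched, int opt_size)
{
  long diff = val1 - val0;

  if (reg0 != reg1 || (diff != 4 && diff != -4))
    return 0;
  if (ld_sched)   /* ARM8/9, StrongARM: ldm rarely wins */
    return opt_size
           && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4);
  return 1;
}

int
main (void)
{
  assert (adjacent_ok (0, 8, 0, 12, 0, 0));   /* plain core: pair them */
  assert (!adjacent_ok (0, 8, 0, 12, 1, 1));  /* ld_sched: too far out */
  assert (adjacent_ok (0, 0, 0, 4, 1, 1));    /* ld_sched, -Os, offset 0 */
  return 0;
}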
5819 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5820 HOST_WIDE_INT *load_offset)
5822 int unsorted_regs[4];
5823 HOST_WIDE_INT unsorted_offsets[4];
5824 int order[4];
5825 int base_reg = -1;
5826 int i;
5828 /* Can only handle 2, 3, or 4 insns at present,
5829 though could be easily extended if required. */
5830 gcc_assert (nops >= 2 && nops <= 4);
5832 /* Loop over the operands and check that the memory references are
5833 suitable (i.e. immediate offsets from the same base register). At
5834 the same time, extract the target register, and the memory
5835 offsets. */
5836 for (i = 0; i < nops; i++)
5838 rtx reg;
5839 rtx offset;
5841 /* Convert a subreg of a mem into the mem itself. */
5842 if (GET_CODE (operands[nops + i]) == SUBREG)
5843 operands[nops + i] = alter_subreg (operands + (nops + i));
5845 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5847 /* Don't reorder volatile memory references; it doesn't seem worth
5848 looking for the case where the order is ok anyway. */
5849 if (MEM_VOLATILE_P (operands[nops + i]))
5850 return 0;
5852 offset = const0_rtx;
5854 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5855 || (GET_CODE (reg) == SUBREG
5856 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5857 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5858 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5859 == REG)
5860 || (GET_CODE (reg) == SUBREG
5861 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5862 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5863 == CONST_INT)))
5865 if (i == 0)
5867 base_reg = REGNO (reg);
5868 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5869 ? REGNO (operands[i])
5870 : REGNO (SUBREG_REG (operands[i])));
5871 order[0] = 0;
5873 else
5875 if (base_reg != (int) REGNO (reg))
5876 /* Not addressed from the same base register. */
5877 return 0;
5879 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5880 ? REGNO (operands[i])
5881 : REGNO (SUBREG_REG (operands[i])));
5882 if (unsorted_regs[i] < unsorted_regs[order[0]])
5883 order[0] = i;
5886 /* If it isn't an integer register, or if it overwrites the
5887 base register but isn't the last insn in the list, then
5888 we can't do this. */
5889 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5890 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5891 return 0;
5893 unsorted_offsets[i] = INTVAL (offset);
5895 else
5896 /* Not a suitable memory address. */
5897 return 0;
5900 /* All the useful information has now been extracted from the
5901 operands into unsorted_regs and unsorted_offsets; additionally,
5902 order[0] has been set to the lowest numbered register in the
5903 list. Sort the registers into order, and check that the memory
5904 offsets are ascending and adjacent. */
5906 for (i = 1; i < nops; i++)
5908 int j;
5910 order[i] = order[i - 1];
5911 for (j = 0; j < nops; j++)
5912 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5913 && (order[i] == order[i - 1]
5914 || unsorted_regs[j] < unsorted_regs[order[i]]))
5915 order[i] = j;
5917 /* Have we found a suitable register? If not, one must be used more
5918 than once. */
5919 if (order[i] == order[i - 1])
5920 return 0;
5922 /* Are the memory addresses adjacent and ascending? */
5923 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5924 return 0;
5927 if (base)
5929 *base = base_reg;
5931 for (i = 0; i < nops; i++)
5932 regs[i] = unsorted_regs[order[i]];
5934 *load_offset = unsorted_offsets[order[0]];
5937 if (unsorted_offsets[order[0]] == 0)
5938 return 1; /* ldmia */
5940 if (unsorted_offsets[order[0]] == 4)
5941 return 2; /* ldmib */
5943 if (unsorted_offsets[order[nops - 1]] == 0)
5944 return 3; /* ldmda */
5946 if (unsorted_offsets[order[nops - 1]] == -4)
5947 return 4; /* ldmdb */
5949 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5950 if the offset isn't small enough. The reason 2 ldrs are faster
5951 is because these ARMs are able to do more than one cache access
5952 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5953 whilst the ARM8 has a double bandwidth cache. This means that
5954 these cores can do both an instruction fetch and a data fetch in
5955 a single cycle, so the trick of calculating the address into a
5956 scratch register (one of the result regs) and then doing a load
5957 multiple actually becomes slower (and no smaller in code size).
5958 That is the transformation
5960 ldr rd1, [rbase + offset]
5961 ldr rd2, [rbase + offset + 4]
5963 to
5965 add rd1, rbase, offset
5966 ldmia rd1, {rd1, rd2}
5968 produces worse code -- '3 cycles + any stalls on rd2' instead of
5969 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5970 access per cycle, the first sequence could never complete in less
5971 than 6 cycles, whereas the ldm sequence would only take 5 and
5972 would make better use of sequential accesses if not hitting the
5973 cache.
5975 We cheat here and test 'arm_ld_sched' which we currently know to
5976 only be true for the ARM8, ARM9 and StrongARM. If this ever
5977 changes, then the test below needs to be reworked. */
5978 if (nops == 2 && arm_ld_sched)
5979 return 0;
5981 /* Can't do it without setting up the offset, only do this if it takes
5982 no more than one insn. */
5983 return (const_ok_for_arm (unsorted_offsets[order[0]])
5984 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
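/* Illustrative example (not in the original source; register numbers are
   hypothetical): for operands loading r1 from [r4] and r2 from [r4, #4],
   the sorted offsets are 0 and 4, so the code above returns 1 and
   emit_ldm_seq below emits

	ldmia	r4, {r1, r2}
*/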
5987 const char *
5988 emit_ldm_seq (rtx *operands, int nops)
5990 int regs[4];
5991 int base_reg;
5992 HOST_WIDE_INT offset;
5993 char buf[100];
5994 int i;
5996 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5998 case 1:
5999 strcpy (buf, "ldm%?ia\t");
6000 break;
6002 case 2:
6003 strcpy (buf, "ldm%?ib\t");
6004 break;
6006 case 3:
6007 strcpy (buf, "ldm%?da\t");
6008 break;
6010 case 4:
6011 strcpy (buf, "ldm%?db\t");
6012 break;
6014 case 5:
6015 if (offset >= 0)
6016 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6017 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6018 (long) offset);
6019 else
6020 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6021 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6022 (long) -offset);
6023 output_asm_insn (buf, operands);
6024 base_reg = regs[0];
6025 strcpy (buf, "ldm%?ia\t");
6026 break;
6028 default:
6029 gcc_unreachable ();
6032 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6033 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6035 for (i = 1; i < nops; i++)
6036 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6037 reg_names[regs[i]]);
6039 strcat (buf, "}\t%@ phole ldm");
6041 output_asm_insn (buf, operands);
6042 return "";
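/* A sketch of the case-5 path above (assumed values): with a base offset
   of 8 and r1 as the lowest-numbered destination register, the output is

	add	r1, r4, #8
	ldmia	r1, {r1, r2}

   trading one extra insn for the ldm's offset-free addressing.  */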
6045 int
6046 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6047 HOST_WIDE_INT * load_offset)
6049 int unsorted_regs[4];
6050 HOST_WIDE_INT unsorted_offsets[4];
6051 int order[4];
6052 int base_reg = -1;
6053 int i;
6055 /* Can only handle 2, 3, or 4 insns at present, though could be easily
6056 extended if required. */
6057 gcc_assert (nops >= 2 && nops <= 4);
6059 /* Loop over the operands and check that the memory references are
6060 suitable (i.e. immediate offsets from the same base register). At
6061 the same time, extract the target register, and the memory
6062 offsets. */
6063 for (i = 0; i < nops; i++)
6065 rtx reg;
6066 rtx offset;
6068 /* Convert a subreg of a mem into the mem itself. */
6069 if (GET_CODE (operands[nops + i]) == SUBREG)
6070 operands[nops + i] = alter_subreg (operands + (nops + i));
6072 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6074 /* Don't reorder volatile memory references; it doesn't seem worth
6075 looking for the case where the order is ok anyway. */
6076 if (MEM_VOLATILE_P (operands[nops + i]))
6077 return 0;
6079 offset = const0_rtx;
6081 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6082 || (GET_CODE (reg) == SUBREG
6083 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6084 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6085 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6086 == REG)
6087 || (GET_CODE (reg) == SUBREG
6088 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6089 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6090 == CONST_INT)))
6092 if (i == 0)
6094 base_reg = REGNO (reg);
6095 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6096 ? REGNO (operands[i])
6097 : REGNO (SUBREG_REG (operands[i])));
6098 order[0] = 0;
6100 else
6102 if (base_reg != (int) REGNO (reg))
6103 /* Not addressed from the same base register. */
6104 return 0;
6106 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6107 ? REGNO (operands[i])
6108 : REGNO (SUBREG_REG (operands[i])));
6109 if (unsorted_regs[i] < unsorted_regs[order[0]])
6110 order[0] = i;
6113 /* If it isn't an integer register, then we can't do this. */
6114 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
6115 return 0;
6117 unsorted_offsets[i] = INTVAL (offset);
6119 else
6120 /* Not a suitable memory address. */
6121 return 0;
6124 /* All the useful information has now been extracted from the
6125 operands into unsorted_regs and unsorted_offsets; additionally,
6126 order[0] has been set to the lowest numbered register in the
6127 list. Sort the registers into order, and check that the memory
6128 offsets are ascending and adjacent. */
6130 for (i = 1; i < nops; i++)
6132 int j;
6134 order[i] = order[i - 1];
6135 for (j = 0; j < nops; j++)
6136 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6137 && (order[i] == order[i - 1]
6138 || unsorted_regs[j] < unsorted_regs[order[i]]))
6139 order[i] = j;
6141 /* Have we found a suitable register?  If not, one must be used more
6142 than once. */
6143 if (order[i] == order[i - 1])
6144 return 0;
6146 /* Are the memory offsets adjacent and ascending? */
6147 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6148 return 0;
6151 if (base)
6153 *base = base_reg;
6155 for (i = 0; i < nops; i++)
6156 regs[i] = unsorted_regs[order[i]];
6158 *load_offset = unsorted_offsets[order[0]];
6161 if (unsorted_offsets[order[0]] == 0)
6162 return 1; /* stmia */
6164 if (unsorted_offsets[order[0]] == 4)
6165 return 2; /* stmib */
6167 if (unsorted_offsets[order[nops - 1]] == 0)
6168 return 3; /* stmda */
6170 if (unsorted_offsets[order[nops - 1]] == -4)
6171 return 4; /* stmdb */
6173 return 0;
6176 const char *
6177 emit_stm_seq (rtx *operands, int nops)
6179 int regs[4];
6180 int base_reg;
6181 HOST_WIDE_INT offset;
6182 char buf[100];
6183 int i;
6185 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6187 case 1:
6188 strcpy (buf, "stm%?ia\t");
6189 break;
6191 case 2:
6192 strcpy (buf, "stm%?ib\t");
6193 break;
6195 case 3:
6196 strcpy (buf, "stm%?da\t");
6197 break;
6199 case 4:
6200 strcpy (buf, "stm%?db\t");
6201 break;
6203 default:
6204 gcc_unreachable ();
6207 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6208 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6210 for (i = 1; i < nops; i++)
6211 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6212 reg_names[regs[i]]);
6214 strcat (buf, "}\t%@ phole stm");
6216 output_asm_insn (buf, operands);
6217 return "";
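/* Illustrative output (hypothetical registers): storing r1 and r2 to
   [r4] and [r4, #4] selects case 1 above and emits

	stmia	r4, {r1, r2}	@ phole stm
*/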
6220 /* Routines for use in generating RTL. */
6222 rtx
6223 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
6224 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6226 HOST_WIDE_INT offset = *offsetp;
6227 int i = 0, j;
6228 rtx result;
6229 int sign = up ? 1 : -1;
6230 rtx mem, addr;
6232 /* XScale has load-store double instructions, but they have stricter
6233 alignment requirements than load-store multiple, so we cannot
6234 use them.
6236 For XScale ldm requires 2 + NREGS cycles to complete and blocks
6237 the pipeline until completion.
6239 NREGS CYCLES
6240 1 3
6241 2 4
6242 3 5
6243 4 6
6245 An ldr instruction takes 1-3 cycles, but does not block the
6246 pipeline.
6248 NREGS CYCLES
6249 1 1-3
6250 2 2-6
6251 3 3-9
6252 4 4-12
6254 Best case ldr will always win. However, the more ldr instructions
6255 we issue, the less likely we are to be able to schedule them well.
6256 Using ldr instructions also increases code size.
6258 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6259 for counts of 3 or 4 regs. */
6260 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6262 rtx seq;
6264 start_sequence ();
6266 for (i = 0; i < count; i++)
6268 addr = plus_constant (from, i * 4 * sign);
6269 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6270 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
6271 offset += 4 * sign;
6274 if (write_back)
6276 emit_move_insn (from, plus_constant (from, count * 4 * sign));
6277 *offsetp = offset;
6280 seq = get_insns ();
6281 end_sequence ();
6283 return seq;
6286 result = gen_rtx_PARALLEL (VOIDmode,
6287 rtvec_alloc (count + (write_back ? 1 : 0)));
6288 if (write_back)
6290 XVECEXP (result, 0, 0)
6291 = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
6292 i = 1;
6293 count++;
6296 for (j = 0; i < count; i++, j++)
6298 addr = plus_constant (from, j * 4 * sign);
6299 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6300 XVECEXP (result, 0, i)
6301 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
6302 offset += 4 * sign;
6305 if (write_back)
6306 *offsetp = offset;
6308 return result;
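/* As an illustration (assumed, simplified RTL): for base_regno == 0,
   count == 2, write_back set and an ascending FROM, the PARALLEL built
   above looks like

     (parallel [(set (reg from) (plus (reg from) (const_int 8)))
                (set (reg:SI 0) (mem:SI (reg from)))
                (set (reg:SI 1) (mem:SI (plus (reg from) (const_int 4))))])
*/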
6311 rtx
6312 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
6313 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6315 HOST_WIDE_INT offset = *offsetp;
6316 int i = 0, j;
6317 rtx result;
6318 int sign = up ? 1 : -1;
6319 rtx mem, addr;
6321 /* See arm_gen_load_multiple for discussion of
6322 the pros/cons of ldm/stm usage for XScale. */
6323 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6325 rtx seq;
6327 start_sequence ();
6329 for (i = 0; i < count; i++)
6331 addr = plus_constant (to, i * 4 * sign);
6332 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6333 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6334 offset += 4 * sign;
6337 if (write_back)
6339 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6340 *offsetp = offset;
6343 seq = get_insns ();
6344 end_sequence ();
6346 return seq;
6349 result = gen_rtx_PARALLEL (VOIDmode,
6350 rtvec_alloc (count + (write_back ? 1 : 0)));
6351 if (write_back)
6353 XVECEXP (result, 0, 0)
6354 = gen_rtx_SET (VOIDmode, to,
6355 plus_constant (to, count * 4 * sign));
6356 i = 1;
6357 count++;
6360 for (j = 0; i < count; i++, j++)
6362 addr = plus_constant (to, j * 4 * sign);
6363 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6364 XVECEXP (result, 0, i)
6365 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6366 offset += 4 * sign;
6369 if (write_back)
6370 *offsetp = offset;
6372 return result;
6375 int
6376 arm_gen_movmemqi (rtx *operands)
6378 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6379 HOST_WIDE_INT srcoffset, dstoffset;
6380 int i;
6381 rtx src, dst, srcbase, dstbase;
6382 rtx part_bytes_reg = NULL;
6383 rtx mem;
6385 if (GET_CODE (operands[2]) != CONST_INT
6386 || GET_CODE (operands[3]) != CONST_INT
6387 || INTVAL (operands[2]) > 64
6388 || INTVAL (operands[3]) & 3)
6389 return 0;
6391 dstbase = operands[0];
6392 srcbase = operands[1];
6394 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
6395 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
6397 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6398 out_words_to_go = INTVAL (operands[2]) / 4;
6399 last_bytes = INTVAL (operands[2]) & 3;
6400 dstoffset = srcoffset = 0;
6402 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6403 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6405 for (i = 0; in_words_to_go >= 2; i+=4)
6407 if (in_words_to_go > 4)
6408 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6409 srcbase, &srcoffset));
6410 else
6411 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6412 FALSE, srcbase, &srcoffset));
6414 if (out_words_to_go)
6416 if (out_words_to_go > 4)
6417 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6418 dstbase, &dstoffset));
6419 else if (out_words_to_go != 1)
6420 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6421 dst, TRUE,
6422 (last_bytes == 0
6423 ? FALSE : TRUE),
6424 dstbase, &dstoffset));
6425 else
6427 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6428 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6429 if (last_bytes != 0)
6431 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6432 dstoffset += 4;
6437 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6438 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6441 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6442 if (out_words_to_go)
6444 rtx sreg;
6446 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6447 sreg = copy_to_reg (mem);
6449 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6450 emit_move_insn (mem, sreg);
6451 in_words_to_go--;
6453 gcc_assert (!in_words_to_go); /* Sanity check */
6456 if (in_words_to_go)
6458 gcc_assert (in_words_to_go > 0);
6460 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6461 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6464 gcc_assert (!last_bytes || part_bytes_reg);
6466 if (BYTES_BIG_ENDIAN && last_bytes)
6468 rtx tmp = gen_reg_rtx (SImode);
6470 /* The bytes we want are in the top end of the word. */
6471 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6472 GEN_INT (8 * (4 - last_bytes))));
6473 part_bytes_reg = tmp;
6475 while (last_bytes)
6477 mem = adjust_automodify_address (dstbase, QImode,
6478 plus_constant (dst, last_bytes - 1),
6479 dstoffset + last_bytes - 1);
6480 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6482 if (--last_bytes)
6484 tmp = gen_reg_rtx (SImode);
6485 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6486 part_bytes_reg = tmp;
6491 else
6493 if (last_bytes > 1)
6495 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6496 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6497 last_bytes -= 2;
6498 if (last_bytes)
6500 rtx tmp = gen_reg_rtx (SImode);
6501 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6502 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6503 part_bytes_reg = tmp;
6504 dstoffset += 2;
6508 if (last_bytes)
6510 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6511 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6515 return 1;
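/* Worked example (assumed sizes): copying 7 bytes gives in_words_to_go
   == 2, out_words_to_go == 1 and last_bytes == 3; the loop above emits
   one word-sized load/store pair, and the little-endian tail code then
   stores a halfword followed by a single byte.  */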
6518 /* Select a dominance comparison mode if possible for a test of the general
6519 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6520 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6521 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6522 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6523 In all cases OP will be either EQ or NE, but we don't need to know which
6524 here. If we are unable to support a dominance comparison we return
6525 CCmode. This will then fail to match for the RTL expressions that
6526 generate this call. */
6527 enum machine_mode
6528 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6530 enum rtx_code cond1, cond2;
6531 int swapped = 0;
6533 /* Currently we will probably get the wrong result if the individual
6534 comparisons are not simple. This also ensures that it is safe to
6535 reverse a comparison if necessary. */
6536 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6537 != CCmode)
6538 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6539 != CCmode))
6540 return CCmode;
6542 /* The if_then_else variant of this tests the second condition if the
6543 first passes, but is true if the first fails. Reverse the first
6544 condition to get a true "inclusive-or" expression. */
6545 if (cond_or == DOM_CC_NX_OR_Y)
6546 cond1 = reverse_condition (cond1);
6548 /* If the comparisons are not equal, and one doesn't dominate the other,
6549 then we can't do this. */
6550 if (cond1 != cond2
6551 && !comparison_dominates_p (cond1, cond2)
6552 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6553 return CCmode;
6555 if (swapped)
6557 enum rtx_code temp = cond1;
6558 cond1 = cond2;
6559 cond2 = temp;
6562 switch (cond1)
6564 case EQ:
6565 if (cond_or == DOM_CC_X_AND_Y)
6566 return CC_DEQmode;
6568 switch (cond2)
6570 case EQ: return CC_DEQmode;
6571 case LE: return CC_DLEmode;
6572 case LEU: return CC_DLEUmode;
6573 case GE: return CC_DGEmode;
6574 case GEU: return CC_DGEUmode;
6575 default: gcc_unreachable ();
6578 case LT:
6579 if (cond_or == DOM_CC_X_AND_Y)
6580 return CC_DLTmode;
6582 switch (cond2)
6584 case LT:
6585 return CC_DLTmode;
6586 case LE:
6587 return CC_DLEmode;
6588 case NE:
6589 return CC_DNEmode;
6590 default:
6591 gcc_unreachable ();
6594 case GT:
6595 if (cond_or == DOM_CC_X_AND_Y)
6596 return CC_DGTmode;
6598 switch (cond2)
6600 case GT:
6601 return CC_DGTmode;
6602 case GE:
6603 return CC_DGEmode;
6604 case NE:
6605 return CC_DNEmode;
6606 default:
6607 gcc_unreachable ();
6610 case LTU:
6611 if (cond_or == DOM_CC_X_AND_Y)
6612 return CC_DLTUmode;
6614 switch (cond2)
6616 case LTU:
6617 return CC_DLTUmode;
6618 case LEU:
6619 return CC_DLEUmode;
6620 case NE:
6621 return CC_DNEmode;
6622 default:
6623 gcc_unreachable ();
6626 case GTU:
6627 if (cond_or == DOM_CC_X_AND_Y)
6628 return CC_DGTUmode;
6630 switch (cond2)
6632 case GTU:
6633 return CC_DGTUmode;
6634 case GEU:
6635 return CC_DGEUmode;
6636 case NE:
6637 return CC_DNEmode;
6638 default:
6639 gcc_unreachable ();
6642 /* The remaining cases only occur when both comparisons are the
6643 same. */
6644 case NE:
6645 gcc_assert (cond1 == cond2);
6646 return CC_DNEmode;
6648 case LE:
6649 gcc_assert (cond1 == cond2);
6650 return CC_DLEmode;
6652 case GE:
6653 gcc_assert (cond1 == cond2);
6654 return CC_DGEmode;
6656 case LEU:
6657 gcc_assert (cond1 == cond2);
6658 return CC_DLEUmode;
6660 case GEU:
6661 gcc_assert (cond1 == cond2);
6662 return CC_DGEUmode;
6664 default:
6665 gcc_unreachable ();
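/* Example (hypothetical operands): for X = (eq a 0), Y = (eq b 0) and
   COND_OR == DOM_CC_X_AND_Y, both sub-comparisons select plain CCmode,
   cond1 == cond2 == EQ, and the EQ case above returns CC_DEQmode.  */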
6669 enum machine_mode
6670 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6672 /* All floating point compares return CCFP if it is an equality
6673 comparison, and CCFPE otherwise. */
6674 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6676 switch (op)
6678 case EQ:
6679 case NE:
6680 case UNORDERED:
6681 case ORDERED:
6682 case UNLT:
6683 case UNLE:
6684 case UNGT:
6685 case UNGE:
6686 case UNEQ:
6687 case LTGT:
6688 return CCFPmode;
6690 case LT:
6691 case LE:
6692 case GT:
6693 case GE:
6694 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6695 return CCFPmode;
6696 return CCFPEmode;
6698 default:
6699 gcc_unreachable ();
6703 /* A compare with a shifted operand. Because of canonicalization, the
6704 comparison will have to be swapped when we emit the assembler. */
6705 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6706 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6707 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6708 || GET_CODE (x) == ROTATERT))
6709 return CC_SWPmode;
6711 /* This operation is performed swapped, but since we only rely on the Z
6712 flag we don't need an additional mode. */
6713 if (GET_MODE (y) == SImode && REG_P (y)
6714 && GET_CODE (x) == NEG
6715 && (op == EQ || op == NE))
6716 return CC_Zmode;
6718 /* This is a special case that is used by combine to allow a
6719 comparison of a shifted byte load to be split into a zero-extend
6720 followed by a comparison of the shifted integer (only valid for
6721 equalities and unsigned inequalities). */
6722 if (GET_MODE (x) == SImode
6723 && GET_CODE (x) == ASHIFT
6724 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6725 && GET_CODE (XEXP (x, 0)) == SUBREG
6726 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6727 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6728 && (op == EQ || op == NE
6729 || op == GEU || op == GTU || op == LTU || op == LEU)
6730 && GET_CODE (y) == CONST_INT)
6731 return CC_Zmode;
6733 /* A construct for a conditional compare: if the false arm contains
6734 0, then both conditions must be true; otherwise either condition
6735 must be true. Not all conditions are possible, so CCmode is
6736 returned if it can't be done. */
6737 if (GET_CODE (x) == IF_THEN_ELSE
6738 && (XEXP (x, 2) == const0_rtx
6739 || XEXP (x, 2) == const1_rtx)
6740 && COMPARISON_P (XEXP (x, 0))
6741 && COMPARISON_P (XEXP (x, 1)))
6742 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6743 INTVAL (XEXP (x, 2)));
6745 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6746 if (GET_CODE (x) == AND
6747 && COMPARISON_P (XEXP (x, 0))
6748 && COMPARISON_P (XEXP (x, 1)))
6749 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6750 DOM_CC_X_AND_Y);
6752 if (GET_CODE (x) == IOR
6753 && COMPARISON_P (XEXP (x, 0))
6754 && COMPARISON_P (XEXP (x, 1)))
6755 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6756 DOM_CC_X_OR_Y);
6758 /* An operation (on Thumb) where we want to test for a single bit.
6759 This is done by shifting that bit up into the top bit of a
6760 scratch register; we can then branch on the sign bit. */
6761 if (TARGET_THUMB
6762 && GET_MODE (x) == SImode
6763 && (op == EQ || op == NE)
6764 && GET_CODE (x) == ZERO_EXTRACT
6765 && XEXP (x, 1) == const1_rtx)
6766 return CC_Nmode;
6768 /* An operation that sets the condition codes as a side-effect; the
6769 V flag is not set correctly, so we can only use comparisons where
6770 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6771 instead.) */
6772 if (GET_MODE (x) == SImode
6773 && y == const0_rtx
6774 && (op == EQ || op == NE || op == LT || op == GE)
6775 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6776 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6777 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6778 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6779 || GET_CODE (x) == LSHIFTRT
6780 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6781 || GET_CODE (x) == ROTATERT
6782 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6783 return CC_NOOVmode;
6785 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6786 return CC_Zmode;
6788 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6789 && GET_CODE (x) == PLUS
6790 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6791 return CC_Cmode;
6793 return CCmode;
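/* Example (assumed operands): an EQ comparison of (plus a b) against
   const0_rtx matches the CC_NOOVmode case above, since EQ only needs the
   Z flag, which an ADDS sets correctly; a compare of two plain registers
   falls through to CCmode.  */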
6796 /* X and Y are two things to compare using CODE. Emit the compare insn and
6797 return the rtx for register 0 in the proper mode. FP means this is a
6798 floating point compare: I don't think that it is needed on the arm. */
6799 rtx
6800 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6802 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6803 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6805 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
6807 return cc_reg;
6810 /* Generate a sequence of insns that will generate the correct return
6811 address mask depending on the physical architecture that the program
6812 is running on. */
6813 rtx
6814 arm_gen_return_addr_mask (void)
6816 rtx reg = gen_reg_rtx (Pmode);
6818 emit_insn (gen_return_addr_mask (reg));
6819 return reg;
6822 void
6823 arm_reload_in_hi (rtx *operands)
6825 rtx ref = operands[1];
6826 rtx base, scratch;
6827 HOST_WIDE_INT offset = 0;
6829 if (GET_CODE (ref) == SUBREG)
6831 offset = SUBREG_BYTE (ref);
6832 ref = SUBREG_REG (ref);
6835 if (GET_CODE (ref) == REG)
6837 /* We have a pseudo which has been spilt onto the stack; there
6838 are two cases here: the first where there is a simple
6839 stack-slot replacement and a second where the stack-slot is
6840 out of range, or is used as a subreg. */
6841 if (reg_equiv_mem[REGNO (ref)])
6843 ref = reg_equiv_mem[REGNO (ref)];
6844 base = find_replacement (&XEXP (ref, 0));
6846 else
6847 /* The slot is out of range, or was dressed up in a SUBREG. */
6848 base = reg_equiv_address[REGNO (ref)];
6850 else
6851 base = find_replacement (&XEXP (ref, 0));
6853 /* Handle the case where the address is too complex to be offset by 1. */
6854 if (GET_CODE (base) == MINUS
6855 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6857 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6859 emit_set_insn (base_plus, base);
6860 base = base_plus;
6862 else if (GET_CODE (base) == PLUS)
6864 /* The addend must be CONST_INT, or we would have dealt with it above. */
6865 HOST_WIDE_INT hi, lo;
6867 offset += INTVAL (XEXP (base, 1));
6868 base = XEXP (base, 0);
6870 /* Rework the address into a legal sequence of insns. */
6871 /* Valid range for lo is -4095 -> 4095 */
6872 lo = (offset >= 0
6873 ? (offset & 0xfff)
6874 : -((-offset) & 0xfff));
5876 /* Corner case: if lo is the max offset, then we would be out of range
6877 once we have added the additional 1 below, so bump the msb into the
6878 pre-loading insn(s). */
6879 if (lo == 4095)
6880 lo &= 0x7ff;
6882 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6883 ^ (HOST_WIDE_INT) 0x80000000)
6884 - (HOST_WIDE_INT) 0x80000000);
6886 gcc_assert (hi + lo == offset);
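/* For example (illustrative values): offset == 0x1234 splits into
   lo == 0x234 and hi == 0x1000; hi is folded into the base register
   below and lo remains as the byte-load offset.  */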
6888 if (hi != 0)
6890 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6892 /* Get the base address; addsi3 knows how to handle constants
6893 that require more than one insn. */
6894 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6895 base = base_plus;
6896 offset = lo;
6900 /* Operands[2] may overlap operands[0] (though it won't overlap
6901 operands[1]), that's why we asked for a DImode reg -- so we can
6902 use the bit that does not overlap. */
6903 if (REGNO (operands[2]) == REGNO (operands[0]))
6904 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6905 else
6906 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6908 emit_insn (gen_zero_extendqisi2 (scratch,
6909 gen_rtx_MEM (QImode,
6910 plus_constant (base,
6911 offset))));
6912 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6913 gen_rtx_MEM (QImode,
6914 plus_constant (base,
6915 offset + 1))));
6916 if (!BYTES_BIG_ENDIAN)
6917 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6918 gen_rtx_IOR (SImode,
6919 gen_rtx_ASHIFT
6920 (SImode,
6921 gen_rtx_SUBREG (SImode, operands[0], 0),
6922 GEN_INT (8)),
6923 scratch));
6924 else
6925 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6926 gen_rtx_IOR (SImode,
6927 gen_rtx_ASHIFT (SImode, scratch,
6928 GEN_INT (8)),
6929 gen_rtx_SUBREG (SImode, operands[0], 0)));
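/* A sketch of the little-endian output above (hypothetical registers):

	ldrb	scratch, [base, #offset]
	ldrb	dest, [base, #offset + 1]
	orr	dest, scratch, dest, lsl #8

   i.e. both bytes are loaded zero-extended and recombined into the
   halfword value.  */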
6932 /* Handle storing a half-word to memory during reload by synthesizing as two
6933 byte stores. Take care not to clobber the input values until after we
6934 have moved them somewhere safe. This code assumes that if the DImode
6935 scratch in operands[2] overlaps either the input value or output address
6936 in some way, then that value must die in this insn (we absolutely need
6937 two scratch registers for some corner cases). */
6938 void
6939 arm_reload_out_hi (rtx *operands)
6941 rtx ref = operands[0];
6942 rtx outval = operands[1];
6943 rtx base, scratch;
6944 HOST_WIDE_INT offset = 0;
6946 if (GET_CODE (ref) == SUBREG)
6948 offset = SUBREG_BYTE (ref);
6949 ref = SUBREG_REG (ref);
6952 if (GET_CODE (ref) == REG)
6954 /* We have a pseudo which has been spilt onto the stack; there
6955 are two cases here: the first where there is a simple
6956 stack-slot replacement and a second where the stack-slot is
6957 out of range, or is used as a subreg. */
6958 if (reg_equiv_mem[REGNO (ref)])
6960 ref = reg_equiv_mem[REGNO (ref)];
6961 base = find_replacement (&XEXP (ref, 0));
6963 else
6964 /* The slot is out of range, or was dressed up in a SUBREG. */
6965 base = reg_equiv_address[REGNO (ref)];
6967 else
6968 base = find_replacement (&XEXP (ref, 0));
6970 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6972 /* Handle the case where the address is too complex to be offset by 1. */
6973 if (GET_CODE (base) == MINUS
6974 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6976 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6978 /* Be careful not to destroy OUTVAL. */
6979 if (reg_overlap_mentioned_p (base_plus, outval))
6981 /* Updating base_plus might destroy outval, see if we can
6982 swap the scratch and base_plus. */
6983 if (!reg_overlap_mentioned_p (scratch, outval))
6985 rtx tmp = scratch;
6986 scratch = base_plus;
6987 base_plus = tmp;
6989 else
6991 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6993 /* Be conservative and copy OUTVAL into the scratch now,
6994 this should only be necessary if outval is a subreg
6995 of something larger than a word. */
6996 /* XXX Might this clobber base? I can't see how it can,
6997 since scratch is known to overlap with OUTVAL, and
6998 must be wider than a word. */
6999 emit_insn (gen_movhi (scratch_hi, outval));
7000 outval = scratch_hi;
7004 emit_set_insn (base_plus, base);
7005 base = base_plus;
7007 else if (GET_CODE (base) == PLUS)
7009 /* The addend must be CONST_INT, or we would have dealt with it above. */
7010 HOST_WIDE_INT hi, lo;
7012 offset += INTVAL (XEXP (base, 1));
7013 base = XEXP (base, 0);
7015 /* Rework the address into a legal sequence of insns. */
7016 /* Valid range for lo is -4095 -> 4095 */
7017 lo = (offset >= 0
7018 ? (offset & 0xfff)
7019 : -((-offset) & 0xfff));
7021 /* Corner case: if lo is the max offset, then we would be out of range
7022 once we have added the additional 1 below, so bump the msb into the
7023 pre-loading insn(s). */
7024 if (lo == 4095)
7025 lo &= 0x7ff;
7027 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7028 ^ (HOST_WIDE_INT) 0x80000000)
7029 - (HOST_WIDE_INT) 0x80000000);
7031 gcc_assert (hi + lo == offset);
7033 if (hi != 0)
7035 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7037 /* Be careful not to destroy OUTVAL. */
7038 if (reg_overlap_mentioned_p (base_plus, outval))
7040 /* Updating base_plus might destroy outval, see if we
7041 can swap the scratch and base_plus. */
7042 if (!reg_overlap_mentioned_p (scratch, outval))
7044 rtx tmp = scratch;
7045 scratch = base_plus;
7046 base_plus = tmp;
7048 else
7050 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7052 /* Be conservative and copy outval into scratch now,
7053 this should only be necessary if outval is a
7054 subreg of something larger than a word. */
7055 /* XXX Might this clobber base? I can't see how it
7056 can, since scratch is known to overlap with
7057 outval. */
7058 emit_insn (gen_movhi (scratch_hi, outval));
7059 outval = scratch_hi;
7063 /* Get the base address; addsi3 knows how to handle constants
7064 that require more than one insn. */
7065 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7066 base = base_plus;
7067 offset = lo;
7071 if (BYTES_BIG_ENDIAN)
7073 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7074 plus_constant (base, offset + 1)),
7075 gen_lowpart (QImode, outval)));
7076 emit_insn (gen_lshrsi3 (scratch,
7077 gen_rtx_SUBREG (SImode, outval, 0),
7078 GEN_INT (8)));
7079 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7080 gen_lowpart (QImode, scratch)));
7082 else
7084 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7085 gen_lowpart (QImode, outval)));
7086 emit_insn (gen_lshrsi3 (scratch,
7087 gen_rtx_SUBREG (SImode, outval, 0),
7088 GEN_INT (8)));
7089 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7090 plus_constant (base, offset + 1)),
7091 gen_lowpart (QImode, scratch)));
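/* Little-endian sketch of the two synthesized stores (register names
   are illustrative):

	strb	outval, [base, #offset]
	mov	scratch, outval, lsr #8
	strb	scratch, [base, #offset + 1]
*/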
7095 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
7096 (padded to the size of a word) should be passed in a register. */
7098 static bool
7099 arm_must_pass_in_stack (enum machine_mode mode, tree type)
7101 if (TARGET_AAPCS_BASED)
7102 return must_pass_in_stack_var_size (mode, type);
7103 else
7104 return must_pass_in_stack_var_size_or_pad (mode, type);
7108 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
7109 Return true if an argument passed on the stack should be padded upwards,
7110 i.e. if the least-significant byte has useful data.
7111 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
7112 aggregate types are placed in the lowest memory address. */
7114 bool
7115 arm_pad_arg_upward (enum machine_mode mode, tree type)
7117 if (!TARGET_AAPCS_BASED)
7118 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
7120 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
7121 return false;
7123 return true;
7127 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
7128 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
7129 byte of the register has useful data, and return the opposite if the
7130 most significant byte does.
7131 For AAPCS, small aggregates and small complex types are always padded
7132 upwards. */
7134 bool
7135 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
7136 tree type, int first ATTRIBUTE_UNUSED)
7138 if (TARGET_AAPCS_BASED
7139 && BYTES_BIG_ENDIAN
7140 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
7141 && int_size_in_bytes (type) <= 4)
7142 return true;
7144 /* Otherwise, use default padding. */
7145 return !BYTES_BIG_ENDIAN;
7149 /* Print a symbolic form of X to the debug file, F. */
7150 static void
7151 arm_print_value (FILE *f, rtx x)
7153 switch (GET_CODE (x))
7155 case CONST_INT:
7156 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
7157 return;
7159 case CONST_DOUBLE:
7160 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
7161 return;
7163 case CONST_VECTOR:
7165 int i;
7167 fprintf (f, "<");
7168 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
7170 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
7171 if (i < (CONST_VECTOR_NUNITS (x) - 1))
7172 fputc (',', f);
7174 fprintf (f, ">");
7176 return;
7178 case CONST_STRING:
7179 fprintf (f, "\"%s\"", XSTR (x, 0));
7180 return;
7182 case SYMBOL_REF:
7183 fprintf (f, "`%s'", XSTR (x, 0));
7184 return;
7186 case LABEL_REF:
7187 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
7188 return;
7190 case CONST:
7191 arm_print_value (f, XEXP (x, 0));
7192 return;
7194 case PLUS:
7195 arm_print_value (f, XEXP (x, 0));
7196 fprintf (f, "+");
7197 arm_print_value (f, XEXP (x, 1));
7198 return;
7200 case PC:
7201 fprintf (f, "pc");
7202 return;
7204 default:
7205 fprintf (f, "????");
7206 return;
7210 /* Routines for manipulation of the constant pool. */
7212 /* ARM instructions cannot load a large constant directly into a
7213 register; they have to come from a pc relative load. The constant
7214 must therefore be placed in the addressable range of the pc
7215 relative load. Depending on the precise pc relative load
7216 instruction the range is somewhere between 256 bytes and 4k. This
7217 means that we often have to dump a constant inside a function, and
7218 generate code to branch around it.
7220 It is important to minimize this, since the branches will slow
7221 things down and make the code larger.
7223 Normally we can hide the table after an existing unconditional
7224 branch so that there is no interruption of the flow, but in the
7225 worst case the code looks like this:
7227 ldr rn, L1
7229 b L2
7230 align
7231 L1: .long value
7235 ldr rn, L3
7237 b L4
7238 align
7239 L3: .long value
7243 We fix this by performing a scan after scheduling, which notices
7244 which instructions need to have their operands fetched from the
7245 constant table and builds the table.
7247 The algorithm starts by building a table of all the constants that
7248 need fixing up and all the natural barriers in the function (places
7249 where a constant table can be dropped without breaking the flow).
7250 For each fixup we note how far the pc-relative replacement will be
7251 able to reach and the offset of the instruction into the function.
7253 Having built the table we then group the fixes together to form
7254 tables that are as large as possible (subject to addressing
7255 constraints) and emit each table of constants after the last
7256 barrier that is within range of all the instructions in the group.
7257 If a group does not contain a barrier, then we forcibly create one
7258 by inserting a jump instruction into the flow. Once the table has
7259 been inserted, the insns are then modified to reference the
7260 relevant entry in the pool.
7262 Possible enhancements to the algorithm (not implemented) are:
7264 1) For some processors and object formats, there may be benefit in
7265 aligning the pools to the start of cache lines; this alignment
7266 would need to be taken into account when calculating addressability
7267 of a pool. */
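/* As a rough illustration (assumed numbers): an SImode pc-relative load
   with a 4k forward range whose insn sits at offset 1000 can reach any
   pool placed before roughly offset 5000; the grouping described above
   exploits this slack so that many fixes can share a single pool.  */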
7269 /* These typedefs are located at the start of this file, so that
7270 they can be used in the prototypes there. This comment is to
7271 remind readers of that fact so that the following structures
7272 can be understood more easily.
7274 typedef struct minipool_node Mnode;
7275 typedef struct minipool_fixup Mfix; */
7277 struct minipool_node
7279 /* Doubly linked chain of entries. */
7280 Mnode * next;
7281 Mnode * prev;
7282 /* The maximum offset into the code at which this entry can be placed. While
7283 pushing fixes for forward references, all entries are sorted in order
7284 of increasing max_address. */
7285 HOST_WIDE_INT max_address;
7286 /* Similarly for an entry inserted for a backwards ref. */
7287 HOST_WIDE_INT min_address;
7288 /* The number of fixes referencing this entry. This can become zero
7289 if we "unpush" an entry. In this case we ignore the entry when we
7290 come to emit the code. */
7291 int refcount;
7292 /* The offset from the start of the minipool. */
7293 HOST_WIDE_INT offset;
7294 /* The value in the table. */
7295 rtx value;
7296 /* The mode of value. */
7297 enum machine_mode mode;
7298 /* The size of the value. With iWMMXt enabled
7299 sizes > 4 also imply an alignment of 8 bytes. */
7300 int fix_size;
7303 struct minipool_fixup
7305 Mfix * next;
7306 rtx insn;
7307 HOST_WIDE_INT address;
7308 rtx * loc;
7309 enum machine_mode mode;
7310 int fix_size;
7311 rtx value;
7312 Mnode * minipool;
7313 HOST_WIDE_INT forwards;
7314 HOST_WIDE_INT backwards;
7317 /* Fixes less than a word need padding out to a word boundary. */
7318 #define MINIPOOL_FIX_SIZE(mode) \
7319 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
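/* For instance, a QImode or HImode fix still occupies 4 bytes in the
   pool, while a DImode or DFmode fix occupies 8.  */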
7321 static Mnode * minipool_vector_head;
7322 static Mnode * minipool_vector_tail;
7323 static rtx minipool_vector_label;
7324 static int minipool_pad;
7326 /* The linked list of all minipool fixes required for this function. */
7327 Mfix * minipool_fix_head;
7328 Mfix * minipool_fix_tail;
7329 /* The fix entry for the current minipool, once it has been placed. */
7330 Mfix * minipool_barrier;
7332 /* Determines if INSN is the start of a jump table. Returns the end
7333 of the TABLE or NULL_RTX. */
7334 static rtx
7335 is_jump_table (rtx insn)
7337 rtx table;
7339 if (GET_CODE (insn) == JUMP_INSN
7340 && JUMP_LABEL (insn) != NULL
7341 && ((table = next_real_insn (JUMP_LABEL (insn)))
7342 == next_real_insn (insn))
7343 && table != NULL
7344 && GET_CODE (table) == JUMP_INSN
7345 && (GET_CODE (PATTERN (table)) == ADDR_VEC
7346 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
7347 return table;
7349 return NULL_RTX;
7352 #ifndef JUMP_TABLES_IN_TEXT_SECTION
7353 #define JUMP_TABLES_IN_TEXT_SECTION 0
7354 #endif
7356 static HOST_WIDE_INT
7357 get_jump_table_size (rtx insn)
7359 /* ADDR_VECs only take room if read-only data goes into the text
7360 section. */
7361 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
7363 rtx body = PATTERN (insn);
7364 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7366 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
7369 return 0;
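/* E.g. (assumed case): an ADDR_DIFF_VEC of 10 SImode elements placed in
   the text section occupies 4 * 10 == 40 bytes.  */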
7372 /* Move a minipool fix MP from its current location to before MAX_MP.
7373 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7374 constraints may need updating. */
7375 static Mnode *
7376 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7377 HOST_WIDE_INT max_address)
7379 /* The code below assumes these are different. */
7380 gcc_assert (mp != max_mp);
7382 if (max_mp == NULL)
7384 if (max_address < mp->max_address)
7385 mp->max_address = max_address;
7387 else
7389 if (max_address > max_mp->max_address - mp->fix_size)
7390 mp->max_address = max_mp->max_address - mp->fix_size;
7391 else
7392 mp->max_address = max_address;
7394 /* Unlink MP from its current position. Since max_mp is non-null,
7395 mp->prev must be non-null. */
7396 mp->prev->next = mp->next;
7397 if (mp->next != NULL)
7398 mp->next->prev = mp->prev;
7399 else
7400 minipool_vector_tail = mp->prev;
7402 /* Re-insert it before MAX_MP. */
7403 mp->next = max_mp;
7404 mp->prev = max_mp->prev;
7405 max_mp->prev = mp;
7407 if (mp->prev != NULL)
7408 mp->prev->next = mp;
7409 else
7410 minipool_vector_head = mp;
7413 /* Save the new entry. */
7414 max_mp = mp;
7416 /* Scan over the preceding entries and adjust their addresses as
7417 required. */
7418 while (mp->prev != NULL
7419 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7421 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7422 mp = mp->prev;
7425 return max_mp;
7428 /* Add a constant to the minipool for a forward reference. Returns the
7429 node added or NULL if the constant will not fit in this pool. */
7430 static Mnode *
7431 add_minipool_forward_ref (Mfix *fix)
7433 /* If set, max_mp is the first pool_entry that has a lower
7434 constraint than the one we are trying to add. */
7435 Mnode * max_mp = NULL;
7436 HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
7437 Mnode * mp;
7439 /* If the minipool starts before the end of FIX->INSN then this FIX
7440 cannot be placed into the current pool. Furthermore, adding the
7441 new constant pool entry may cause the pool to start FIX_SIZE bytes
7442 earlier. */
7443 if (minipool_vector_head &&
7444 (fix->address + get_attr_length (fix->insn)
7445 >= minipool_vector_head->max_address - fix->fix_size))
7446 return NULL;
7448 /* Scan the pool to see if a constant with the same value has
7449 already been added. While we are doing this, also note the
7450 location where we must insert the constant if it doesn't already
7451 exist. */
7452 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7454 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7455 && fix->mode == mp->mode
7456 && (GET_CODE (fix->value) != CODE_LABEL
7457 || (CODE_LABEL_NUMBER (fix->value)
7458 == CODE_LABEL_NUMBER (mp->value)))
7459 && rtx_equal_p (fix->value, mp->value))
7461 /* More than one fix references this entry. */
7462 mp->refcount++;
7463 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7466 /* Note the insertion point if necessary. */
7467 if (max_mp == NULL
7468 && mp->max_address > max_address)
7469 max_mp = mp;
7471 /* If we are inserting an 8-byte aligned quantity and
7472 we have not already found an insertion point, then
7473 make sure that all such 8-byte aligned quantities are
7474 placed at the start of the pool. */
7475 if (ARM_DOUBLEWORD_ALIGN
7476 && max_mp == NULL
7477 && fix->fix_size == 8
7478 && mp->fix_size != 8)
7480 max_mp = mp;
7481 max_address = mp->max_address;
7485 /* The value is not currently in the minipool, so we need to create
7486 a new entry for it. If MAX_MP is NULL, the entry will be put on
7487 the end of the list since the placement is less constrained than
7488 any existing entry. Otherwise, we insert the new fix before
7489 MAX_MP and, if necessary, adjust the constraints on the other
7490 entries. */
7491 mp = XNEW (Mnode);
7492 mp->fix_size = fix->fix_size;
7493 mp->mode = fix->mode;
7494 mp->value = fix->value;
7495 mp->refcount = 1;
7496 /* Not yet required for a backwards ref. */
7497 mp->min_address = -65536;
7499 if (max_mp == NULL)
7501 mp->max_address = max_address;
7502 mp->next = NULL;
7503 mp->prev = minipool_vector_tail;
7505 if (mp->prev == NULL)
7507 minipool_vector_head = mp;
7508 minipool_vector_label = gen_label_rtx ();
7510 else
7511 mp->prev->next = mp;
7513 minipool_vector_tail = mp;
7515 else
7517 if (max_address > max_mp->max_address - mp->fix_size)
7518 mp->max_address = max_mp->max_address - mp->fix_size;
7519 else
7520 mp->max_address = max_address;
7522 mp->next = max_mp;
7523 mp->prev = max_mp->prev;
7524 max_mp->prev = mp;
7525 if (mp->prev != NULL)
7526 mp->prev->next = mp;
7527 else
7528 minipool_vector_head = mp;
7531 /* Save the new entry. */
7532 max_mp = mp;
7534 /* Scan over the preceding entries and adjust their addresses as
7535 required. */
7536 while (mp->prev != NULL
7537 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7539 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7540 mp = mp->prev;
7543 return max_mp;
7546 static Mnode *
7547 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7548 HOST_WIDE_INT min_address)
7550 HOST_WIDE_INT offset;
7552 /* The code below assumes these are different. */
7553 gcc_assert (mp != min_mp);
7555 if (min_mp == NULL)
7557 if (min_address > mp->min_address)
7558 mp->min_address = min_address;
7560 else
7562 /* We will adjust this below if it is too loose. */
7563 mp->min_address = min_address;
7565 /* Unlink MP from its current position. Since min_mp is non-null,
7566 mp->next must be non-null. */
7567 mp->next->prev = mp->prev;
7568 if (mp->prev != NULL)
7569 mp->prev->next = mp->next;
7570 else
7571 minipool_vector_head = mp->next;
7573 /* Reinsert it after MIN_MP. */
7574 mp->prev = min_mp;
7575 mp->next = min_mp->next;
7576 min_mp->next = mp;
7577 if (mp->next != NULL)
7578 mp->next->prev = mp;
7579 else
7580 minipool_vector_tail = mp;
7583 min_mp = mp;
7585 offset = 0;
7586 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7588 mp->offset = offset;
7589 if (mp->refcount > 0)
7590 offset += mp->fix_size;
7592 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7593 mp->next->min_address = mp->min_address + mp->fix_size;
7596 return min_mp;
7599 /* Add a constant to the minipool for a backward reference. Returns the
7600 node added or NULL if the constant will not fit in this pool.
7602 Note that the code for insertion for a backwards reference can be
7603 somewhat confusing because the calculated offsets for each fix do
7604 not take into account the size of the pool (which is still under
7605 construction). */
7606 static Mnode *
7607 add_minipool_backward_ref (Mfix *fix)
7609 /* If set, min_mp is the last pool_entry that has a lower constraint
7610 than the one we are trying to add. */
7611 Mnode *min_mp = NULL;
7612 /* This can be negative, since it is only a constraint. */
7613 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7614 Mnode *mp;
7616 /* If we can't reach the current pool from this insn, or if we can't
7617 insert this entry at the end of the pool without pushing other
7618 fixes out of range, then we don't try. This ensures that we
7619 can't fail later on. */
7620 if (min_address >= minipool_barrier->address
7621 || (minipool_vector_tail->min_address + fix->fix_size
7622 >= minipool_barrier->address))
7623 return NULL;
7625 /* Scan the pool to see if a constant with the same value has
7626 already been added. While we are doing this, also note the
7627 location where we must insert the constant if it doesn't already
7628 exist. */
7629 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7631 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7632 && fix->mode == mp->mode
7633 && (GET_CODE (fix->value) != CODE_LABEL
7634 || (CODE_LABEL_NUMBER (fix->value)
7635 == CODE_LABEL_NUMBER (mp->value)))
7636 && rtx_equal_p (fix->value, mp->value)
7637 /* Check that there is enough slack to move this entry to the
7638 end of the table (this is conservative). */
7639 && (mp->max_address
7640 > (minipool_barrier->address
7641 + minipool_vector_tail->offset
7642 + minipool_vector_tail->fix_size)))
7644 mp->refcount++;
7645 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7648 if (min_mp != NULL)
7649 mp->min_address += fix->fix_size;
7650 else
7652 /* Note the insertion point if necessary. */
7653 if (mp->min_address < min_address)
7655 /* For now, we do not allow the insertion of 8-byte alignment
7656 requiring nodes anywhere but at the start of the pool. */
7657 if (ARM_DOUBLEWORD_ALIGN
7658 && fix->fix_size == 8 && mp->fix_size != 8)
7659 return NULL;
7660 else
7661 min_mp = mp;
7663 else if (mp->max_address
7664 < minipool_barrier->address + mp->offset + fix->fix_size)
7666 /* Inserting before this entry would push the fix beyond
7667 its maximum address (which can happen if we have
7668 re-located a forwards fix); force the new fix to come
7669 after it. */
7670 min_mp = mp;
7671 min_address = mp->min_address + fix->fix_size;
7673 /* If we are inserting an 8-byte aligned quantity and
7674 we have not already found an insertion point, then
7675 make sure that all such 8-byte aligned quantities are
7676 placed at the start of the pool. */
7677 else if (ARM_DOUBLEWORD_ALIGN
7678 && min_mp == NULL
7679 && fix->fix_size == 8
7680 && mp->fix_size < 8)
7682 min_mp = mp;
7683 min_address = mp->min_address + fix->fix_size;
7688 /* We need to create a new entry. */
7689 mp = XNEW (Mnode);
7690 mp->fix_size = fix->fix_size;
7691 mp->mode = fix->mode;
7692 mp->value = fix->value;
7693 mp->refcount = 1;
7694 mp->max_address = minipool_barrier->address + 65536;
7696 mp->min_address = min_address;
7698 if (min_mp == NULL)
7700 mp->prev = NULL;
7701 mp->next = minipool_vector_head;
7703 if (mp->next == NULL)
7705 minipool_vector_tail = mp;
7706 minipool_vector_label = gen_label_rtx ();
7708 else
7709 mp->next->prev = mp;
7711 minipool_vector_head = mp;
7713 else
7715 mp->next = min_mp->next;
7716 mp->prev = min_mp;
7717 min_mp->next = mp;
7719 if (mp->next != NULL)
7720 mp->next->prev = mp;
7721 else
7722 minipool_vector_tail = mp;
7725 /* Save the new entry. */
7726 min_mp = mp;
7728 if (mp->prev)
7729 mp = mp->prev;
7730 else
7731 mp->offset = 0;
7733 /* Scan over the following entries and adjust their offsets. */
7734 while (mp->next != NULL)
7736 if (mp->next->min_address < mp->min_address + mp->fix_size)
7737 mp->next->min_address = mp->min_address + mp->fix_size;
7739 if (mp->refcount)
7740 mp->next->offset = mp->offset + mp->fix_size;
7741 else
7742 mp->next->offset = mp->offset;
7744 mp = mp->next;
7747 return min_mp;
7750 static void
7751 assign_minipool_offsets (Mfix *barrier)
7753 HOST_WIDE_INT offset = 0;
7754 Mnode *mp;
7756 minipool_barrier = barrier;
7758 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7760 mp->offset = offset;
7762 if (mp->refcount > 0)
7763 offset += mp->fix_size;
7767 /* Output the literal table. */
7768 static void
7769 dump_minipool (rtx scan)
7771 Mnode * mp;
7772 Mnode * nmp;
7773 int align64 = 0;
7775 if (ARM_DOUBLEWORD_ALIGN)
7776 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7777 if (mp->refcount > 0 && mp->fix_size == 8)
7779 align64 = 1;
7780 break;
7783 if (dump_file)
7784 fprintf (dump_file,
7785 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7786 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7788 scan = emit_label_after (gen_label_rtx (), scan);
7789 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7790 scan = emit_label_after (minipool_vector_label, scan);
7792 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7794 if (mp->refcount > 0)
7796 if (dump_file)
7798 fprintf (dump_file,
7799 ";; Offset %u, min %ld, max %ld ",
7800 (unsigned) mp->offset, (unsigned long) mp->min_address,
7801 (unsigned long) mp->max_address);
7802 arm_print_value (dump_file, mp->value);
7803 fputc ('\n', dump_file);
7806 switch (mp->fix_size)
7808 #ifdef HAVE_consttable_1
7809 case 1:
7810 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7811 break;
7813 #endif
7814 #ifdef HAVE_consttable_2
7815 case 2:
7816 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7817 break;
7819 #endif
7820 #ifdef HAVE_consttable_4
7821 case 4:
7822 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7823 break;
7825 #endif
7826 #ifdef HAVE_consttable_8
7827 case 8:
7828 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7829 break;
7831 #endif
7832 default:
7833 gcc_unreachable ();
7837 nmp = mp->next;
7838 free (mp);
7841 minipool_vector_head = minipool_vector_tail = NULL;
7842 scan = emit_insn_after (gen_consttable_end (), scan);
7843 scan = emit_barrier_after (scan);
7846 /* Return the cost of forcibly inserting a barrier after INSN. */
7847 static int
7848 arm_barrier_cost (rtx insn)
7850 /* Basing the location of the pool on the loop depth is preferable,
7851 but at the moment, the basic block information seems to be
7852 corrupt by this stage of the compilation. */
7853 int base_cost = 50;
7854 rtx next = next_nonnote_insn (insn);
7856 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7857 base_cost -= 20;
7859 switch (GET_CODE (insn))
7861 case CODE_LABEL:
7862 /* It will always be better to place the table before the label, rather
7863 than after it. */
7864 return 50;
7866 case INSN:
7867 case CALL_INSN:
7868 return base_cost;
7870 case JUMP_INSN:
7871 return base_cost - 10;
7873 default:
7874 return base_cost + 10;
7878 /* Find the best place in the insn stream in the range
7879 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7880 Create the barrier by inserting a jump and add a new fix entry for
7881 it. */
7882 static Mfix *
7883 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7885 HOST_WIDE_INT count = 0;
7886 rtx barrier;
7887 rtx from = fix->insn;
7888 /* The instruction after which we will insert the jump. */
7889 rtx selected = NULL;
7890 int selected_cost;
7891 /* The address at which the jump instruction will be placed. */
7892 HOST_WIDE_INT selected_address;
7893 Mfix * new_fix;
7894 HOST_WIDE_INT max_count = max_address - fix->address;
7895 rtx label = gen_label_rtx ();
7897 selected_cost = arm_barrier_cost (from);
7898 selected_address = fix->address;
7900 while (from && count < max_count)
7902 rtx tmp;
7903 int new_cost;
7905 /* This code shouldn't have been called if there was a natural barrier
7906 within range. */
7907 gcc_assert (GET_CODE (from) != BARRIER);
7909 /* Count the length of this insn. */
7910 count += get_attr_length (from);
7912 /* If there is a jump table, add its length. */
7913 tmp = is_jump_table (from);
7914 if (tmp != NULL)
7916 count += get_jump_table_size (tmp);
7918 /* Jump tables aren't in a basic block, so base the cost on
7919 the dispatch insn. If we select this location, we will
7920 still put the pool after the table. */
7921 new_cost = arm_barrier_cost (from);
7923 if (count < max_count
7924 && (!selected || new_cost <= selected_cost))
7926 selected = tmp;
7927 selected_cost = new_cost;
7928 selected_address = fix->address + count;
7931 /* Continue after the dispatch table. */
7932 from = NEXT_INSN (tmp);
7933 continue;
7936 new_cost = arm_barrier_cost (from);
7938 if (count < max_count
7939 && (!selected || new_cost <= selected_cost))
7941 selected = from;
7942 selected_cost = new_cost;
7943 selected_address = fix->address + count;
7946 from = NEXT_INSN (from);
7949 /* Make sure that we found a place to insert the jump. */
7950 gcc_assert (selected);
7952 /* Create a new JUMP_INSN that branches around a barrier. */
7953 from = emit_jump_insn_after (gen_jump (label), selected);
7954 JUMP_LABEL (from) = label;
7955 barrier = emit_barrier_after (from);
7956 emit_label_after (label, barrier);
7958 /* Create a minipool barrier entry for the new barrier. */
7959 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7960 new_fix->insn = barrier;
7961 new_fix->address = selected_address;
7962 new_fix->next = fix->next;
7963 fix->next = new_fix;
7965 return new_fix;
7968 /* Record that there is a natural barrier in the insn stream at
7969 ADDRESS. */
7970 static void
7971 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7973 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7975 fix->insn = insn;
7976 fix->address = address;
7978 fix->next = NULL;
7979 if (minipool_fix_head != NULL)
7980 minipool_fix_tail->next = fix;
7981 else
7982 minipool_fix_head = fix;
7984 minipool_fix_tail = fix;
7987 /* Record INSN, which will need fixing up to load a value from the
7988 minipool. ADDRESS is the offset of the insn since the start of the
7989 function; LOC is a pointer to the part of the insn which requires
7990 fixing; VALUE is the constant that must be loaded, which is of type
7991 MODE. */
7992 static void
7993 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7994 enum machine_mode mode, rtx value)
7996 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7998 #ifdef AOF_ASSEMBLER
7999 /* PIC symbol references need to be converted into offsets into the
8000 based area. */
8001 /* XXX This shouldn't be done here. */
8002 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
8003 value = aof_pic_entry (value);
8004 #endif /* AOF_ASSEMBLER */
8006 fix->insn = insn;
8007 fix->address = address;
8008 fix->loc = loc;
8009 fix->mode = mode;
8010 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
8011 fix->value = value;
8012 fix->forwards = get_attr_pool_range (insn);
8013 fix->backwards = get_attr_neg_pool_range (insn);
8014 fix->minipool = NULL;
8016 /* If an insn doesn't have a range defined for it, then it isn't
8017 expecting to be reworked by this code. Better to stop now than
8018 to generate duff assembly code. */
8019 gcc_assert (fix->forwards || fix->backwards);
8021 /* If an entry requires 8-byte alignment then assume all constant pools
8022 require 4 bytes of padding. Trying to do this later on a per-pool
8023 basis is awkward because existing pool entries have to be modified. */
8024 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
8025 minipool_pad = 4;
8027 if (dump_file)
8029 fprintf (dump_file,
8030 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
8031 GET_MODE_NAME (mode),
8032 INSN_UID (insn), (unsigned long) address,
8033 -1 * (long)fix->backwards, (long)fix->forwards);
8034 arm_print_value (dump_file, fix->value);
8035 fprintf (dump_file, "\n");
8038 /* Add it to the chain of fixes. */
8039 fix->next = NULL;
8041 if (minipool_fix_head != NULL)
8042 minipool_fix_tail->next = fix;
8043 else
8044 minipool_fix_head = fix;
8046 minipool_fix_tail = fix;
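/* Illustrative sketch (not part of the original code): the FORWARDS and
   BACKWARDS ranges recorded above define the window of addresses at
   which a pool entry can satisfy this fix.  The helper name below is
   invented; it only restates that relationship.  */
#if 0
static int
fix_in_range_sketch (Mfix *fix, HOST_WIDE_INT pool_address)
{
  /* A pool placed at POOL_ADDRESS is reachable if it lies no more than
     BACKWARDS bytes before, or FORWARDS bytes after, the insn.  */
  return (pool_address >= fix->address - fix->backwards
          && pool_address <= fix->address + fix->forwards);
}
#endif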
8049 /* Return the cost of synthesizing a 64-bit constant VAL inline.
8050 Returns the number of insns needed, or 99 if we don't know how to
8051 do it. */
8052 int
8053 arm_const_double_inline_cost (rtx val)
8055 rtx lowpart, highpart;
8056 enum machine_mode mode;
8058 mode = GET_MODE (val);
8060 if (mode == VOIDmode)
8061 mode = DImode;
8063 gcc_assert (GET_MODE_SIZE (mode) == 8);
8065 lowpart = gen_lowpart (SImode, val);
8066 highpart = gen_highpart_mode (SImode, mode, val);
8068 gcc_assert (GET_CODE (lowpart) == CONST_INT);
8069 gcc_assert (GET_CODE (highpart) == CONST_INT);
8071 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
8072 NULL_RTX, NULL_RTX, 0, 0)
8073 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
8074 NULL_RTX, NULL_RTX, 0, 0));
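/* Worked example (added for illustration): for the DImode constant
   0x00ff00000000ff00 the lowpart is 0x0000ff00 and the highpart is
   0x00ff0000.  Each is an 8-bit value rotated by an even amount, so
   each half synthesizes in a single MOV and the total returned here
   is 1 + 1 = 2.  */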
8077 /* Return true if it is worthwhile to split a 64-bit constant into two
8078 32-bit operations. This is the case if optimizing for size, or
8079 if we have load delay slots, or if one 32-bit part can be done with
8080 a single data operation. */
8081 bool
8082 arm_const_double_by_parts (rtx val)
8084 enum machine_mode mode = GET_MODE (val);
8085 rtx part;
8087 if (optimize_size || arm_ld_sched)
8088 return true;
8090 if (mode == VOIDmode)
8091 mode = DImode;
8093 part = gen_highpart_mode (SImode, mode, val);
8095 gcc_assert (GET_CODE (part) == CONST_INT);
8097 if (const_ok_for_arm (INTVAL (part))
8098 || const_ok_for_arm (~INTVAL (part)))
8099 return true;
8101 part = gen_lowpart (SImode, val);
8103 gcc_assert (GET_CODE (part) == CONST_INT);
8105 if (const_ok_for_arm (INTVAL (part))
8106 || const_ok_for_arm (~INTVAL (part)))
8107 return true;
8109 return false;
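/* Illustrative sketch (not part of the original code): const_ok_for_arm,
   defined earlier in this file, accepts any 8-bit value rotated right by
   an even amount.  A free-standing equivalent of that test, with an
   invented name, would look like this.  */
#if 0
static int
rotated_imm8_ok_sketch (unsigned long v)
{
  int rot;

  v &= 0xffffffffUL;
  for (rot = 0; rot < 32; rot += 2)
    {
      /* Rotate V left by ROT bits; if the result fits in 8 bits, then V
         is that 8-bit value rotated right by ROT.  */
      unsigned long r
        = ((v << rot) | (rot ? v >> (32 - rot) : 0)) & 0xffffffffUL;

      if (r < 256)
        return 1;
    }
  return 0;
}
#endif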
8112 /* Scan INSN and note any of its operands that need fixing.
8113 If DO_PUSHES is false we do not actually push any of the fixups
8114 needed. The function returns TRUE if any fixups were needed/pushed.
8115 This is used by arm_memory_load_p() which needs to know about loads
8116 of constants that will be converted into minipool loads. */
8117 static bool
8118 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
8120 bool result = false;
8121 int opno;
8123 extract_insn (insn);
8125 if (!constrain_operands (1))
8126 fatal_insn_not_found (insn);
8128 if (recog_data.n_alternatives == 0)
8129 return false;
8131 /* Fill in recog_op_alt with information about the constraints of
8132 this insn. */
8133 preprocess_constraints ();
8135 for (opno = 0; opno < recog_data.n_operands; opno++)
8137 /* Things we need to fix can only occur in inputs. */
8138 if (recog_data.operand_type[opno] != OP_IN)
8139 continue;
8141 /* If this alternative is a memory reference, then any mention
8142 of constants in this alternative is really to fool reload
8143 into allowing us to accept one there. We need to fix them up
8144 now so that we output the right code. */
8145 if (recog_op_alt[opno][which_alternative].memory_ok)
8147 rtx op = recog_data.operand[opno];
8149 if (CONSTANT_P (op))
8151 if (do_pushes)
8152 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
8153 recog_data.operand_mode[opno], op);
8154 result = true;
8156 else if (GET_CODE (op) == MEM
8157 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
8158 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
8160 if (do_pushes)
8162 rtx cop = avoid_constant_pool_reference (op);
8164 /* Casting the address of something to a mode narrower
8165 than a word can cause avoid_constant_pool_reference()
8166 to return the pool reference itself. That's no good to
8167 us here. Let's just hope that we can use the
8168 constant pool value directly. */
8169 if (op == cop)
8170 cop = get_pool_constant (XEXP (op, 0));
8172 push_minipool_fix (insn, address,
8173 recog_data.operand_loc[opno],
8174 recog_data.operand_mode[opno], cop);
8177 result = true;
8182 return result;
8185 /* GCC puts the pool in the wrong place for ARM, since we can only
8186 load addresses a limited distance around the pc. We do some
8187 special munging to move the constant pool values to the correct
8188 point in the code. */
8189 static void
8190 arm_reorg (void)
8192 rtx insn;
8193 HOST_WIDE_INT address = 0;
8194 Mfix * fix;
8196 minipool_fix_head = minipool_fix_tail = NULL;
8198 /* The first insn must always be a note, or the code below won't
8199 scan it properly. */
8200 insn = get_insns ();
8201 gcc_assert (GET_CODE (insn) == NOTE);
8202 minipool_pad = 0;
8204 /* Scan all the insns and record the operands that will need fixing. */
8205 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
8207 if (TARGET_CIRRUS_FIX_INVALID_INSNS
8208 && (arm_cirrus_insn_p (insn)
8209 || GET_CODE (insn) == JUMP_INSN
8210 || arm_memory_load_p (insn)))
8211 cirrus_reorg (insn);
8213 if (GET_CODE (insn) == BARRIER)
8214 push_minipool_barrier (insn, address);
8215 else if (INSN_P (insn))
8217 rtx table;
8219 note_invalid_constants (insn, address, true);
8220 address += get_attr_length (insn);
8222 /* If the insn is a vector jump, add the size of the table
8223 and skip the table. */
8224 if ((table = is_jump_table (insn)) != NULL)
8226 address += get_jump_table_size (table);
8227 insn = table;
8232 fix = minipool_fix_head;
8234 /* Now scan the fixups and perform the required changes. */
8235 while (fix)
8237 Mfix * ftmp;
8238 Mfix * fdel;
8239 Mfix * last_added_fix;
8240 Mfix * last_barrier = NULL;
8241 Mfix * this_fix;
8243 /* Skip any further barriers before the next fix. */
8244 while (fix && GET_CODE (fix->insn) == BARRIER)
8245 fix = fix->next;
8247 /* No more fixes. */
8248 if (fix == NULL)
8249 break;
8251 last_added_fix = NULL;
8253 for (ftmp = fix; ftmp; ftmp = ftmp->next)
8255 if (GET_CODE (ftmp->insn) == BARRIER)
8257 if (ftmp->address >= minipool_vector_head->max_address)
8258 break;
8260 last_barrier = ftmp;
8262 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
8263 break;
8265 last_added_fix = ftmp; /* Keep track of the last fix added. */
8268 /* If we found a barrier, drop back to that; any fixes that we
8269 could have reached but come after the barrier will now go in
8270 the next mini-pool. */
8271 if (last_barrier != NULL)
8273 /* Reduce the refcount for those fixes that won't go into this
8274 pool after all. */
8275 for (fdel = last_barrier->next;
8276 fdel && fdel != ftmp;
8277 fdel = fdel->next)
8279 fdel->minipool->refcount--;
8280 fdel->minipool = NULL;
8283 ftmp = last_barrier;
8285 else
8287 /* ftmp is the first fix that we can't fit into this pool and
8288 there are no natural barriers that we could use. Insert a
8289 new barrier in the code somewhere between the previous
8290 fix and this one, and arrange to jump around it. */
8291 HOST_WIDE_INT max_address;
8293 /* The last item on the list of fixes must be a barrier, so
8294 we can never run off the end of the list of fixes without
8295 last_barrier being set. */
8296 gcc_assert (ftmp);
8298 max_address = minipool_vector_head->max_address;
8299 /* Check that there isn't another fix that is in range that
8300 we couldn't fit into this pool because the pool was
8301 already too large: we need to put the pool before such an
8302 instruction. The pool itself may come just after the
8303 fix because create_fix_barrier also allows space for a
8304 jump instruction. */
8305 if (ftmp->address < max_address)
8306 max_address = ftmp->address + 1;
8308 last_barrier = create_fix_barrier (last_added_fix, max_address);
8311 assign_minipool_offsets (last_barrier);
8313 while (ftmp)
8315 if (GET_CODE (ftmp->insn) != BARRIER
8316 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
8317 == NULL))
8318 break;
8320 ftmp = ftmp->next;
8323 /* Scan over the fixes we have identified for this pool, fixing them
8324 up and adding the constants to the pool itself. */
8325 for (this_fix = fix; this_fix && ftmp != this_fix;
8326 this_fix = this_fix->next)
8327 if (GET_CODE (this_fix->insn) != BARRIER)
8329 rtx addr
8330 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
8331 minipool_vector_label),
8332 this_fix->minipool->offset);
8333 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
8336 dump_minipool (last_barrier->insn);
8337 fix = ftmp;
8340 /* From now on we must synthesize any constants that we can't handle
8341 directly. This can happen if the RTL gets split during final
8342 instruction generation. */
8343 after_arm_reorg = 1;
8345 /* Free the minipool memory. */
8346 obstack_free (&minipool_obstack, minipool_startobj);
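/* Worked example (addresses invented for illustration): suppose the
   function contains one constant load at address 8 whose pool_range is
   4096, and a natural BARRIER at address 2000.  The scan above records
   one fix and one barrier; the fixup loop then reaches the barrier
   before the fix's maximum address (8 + 4096), so the pool is dumped at
   the barrier and create_fix_barrier never has to invent one.  */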
8349 /* Routines to output assembly language. */
8351 /* If the rtx is the correct value, then return the string of the number.
8352 In this way we can ensure that valid double constants are generated even
8353 when cross compiling. */
8354 const char *
8355 fp_immediate_constant (rtx x)
8357 REAL_VALUE_TYPE r;
8358 int i;
8360 if (!fp_consts_inited)
8361 init_fp_table ();
8363 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8364 for (i = 0; i < 8; i++)
8365 if (REAL_VALUES_EQUAL (r, values_fp[i]))
8366 return strings_fp[i];
8368 gcc_unreachable ();
8371 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
8372 static const char *
8373 fp_const_from_val (REAL_VALUE_TYPE *r)
8375 int i;
8377 if (!fp_consts_inited)
8378 init_fp_table ();
8380 for (i = 0; i < 8; i++)
8381 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
8382 return strings_fp[i];
8384 gcc_unreachable ();
8387 /* Output the operands of an LDM/STM instruction to STREAM.
8388 MASK is the ARM register set mask of which only bits 0-15 are important.
8389 REG is the base register, either the frame pointer or the stack pointer;
8390 INSTR is the possibly suffixed load or store instruction. */
8392 static void
8393 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
8394 unsigned long mask)
8396 unsigned i;
8397 bool not_first = FALSE;
8399 fputc ('\t', stream);
8400 asm_fprintf (stream, instr, reg);
8401 fputs (", {", stream);
8403 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8404 if (mask & (1 << i))
8406 if (not_first)
8407 fprintf (stream, ", ");
8409 asm_fprintf (stream, "%r", i);
8410 not_first = TRUE;
8413 fprintf (stream, "}\n");
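/* Example (added for illustration): with INSTR "ldmfd\t%r!", REG the
   stack pointer and MASK having bits 4, 5 and 14 set, the output is

	ldmfd	sp!, {r4, r5, lr}
*/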
8417 /* Output a FLDMX instruction to STREAM.
8418 BASE is the register containing the address.
8419 REG and COUNT specify the register range.
8420 Extra registers may be added to avoid hardware bugs. */
8422 static void
8423 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
8425 int i;
8427 /* Workaround ARM10 VFPr1 bug. */
8428 if (count == 2 && !arm_arch6)
8430 if (reg == 15)
8431 reg--;
8432 count++;
8435 fputc ('\t', stream);
8436 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
8438 for (i = reg; i < reg + count; i++)
8440 if (i > reg)
8441 fputs (", ", stream);
8442 asm_fprintf (stream, "d%d", i);
8444 fputs ("}\n", stream);
8449 /* Output the assembly for a store multiple. */
8451 const char *
8452 vfp_output_fstmx (rtx * operands)
8454 char pattern[100];
8455 int p;
8456 int base;
8457 int i;
8459 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8460 p = strlen (pattern);
8462 gcc_assert (GET_CODE (operands[1]) == REG);
8464 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8465 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8467 p += sprintf (&pattern[p], ", d%d", base + i);
8469 strcpy (&pattern[p], "}");
8471 output_asm_insn (pattern, operands);
8472 return "";
8476 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
8477 number of bytes pushed. */
8479 static int
8480 vfp_emit_fstmx (int base_reg, int count)
8482 rtx par;
8483 rtx dwarf;
8484 rtx tmp, reg;
8485 int i;
8487 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8488 register pairs are stored by a store multiple insn. We avoid this
8489 by pushing an extra pair. */
8490 if (count == 2 && !arm_arch6)
8492 if (base_reg == LAST_VFP_REGNUM - 3)
8493 base_reg -= 2;
8494 count++;
8497 /* ??? The frame layout is implementation defined. We describe
8498 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8499 We really need some way of representing the whole block so that the
8500 unwinder can figure it out at runtime. */
8501 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8502 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8504 reg = gen_rtx_REG (DFmode, base_reg);
8505 base_reg += 2;
8507 XVECEXP (par, 0, 0)
8508 = gen_rtx_SET (VOIDmode,
8509 gen_frame_mem (BLKmode,
8510 gen_rtx_PRE_DEC (BLKmode,
8511 stack_pointer_rtx)),
8512 gen_rtx_UNSPEC (BLKmode,
8513 gen_rtvec (1, reg),
8514 UNSPEC_PUSH_MULT));
8516 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8517 plus_constant (stack_pointer_rtx, -(count * 8 + 4)));
8518 RTX_FRAME_RELATED_P (tmp) = 1;
8519 XVECEXP (dwarf, 0, 0) = tmp;
8521 tmp = gen_rtx_SET (VOIDmode,
8522 gen_frame_mem (DFmode, stack_pointer_rtx),
8523 reg);
8524 RTX_FRAME_RELATED_P (tmp) = 1;
8525 XVECEXP (dwarf, 0, 1) = tmp;
8527 for (i = 1; i < count; i++)
8529 reg = gen_rtx_REG (DFmode, base_reg);
8530 base_reg += 2;
8531 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8533 tmp = gen_rtx_SET (VOIDmode,
8534 gen_frame_mem (DFmode,
8535 plus_constant (stack_pointer_rtx,
8536 i * 8)),
8537 reg);
8538 RTX_FRAME_RELATED_P (tmp) = 1;
8539 XVECEXP (dwarf, 0, i + 1) = tmp;
8542 par = emit_insn (par);
8543 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8544 REG_NOTES (par));
8545 RTX_FRAME_RELATED_P (par) = 1;
8547 return count * 8 + 4;
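/* Worked example (added for illustration): pushing the two register
   pairs starting at d8 on a pre-v6 core triggers the VFPr1 workaround
   above, so three pairs are stored and the function returns
   3 * 8 + 4 = 28 bytes; with arm_arch6 set it stores two pairs and
   returns 2 * 8 + 4 = 20.  The extra four bytes are the FSTMX format-1
   pad word described in the comment above.  */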
8551 /* Output a 'call' insn. */
8552 const char *
8553 output_call (rtx *operands)
8555 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8557 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8558 if (REGNO (operands[0]) == LR_REGNUM)
8560 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8561 output_asm_insn ("mov%?\t%0, %|lr", operands);
8564 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8566 if (TARGET_INTERWORK || arm_arch4t)
8567 output_asm_insn ("bx%?\t%0", operands);
8568 else
8569 output_asm_insn ("mov%?\t%|pc, %0", operands);
8571 return "";
8574 /* Output a 'call' insn that is a reference in memory. */
8575 const char *
8576 output_call_mem (rtx *operands)
8578 if (TARGET_INTERWORK && !arm_arch5)
8580 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8581 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8582 output_asm_insn ("bx%?\t%|ip", operands);
8584 else if (regno_use_in (LR_REGNUM, operands[0]))
8586 /* LR is used in the memory address. We load the address in the
8587 first instruction. It's safe to use IP as the target of the
8588 load since the call will kill it anyway. */
8589 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8590 if (arm_arch5)
8591 output_asm_insn ("blx%?\t%|ip", operands);
8592 else
8594 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8595 if (arm_arch4t)
8596 output_asm_insn ("bx%?\t%|ip", operands);
8597 else
8598 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8601 else
8603 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8604 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8607 return "";
8611 /* Output a move from arm registers to an fpa register.
8612 OPERANDS[0] is an fpa register.
8613 OPERANDS[1] is the first register of an arm register pair. */
8614 const char *
8615 output_mov_long_double_fpa_from_arm (rtx *operands)
8617 int arm_reg0 = REGNO (operands[1]);
8618 rtx ops[3];
8620 gcc_assert (arm_reg0 != IP_REGNUM);
8622 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8623 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8624 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8626 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8627 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8629 return "";
8632 /* Output a move from an fpa register to arm registers.
8633 OPERANDS[0] is the first register of an arm register pair.
8634 OPERANDS[1] is an fpa register. */
8635 const char *
8636 output_mov_long_double_arm_from_fpa (rtx *operands)
8638 int arm_reg0 = REGNO (operands[0]);
8639 rtx ops[3];
8641 gcc_assert (arm_reg0 != IP_REGNUM);
8643 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8644 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8645 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8647 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8648 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8649 return "";
8652 /* Output a move from arm registers to arm registers of a long double.
8653 OPERANDS[0] is the destination.
8654 OPERANDS[1] is the source. */
8655 const char *
8656 output_mov_long_double_arm_from_arm (rtx *operands)
8658 /* We have to be careful here because the two might overlap. */
8659 int dest_start = REGNO (operands[0]);
8660 int src_start = REGNO (operands[1]);
8661 rtx ops[2];
8662 int i;
8664 if (dest_start < src_start)
8666 for (i = 0; i < 3; i++)
8668 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8669 ops[1] = gen_rtx_REG (SImode, src_start + i);
8670 output_asm_insn ("mov%?\t%0, %1", ops);
8673 else
8675 for (i = 2; i >= 0; i--)
8677 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8678 ops[1] = gen_rtx_REG (SImode, src_start + i);
8679 output_asm_insn ("mov%?\t%0, %1", ops);
8683 return "";
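/* Worked example (added for illustration): moving r2-r4 into r1-r3
   overlaps and has dest_start < src_start, so the ascending loop emits

	mov	r1, r2
	mov	r2, r3
	mov	r3, r4

   reading each source register before it is overwritten.  The opposite
   overlap (r1-r3 into r2-r4) takes the descending loop for the same
   reason.  */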
8687 /* Output a move from arm registers to an fpa register.
8688 OPERANDS[0] is an fpa register.
8689 OPERANDS[1] is the first register of an arm register pair. */
8690 const char *
8691 output_mov_double_fpa_from_arm (rtx *operands)
8693 int arm_reg0 = REGNO (operands[1]);
8694 rtx ops[2];
8696 gcc_assert (arm_reg0 != IP_REGNUM);
8698 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8699 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8700 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8701 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8702 return "";
8705 /* Output a move from an fpa register to arm registers.
8706 OPERANDS[0] is the first register of an arm register pair.
8707 OPERANDS[1] is an fpa register. */
8708 const char *
8709 output_mov_double_arm_from_fpa (rtx *operands)
8711 int arm_reg0 = REGNO (operands[0]);
8712 rtx ops[2];
8714 gcc_assert (arm_reg0 != IP_REGNUM);
8716 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8717 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8718 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8719 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8720 return "";
8723 /* Output a move between double words.
8724 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8725 or MEM<-REG, and all MEMs must be offsettable addresses. */
8726 const char *
8727 output_move_double (rtx *operands)
8729 enum rtx_code code0 = GET_CODE (operands[0]);
8730 enum rtx_code code1 = GET_CODE (operands[1]);
8731 rtx otherops[3];
8733 if (code0 == REG)
8735 int reg0 = REGNO (operands[0]);
8737 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8739 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8741 switch (GET_CODE (XEXP (operands[1], 0)))
8743 case REG:
8744 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8745 break;
8747 case PRE_INC:
8748 gcc_assert (TARGET_LDRD);
8749 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8750 break;
8752 case PRE_DEC:
8753 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8754 break;
8756 case POST_INC:
8757 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8758 break;
8760 case POST_DEC:
8761 gcc_assert (TARGET_LDRD);
8762 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8763 break;
8765 case PRE_MODIFY:
8766 case POST_MODIFY:
8767 otherops[0] = operands[0];
8768 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8769 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8771 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8773 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8775 /* Registers overlap so split out the increment. */
8776 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8777 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8779 else
8780 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8782 else
8784 /* We only allow constant increments, so this is safe. */
8785 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8787 break;
8789 case LABEL_REF:
8790 case CONST:
8791 output_asm_insn ("adr%?\t%0, %1", operands);
8792 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8793 break;
8795 default:
8796 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8797 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8799 otherops[0] = operands[0];
8800 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8801 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8803 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8805 if (GET_CODE (otherops[2]) == CONST_INT)
8807 switch ((int) INTVAL (otherops[2]))
8809 case -8:
8810 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8811 return "";
8812 case -4:
8813 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8814 return "";
8815 case 4:
8816 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8817 return "";
8820 if (TARGET_LDRD
8821 && (GET_CODE (otherops[2]) == REG
8822 || (GET_CODE (otherops[2]) == CONST_INT
8823 && INTVAL (otherops[2]) > -256
8824 && INTVAL (otherops[2]) < 256)))
8826 if (reg_overlap_mentioned_p (otherops[0],
8827 otherops[2]))
8829 /* Swap base and index registers over to
8830 avoid a conflict. */
8831 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8832 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8834 /* If both registers conflict, it will usually
8835 have been fixed by a splitter. */
8836 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8838 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8839 output_asm_insn ("ldr%?d\t%0, [%1]",
8840 otherops);
8842 else
8843 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8844 return "";
8847 if (GET_CODE (otherops[2]) == CONST_INT)
8849 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8850 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8851 else
8852 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8854 else
8855 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8857 else
8858 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8860 return "ldm%?ia\t%0, %M0";
8862 else
8864 otherops[1] = adjust_address (operands[1], SImode, 4);
8865 /* Take care of overlapping base/data reg. */
8866 if (reg_mentioned_p (operands[0], operands[1]))
8868 output_asm_insn ("ldr%?\t%0, %1", otherops);
8869 output_asm_insn ("ldr%?\t%0, %1", operands);
8871 else
8873 output_asm_insn ("ldr%?\t%0, %1", operands);
8874 output_asm_insn ("ldr%?\t%0, %1", otherops);
8879 else
8881 /* Constraints should ensure this. */
8882 gcc_assert (code0 == MEM && code1 == REG);
8883 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8885 switch (GET_CODE (XEXP (operands[0], 0)))
8887 case REG:
8888 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8889 break;
8891 case PRE_INC:
8892 gcc_assert (TARGET_LDRD);
8893 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8894 break;
8896 case PRE_DEC:
8897 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8898 break;
8900 case POST_INC:
8901 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8902 break;
8904 case POST_DEC:
8905 gcc_assert (TARGET_LDRD);
8906 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8907 break;
8909 case PRE_MODIFY:
8910 case POST_MODIFY:
8911 otherops[0] = operands[1];
8912 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8913 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8915 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8916 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8917 else
8918 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8919 break;
8921 case PLUS:
8922 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8923 if (GET_CODE (otherops[2]) == CONST_INT)
8925 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8927 case -8:
8928 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8929 return "";
8931 case -4:
8932 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8933 return "";
8935 case 4:
8936 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8937 return "";
8940 if (TARGET_LDRD
8941 && (GET_CODE (otherops[2]) == REG
8942 || (GET_CODE (otherops[2]) == CONST_INT
8943 && INTVAL (otherops[2]) > -256
8944 && INTVAL (otherops[2]) < 256)))
8946 otherops[0] = operands[1];
8947 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8948 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8949 return "";
8951 /* Fall through */
8953 default:
8954 otherops[0] = adjust_address (operands[0], SImode, 4);
8955 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8956 output_asm_insn ("str%?\t%1, %0", operands);
8957 output_asm_insn ("str%?\t%1, %0", otherops);
8961 return "";
8964 /* Output an ADD r, s, #n where n may be too big for one instruction.
8965 If adding zero to one register, output nothing. */
8966 const char *
8967 output_add_immediate (rtx *operands)
8969 HOST_WIDE_INT n = INTVAL (operands[2]);
8971 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8973 if (n < 0)
8974 output_multi_immediate (operands,
8975 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8976 -n);
8977 else
8978 output_multi_immediate (operands,
8979 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8983 return "";
8986 /* Output a multiple immediate operation.
8987 OPERANDS is the vector of operands referred to in the output patterns.
8988 INSTR1 is the output pattern to use for the first constant.
8989 INSTR2 is the output pattern to use for subsequent constants.
8990 IMMED_OP is the index of the constant slot in OPERANDS.
8991 N is the constant value. */
8992 static const char *
8993 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8994 int immed_op, HOST_WIDE_INT n)
8996 #if HOST_BITS_PER_WIDE_INT > 32
8997 n &= 0xffffffff;
8998 #endif
9000 if (n == 0)
9002 /* Quick and easy output. */
9003 operands[immed_op] = const0_rtx;
9004 output_asm_insn (instr1, operands);
9006 else
9008 int i;
9009 const char * instr = instr1;
9011 /* Note that n is never zero here (which would give no output). */
9012 for (i = 0; i < 32; i += 2)
9014 if (n & (3 << i))
9016 operands[immed_op] = GEN_INT (n & (255 << i));
9017 output_asm_insn (instr, operands);
9018 instr = instr2;
9019 i += 6;
9024 return "";
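/* Worked example (added for illustration): adding the constant 257
   (0x101) sets bits in two separate 8-bit windows, so the loop above
   emits two instructions via output_add_immediate:

	add	r0, r1, #1
	add	r0, r0, #256

   The first uses INSTR1 (reading %1), the second INSTR2 (accumulating
   into %0).  */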
9027 /* Return the appropriate ARM instruction for the operation code.
9028 The returned result should not be overwritten. OP is the rtx of the
9029 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
9030 was shifted. */
9031 const char *
9032 arithmetic_instr (rtx op, int shift_first_arg)
9034 switch (GET_CODE (op))
9036 case PLUS:
9037 return "add";
9039 case MINUS:
9040 return shift_first_arg ? "rsb" : "sub";
9042 case IOR:
9043 return "orr";
9045 case XOR:
9046 return "eor";
9048 case AND:
9049 return "and";
9051 default:
9052 gcc_unreachable ();
9056 /* Ensure valid constant shifts and return the appropriate shift mnemonic
9057 for the operation code. The returned result should not be overwritten.
9058 OP is the rtx of the shift.
9059 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
9060 constant shift amount otherwise. */
9061 static const char *
9062 shift_op (rtx op, HOST_WIDE_INT *amountp)
9064 const char * mnem;
9065 enum rtx_code code = GET_CODE (op);
9067 switch (GET_CODE (XEXP (op, 1)))
9069 case REG:
9070 case SUBREG:
9071 *amountp = -1;
9072 break;
9074 case CONST_INT:
9075 *amountp = INTVAL (XEXP (op, 1));
9076 break;
9078 default:
9079 gcc_unreachable ();
9082 switch (code)
9084 case ASHIFT:
9085 mnem = "asl";
9086 break;
9088 case ASHIFTRT:
9089 mnem = "asr";
9090 break;
9092 case LSHIFTRT:
9093 mnem = "lsr";
9094 break;
9096 case ROTATE:
9097 gcc_assert (*amountp != -1);
9098 *amountp = 32 - *amountp;
9100 /* Fall through. */
9102 case ROTATERT:
9103 mnem = "ror";
9104 break;
9106 case MULT:
9107 /* We never have to worry about the amount being other than a
9108 power of 2, since this case can never be reloaded from a reg. */
9109 gcc_assert (*amountp != -1);
9110 *amountp = int_log2 (*amountp);
9111 return "asl";
9113 default:
9114 gcc_unreachable ();
9117 if (*amountp != -1)
9119 /* This is not 100% correct, but follows from the desire to merge
9120 multiplication by a power of 2 with the recognizer for a
9121 shift. >=32 is not a valid shift for "asl", so we must try and
9122 output a shift that produces the correct arithmetical result.
9123 Using lsr #32 is identical except for the fact that the carry bit
9124 is not set correctly if we set the flags; but we never use the
9125 carry bit from such an operation, so we can ignore that. */
9126 if (code == ROTATERT)
9127 /* Rotate is just modulo 32. */
9128 *amountp &= 31;
9129 else if (*amountp != (*amountp & 31))
9131 if (code == ASHIFT)
9132 mnem = "lsr";
9133 *amountp = 32;
9136 /* Shifts of 0 are no-ops. */
9137 if (*amountp == 0)
9138 return NULL;
9141 return mnem;
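/* Examples (added for illustration): (ashift x 3) yields "asl" with
   *AMOUNTP == 3; (mult x 8) is reduced to the same "asl"/3 pair via
   int_log2; (rotate x 10) becomes "ror" with *AMOUNTP == 22, since a
   left rotation by N is a right rotation by 32 - N.  */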
9144 /* Obtain the shift count from POWER, which must be a power of two. */
9146 static HOST_WIDE_INT
9147 int_log2 (HOST_WIDE_INT power)
9149 HOST_WIDE_INT shift = 0;
9151 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
9153 gcc_assert (shift <= 31);
9154 shift++;
9157 return shift;
9160 /* Output a .ascii pseudo-op, keeping track of lengths. This is
9161 because /bin/as is horribly restrictive. The judgement about
9162 whether or not each character is 'printable' (and can be output as
9163 is) or not (and must be printed with an octal escape) must be made
9164 with reference to the *host* character set -- the situation is
9165 similar to that discussed in the comments above pp_c_char in
9166 c-pretty-print.c. */
9168 #define MAX_ASCII_LEN 51
9170 void
9171 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
9173 int i;
9174 int len_so_far = 0;
9176 fputs ("\t.ascii\t\"", stream);
9178 for (i = 0; i < len; i++)
9180 int c = p[i];
9182 if (len_so_far >= MAX_ASCII_LEN)
9184 fputs ("\"\n\t.ascii\t\"", stream);
9185 len_so_far = 0;
9188 if (ISPRINT (c))
9190 if (c == '\\' || c == '\"')
9192 putc ('\\', stream);
9193 len_so_far++;
9195 putc (c, stream);
9196 len_so_far++;
9198 else
9200 fprintf (stream, "\\%03o", c);
9201 len_so_far += 4;
9205 fputs ("\"\n", stream);
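/* Example (added for illustration): for the three input bytes 'a', '"'
   and newline the output is

	.ascii	"a\"\012"

   The quote gains a backslash escape and the non-printable newline is
   emitted as a three-digit octal escape.  */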
9208 /* Compute the register save mask for registers 0 through 12
9209 inclusive. This code is used by arm_compute_save_reg_mask. */
9211 static unsigned long
9212 arm_compute_save_reg0_reg12_mask (void)
9214 unsigned long func_type = arm_current_func_type ();
9215 unsigned long save_reg_mask = 0;
9216 unsigned int reg;
9218 if (IS_INTERRUPT (func_type))
9220 unsigned int max_reg;
9221 /* Interrupt functions must not corrupt any registers,
9222 even call-clobbered ones. If this is a leaf function
9223 we can just examine the registers used by the RTL, but
9224 otherwise we have to assume that whatever function is
9225 called might clobber anything, and so we have to save
9226 all the call-clobbered registers as well. */
9227 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
9228 /* FIQ handlers have registers r8 - r12 banked, so
9229 we only need to check r0 - r7. Normal ISRs only
9230 bank r14 and r15, so we must check up to r12.
9231 r13 is the stack pointer, which is always preserved,
9232 so we do not need to consider it here. */
9233 max_reg = 7;
9234 else
9235 max_reg = 12;
9237 for (reg = 0; reg <= max_reg; reg++)
9238 if (regs_ever_live[reg]
9239 || (! current_function_is_leaf && call_used_regs [reg]))
9240 save_reg_mask |= (1 << reg);
9242 /* Also save the pic base register if necessary. */
9243 if (flag_pic
9244 && !TARGET_SINGLE_PIC_BASE
9245 && arm_pic_register != INVALID_REGNUM
9246 && current_function_uses_pic_offset_table)
9247 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9249 else
9251 /* In the normal case we only need to save those registers
9252 which are call saved and which are used by this function. */
9253 for (reg = 0; reg <= 10; reg++)
9254 if (regs_ever_live[reg] && ! call_used_regs [reg])
9255 save_reg_mask |= (1 << reg);
9257 /* Handle the frame pointer as a special case. */
9258 if (! TARGET_APCS_FRAME
9259 && ! frame_pointer_needed
9260 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
9261 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9262 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9264 /* If we aren't loading the PIC register,
9265 don't stack it even though it may be live. */
9266 if (flag_pic
9267 && !TARGET_SINGLE_PIC_BASE
9268 && arm_pic_register != INVALID_REGNUM
9269 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
9270 || current_function_uses_pic_offset_table))
9271 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9274 /* Save registers so the exception handler can modify them. */
9275 if (current_function_calls_eh_return)
9277 unsigned int i;
9279 for (i = 0; ; i++)
9281 reg = EH_RETURN_DATA_REGNO (i);
9282 if (reg == INVALID_REGNUM)
9283 break;
9284 save_reg_mask |= 1 << reg;
9288 return save_reg_mask;
9291 /* Compute a bit mask of which registers need to be
9292 saved on the stack for the current function. */
9294 static unsigned long
9295 arm_compute_save_reg_mask (void)
9297 unsigned int save_reg_mask = 0;
9298 unsigned long func_type = arm_current_func_type ();
9300 if (IS_NAKED (func_type))
9301 /* This should never really happen. */
9302 return 0;
9304 /* If we are creating a stack frame, then we must save the frame pointer,
9305 IP (which will hold the old stack pointer), LR and the PC. */
9306 if (frame_pointer_needed)
9307 save_reg_mask |=
9308 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9309 | (1 << IP_REGNUM)
9310 | (1 << LR_REGNUM)
9311 | (1 << PC_REGNUM);
9313 /* Volatile functions do not return, so there
9314 is no need to save any other registers. */
9315 if (IS_VOLATILE (func_type))
9316 return save_reg_mask;
9318 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9320 /* Decide if we need to save the link register.
9321 Interrupt routines have their own banked link register,
9322 so they never need to save it.
9323 Otherwise if we do not use the link register we do not need to save
9324 it. If we are pushing other registers onto the stack however, we
9325 can save an instruction in the epilogue by pushing the link register
9326 now and then popping it back into the PC. This incurs extra memory
9327 accesses though, so we only do it when optimizing for size, and only
9328 if we know that we will not need a fancy return sequence. */
9329 if (regs_ever_live [LR_REGNUM]
9330 || (save_reg_mask
9331 && optimize_size
9332 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9333 && !current_function_calls_eh_return))
9334 save_reg_mask |= 1 << LR_REGNUM;
9336 if (cfun->machine->lr_save_eliminated)
9337 save_reg_mask &= ~ (1 << LR_REGNUM);
9339 if (TARGET_REALLY_IWMMXT
9340 && ((bit_count (save_reg_mask)
9341 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
9343 unsigned int reg;
9345 /* The total number of registers that are going to be pushed
9346 onto the stack is odd. We need to ensure that the stack
9347 is 64-bit aligned before we start to save iWMMXt registers,
9348 and also before we start to create locals. (A local variable
9349 might be a double or long long which we will load/store using
9350 an iWMMXt instruction). Therefore we need to push another
9351 ARM register, so that the stack will be 64-bit aligned. We
9352 try to avoid using the arg registers (r0 - r3) as they might be
9353 used to pass values in a tail call. */
9354 for (reg = 4; reg <= 12; reg++)
9355 if ((save_reg_mask & (1 << reg)) == 0)
9356 break;
9358 if (reg <= 12)
9359 save_reg_mask |= (1 << reg);
9360 else
9362 cfun->machine->sibcall_blocked = 1;
9363 save_reg_mask |= (1 << 3);
9367 return save_reg_mask;
9371 /* Compute a bit mask of which registers need to be
9372 saved on the stack for the current function. */
9373 static unsigned long
9374 thumb_compute_save_reg_mask (void)
9376 unsigned long mask;
9377 unsigned reg;
9379 mask = 0;
9380 for (reg = 0; reg < 12; reg ++)
9381 if (regs_ever_live[reg] && !call_used_regs[reg])
9382 mask |= 1 << reg;
9384 if (flag_pic
9385 && !TARGET_SINGLE_PIC_BASE
9386 && arm_pic_register != INVALID_REGNUM
9387 && current_function_uses_pic_offset_table)
9388 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9390 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
9391 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
9392 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
9394 /* LR will also be pushed if any lo regs are pushed. */
9395 if (mask & 0xff || thumb_force_lr_save ())
9396 mask |= (1 << LR_REGNUM);
9398 /* Make sure we have a low work register if we need one.
9399 We will need one if we are going to push a high register,
9400 but we are not currently intending to push a low register. */
9401 if ((mask & 0xff) == 0
9402 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9404 /* Use thumb_find_work_register to choose which register
9405 we will use. If the register is live then we will
9406 have to push it. Use LAST_LO_REGNUM as our fallback
9407 choice for the register to select. */
9408 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9410 if (! call_used_regs[reg])
9411 mask |= 1 << reg;
9414 return mask;
9418 /* Return the number of bytes required to save VFP registers. */
9419 static int
9420 arm_get_vfp_saved_size (void)
9422 unsigned int regno;
9423 int count;
9424 int saved;
9426 saved = 0;
9427 /* Space for saved VFP registers. */
9428 if (TARGET_HARD_FLOAT && TARGET_VFP)
9430 count = 0;
9431 for (regno = FIRST_VFP_REGNUM;
9432 regno < LAST_VFP_REGNUM;
9433 regno += 2)
9435 if ((!regs_ever_live[regno] || call_used_regs[regno])
9436 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9438 if (count > 0)
9440 /* Workaround ARM10 VFPr1 bug. */
9441 if (count == 2 && !arm_arch6)
9442 count++;
9443 saved += count * 8 + 4;
9445 count = 0;
9447 else
9448 count++;
9450 if (count > 0)
9452 if (count == 2 && !arm_arch6)
9453 count++;
9454 saved += count * 8 + 4;
9457 return saved;
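/* Worked example (added for illustration): if d8, d9 and d10 are the
   only call-saved VFP registers in use, they form one contiguous run
   with count == 3, so the function returns 3 * 8 + 4 = 28 bytes.  Two
   separate runs would each pay their own 4-byte FSTMX pad word, which
   is why the total is accumulated per run above.  */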
9461 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9462 everything bar the final return instruction. */
9463 const char *
9464 output_return_instruction (rtx operand, int really_return, int reverse)
9466 char conditional[10];
9467 char instr[100];
9468 unsigned reg;
9469 unsigned long live_regs_mask;
9470 unsigned long func_type;
9471 arm_stack_offsets *offsets;
9473 func_type = arm_current_func_type ();
9475 if (IS_NAKED (func_type))
9476 return "";
9478 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9480 /* If this function was declared non-returning, and we have
9481 found a tail call, then we have to trust that the called
9482 function won't return. */
9483 if (really_return)
9485 rtx ops[2];
9487 /* Otherwise, trap an attempted return by aborting. */
9488 ops[0] = operand;
9489 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9490 : "abort");
9491 assemble_external_libcall (ops[1]);
9492 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9495 return "";
9498 gcc_assert (!current_function_calls_alloca || really_return);
9500 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9502 return_used_this_function = 1;
9504 live_regs_mask = arm_compute_save_reg_mask ();
9506 if (live_regs_mask)
9508 const char * return_reg;
9510 /* If we do not have any special requirements for function exit
9511 (e.g. interworking, or ISR) then we can load the return address
9512 directly into the PC. Otherwise we must load it into LR. */
9513 if (really_return
9514 && ! TARGET_INTERWORK)
9515 return_reg = reg_names[PC_REGNUM];
9516 else
9517 return_reg = reg_names[LR_REGNUM];
9519 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9521 /* There are three possible reasons for the IP register
9522 being saved: 1) a stack frame was created, in which case
9523 IP contains the old stack pointer, or 2) an ISR routine
9524 corrupted it, or 3) it was saved to align the stack on
9525 iWMMXt. In case 1, restore IP into SP, otherwise just
9526 restore IP. */
9527 if (frame_pointer_needed)
9529 live_regs_mask &= ~ (1 << IP_REGNUM);
9530 live_regs_mask |= (1 << SP_REGNUM);
9532 else
9533 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9536 /* On some ARM architectures it is faster to use LDR rather than
9537 LDM to load a single register. On other architectures, the
9538 cost is the same. In 26 bit mode, or for exception handlers,
9539 we have to use LDM to load the PC so that the CPSR is also
9540 restored. */
9541 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9542 if (live_regs_mask == (1U << reg))
9543 break;
9545 if (reg <= LAST_ARM_REGNUM
9546 && (reg != LR_REGNUM
9547 || ! really_return
9548 || ! IS_INTERRUPT (func_type)))
9550 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9551 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9553 else
9555 char *p;
9556 int first = 1;
9558 /* Generate the load multiple instruction to restore the
9559 registers. Note we can get here, even if
9560 frame_pointer_needed is true, but only if sp already
9561 points to the base of the saved core registers. */
9562 if (live_regs_mask & (1 << SP_REGNUM))
9564 unsigned HOST_WIDE_INT stack_adjust;
9566 offsets = arm_get_frame_offsets ();
9567 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9568 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9570 if (stack_adjust && arm_arch5)
9571 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9572 else
9574 /* If we can't use ldmib (SA110 bug),
9575 then try to pop r3 instead. */
9576 if (stack_adjust)
9577 live_regs_mask |= 1 << 3;
9578 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9581 else
9582 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9584 p = instr + strlen (instr);
9586 for (reg = 0; reg <= SP_REGNUM; reg++)
9587 if (live_regs_mask & (1 << reg))
9589 int l = strlen (reg_names[reg]);
9591 if (first)
9592 first = 0;
9593 else
9595 memcpy (p, ", ", 2);
9596 p += 2;
9599 memcpy (p, "%|", 2);
9600 memcpy (p + 2, reg_names[reg], l);
9601 p += l + 2;
9604 if (live_regs_mask & (1 << LR_REGNUM))
9606 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9607 /* If returning from an interrupt, restore the CPSR. */
9608 if (IS_INTERRUPT (func_type))
9609 strcat (p, "^");
9611 else
9612 strcpy (p, "}");
9615 output_asm_insn (instr, & operand);
9617 /* See if we need to generate an extra instruction to
9618 perform the actual function return. */
9619 if (really_return
9620 && func_type != ARM_FT_INTERWORKED
9621 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9623 /* The return has already been handled
9624 by loading the LR into the PC. */
9625 really_return = 0;
9629 if (really_return)
9631 switch ((int) ARM_FUNC_TYPE (func_type))
9633 case ARM_FT_ISR:
9634 case ARM_FT_FIQ:
9635 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9636 break;
9638 case ARM_FT_INTERWORKED:
9639 sprintf (instr, "bx%s\t%%|lr", conditional);
9640 break;
9642 case ARM_FT_EXCEPTION:
9643 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9644 break;
9646 default:
9647 /* Use bx if it's available. */
9648 if (arm_arch5 || arm_arch4t)
9649 sprintf (instr, "bx%s\t%%|lr", conditional);
9650 else
9651 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9652 break;
9655 output_asm_insn (instr, & operand);
9658 return "";
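/* Example (added for illustration): for a normal, non-interworked
   function whose save mask covers r4, r5 and LR, a really_return call
   emits the single instruction

	ldmfd	sp!, {r4, r5, pc}

   loading the saved LR straight into the PC, so no separate return
   instruction is needed.  */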
9661 /* Write the function name into the code section, directly preceding
9662 the function prologue.
9664 Code will be output similar to this:
9666 .ascii "arm_poke_function_name", 0
9667 .align
9669 .word 0xff000000 + (t1 - t0)
9670 arm_poke_function_name
9671 mov ip, sp
9672 stmfd sp!, {fp, ip, lr, pc}
9673 sub fp, ip, #4
9675 When performing a stack backtrace, code can inspect the value
9676 of 'pc' stored at 'fp' + 0. If the trace function then looks
9677 at location pc - 12 and the top 8 bits are set, then we know
9678 that there is a function name embedded immediately preceding this
9679 location, whose length is held in the low 24 bits of pc[-3].
9681 We assume that pc is declared as a pointer to an unsigned long.
9683 It is of no benefit to output the function name if we are assembling
9684 a leaf function. These function types will not contain a stack
9685 backtrace structure; therefore it is not possible to determine the
9686 function name. */
9687 void
9688 arm_poke_function_name (FILE *stream, const char *name)
9690 unsigned long alignlength;
9691 unsigned long length;
9692 rtx x;
9694 length = strlen (name) + 1;
9695 alignlength = ROUND_UP_WORD (length);
9697 ASM_OUTPUT_ASCII (stream, name, length);
9698 ASM_OUTPUT_ALIGN (stream, 2);
9699 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9700 assemble_aligned_integer (UNITS_PER_WORD, x);
9703 /* Place some comments into the assembler stream
9704 describing the current function. */
9705 static void
9706 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9708 unsigned long func_type;
9710 if (!TARGET_ARM)
9712 thumb_output_function_prologue (f, frame_size);
9713 return;
9716 /* Sanity check. */
9717 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9719 func_type = arm_current_func_type ();
9721 switch ((int) ARM_FUNC_TYPE (func_type))
9723 default:
9724 case ARM_FT_NORMAL:
9725 break;
9726 case ARM_FT_INTERWORKED:
9727 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9728 break;
9729 case ARM_FT_ISR:
9730 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9731 break;
9732 case ARM_FT_FIQ:
9733 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9734 break;
9735 case ARM_FT_EXCEPTION:
9736 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9737 break;
9740 if (IS_NAKED (func_type))
9741 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9743 if (IS_VOLATILE (func_type))
9744 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9746 if (IS_NESTED (func_type))
9747 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9749 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9750 current_function_args_size,
9751 current_function_pretend_args_size, frame_size);
9753 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9754 frame_pointer_needed,
9755 cfun->machine->uses_anonymous_args);
9757 if (cfun->machine->lr_save_eliminated)
9758 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9760 if (current_function_calls_eh_return)
9761 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9763 #ifdef AOF_ASSEMBLER
9764 if (flag_pic)
9765 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9766 #endif
9768 return_used_this_function = 0;
9771 const char *
9772 arm_output_epilogue (rtx sibling)
9774 int reg;
9775 unsigned long saved_regs_mask;
9776 unsigned long func_type;
9777 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9778 frame that is $fp + 4 for a non-variadic function. */
9779 int floats_offset = 0;
9780 rtx operands[3];
9781 FILE * f = asm_out_file;
9782 unsigned int lrm_count = 0;
9783 int really_return = (sibling == NULL);
9784 int start_reg;
9785 arm_stack_offsets *offsets;
9787 /* If we have already generated the return instruction
9788 then it is futile to generate anything else. */
9789 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9790 return "";
9792 func_type = arm_current_func_type ();
9794 if (IS_NAKED (func_type))
9795 /* Naked functions don't have epilogues. */
9796 return "";
9798 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9800 rtx op;
9802 /* A volatile function should never return. Call abort. */
9803 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9804 assemble_external_libcall (op);
9805 output_asm_insn ("bl\t%a0", &op);
9807 return "";
9810 /* If we are throwing an exception, then we really must be doing a
9811 return, so we can't tail-call. */
9812 gcc_assert (!current_function_calls_eh_return || really_return);
9814 offsets = arm_get_frame_offsets ();
9815 saved_regs_mask = arm_compute_save_reg_mask ();
9817 if (TARGET_IWMMXT)
9818 lrm_count = bit_count (saved_regs_mask);
9820 floats_offset = offsets->saved_args;
9821 /* Compute how far away the floats will be. */
9822 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9823 if (saved_regs_mask & (1 << reg))
9824 floats_offset += 4;
9826 if (frame_pointer_needed)
9828 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9829 int vfp_offset = offsets->frame;
9831 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9833 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9834 if (regs_ever_live[reg] && !call_used_regs[reg])
9836 floats_offset += 12;
9837 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9838 reg, FP_REGNUM, floats_offset - vfp_offset);
9841 else
9843 start_reg = LAST_FPA_REGNUM;
9845 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9847 if (regs_ever_live[reg] && !call_used_regs[reg])
9849 floats_offset += 12;
9851 /* We can't unstack more than four registers at once. */
9852 if (start_reg - reg == 3)
9854 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9855 reg, FP_REGNUM, floats_offset - vfp_offset);
9856 start_reg = reg - 1;
9859 else
9861 if (reg != start_reg)
9862 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9863 reg + 1, start_reg - reg,
9864 FP_REGNUM, floats_offset - vfp_offset);
9865 start_reg = reg - 1;
9869 /* Just in case the last register checked also needs unstacking. */
9870 if (reg != start_reg)
9871 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9872 reg + 1, start_reg - reg,
9873 FP_REGNUM, floats_offset - vfp_offset);
9876 if (TARGET_HARD_FLOAT && TARGET_VFP)
9878 int saved_size;
9880 /* The fldmx insn does not have base+offset addressing modes,
9881 so we use IP to hold the address. */
9882 saved_size = arm_get_vfp_saved_size ();
9884 if (saved_size > 0)
9886 floats_offset += saved_size;
9887 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9888 FP_REGNUM, floats_offset - vfp_offset);
9890 start_reg = FIRST_VFP_REGNUM;
9891 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9893 if ((!regs_ever_live[reg] || call_used_regs[reg])
9894 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9896 if (start_reg != reg)
9897 arm_output_fldmx (f, IP_REGNUM,
9898 (start_reg - FIRST_VFP_REGNUM) / 2,
9899 (reg - start_reg) / 2);
9900 start_reg = reg + 2;
9903 if (start_reg != reg)
9904 arm_output_fldmx (f, IP_REGNUM,
9905 (start_reg - FIRST_VFP_REGNUM) / 2,
9906 (reg - start_reg) / 2);
9909 if (TARGET_IWMMXT)
9911 /* The frame pointer is guaranteed to be non-double-word aligned.
9912 This is because it is set to (old_stack_pointer - 4) and the
9913 old_stack_pointer was double-word aligned. Thus the offset to
9914 the iWMMXt registers to be loaded must also be non-double-word
9915 sized, so that the resultant address *is* double-word aligned.
9916 We can ignore floats_offset since that was already included in
9917 the live_regs_mask. */
9918 lrm_count += (lrm_count % 2 ? 2 : 1);
9920 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9921 if (regs_ever_live[reg] && !call_used_regs[reg])
9923 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9924 reg, FP_REGNUM, lrm_count * 4);
9925 lrm_count += 2;
9929 /* saved_regs_mask should contain the IP, which at the time of stack
9930 frame generation actually contains the old stack pointer. So a
9931 quick way to unwind the stack is just to pop the IP register directly
9932 into the stack pointer. */
9933 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9934 saved_regs_mask &= ~ (1 << IP_REGNUM);
9935 saved_regs_mask |= (1 << SP_REGNUM);
9937 /* There are two registers left in saved_regs_mask - LR and PC. We
9938 only need to restore the LR register (the return address), but to
9939 save time we can load it directly into the PC, unless we need a
9940 special function exit sequence, or we are not really returning. */
9941 if (really_return
9942 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9943 && !current_function_calls_eh_return)
9944 /* Delete the LR from the register mask, so that the LR on
9945 the stack is loaded into the PC in the register mask. */
9946 saved_regs_mask &= ~ (1 << LR_REGNUM);
9947 else
9948 saved_regs_mask &= ~ (1 << PC_REGNUM);
9950 /* We must use SP as the base register, because SP is one of the
9951 registers being restored. If an interrupt or page fault
9952 happens in the ldm instruction, the SP might or might not
9953 have been restored. That would be bad, as then SP will no
9954 longer indicate the safe area of stack, and we can get stack
9955 corruption. Using SP as the base register means that it will
9956 be reset correctly to the original value, should an interrupt
9957 occur. If the stack pointer already points at the right
9958 place, then omit the subtraction. */
9959 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9960 || current_function_calls_alloca)
9961 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9962 4 * bit_count (saved_regs_mask));
9963 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9965 if (IS_INTERRUPT (func_type))
9966 /* Interrupt handlers will have pushed the
9967 IP onto the stack, so restore it now. */
9968 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9970 else
9972 /* Restore stack pointer if necessary. */
9973 if (offsets->outgoing_args != offsets->saved_regs)
9975 operands[0] = operands[1] = stack_pointer_rtx;
9976 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9977 output_add_immediate (operands);
9980 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9982 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9983 if (regs_ever_live[reg] && !call_used_regs[reg])
9984 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9985 reg, SP_REGNUM);
9987 else
9989 start_reg = FIRST_FPA_REGNUM;
9991 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9993 if (regs_ever_live[reg] && !call_used_regs[reg])
9995 if (reg - start_reg == 3)
9997 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9998 start_reg, SP_REGNUM);
9999 start_reg = reg + 1;
10002 else
10004 if (reg != start_reg)
10005 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10006 start_reg, reg - start_reg,
10007 SP_REGNUM);
10009 start_reg = reg + 1;
10013 /* Just in case the last register checked also needs unstacking. */
10014 if (reg != start_reg)
10015 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10016 start_reg, reg - start_reg, SP_REGNUM);
10019 if (TARGET_HARD_FLOAT && TARGET_VFP)
10021 start_reg = FIRST_VFP_REGNUM;
10022 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10024 if ((!regs_ever_live[reg] || call_used_regs[reg])
10025 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10027 if (start_reg != reg)
10028 arm_output_fldmx (f, SP_REGNUM,
10029 (start_reg - FIRST_VFP_REGNUM) / 2,
10030 (reg - start_reg) / 2);
10031 start_reg = reg + 2;
10034 if (start_reg != reg)
10035 arm_output_fldmx (f, SP_REGNUM,
10036 (start_reg - FIRST_VFP_REGNUM) / 2,
10037 (reg - start_reg) / 2);
10039 if (TARGET_IWMMXT)
10040 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10041 if (regs_ever_live[reg] && !call_used_regs[reg])
10042 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
10044 /* If we can, restore the LR into the PC. */
10045 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
10046 && really_return
10047 && current_function_pretend_args_size == 0
10048 && saved_regs_mask & (1 << LR_REGNUM)
10049 && !current_function_calls_eh_return)
10051 saved_regs_mask &= ~ (1 << LR_REGNUM);
10052 saved_regs_mask |= (1 << PC_REGNUM);
10055 /* Load the registers off the stack. If we only have one register
10056 to load, use the LDR instruction - it is faster. */
10057 if (saved_regs_mask == (1 << LR_REGNUM))
10059 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
10061 else if (saved_regs_mask)
10063 if (saved_regs_mask & (1 << SP_REGNUM))
10064 /* Note - write back to the stack register is not enabled
10065 (i.e. "ldmfd sp!..."). We know that the stack pointer is
10066 in the list of registers and if we add writeback the
10067 instruction becomes UNPREDICTABLE. */
10068 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
10069 else
10070 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
10073 if (current_function_pretend_args_size)
10075 /* Unwind the pre-pushed regs. */
10076 operands[0] = operands[1] = stack_pointer_rtx;
10077 operands[2] = GEN_INT (current_function_pretend_args_size);
10078 output_add_immediate (operands);
10082 /* We may have already restored PC directly from the stack. */
10083 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
10084 return "";
10086 /* Stack adjustment for exception handler. */
10087 if (current_function_calls_eh_return)
10088 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
10089 ARM_EH_STACKADJ_REGNUM);
10091 /* Generate the return instruction. */
10092 switch ((int) ARM_FUNC_TYPE (func_type))
10094 case ARM_FT_ISR:
10095 case ARM_FT_FIQ:
10096 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
10097 break;
10099 case ARM_FT_EXCEPTION:
10100 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10101 break;
10103 case ARM_FT_INTERWORKED:
10104 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10105 break;
10107 default:
10108 if (arm_arch5 || arm_arch4t)
10109 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10110 else
10111 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10112 break;
10115 return "";
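/* An illustrative sketch (hypothetical function, not from any test
   case): for a normal ARM function that saved {fp, lr} and needs no
   final stack adjustment, the code above folds the LR into the PC
   and emits

	ldmfd	sp!, {fp, pc}

   whereas an ARM_FT_INTERWORKED function cannot use the fold and
   instead emits

	ldmfd	sp!, {fp, lr}
	bx	lr  */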
10118 static void
10119 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10120 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
10122 arm_stack_offsets *offsets;
10124 if (TARGET_THUMB)
10126 int regno;
10128 /* Emit any call-via-reg trampolines that are needed for v4t support
10129 of call_reg and call_value_reg type insns. */
10130 for (regno = 0; regno < LR_REGNUM; regno++)
10132 rtx label = cfun->machine->call_via[regno];
10134 if (label != NULL)
10136 switch_to_section (function_section (current_function_decl));
10137 targetm.asm_out.internal_label (asm_out_file, "L",
10138 CODE_LABEL_NUMBER (label));
10139 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
10143 /* ??? Probably not safe to set this here, since it assumes that a
10144 function will be emitted as assembly immediately after we generate
10145 RTL for it. This does not happen for inline functions. */
10146 return_used_this_function = 0;
10148 else
10150 /* We need to take into account any stack-frame rounding. */
10151 offsets = arm_get_frame_offsets ();
10153 gcc_assert (!use_return_insn (FALSE, NULL)
10154 || !return_used_this_function
10155 || offsets->saved_regs == offsets->outgoing_args
10156 || frame_pointer_needed);
10158 /* Reset the ARM-specific per-function variables. */
10159 after_arm_reorg = 0;
10163 /* Generate and emit an insn that we will recognize as a push_multi.
10164 Unfortunately, since this insn does not reflect the actual semantics
10165 of the operation very well, we need to annotate the insn for the benefit
10166 of DWARF2 frame unwind information. */
10167 static rtx
10168 emit_multi_reg_push (unsigned long mask)
10170 int num_regs = 0;
10171 int num_dwarf_regs;
10172 int i, j;
10173 rtx par;
10174 rtx dwarf;
10175 int dwarf_par_index;
10176 rtx tmp, reg;
10178 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10179 if (mask & (1 << i))
10180 num_regs++;
10182 gcc_assert (num_regs && num_regs <= 16);
10184 /* We don't record the PC in the dwarf frame information. */
10185 num_dwarf_regs = num_regs;
10186 if (mask & (1 << PC_REGNUM))
10187 num_dwarf_regs--;
10189 /* For the body of the insn we are going to generate an UNSPEC in
10190 parallel with several USEs. This allows the insn to be recognized
10191 by the push_multi pattern in the arm.md file. The insn looks
10192 something like this:
10194 (parallel [
10195 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
10196 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
10197 (use (reg:SI 11 fp))
10198 (use (reg:SI 12 ip))
10199 (use (reg:SI 14 lr))
10200 (use (reg:SI 15 pc))
10203 For the frame note however, we try to be more explicit and actually
10204 show each register being stored into the stack frame, plus a (single)
10205 decrement of the stack pointer. We do it this way in order to be
10206 friendly to the stack unwinding code, which only wants to see a single
10207 stack decrement per instruction. The RTL we generate for the note looks
10208 something like this:
10210 (sequence [
10211 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
10212 (set (mem:SI (reg:SI sp)) (reg:SI r4))
10213 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
10214 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
10215 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
10218 This sequence is used both by the code to support stack unwinding for
10219 exception handlers and the code to generate dwarf2 frame debugging. */
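/* To make the example above concrete: that PARALLEL is recognized by
   the push_multi pattern and output as a single store-multiple, here
   "stmfd sp!, {r4, fp, ip, lr, pc}", while the attached note records
   the sp = sp - 20 adjustment plus the four individual stores (the PC
   slot is deliberately absent from the note, as explained above).  */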
10221 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10222 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10223 dwarf_par_index = 1;
10225 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10227 if (mask & (1 << i))
10229 reg = gen_rtx_REG (SImode, i);
10231 XVECEXP (par, 0, 0)
10232 = gen_rtx_SET (VOIDmode,
10233 gen_frame_mem (BLKmode,
10234 gen_rtx_PRE_DEC (BLKmode,
10235 stack_pointer_rtx)),
10236 gen_rtx_UNSPEC (BLKmode,
10237 gen_rtvec (1, reg),
10238 UNSPEC_PUSH_MULT));
10240 if (i != PC_REGNUM)
10242 tmp = gen_rtx_SET (VOIDmode,
10243 gen_frame_mem (SImode, stack_pointer_rtx),
10244 reg);
10245 RTX_FRAME_RELATED_P (tmp) = 1;
10246 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10247 dwarf_par_index++;
10250 break;
10254 for (j = 1, i++; j < num_regs; i++)
10256 if (mask & (1 << i))
10258 reg = gen_rtx_REG (SImode, i);
10260 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10262 if (i != PC_REGNUM)
10264 tmp
10265 = gen_rtx_SET (VOIDmode,
10266 gen_frame_mem (SImode,
10267 plus_constant (stack_pointer_rtx,
10268 4 * j)),
10269 reg);
10270 RTX_FRAME_RELATED_P (tmp) = 1;
10271 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10274 j++;
10278 par = emit_insn (par);
10280 tmp = gen_rtx_SET (VOIDmode,
10281 stack_pointer_rtx,
10282 plus_constant (stack_pointer_rtx, -4 * num_regs));
10283 RTX_FRAME_RELATED_P (tmp) = 1;
10284 XVECEXP (dwarf, 0, 0) = tmp;
10286 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10287 REG_NOTES (par));
10288 return par;
10291 /* Calculate the size of the return value that is passed in registers. */
10292 static int
10293 arm_size_return_regs (void)
10295 enum machine_mode mode;
10297 if (current_function_return_rtx != 0)
10298 mode = GET_MODE (current_function_return_rtx);
10299 else
10300 mode = DECL_MODE (DECL_RESULT (current_function_decl));
10302 return GET_MODE_SIZE (mode);
10305 static rtx
10306 emit_sfm (int base_reg, int count)
10308 rtx par;
10309 rtx dwarf;
10310 rtx tmp, reg;
10311 int i;
10313 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10314 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10316 reg = gen_rtx_REG (XFmode, base_reg++);
10318 XVECEXP (par, 0, 0)
10319 = gen_rtx_SET (VOIDmode,
10320 gen_frame_mem (BLKmode,
10321 gen_rtx_PRE_DEC (BLKmode,
10322 stack_pointer_rtx)),
10323 gen_rtx_UNSPEC (BLKmode,
10324 gen_rtvec (1, reg),
10325 UNSPEC_PUSH_MULT));
10326 tmp = gen_rtx_SET (VOIDmode,
10327 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
10328 RTX_FRAME_RELATED_P (tmp) = 1;
10329 XVECEXP (dwarf, 0, 1) = tmp;
10331 for (i = 1; i < count; i++)
10333 reg = gen_rtx_REG (XFmode, base_reg++);
10334 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10336 tmp = gen_rtx_SET (VOIDmode,
10337 gen_frame_mem (XFmode,
10338 plus_constant (stack_pointer_rtx,
10339 i * 12)),
10340 reg);
10341 RTX_FRAME_RELATED_P (tmp) = 1;
10342 XVECEXP (dwarf, 0, i + 1) = tmp;
10345 tmp = gen_rtx_SET (VOIDmode,
10346 stack_pointer_rtx,
10347 plus_constant (stack_pointer_rtx, -12 * count));
10349 RTX_FRAME_RELATED_P (tmp) = 1;
10350 XVECEXP (dwarf, 0, 0) = tmp;
10352 par = emit_insn (par);
10353 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10354 REG_NOTES (par));
10355 return par;
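/* A minimal usage sketch of the above (hypothetical arguments):
   emit_sfm (FIRST_FPA_REGNUM, 3) builds a PARALLEL that the sfm
   pattern outputs as a single multiple store of three f-registers,
   with a note recording sp = sp - 36 followed by the three XFmode
   stores at offsets 0, 12 and 24 from the new sp.  */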
10359 /* Return true if the current function needs to save/restore LR. */
10361 static bool
10362 thumb_force_lr_save (void)
10364 return !cfun->machine->lr_save_eliminated
10365 && (!leaf_function_p ()
10366 || thumb_far_jump_used_p ()
10367 || regs_ever_live [LR_REGNUM]);
10371 /* Compute the distance from register FROM to register TO.
10372 These can be the arg pointer (26), the soft frame pointer (25),
10373 the stack pointer (13) or the hard frame pointer (11).
10374 In thumb mode r7 is used as the soft frame pointer, if needed.
10375 Typical stack layout looks like this:
10377 old stack pointer -> | |
10378 ----
10379 | | \
10380 | | saved arguments for
10381 | | vararg functions
10382 | | /
10383 --
10384 hard FP & arg pointer -> | | \
10385 | | stack
10386 | | frame
10387 | | /
10388 --
10389 | | \
10390 | | call saved
10391 | | registers
10392 soft frame pointer -> | | /
10393 --
10394 | | \
10395 | | local
10396 | | variables
10397 locals base pointer -> | | /
10398 --
10399 | | \
10400 | | outgoing
10401 | | arguments
10402 current stack pointer -> | | /
10403 --
10405 For a given function some or all of these stack components
10406 may not be needed, giving rise to the possibility of
10407 eliminating some of the registers.
10409 The values returned by this function must reflect the behavior
10410 of arm_expand_prologue() and arm_compute_save_reg_mask().
10412 The sign of the number returned reflects the direction of stack
10413 growth, so the values are positive for all eliminations except
10414 from the soft frame pointer to the hard frame pointer.
10416 SFP may point just inside the local variables block to ensure correct
10417 alignment. */
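/* A worked example of the layout above (hypothetical numbers,
   assuming ARM_DOUBLEWORD_ALIGN and a zero-sized caller-interworking
   slot): an ARM function with no pretend args that saves {r4, fp, lr}
   and has 8 bytes of locals and no outgoing arguments gets
   saved_args = 0, saved_regs = 12, soft_frame = 16 (12 rounded up for
   doubleword alignment), locals_base = 24 and outgoing_args = 24.  */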
10420 /* Calculate stack offsets. These are used to calculate register elimination
10421 offsets and in prologue/epilogue code. */
10423 static arm_stack_offsets *
10424 arm_get_frame_offsets (void)
10426 struct arm_stack_offsets *offsets;
10427 unsigned long func_type;
10428 int leaf;
10429 int saved;
10430 HOST_WIDE_INT frame_size;
10432 offsets = &cfun->machine->stack_offsets;
10434 /* We need to know if we are a leaf function. Unfortunately, it
10435 is possible to be called after start_sequence has been called,
10436 which causes get_insns to return the insns for the sequence,
10437 not the function, which will cause leaf_function_p to return
10438 the incorrect result.
10440 To work around this, we cache the computed frame size. We only need to know about leaf functions once reload has completed, and the
10441 frame size cannot be changed after that time, so we can safely
10442 use the cached value. */
10444 if (reload_completed)
10445 return offsets;
10447 /* Initially this is the size of the local variables. It will be
10448 translated into an offset once we have determined the size of preceding data. */
10449 frame_size = ROUND_UP_WORD (get_frame_size ());
10451 leaf = leaf_function_p ();
10453 /* Space for variadic functions. */
10454 offsets->saved_args = current_function_pretend_args_size;
10456 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10458 if (TARGET_ARM)
10460 unsigned int regno;
10462 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10464 /* We know that SP will be doubleword aligned on entry, and we must
10465 preserve that condition at any subroutine call. We also require the
10466 soft frame pointer to be doubleword aligned. */
10468 if (TARGET_REALLY_IWMMXT)
10470 /* Check for the call-saved iWMMXt registers. */
10471 for (regno = FIRST_IWMMXT_REGNUM;
10472 regno <= LAST_IWMMXT_REGNUM;
10473 regno++)
10474 if (regs_ever_live [regno] && ! call_used_regs [regno])
10475 saved += 8;
10478 func_type = arm_current_func_type ();
10479 if (! IS_VOLATILE (func_type))
10481 /* Space for saved FPA registers. */
10482 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10483 if (regs_ever_live[regno] && ! call_used_regs[regno])
10484 saved += 12;
10486 /* Space for saved VFP registers. */
10487 if (TARGET_HARD_FLOAT && TARGET_VFP)
10488 saved += arm_get_vfp_saved_size ();
10491 else /* TARGET_THUMB */
10493 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10494 if (TARGET_BACKTRACE)
10495 saved += 16;
10498 /* Saved registers include the stack frame. */
10499 offsets->saved_regs = offsets->saved_args + saved;
10500 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10501 /* A leaf function does not need any stack alignment if it has nothing
10502 on the stack. */
10503 if (leaf && frame_size == 0)
10505 offsets->outgoing_args = offsets->soft_frame;
10506 return offsets;
10509 /* Ensure SFP has the correct alignment. */
10510 if (ARM_DOUBLEWORD_ALIGN
10511 && (offsets->soft_frame & 7))
10512 offsets->soft_frame += 4;
10514 offsets->locals_base = offsets->soft_frame + frame_size;
10515 offsets->outgoing_args = (offsets->locals_base
10516 + current_function_outgoing_args_size);
10518 if (ARM_DOUBLEWORD_ALIGN)
10520 /* Ensure SP remains doubleword aligned. */
10521 if (offsets->outgoing_args & 7)
10522 offsets->outgoing_args += 4;
10523 gcc_assert (!(offsets->outgoing_args & 7));
10526 return offsets;
10530 /* Calculate the relative offsets for the different stack pointers. Positive
10531 offsets are in the direction of stack growth. */
10533 HOST_WIDE_INT
10534 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10536 arm_stack_offsets *offsets;
10538 offsets = arm_get_frame_offsets ();
10540 /* OK, now we have enough information to compute the distances.
10541 There must be an entry in these switch tables for each pair
10542 of registers in ELIMINABLE_REGS, even if some of the entries
10543 seem to be redundant or useless. */
10544 switch (from)
10546 case ARG_POINTER_REGNUM:
10547 switch (to)
10549 case THUMB_HARD_FRAME_POINTER_REGNUM:
10550 return 0;
10552 case FRAME_POINTER_REGNUM:
10553 /* This is the reverse of the soft frame pointer
10554 to hard frame pointer elimination below. */
10555 return offsets->soft_frame - offsets->saved_args;
10557 case ARM_HARD_FRAME_POINTER_REGNUM:
10558 /* If there is no stack frame then the hard
10559 frame pointer and the arg pointer coincide. */
10560 if (offsets->frame == offsets->saved_regs)
10561 return 0;
10562 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10563 return (frame_pointer_needed
10564 && cfun->static_chain_decl != NULL
10565 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10567 case STACK_POINTER_REGNUM:
10568 /* If nothing has been pushed on the stack at all
10569 then this will return -4. This *is* correct! */
10570 return offsets->outgoing_args - (offsets->saved_args + 4);
10572 default:
10573 gcc_unreachable ();
10575 gcc_unreachable ();
10577 case FRAME_POINTER_REGNUM:
10578 switch (to)
10580 case THUMB_HARD_FRAME_POINTER_REGNUM:
10581 return 0;
10583 case ARM_HARD_FRAME_POINTER_REGNUM:
10584 /* The hard frame pointer points to the top entry in the
10585 stack frame. The soft frame pointer points to the bottom entry
10586 in the stack frame. If there is no stack frame at all,
10587 then they are identical. */
10589 return offsets->frame - offsets->soft_frame;
10591 case STACK_POINTER_REGNUM:
10592 return offsets->outgoing_args - offsets->soft_frame;
10594 default:
10595 gcc_unreachable ();
10597 gcc_unreachable ();
10599 default:
10600 /* You cannot eliminate from the stack pointer.
10601 In theory you could eliminate from the hard frame
10602 pointer to the stack pointer, but this will never
10603 happen, since if a stack frame is not needed the
10604 hard frame pointer will never be used. */
10605 gcc_unreachable ();
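/* Continuing the worked example from arm_get_frame_offsets above
   (hypothetical numbers): eliminating ARG_POINTER_REGNUM to
   STACK_POINTER_REGNUM yields 24 - (0 + 4) = 20, and
   FRAME_POINTER_REGNUM to STACK_POINTER_REGNUM yields 24 - 16 = 8.  */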
10610 /* Generate the prologue instructions for entry into an ARM function. */
10611 void
10612 arm_expand_prologue (void)
10614 int reg;
10615 rtx amount;
10616 rtx insn;
10617 rtx ip_rtx;
10618 unsigned long live_regs_mask;
10619 unsigned long func_type;
10620 int fp_offset = 0;
10621 int saved_pretend_args = 0;
10622 int saved_regs = 0;
10623 unsigned HOST_WIDE_INT args_to_push;
10624 arm_stack_offsets *offsets;
10626 func_type = arm_current_func_type ();
10628 /* Naked functions don't have prologues. */
10629 if (IS_NAKED (func_type))
10630 return;
10632 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10633 args_to_push = current_function_pretend_args_size;
10635 /* Compute which registers we will have to save onto the stack. */
10636 live_regs_mask = arm_compute_save_reg_mask ();
10638 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10640 if (frame_pointer_needed)
10642 if (IS_INTERRUPT (func_type))
10644 /* Interrupt functions must not corrupt any registers.
10645 Creating a frame pointer however, corrupts the IP
10646 register, so we must push it first. */
10647 insn = emit_multi_reg_push (1 << IP_REGNUM);
10649 /* Do not set RTX_FRAME_RELATED_P on this insn.
10650 The dwarf stack unwinding code only wants to see one
10651 stack decrement per function, and this is not it. If
10652 this instruction is labeled as being part of the frame
10653 creation sequence then dwarf2out_frame_debug_expr will
10654 die when it encounters the assignment of IP to FP
10655 later on, since the use of SP here establishes SP as
10656 the CFA register and not IP.
10658 Anyway this instruction is not really part of the stack
10659 frame creation although it is part of the prologue. */
10661 else if (IS_NESTED (func_type))
10663 /* The static chain register is the same as the IP register
10664 used as a scratch register during stack frame creation.
10665 To get around this, we need to find somewhere to store IP
10666 whilst the frame is being created. We try the following
10667 places in order:
10669 1. The last argument register.
10670 2. A slot on the stack above the frame. (This only
10671 works if the function is not a varargs function).
10672 3. Register r3, after pushing the argument registers
10673 onto the stack.
10675 Note - we only need to tell the dwarf2 backend about the SP
10676 adjustment in the second variant; the static chain register
10677 doesn't need to be unwound, as it doesn't contain a value
10678 inherited from the caller. */
10680 if (regs_ever_live[3] == 0)
10681 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10682 else if (args_to_push == 0)
10684 rtx dwarf;
10686 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10687 insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
10688 fp_offset = 4;
10690 /* Just tell the dwarf backend that we adjusted SP. */
10691 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10692 plus_constant (stack_pointer_rtx,
10693 -fp_offset));
10694 RTX_FRAME_RELATED_P (insn) = 1;
10695 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10696 dwarf, REG_NOTES (insn));
10698 else
10700 /* Store the args on the stack. */
10701 if (cfun->machine->uses_anonymous_args)
10702 insn = emit_multi_reg_push
10703 ((0xf0 >> (args_to_push / 4)) & 0xf);
10704 else
10705 insn = emit_insn
10706 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10707 GEN_INT (- args_to_push)));
10709 RTX_FRAME_RELATED_P (insn) = 1;
10711 saved_pretend_args = 1;
10712 fp_offset = args_to_push;
10713 args_to_push = 0;
10715 /* Now reuse r3 to preserve IP. */
10716 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10720 insn = emit_set_insn (ip_rtx,
10721 plus_constant (stack_pointer_rtx, fp_offset));
10722 RTX_FRAME_RELATED_P (insn) = 1;
10725 if (args_to_push)
10727 /* Push the argument registers, or reserve space for them. */
10728 if (cfun->machine->uses_anonymous_args)
10729 insn = emit_multi_reg_push
10730 ((0xf0 >> (args_to_push / 4)) & 0xf);
10731 else
10732 insn = emit_insn
10733 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10734 GEN_INT (- args_to_push)));
10735 RTX_FRAME_RELATED_P (insn) = 1;
10738 /* If this is an interrupt service routine, and the link register
10739 is going to be pushed, and we are not creating a stack frame
10740 (which would involve an extra push of IP and a pop in the epilogue),
10741 then subtracting four from LR now means that the function return
10742 can be done with a single instruction. */
10743 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10744 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10745 && ! frame_pointer_needed)
10747 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
10749 emit_set_insn (lr, plus_constant (lr, -4));
10752 if (live_regs_mask)
10754 insn = emit_multi_reg_push (live_regs_mask);
10755 saved_regs += bit_count (live_regs_mask) * 4;
10756 RTX_FRAME_RELATED_P (insn) = 1;
10759 if (TARGET_IWMMXT)
10760 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10761 if (regs_ever_live[reg] && ! call_used_regs [reg])
10763 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10764 insn = gen_frame_mem (V2SImode, insn);
10765 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
10766 RTX_FRAME_RELATED_P (insn) = 1;
10767 saved_regs += 8;
10770 if (! IS_VOLATILE (func_type))
10772 int start_reg;
10774 /* Save any floating point call-saved registers used by this
10775 function. */
10776 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10778 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10779 if (regs_ever_live[reg] && !call_used_regs[reg])
10781 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10782 insn = gen_frame_mem (XFmode, insn);
10783 insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
10784 RTX_FRAME_RELATED_P (insn) = 1;
10785 saved_regs += 12;
10788 else
10790 start_reg = LAST_FPA_REGNUM;
10792 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10794 if (regs_ever_live[reg] && !call_used_regs[reg])
10796 if (start_reg - reg == 3)
10798 insn = emit_sfm (reg, 4);
10799 RTX_FRAME_RELATED_P (insn) = 1;
10800 saved_regs += 48;
10801 start_reg = reg - 1;
10804 else
10806 if (start_reg != reg)
10808 insn = emit_sfm (reg + 1, start_reg - reg);
10809 RTX_FRAME_RELATED_P (insn) = 1;
10810 saved_regs += (start_reg - reg) * 12;
10812 start_reg = reg - 1;
10816 if (start_reg != reg)
10818 insn = emit_sfm (reg + 1, start_reg - reg);
10819 saved_regs += (start_reg - reg) * 12;
10820 RTX_FRAME_RELATED_P (insn) = 1;
10823 if (TARGET_HARD_FLOAT && TARGET_VFP)
10825 start_reg = FIRST_VFP_REGNUM;
10827 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10829 if ((!regs_ever_live[reg] || call_used_regs[reg])
10830 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10832 if (start_reg != reg)
10833 saved_regs += vfp_emit_fstmx (start_reg,
10834 (reg - start_reg) / 2);
10835 start_reg = reg + 2;
10838 if (start_reg != reg)
10839 saved_regs += vfp_emit_fstmx (start_reg,
10840 (reg - start_reg) / 2);
10844 if (frame_pointer_needed)
10846 /* Create the new frame pointer. */
10847 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10848 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10849 RTX_FRAME_RELATED_P (insn) = 1;
10851 if (IS_NESTED (func_type))
10853 /* Recover the static chain register. */
10854 if (regs_ever_live [3] == 0
10855 || saved_pretend_args)
10856 insn = gen_rtx_REG (SImode, 3);
10857 else /* if (current_function_pretend_args_size == 0) */
10859 insn = plus_constant (hard_frame_pointer_rtx, 4);
10860 insn = gen_frame_mem (SImode, insn);
10863 emit_set_insn (ip_rtx, insn);
10864 /* Add a USE to stop propagate_one_insn() from barfing. */
10865 emit_insn (gen_prologue_use (ip_rtx));
10869 offsets = arm_get_frame_offsets ();
10870 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10872 /* This add can produce multiple insns for a large constant, so we
10873 need to get tricky. */
10874 rtx last = get_last_insn ();
10876 amount = GEN_INT (offsets->saved_args + saved_regs
10877 - offsets->outgoing_args);
10879 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10880 amount));
10881 do
10883 last = last ? NEXT_INSN (last) : get_insns ();
10884 RTX_FRAME_RELATED_P (last) = 1;
10886 while (last != insn);
10888 /* If the frame pointer is needed, emit a special barrier that
10889 will prevent the scheduler from moving stores to the frame
10890 before the stack adjustment. */
10891 if (frame_pointer_needed)
10892 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10893 hard_frame_pointer_rtx));
10897 if (flag_pic && arm_pic_register != INVALID_REGNUM)
10898 arm_load_pic_register (0UL);
10900 /* If we are profiling, make sure no instructions are scheduled before
10901 the call to mcount. Similarly if the user has requested no
10902 scheduling in the prolog. Similarly if we want non-call exceptions
10903 using the EABI unwinder, to prevent faulting instructions from being
10904 swapped with a stack adjustment. */
10905 if (current_function_profile || !TARGET_SCHED_PROLOG
10906 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
10907 emit_insn (gen_blockage ());
10909 /* If the link register is being kept alive, with the return address in it,
10910 then make sure that it does not get reused by the ce2 pass. */
10911 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10913 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10914 cfun->machine->lr_save_eliminated = 1;
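/* An illustrative sketch of the expansion above (hypothetical,
   non-nested function that needs an APCS frame and saves
   {fp, ip, lr, pc}): the emitted prologue corresponds to

	mov	ip, sp
	stmfd	sp!, {fp, ip, lr, pc}
	sub	fp, ip, #4

   followed by any sp adjustment for locals and outgoing args.  */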
10918 /* If CODE is 'd', then X is a condition operand and the instruction
10919 should only be executed if the condition is true.
10920 If CODE is 'D', then X is a condition operand and the instruction
10921 should only be executed if the condition is false: however, if the mode
10922 of the comparison is CCFPEmode, then always execute the instruction -- we
10923 do this because in these circumstances !GE does not necessarily imply LT;
10924 in these cases the instruction pattern will take care to make sure that
10925 an instruction containing %d will follow, thereby undoing the effects of
10926 doing this instruction unconditionally.
10927 If CODE is 'N' then X is a floating point operand that must be negated
10928 before output.
10929 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10930 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
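/* For example (hypothetical output template): "mov%d1\t%0, #1" prints
   "moveq r0, #1" when operand 1 is an EQ comparison and operand 0 is
   r0, while "mov%D1\t%0, #0" prints the inverted condition,
   "movne r0, #0".  */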
10931 void
10932 arm_print_operand (FILE *stream, rtx x, int code)
10934 switch (code)
10936 case '@':
10937 fputs (ASM_COMMENT_START, stream);
10938 return;
10940 case '_':
10941 fputs (user_label_prefix, stream);
10942 return;
10944 case '|':
10945 fputs (REGISTER_PREFIX, stream);
10946 return;
10948 case '?':
10949 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10951 if (TARGET_THUMB)
10953 output_operand_lossage ("predicated Thumb instruction");
10954 break;
10956 if (current_insn_predicate != NULL)
10958 output_operand_lossage
10959 ("predicated instruction in conditional sequence");
10960 break;
10963 fputs (arm_condition_codes[arm_current_cc], stream);
10965 else if (current_insn_predicate)
10967 enum arm_cond_code code;
10969 if (TARGET_THUMB)
10971 output_operand_lossage ("predicated Thumb instruction");
10972 break;
10975 code = get_arm_condition_code (current_insn_predicate);
10976 fputs (arm_condition_codes[code], stream);
10978 return;
10980 case 'N':
10982 REAL_VALUE_TYPE r;
10983 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10984 r = REAL_VALUE_NEGATE (r);
10985 fprintf (stream, "%s", fp_const_from_val (&r));
10987 return;
10989 case 'B':
10990 if (GET_CODE (x) == CONST_INT)
10992 HOST_WIDE_INT val;
10993 val = ARM_SIGN_EXTEND (~INTVAL (x));
10994 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10996 else
10998 putc ('~', stream);
10999 output_addr_const (stream, x);
11001 return;
11003 case 'i':
11004 fprintf (stream, "%s", arithmetic_instr (x, 1));
11005 return;
11007 /* Truncate Cirrus shift counts. */
11008 case 's':
11009 if (GET_CODE (x) == CONST_INT)
11011 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
11012 return;
11014 arm_print_operand (stream, x, 0);
11015 return;
11017 case 'I':
11018 fprintf (stream, "%s", arithmetic_instr (x, 0));
11019 return;
11021 case 'S':
11023 HOST_WIDE_INT val;
11024 const char *shift;
11026 if (!shift_operator (x, SImode))
11028 output_operand_lossage ("invalid shift operand");
11029 break;
11032 shift = shift_op (x, &val);
11034 if (shift)
11036 fprintf (stream, ", %s ", shift);
11037 if (val == -1)
11038 arm_print_operand (stream, XEXP (x, 1), 0);
11039 else
11040 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
11043 return;
11045 /* An explanation of the 'Q', 'R' and 'H' register operands:
11047 In a pair of registers containing a DI or DF value the 'Q'
11048 operand returns the register number of the register containing
11049 the least significant part of the value. The 'R' operand returns
11050 the register number of the register containing the most
11051 significant part of the value.
11053 The 'H' operand returns the higher of the two register numbers.
11054 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
11055 same as the 'Q' operand, since the most significant part of the
11056 value is held in the lower number register. The reverse is true
11057 on systems where WORDS_BIG_ENDIAN is false.
11059 The purpose of these operands is to distinguish between cases
11060 where the endian-ness of the values is important (for example
11061 when they are added together), and cases where the endian-ness
11062 is irrelevant, but the order of register operations is important.
11063 For example when loading a value from memory into a register
11064 pair, the endian-ness does not matter. Provided that the value
11065 from the lower memory address is put into the lower numbered
11066 register, and the value from the higher address is put into the
11067 higher numbered register, the load will work regardless of whether
11068 the value being loaded is big-wordian or little-wordian. The
11069 order of the two register loads can matter however, if the address
11070 of the memory location is actually held in one of the registers
11071 being overwritten by the load. */
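/* A concrete illustration (hypothetical operands): for a DImode value
   held in {r0, r1} with WORDS_BIG_ENDIAN false, %Q prints r0, %R
   prints r1 and %H prints r1; with WORDS_BIG_ENDIAN true, %Q and %H
   both print r1 while %R prints r0.  */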
11072 case 'Q':
11073 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11075 output_operand_lossage ("invalid operand for code '%c'", code);
11076 return;
11079 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
11080 return;
11082 case 'R':
11083 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11085 output_operand_lossage ("invalid operand for code '%c'", code);
11086 return;
11089 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
11090 return;
11092 case 'H':
11093 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11095 output_operand_lossage ("invalid operand for code '%c'", code);
11096 return;
11099 asm_fprintf (stream, "%r", REGNO (x) + 1);
11100 return;
11102 case 'm':
11103 asm_fprintf (stream, "%r",
11104 GET_CODE (XEXP (x, 0)) == REG
11105 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
11106 return;
11108 case 'M':
11109 asm_fprintf (stream, "{%r-%r}",
11110 REGNO (x),
11111 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
11112 return;
11114 case 'd':
11115 /* CONST_TRUE_RTX means always -- that's the default. */
11116 if (x == const_true_rtx)
11117 return;
11119 if (!COMPARISON_P (x))
11121 output_operand_lossage ("invalid operand for code '%c'", code);
11122 return;
11125 fputs (arm_condition_codes[get_arm_condition_code (x)],
11126 stream);
11127 return;
11129 case 'D':
11130 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11131 want to do that. */
11132 if (x == const_true_rtx)
11134 output_operand_lossage ("instruction never executed");
11135 return;
11137 if (!COMPARISON_P (x))
11139 output_operand_lossage ("invalid operand for code '%c'", code);
11140 return;
11143 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
11144 (get_arm_condition_code (x))],
11145 stream);
11146 return;
11148 /* Cirrus registers can be accessed in a variety of ways:
11149 single floating point (f)
11150 double floating point (d)
11151 32-bit integer (fx)
11152 64-bit integer (dx). */
11153 case 'W': /* Cirrus register in F mode. */
11154 case 'X': /* Cirrus register in D mode. */
11155 case 'Y': /* Cirrus register in FX mode. */
11156 case 'Z': /* Cirrus register in DX mode. */
11157 gcc_assert (GET_CODE (x) == REG
11158 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
11160 fprintf (stream, "mv%s%s",
11161 code == 'W' ? "f"
11162 : code == 'X' ? "d"
11163 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
11165 return;
11167 /* Print a Cirrus register, selecting the f/d/fx/dx form from the register's own mode. */
11168 case 'V':
11170 int mode = GET_MODE (x);
11172 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
11174 output_operand_lossage ("invalid operand for code '%c'", code);
11175 return;
11178 fprintf (stream, "mv%s%s",
11179 mode == DFmode ? "d"
11180 : mode == SImode ? "fx"
11181 : mode == DImode ? "dx"
11182 : "f", reg_names[REGNO (x)] + 2);
11184 return;
11187 case 'U':
11188 if (GET_CODE (x) != REG
11189 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
11190 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
11191 /* Bad value for wCG register number. */
11193 output_operand_lossage ("invalid operand for code '%c'", code);
11194 return;
11197 else
11198 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
11199 return;
11201 /* Print an iWMMXt control register name. */
11202 case 'w':
11203 if (GET_CODE (x) != CONST_INT
11204 || INTVAL (x) < 0
11205 || INTVAL (x) >= 16)
11206 /* Bad value for wC register number. */
11208 output_operand_lossage ("invalid operand for code '%c'", code);
11209 return;
11212 else
11214 static const char * wc_reg_names [16] =
11216 "wCID", "wCon", "wCSSF", "wCASF",
11217 "wC4", "wC5", "wC6", "wC7",
11218 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
11219 "wC12", "wC13", "wC14", "wC15"
11222 fputs (wc_reg_names [INTVAL (x)], stream);
11224 return;
11226 /* Print a VFP double precision register name. */
11227 case 'P':
11229 int mode = GET_MODE (x);
11230 int num;
11232 if (mode != DImode && mode != DFmode)
11234 output_operand_lossage ("invalid operand for code '%c'", code);
11235 return;
11238 if (GET_CODE (x) != REG
11239 || !IS_VFP_REGNUM (REGNO (x)))
11241 output_operand_lossage ("invalid operand for code '%c'", code);
11242 return;
11245 num = REGNO(x) - FIRST_VFP_REGNUM;
11246 if (num & 1)
11248 output_operand_lossage ("invalid operand for code '%c'", code);
11249 return;
11252 fprintf (stream, "d%d", num >> 1);
11254 return;
11256 default:
11257 if (x == 0)
11259 output_operand_lossage ("missing operand");
11260 return;
11263 switch (GET_CODE (x))
11265 case REG:
11266 asm_fprintf (stream, "%r", REGNO (x));
11267 break;
11269 case MEM:
11270 output_memory_reference_mode = GET_MODE (x);
11271 output_address (XEXP (x, 0));
11272 break;
11274 case CONST_DOUBLE:
11275 fprintf (stream, "#%s", fp_immediate_constant (x));
11276 break;
11278 default:
11279 gcc_assert (GET_CODE (x) != NEG);
11280 fputc ('#', stream);
11281 output_addr_const (stream, x);
11282 break;
11287 #ifndef AOF_ASSEMBLER
11288 /* Target hook for assembling integer objects. The ARM version needs to
11289 handle word-sized values specially. */
11290 static bool
11291 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
11293 if (size == UNITS_PER_WORD && aligned_p)
11295 fputs ("\t.word\t", asm_out_file);
11296 output_addr_const (asm_out_file, x);
11298 /* Mark symbols as position independent. We only do this in the
11299 .text segment, not in the .data segment. */
11300 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
11301 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
11303 if (GET_CODE (x) == SYMBOL_REF
11304 && (CONSTANT_POOL_ADDRESS_P (x)
11305 || SYMBOL_REF_LOCAL_P (x)))
11306 fputs ("(GOTOFF)", asm_out_file);
11307 else if (GET_CODE (x) == LABEL_REF)
11308 fputs ("(GOTOFF)", asm_out_file);
11309 else
11310 fputs ("(GOT)", asm_out_file);
11312 fputc ('\n', asm_out_file);
11313 return true;
11316 if (arm_vector_mode_supported_p (GET_MODE (x)))
11318 int i, units;
11320 gcc_assert (GET_CODE (x) == CONST_VECTOR);
11322 units = CONST_VECTOR_NUNITS (x);
11324 switch (GET_MODE (x))
11326 case V2SImode: size = 4; break;
11327 case V4HImode: size = 2; break;
11328 case V8QImode: size = 1; break;
11329 default:
11330 gcc_unreachable ();
11333 for (i = 0; i < units; i++)
11335 rtx elt;
11337 elt = CONST_VECTOR_ELT (x, i);
11338 assemble_integer
11339 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
11342 return true;
11345 return default_assemble_integer (x, size, aligned_p);
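/* For example (hypothetical symbol): a word-sized constant-table
   entry for a local symbol "foo", emitted with flag_pic on a
   NEED_GOT_RELOC target, comes out as "\t.word\tfoo(GOTOFF)"; a
   non-local symbol would get "(GOT)" instead.  */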
11349 /* Add a function to the list of static constructors. */
11351 static void
11352 arm_elf_asm_constructor (rtx symbol, int priority)
11354 section *s;
11356 if (!TARGET_AAPCS_BASED)
11358 default_named_section_asm_out_constructor (symbol, priority);
11359 return;
11362 /* Put these in the .init_array section, using a special relocation. */
11363 if (priority != DEFAULT_INIT_PRIORITY)
11365 char buf[18];
11366 sprintf (buf, ".init_array.%.5u", priority);
11367 s = get_section (buf, SECTION_WRITE, NULL_TREE);
11369 else
11370 s = ctors_section;
11372 switch_to_section (s);
11373 assemble_align (POINTER_SIZE);
11374 fputs ("\t.word\t", asm_out_file);
11375 output_addr_const (asm_out_file, symbol);
11376 fputs ("(target1)\n", asm_out_file);
11378 #endif
11380 /* A finite state machine takes care of noticing whether or not instructions
11381 can be conditionally executed, and thus decreases execution time and code
11382 size by deleting branch instructions. The fsm is controlled by
11383 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
11385 /* The states of the fsm controlling condition codes are:
11386 0: normal, do nothing special
11387 1: make ASM_OUTPUT_OPCODE not output this instruction
11388 2: make ASM_OUTPUT_OPCODE not output this instruction
11389 3: make instructions conditional
11390 4: make instructions conditional
11392 State transitions (state->state by whom under condition):
11393 0 -> 1 final_prescan_insn if the `target' is a label
11394 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
11395 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
11396 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
11397 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
11398 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
11399 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
11400 (the target insn is arm_target_insn).
11402 If the jump clobbers the conditions then we use states 2 and 4.
11404 A similar thing can be done with conditional return insns.
11406 XXX In case the `target' is an unconditional branch, this conditionalising
11407 of the instructions always reduces code size, but not always execution
11408 time. But then, I want to reduce the code size to somewhere near what
11409 /bin/cc produces. */
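/* A sketch of the transformation (hypothetical input): a conditional
   branch around a single insn, e.g.

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   is collapsed by this fsm into

	cmp	r0, #0
	addne	r1, r1, #1

   removing the branch and, on many cores, a pipeline flush.  */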
11411 /* Returns the index of the ARM condition code string in
11412 `arm_condition_codes'. COMPARISON should be an rtx like
11413 `(eq (...) (...))'. */
11414 static enum arm_cond_code
11415 get_arm_condition_code (rtx comparison)
11417 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11418 int code;
11419 enum rtx_code comp_code = GET_CODE (comparison);
11421 if (GET_MODE_CLASS (mode) != MODE_CC)
11422 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11423 XEXP (comparison, 1));
11425 switch (mode)
11427 case CC_DNEmode: code = ARM_NE; goto dominance;
11428 case CC_DEQmode: code = ARM_EQ; goto dominance;
11429 case CC_DGEmode: code = ARM_GE; goto dominance;
11430 case CC_DGTmode: code = ARM_GT; goto dominance;
11431 case CC_DLEmode: code = ARM_LE; goto dominance;
11432 case CC_DLTmode: code = ARM_LT; goto dominance;
11433 case CC_DGEUmode: code = ARM_CS; goto dominance;
11434 case CC_DGTUmode: code = ARM_HI; goto dominance;
11435 case CC_DLEUmode: code = ARM_LS; goto dominance;
11436 case CC_DLTUmode: code = ARM_CC;
11438 dominance:
11439 gcc_assert (comp_code == EQ || comp_code == NE);
11441 if (comp_code == EQ)
11442 return ARM_INVERSE_CONDITION_CODE (code);
11443 return code;
11445 case CC_NOOVmode:
11446 switch (comp_code)
11448 case NE: return ARM_NE;
11449 case EQ: return ARM_EQ;
11450 case GE: return ARM_PL;
11451 case LT: return ARM_MI;
11452 default: gcc_unreachable ();
11455 case CC_Zmode:
11456 switch (comp_code)
11458 case NE: return ARM_NE;
11459 case EQ: return ARM_EQ;
11460 default: gcc_unreachable ();
11463 case CC_Nmode:
11464 switch (comp_code)
11466 case NE: return ARM_MI;
11467 case EQ: return ARM_PL;
11468 default: gcc_unreachable ();
11471 case CCFPEmode:
11472 case CCFPmode:
11473 /* These encodings assume that AC=1 in the FPA system control
11474 byte. This allows us to handle all cases except UNEQ and
11475 LTGT. */
11476 switch (comp_code)
11478 case GE: return ARM_GE;
11479 case GT: return ARM_GT;
11480 case LE: return ARM_LS;
11481 case LT: return ARM_MI;
11482 case NE: return ARM_NE;
11483 case EQ: return ARM_EQ;
11484 case ORDERED: return ARM_VC;
11485 case UNORDERED: return ARM_VS;
11486 case UNLT: return ARM_LT;
11487 case UNLE: return ARM_LE;
11488 case UNGT: return ARM_HI;
11489 case UNGE: return ARM_PL;
11490 /* UNEQ and LTGT do not have a representation. */
11491 case UNEQ: /* Fall through. */
11492 case LTGT: /* Fall through. */
11493 default: gcc_unreachable ();
11496 case CC_SWPmode:
11497 switch (comp_code)
11499 case NE: return ARM_NE;
11500 case EQ: return ARM_EQ;
11501 case GE: return ARM_LE;
11502 case GT: return ARM_LT;
11503 case LE: return ARM_GE;
11504 case LT: return ARM_GT;
11505 case GEU: return ARM_LS;
11506 case GTU: return ARM_CC;
11507 case LEU: return ARM_CS;
11508 case LTU: return ARM_HI;
11509 default: gcc_unreachable ();
11512 case CC_Cmode:
11513 switch (comp_code)
11515 case LTU: return ARM_CS;
11516 case GEU: return ARM_CC;
11517 default: gcc_unreachable ();
11520 case CCmode:
11521 switch (comp_code)
11523 case NE: return ARM_NE;
11524 case EQ: return ARM_EQ;
11525 case GE: return ARM_GE;
11526 case GT: return ARM_GT;
11527 case LE: return ARM_LE;
11528 case LT: return ARM_LT;
11529 case GEU: return ARM_CS;
11530 case GTU: return ARM_HI;
11531 case LEU: return ARM_LS;
11532 case LTU: return ARM_CC;
11533 default: gcc_unreachable ();
11536 default: gcc_unreachable ();
11540 void
11541 arm_final_prescan_insn (rtx insn)
11543 /* BODY will hold the body of INSN. */
11544 rtx body = PATTERN (insn);
11546 /* This will be 1 if trying to repeat the trick, and things need to be
11547 reversed if it appears to fail. */
11548 int reverse = 0;
11550 /* JUMP_CLOBBERS being one implies that the condition codes are
11551 clobbered if a branch is taken, even if the rtl suggests otherwise. It also
11552 means that we have to grub around within the jump expression to find
11553 out what the conditions are when the jump isn't taken. */
11554 int jump_clobbers = 0;
11556 /* If we start with a return insn, we only succeed if we find another one. */
11557 int seeking_return = 0;
11559 /* START_INSN will hold the insn from where we start looking. This is the
11560 first insn after the following code_label if REVERSE is true. */
11561 rtx start_insn = insn;
11563 /* If in state 4, check if the target branch is reached, in order to
11564 change back to state 0. */
11565 if (arm_ccfsm_state == 4)
11567 if (insn == arm_target_insn)
11569 arm_target_insn = NULL;
11570 arm_ccfsm_state = 0;
11572 return;
11575 /* If in state 3, it is possible to repeat the trick if this insn is an
11576 unconditional branch to a label, immediately following this branch
11577 is the previous target label (which is used only once), and the label this
11578 branch jumps to is not too far off. */
11579 if (arm_ccfsm_state == 3)
11581 if (simplejump_p (insn))
11583 start_insn = next_nonnote_insn (start_insn);
11584 if (GET_CODE (start_insn) == BARRIER)
11586 /* XXX Isn't this always a barrier? */
11587 start_insn = next_nonnote_insn (start_insn);
11589 if (GET_CODE (start_insn) == CODE_LABEL
11590 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11591 && LABEL_NUSES (start_insn) == 1)
11592 reverse = TRUE;
11593 else
11594 return;
11596 else if (GET_CODE (body) == RETURN)
11598 start_insn = next_nonnote_insn (start_insn);
11599 if (GET_CODE (start_insn) == BARRIER)
11600 start_insn = next_nonnote_insn (start_insn);
11601 if (GET_CODE (start_insn) == CODE_LABEL
11602 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11603 && LABEL_NUSES (start_insn) == 1)
11605 reverse = TRUE;
11606 seeking_return = 1;
11608 else
11609 return;
11611 else
11612 return;
11615 gcc_assert (!arm_ccfsm_state || reverse);
11616 if (GET_CODE (insn) != JUMP_INSN)
11617 return;
11619 /* This jump might be paralleled with a clobber of the condition codes;
11620 the jump should always come first. */
11621 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11622 body = XVECEXP (body, 0, 0);
11624 if (reverse
11625 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11626 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11628 int insns_skipped;
11629 int fail = FALSE, succeed = FALSE;
11630 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11631 int then_not_else = TRUE;
11632 rtx this_insn = start_insn, label = 0;
11634 /* If the jump cannot be done with one instruction, we cannot
11635 conditionally execute the instruction in the inverse case. */
11636 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11638 jump_clobbers = 1;
11639 return;
11642 /* Register the insn jumped to. */
11643 if (reverse)
11645 if (!seeking_return)
11646 label = XEXP (SET_SRC (body), 0);
11648 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11649 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11650 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11652 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11653 then_not_else = FALSE;
11655 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11656 seeking_return = 1;
11657 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11659 seeking_return = 1;
11660 then_not_else = FALSE;
11662 else
11663 gcc_unreachable ();
11665 /* See how many insns this branch skips, and what kind of insns. If all
11666 insns are okay, and the label or unconditional branch to the same
11667 label is not too far away, succeed. */
11668 for (insns_skipped = 0;
11669 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11671 rtx scanbody;
11673 this_insn = next_nonnote_insn (this_insn);
11674 if (!this_insn)
11675 break;
11677 switch (GET_CODE (this_insn))
11679 case CODE_LABEL:
11680 /* Succeed if it is the target label, otherwise fail since
11681 control falls in from somewhere else. */
11682 if (this_insn == label)
11684 if (jump_clobbers)
11686 arm_ccfsm_state = 2;
11687 this_insn = next_nonnote_insn (this_insn);
11689 else
11690 arm_ccfsm_state = 1;
11691 succeed = TRUE;
11693 else
11694 fail = TRUE;
11695 break;
11697 case BARRIER:
11698 /* Succeed if the following insn is the target label.
11699 Otherwise fail.
11700 If return insns are used then the last insn in a function
11701 will be a barrier. */
11702 this_insn = next_nonnote_insn (this_insn);
11703 if (this_insn && this_insn == label)
11705 if (jump_clobbers)
11707 arm_ccfsm_state = 2;
11708 this_insn = next_nonnote_insn (this_insn);
11710 else
11711 arm_ccfsm_state = 1;
11712 succeed = TRUE;
11714 else
11715 fail = TRUE;
11716 break;
11718 case CALL_INSN:
11719 /* The AAPCS says that conditional calls should not be
11720 used since they make interworking inefficient (the
11721 linker can't transform BL<cond> into BLX). That's
11722 only a problem if the machine has BLX. */
11723 if (arm_arch5)
11725 fail = TRUE;
11726 break;
11729 /* Succeed if the following insn is the target label, or
11730 if the following two insns are a barrier and the
11731 target label. */
11732 this_insn = next_nonnote_insn (this_insn);
11733 if (this_insn && GET_CODE (this_insn) == BARRIER)
11734 this_insn = next_nonnote_insn (this_insn);
11736 if (this_insn && this_insn == label
11737 && insns_skipped < max_insns_skipped)
11739 if (jump_clobbers)
11741 arm_ccfsm_state = 2;
11742 this_insn = next_nonnote_insn (this_insn);
11744 else
11745 arm_ccfsm_state = 1;
11746 succeed = TRUE;
11748 else
11749 fail = TRUE;
11750 break;
11752 case JUMP_INSN:
11753 /* If this is an unconditional branch to the same label, succeed.
11754 If it is to another label, do nothing. If it is conditional,
11755 fail. */
11756 /* XXX Probably, the tests for SET and the PC are
11757 unnecessary. */
11759 scanbody = PATTERN (this_insn);
11760 if (GET_CODE (scanbody) == SET
11761 && GET_CODE (SET_DEST (scanbody)) == PC)
11763 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11764 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11766 arm_ccfsm_state = 2;
11767 succeed = TRUE;
11769 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11770 fail = TRUE;
11772 /* Fail if a conditional return is undesirable (e.g. on a
11773 StrongARM), but still allow this if optimizing for size. */
11774 else if (GET_CODE (scanbody) == RETURN
11775 && !use_return_insn (TRUE, NULL)
11776 && !optimize_size)
11777 fail = TRUE;
11778 else if (GET_CODE (scanbody) == RETURN
11779 && seeking_return)
11781 arm_ccfsm_state = 2;
11782 succeed = TRUE;
11784 else if (GET_CODE (scanbody) == PARALLEL)
11786 switch (get_attr_conds (this_insn))
11788 case CONDS_NOCOND:
11789 break;
11790 default:
11791 fail = TRUE;
11792 break;
11795 else
11796 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11798 break;
11800 case INSN:
11801 /* Instructions using or affecting the condition codes make it
11802 fail. */
11803 scanbody = PATTERN (this_insn);
11804 if (!(GET_CODE (scanbody) == SET
11805 || GET_CODE (scanbody) == PARALLEL)
11806 || get_attr_conds (this_insn) != CONDS_NOCOND)
11807 fail = TRUE;
11809 /* A conditional Cirrus instruction must be followed by
11810 a non-Cirrus instruction. However, since this function
11811 conditionalizes instructions, and since by the time we
11812 get here we can no longer insert instructions (nops,
11813 because shorten_branches() has already been called), we
11814 simply refuse to conditionalize Cirrus instructions, to
11815 be safe. */
11816 if (GET_CODE (scanbody) != USE
11817 && GET_CODE (scanbody) != CLOBBER
11818 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11819 fail = TRUE;
11820 break;
11822 default:
11823 break;
11826 if (succeed)
11828 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11829 arm_target_label = CODE_LABEL_NUMBER (label);
11830 else
11832 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11834 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11836 this_insn = next_nonnote_insn (this_insn);
11837 gcc_assert (!this_insn
11838 || (GET_CODE (this_insn) != BARRIER
11839 && GET_CODE (this_insn) != CODE_LABEL));
11841 if (!this_insn)
11843 /* Oh, dear! We ran off the end... give up. */
11844 recog (PATTERN (insn), insn, NULL);
11845 arm_ccfsm_state = 0;
11846 arm_target_insn = NULL;
11847 return;
11849 arm_target_insn = this_insn;
11851 if (jump_clobbers)
11853 gcc_assert (!reverse);
11854 arm_current_cc =
11855 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11856 0), 0), 1));
11857 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11858 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11859 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11860 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11862 else
11864 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11865 what it was. */
11866 if (!reverse)
11867 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11868 0));
11871 if (reverse || then_not_else)
11872 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11875 /* Restore recog_data (getting the attributes of other insns can
11876 destroy this array, but final.c assumes that it remains intact
11877 across this call; since the insn has been recognized already we
11878 call recog directly). */
11879 recog (PATTERN (insn), insn, NULL);
11883 /* Returns true if REGNO is a valid register
11884 for holding a quantity of type MODE. */
11885 int
11886 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11888 if (GET_MODE_CLASS (mode) == MODE_CC)
11889 return (regno == CC_REGNUM
11890 || (TARGET_HARD_FLOAT && TARGET_VFP
11891 && regno == VFPCC_REGNUM));
11893 if (TARGET_THUMB)
11894 /* For the Thumb we only allow values bigger than SImode in
11895 registers 0 - 6, so that there is always a second low
11896 register available to hold the upper part of the value.
11897 We probably ought to ensure that the register is the
11898 start of an even numbered register pair. */
11899 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11901 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
11902 && IS_CIRRUS_REGNUM (regno))
11903 /* We have outlawed SI values in Cirrus registers because they
11904 reside in the lower 32 bits, but SF values reside in the
11905 upper 32 bits. This causes gcc all sorts of grief. We can't
11906 even split the registers into pairs because Cirrus SI values
11907 get sign extended to 64 bits -- aldyh. */
11908 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11910 if (TARGET_HARD_FLOAT && TARGET_VFP
11911 && IS_VFP_REGNUM (regno))
11913 if (mode == SFmode || mode == SImode)
11914 return TRUE;
11916 /* DFmode values are only valid in even register pairs. */
11917 if (mode == DFmode)
11918 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11919 return FALSE;
11922 if (TARGET_REALLY_IWMMXT)
11924 if (IS_IWMMXT_GR_REGNUM (regno))
11925 return mode == SImode;
11927 if (IS_IWMMXT_REGNUM (regno))
11928 return VALID_IWMMXT_REG_MODE (mode);
11931 /* We allow any value to be stored in the general registers.
11932 Restrict doubleword quantities to even register pairs so that we can
11933 use ldrd. */
11934 if (regno <= LAST_ARM_REGNUM)
11935 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11937 if (regno == FRAME_POINTER_REGNUM
11938 || regno == ARG_POINTER_REGNUM)
11939 /* We only allow integers in the fake hard registers. */
11940 return GET_MODE_CLASS (mode) == MODE_INT;
11942 /* The only registers left are the FPA registers
11943 which we only allow to hold FP values. */
11944 return (TARGET_HARD_FLOAT && TARGET_FPA
11945 && GET_MODE_CLASS (mode) == MODE_FLOAT
11946 && regno >= FIRST_FPA_REGNUM
11947 && regno <= LAST_FPA_REGNUM);
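/* Illustrative consequences of the rules above (hypothetical
   queries): SFmode and SImode are valid in any VFP register; DFmode
   is valid in the first register of a pair (e.g. FIRST_VFP_REGNUM,
   forming d0) but not in the second; and with TARGET_LDRD, DImode is
   allowed in r0 but rejected in r1.  */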
11950 int
11951 arm_regno_class (int regno)
11953 if (TARGET_THUMB)
11955 if (regno == STACK_POINTER_REGNUM)
11956 return STACK_REG;
11957 if (regno == CC_REGNUM)
11958 return CC_REG;
11959 if (regno < 8)
11960 return LO_REGS;
11961 return HI_REGS;
11964 if ( regno <= LAST_ARM_REGNUM
11965 || regno == FRAME_POINTER_REGNUM
11966 || regno == ARG_POINTER_REGNUM)
11967 return GENERAL_REGS;
11969 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11970 return NO_REGS;
11972 if (IS_CIRRUS_REGNUM (regno))
11973 return CIRRUS_REGS;
11975 if (IS_VFP_REGNUM (regno))
11976 return VFP_REGS;
11978 if (IS_IWMMXT_REGNUM (regno))
11979 return IWMMXT_REGS;
11981 if (IS_IWMMXT_GR_REGNUM (regno))
11982 return IWMMXT_GR_REGS;
11984 return FPA_REGS;
11987 /* Handle a special case when computing the offset
11988 of an argument from the frame pointer. */
11989 int
11990 arm_debugger_arg_offset (int value, rtx addr)
11992 rtx insn;
11994 /* We are only interested in the case where dbxout_parms() failed to compute the offset. */
11995 if (value != 0)
11996 return 0;
11998 /* We can only cope with the case where the address is held in a register. */
11999 if (GET_CODE (addr) != REG)
12000 return 0;
12002 /* If we are using the frame pointer to point at the argument, then
12003 an offset of 0 is correct. */
12004 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
12005 return 0;
12007 /* If we are using the stack pointer to point at the
12008 argument, then an offset of 0 is correct. */
12009 if ((TARGET_THUMB || !frame_pointer_needed)
12010 && REGNO (addr) == SP_REGNUM)
12011 return 0;
12013 /* Oh dear. The argument is pointed to by a register rather
12014 than being held in a register, or being stored at a known
12015 offset from the frame pointer. Since GDB only understands
12016 those two kinds of argument we must translate the address
12017 held in the register into an offset from the frame pointer.
12018 We do this by searching through the insns for the function
12019 looking to see where this register gets its value. If the
12020 register is initialized from the frame pointer plus an offset
12021 then we are in luck and we can continue, otherwise we give up.
12023 This code is exercised by producing debugging information
12024 for a function with arguments like this:
12026 double func (double a, double b, int c, double d) {return d;}
12028 Without this code the stab for parameter 'd' will be set to
12029 an offset of 0 from the frame pointer, rather than 8. */
12031 /* The if() statement says:
12033 If the insn is a normal instruction
12034 and if the insn is setting the value in a register
12035 and if the register being set is the register holding the address of the argument
12036 and if the address is computed by an addition
12037 that involves adding to a register
12038 which is the frame pointer
12039 a constant integer
12041 then... */
12043 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12045 if ( GET_CODE (insn) == INSN
12046 && GET_CODE (PATTERN (insn)) == SET
12047 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
12048 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
12049 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
12050 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
12051 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
12054 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
12056 break;
12060 if (value == 0)
12062 debug_rtx (addr);
12063 warning (0, "unable to compute real location of stacked parameter");
12064 value = 8; /* XXX magic hack */
12067 return value;
12070 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
12071 do \
12073 if ((MASK) & insn_flags) \
12074 add_builtin_function ((NAME), (TYPE), (CODE), \
12075 BUILT_IN_MD, NULL, NULL_TREE); \
12077 while (0)
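/* A sketch of how the macro above is used (this exact invocation appears
   later, in arm_init_iwmmxt_builtins):

     def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero",
                   di_ftype_void, ARM_BUILTIN_WZERO);

   The registration is skipped entirely unless the FL_IWMMXT bit is set
   in insn_flags for the selected CPU.  */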
12079 struct builtin_description
12081 const unsigned int mask;
12082 const enum insn_code icode;
12083 const char * const name;
12084 const enum arm_builtins code;
12085 const enum rtx_code comparison;
12086 const unsigned int flag;
12089 static const struct builtin_description bdesc_2arg[] =
12091 #define IWMMXT_BUILTIN(code, string, builtin) \
12092 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
12093 ARM_BUILTIN_##builtin, 0, 0 },
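/* For instance, the first entry below,

     IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)

   expands to

     { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
       ARM_BUILTIN_WADDB, 0, 0 },

   pairing the named insn pattern with its user-visible builtin.  */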
12095 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
12096 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
12097 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
12098 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
12099 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
12100 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
12101 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
12102 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
12103 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
12104 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
12105 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
12106 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
12107 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
12108 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
12109 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
12110 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
12111 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
12112 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
12113 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
12114 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
12115 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
12116 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
12117 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
12118 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
12119 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
12120 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
12121 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
12122 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
12123 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
12124 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
12125 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
12126 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
12127 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
12128 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
12129 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
12130 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
12131 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
12132 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
12133 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
12134 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
12135 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
12136 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
12137 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
12138 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
12139 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
12140 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
12141 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
12142 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
12143 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
12144 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
12145 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
12146 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
12147 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
12148 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
12149 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
12150 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
12151 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
12152 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
12154 #define IWMMXT_BUILTIN2(code, builtin) \
12155 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
12157 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
12158 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
12159 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
12160 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
12161 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
12162 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
12163 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
12164 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
12165 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
12166 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
12167 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
12168 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
12169 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
12170 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
12171 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
12172 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
12173 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
12174 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
12175 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
12176 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
12177 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
12178 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
12179 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
12180 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
12181 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
12182 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
12183 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
12184 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
12185 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
12186 IWMMXT_BUILTIN2 (rordi3, WRORDI)
12187 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
12188 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
12191 static const struct builtin_description bdesc_1arg[] =
12193 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
12194 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
12195 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
12196 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
12197 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
12198 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
12199 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
12200 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
12201 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
12202 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
12203 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
12204 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
12205 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
12206 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
12207 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
12208 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
12209 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
12210 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
12213 /* Set up all the iWMMXt builtins. This is
12214 not called if TARGET_IWMMXT is zero. */
12216 static void
12217 arm_init_iwmmxt_builtins (void)
12219 const struct builtin_description * d;
12220 size_t i;
12221 tree endlink = void_list_node;
12223 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
12224 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
12225 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
12227 tree int_ftype_int
12228 = build_function_type (integer_type_node,
12229 tree_cons (NULL_TREE, integer_type_node, endlink));
12230 tree v8qi_ftype_v8qi_v8qi_int
12231 = build_function_type (V8QI_type_node,
12232 tree_cons (NULL_TREE, V8QI_type_node,
12233 tree_cons (NULL_TREE, V8QI_type_node,
12234 tree_cons (NULL_TREE,
12235 integer_type_node,
12236 endlink))));
12237 tree v4hi_ftype_v4hi_int
12238 = build_function_type (V4HI_type_node,
12239 tree_cons (NULL_TREE, V4HI_type_node,
12240 tree_cons (NULL_TREE, integer_type_node,
12241 endlink)));
12242 tree v2si_ftype_v2si_int
12243 = build_function_type (V2SI_type_node,
12244 tree_cons (NULL_TREE, V2SI_type_node,
12245 tree_cons (NULL_TREE, integer_type_node,
12246 endlink)));
12247 tree v2si_ftype_di_di
12248 = build_function_type (V2SI_type_node,
12249 tree_cons (NULL_TREE, long_long_integer_type_node,
12250 tree_cons (NULL_TREE, long_long_integer_type_node,
12251 endlink)));
12252 tree di_ftype_di_int
12253 = build_function_type (long_long_integer_type_node,
12254 tree_cons (NULL_TREE, long_long_integer_type_node,
12255 tree_cons (NULL_TREE, integer_type_node,
12256 endlink)));
12257 tree di_ftype_di_int_int
12258 = build_function_type (long_long_integer_type_node,
12259 tree_cons (NULL_TREE, long_long_integer_type_node,
12260 tree_cons (NULL_TREE, integer_type_node,
12261 tree_cons (NULL_TREE,
12262 integer_type_node,
12263 endlink))));
12264 tree int_ftype_v8qi
12265 = build_function_type (integer_type_node,
12266 tree_cons (NULL_TREE, V8QI_type_node,
12267 endlink));
12268 tree int_ftype_v4hi
12269 = build_function_type (integer_type_node,
12270 tree_cons (NULL_TREE, V4HI_type_node,
12271 endlink));
12272 tree int_ftype_v2si
12273 = build_function_type (integer_type_node,
12274 tree_cons (NULL_TREE, V2SI_type_node,
12275 endlink));
12276 tree int_ftype_v8qi_int
12277 = build_function_type (integer_type_node,
12278 tree_cons (NULL_TREE, V8QI_type_node,
12279 tree_cons (NULL_TREE, integer_type_node,
12280 endlink)));
12281 tree int_ftype_v4hi_int
12282 = build_function_type (integer_type_node,
12283 tree_cons (NULL_TREE, V4HI_type_node,
12284 tree_cons (NULL_TREE, integer_type_node,
12285 endlink)));
12286 tree int_ftype_v2si_int
12287 = build_function_type (integer_type_node,
12288 tree_cons (NULL_TREE, V2SI_type_node,
12289 tree_cons (NULL_TREE, integer_type_node,
12290 endlink)));
12291 tree v8qi_ftype_v8qi_int_int
12292 = build_function_type (V8QI_type_node,
12293 tree_cons (NULL_TREE, V8QI_type_node,
12294 tree_cons (NULL_TREE, integer_type_node,
12295 tree_cons (NULL_TREE,
12296 integer_type_node,
12297 endlink))));
12298 tree v4hi_ftype_v4hi_int_int
12299 = build_function_type (V4HI_type_node,
12300 tree_cons (NULL_TREE, V4HI_type_node,
12301 tree_cons (NULL_TREE, integer_type_node,
12302 tree_cons (NULL_TREE,
12303 integer_type_node,
12304 endlink))));
12305 tree v2si_ftype_v2si_int_int
12306 = build_function_type (V2SI_type_node,
12307 tree_cons (NULL_TREE, V2SI_type_node,
12308 tree_cons (NULL_TREE, integer_type_node,
12309 tree_cons (NULL_TREE,
12310 integer_type_node,
12311 endlink))));
12312 /* Miscellaneous. */
12313 tree v8qi_ftype_v4hi_v4hi
12314 = build_function_type (V8QI_type_node,
12315 tree_cons (NULL_TREE, V4HI_type_node,
12316 tree_cons (NULL_TREE, V4HI_type_node,
12317 endlink)));
12318 tree v4hi_ftype_v2si_v2si
12319 = build_function_type (V4HI_type_node,
12320 tree_cons (NULL_TREE, V2SI_type_node,
12321 tree_cons (NULL_TREE, V2SI_type_node,
12322 endlink)));
12323 tree v2si_ftype_v4hi_v4hi
12324 = build_function_type (V2SI_type_node,
12325 tree_cons (NULL_TREE, V4HI_type_node,
12326 tree_cons (NULL_TREE, V4HI_type_node,
12327 endlink)));
12328 tree v2si_ftype_v8qi_v8qi
12329 = build_function_type (V2SI_type_node,
12330 tree_cons (NULL_TREE, V8QI_type_node,
12331 tree_cons (NULL_TREE, V8QI_type_node,
12332 endlink)));
12333 tree v4hi_ftype_v4hi_di
12334 = build_function_type (V4HI_type_node,
12335 tree_cons (NULL_TREE, V4HI_type_node,
12336 tree_cons (NULL_TREE,
12337 long_long_integer_type_node,
12338 endlink)));
12339 tree v2si_ftype_v2si_di
12340 = build_function_type (V2SI_type_node,
12341 tree_cons (NULL_TREE, V2SI_type_node,
12342 tree_cons (NULL_TREE,
12343 long_long_integer_type_node,
12344 endlink)));
12345 tree void_ftype_int_int
12346 = build_function_type (void_type_node,
12347 tree_cons (NULL_TREE, integer_type_node,
12348 tree_cons (NULL_TREE, integer_type_node,
12349 endlink)));
12350 tree di_ftype_void
12351 = build_function_type (long_long_unsigned_type_node, endlink);
12352 tree di_ftype_v8qi
12353 = build_function_type (long_long_integer_type_node,
12354 tree_cons (NULL_TREE, V8QI_type_node,
12355 endlink));
12356 tree di_ftype_v4hi
12357 = build_function_type (long_long_integer_type_node,
12358 tree_cons (NULL_TREE, V4HI_type_node,
12359 endlink));
12360 tree di_ftype_v2si
12361 = build_function_type (long_long_integer_type_node,
12362 tree_cons (NULL_TREE, V2SI_type_node,
12363 endlink));
12364 tree v2si_ftype_v4hi
12365 = build_function_type (V2SI_type_node,
12366 tree_cons (NULL_TREE, V4HI_type_node,
12367 endlink));
12368 tree v4hi_ftype_v8qi
12369 = build_function_type (V4HI_type_node,
12370 tree_cons (NULL_TREE, V8QI_type_node,
12371 endlink));
12373 tree di_ftype_di_v4hi_v4hi
12374 = build_function_type (long_long_unsigned_type_node,
12375 tree_cons (NULL_TREE,
12376 long_long_unsigned_type_node,
12377 tree_cons (NULL_TREE, V4HI_type_node,
12378 tree_cons (NULL_TREE,
12379 V4HI_type_node,
12380 endlink))));
12382 tree di_ftype_v4hi_v4hi
12383 = build_function_type (long_long_unsigned_type_node,
12384 tree_cons (NULL_TREE, V4HI_type_node,
12385 tree_cons (NULL_TREE, V4HI_type_node,
12386 endlink)));
12388 /* Normal vector binops. */
12389 tree v8qi_ftype_v8qi_v8qi
12390 = build_function_type (V8QI_type_node,
12391 tree_cons (NULL_TREE, V8QI_type_node,
12392 tree_cons (NULL_TREE, V8QI_type_node,
12393 endlink)));
12394 tree v4hi_ftype_v4hi_v4hi
12395 = build_function_type (V4HI_type_node,
12396 tree_cons (NULL_TREE, V4HI_type_node,
12397 tree_cons (NULL_TREE, V4HI_type_node,
12398 endlink)));
12399 tree v2si_ftype_v2si_v2si
12400 = build_function_type (V2SI_type_node,
12401 tree_cons (NULL_TREE, V2SI_type_node,
12402 tree_cons (NULL_TREE, V2SI_type_node,
12403 endlink)));
12404 tree di_ftype_di_di
12405 = build_function_type (long_long_unsigned_type_node,
12406 tree_cons (NULL_TREE, long_long_unsigned_type_node,
12407 tree_cons (NULL_TREE,
12408 long_long_unsigned_type_node,
12409 endlink)));
12411 /* Add all builtins that are more or less simple operations on two
12412 operands. */
12413 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12415 /* Use one of the operands; the target can have a different mode for
12416 mask-generating compares. */
12417 enum machine_mode mode;
12418 tree type;
12420 if (d->name == 0)
12421 continue;
12423 mode = insn_data[d->icode].operand[1].mode;
12425 switch (mode)
12427 case V8QImode:
12428 type = v8qi_ftype_v8qi_v8qi;
12429 break;
12430 case V4HImode:
12431 type = v4hi_ftype_v4hi_v4hi;
12432 break;
12433 case V2SImode:
12434 type = v2si_ftype_v2si_v2si;
12435 break;
12436 case DImode:
12437 type = di_ftype_di_di;
12438 break;
12440 default:
12441 gcc_unreachable ();
12444 def_mbuiltin (d->mask, d->name, type, d->code);
12447 /* Add the remaining MMX insns with somewhat more complicated types. */
12448 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12449 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12450 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12452 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12453 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12454 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12455 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12456 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12457 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12459 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12460 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12461 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12462 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12463 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12464 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12466 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12467 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12468 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12469 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12470 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12471 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12473 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12474 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12475 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12476 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12477 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12478 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12480 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12482 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12483 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12484 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12485 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12487 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12488 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12489 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12490 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12491 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12492 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12493 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12494 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12495 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12497 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12498 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12499 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12501 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12502 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12503 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12505 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12506 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12507 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12508 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12509 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12510 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12512 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12513 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12514 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12515 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12516 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12517 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12518 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12519 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12520 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12521 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12522 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12523 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12525 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12526 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12527 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12528 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12530 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12531 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12532 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12533 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12534 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12535 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12536 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
12539 static void
12540 arm_init_tls_builtins (void)
12542 tree ftype;
12543 tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
12544 tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
12546 ftype = build_function_type (ptr_type_node, void_list_node);
12547 add_builtin_function ("__builtin_thread_pointer", ftype,
12548 ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
12549 NULL, const_nothrow);
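/* Illustrative use of the builtin registered above (not part of the
   original source): it takes no arguments and returns the TLS thread
   pointer as a void *:

     void *tp = __builtin_thread_pointer ();
*/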
12552 static void
12553 arm_init_builtins (void)
12555 arm_init_tls_builtins ();
12557 if (TARGET_REALLY_IWMMXT)
12558 arm_init_iwmmxt_builtins ();
12561 /* Errors in the source file can cause expand_expr to return const0_rtx
12562 where we expect a vector. To avoid crashing, use one of the vector
12563 clear instructions. */
12565 static rtx
12566 safe_vector_operand (rtx x, enum machine_mode mode)
12568 if (x != const0_rtx)
12569 return x;
12570 x = gen_reg_rtx (mode);
12572 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12573 : gen_rtx_SUBREG (DImode, x, 0)));
12574 return x;
12577 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12579 static rtx
12580 arm_expand_binop_builtin (enum insn_code icode,
12581 tree arglist, rtx target)
12583 rtx pat;
12584 tree arg0 = TREE_VALUE (arglist);
12585 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12586 rtx op0 = expand_normal (arg0);
12587 rtx op1 = expand_normal (arg1);
12588 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12589 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12590 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12592 if (VECTOR_MODE_P (mode0))
12593 op0 = safe_vector_operand (op0, mode0);
12594 if (VECTOR_MODE_P (mode1))
12595 op1 = safe_vector_operand (op1, mode1);
12597 if (! target
12598 || GET_MODE (target) != tmode
12599 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12600 target = gen_reg_rtx (tmode);
12602 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12604 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12605 op0 = copy_to_mode_reg (mode0, op0);
12606 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12607 op1 = copy_to_mode_reg (mode1, op1);
12609 pat = GEN_FCN (icode) (target, op0, op1);
12610 if (! pat)
12611 return 0;
12612 emit_insn (pat);
12613 return target;
12616 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12618 static rtx
12619 arm_expand_unop_builtin (enum insn_code icode,
12620 tree arglist, rtx target, int do_load)
12622 rtx pat;
12623 tree arg0 = TREE_VALUE (arglist);
12624 rtx op0 = expand_normal (arg0);
12625 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12626 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12628 if (! target
12629 || GET_MODE (target) != tmode
12630 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12631 target = gen_reg_rtx (tmode);
12632 if (do_load)
12633 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12634 else
12636 if (VECTOR_MODE_P (mode0))
12637 op0 = safe_vector_operand (op0, mode0);
12639 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12640 op0 = copy_to_mode_reg (mode0, op0);
12643 pat = GEN_FCN (icode) (target, op0);
12644 if (! pat)
12645 return 0;
12646 emit_insn (pat);
12647 return target;
12650 /* Expand an expression EXP that calls a built-in function,
12651 with result going to TARGET if that's convenient
12652 (and in mode MODE if that's convenient).
12653 SUBTARGET may be used as the target for computing one of EXP's operands.
12654 IGNORE is nonzero if the value is to be ignored. */
12656 static rtx
12657 arm_expand_builtin (tree exp,
12658 rtx target,
12659 rtx subtarget ATTRIBUTE_UNUSED,
12660 enum machine_mode mode ATTRIBUTE_UNUSED,
12661 int ignore ATTRIBUTE_UNUSED)
12663 const struct builtin_description * d;
12664 enum insn_code icode;
12665 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12666 tree arglist = TREE_OPERAND (exp, 1);
12667 tree arg0;
12668 tree arg1;
12669 tree arg2;
12670 rtx op0;
12671 rtx op1;
12672 rtx op2;
12673 rtx pat;
12674 int fcode = DECL_FUNCTION_CODE (fndecl);
12675 size_t i;
12676 enum machine_mode tmode;
12677 enum machine_mode mode0;
12678 enum machine_mode mode1;
12679 enum machine_mode mode2;
12681 switch (fcode)
12683 case ARM_BUILTIN_TEXTRMSB:
12684 case ARM_BUILTIN_TEXTRMUB:
12685 case ARM_BUILTIN_TEXTRMSH:
12686 case ARM_BUILTIN_TEXTRMUH:
12687 case ARM_BUILTIN_TEXTRMSW:
12688 case ARM_BUILTIN_TEXTRMUW:
12689 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12690 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12691 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12692 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12693 : CODE_FOR_iwmmxt_textrmw);
12695 arg0 = TREE_VALUE (arglist);
12696 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12697 op0 = expand_normal (arg0);
12698 op1 = expand_normal (arg1);
12699 tmode = insn_data[icode].operand[0].mode;
12700 mode0 = insn_data[icode].operand[1].mode;
12701 mode1 = insn_data[icode].operand[2].mode;
12703 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12704 op0 = copy_to_mode_reg (mode0, op0);
12705 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12707 /* @@@ better error message */
12708 error ("selector must be an immediate");
12709 return gen_reg_rtx (tmode);
12711 if (target == 0
12712 || GET_MODE (target) != tmode
12713 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12714 target = gen_reg_rtx (tmode);
12715 pat = GEN_FCN (icode) (target, op0, op1);
12716 if (! pat)
12717 return 0;
12718 emit_insn (pat);
12719 return target;
12721 case ARM_BUILTIN_TINSRB:
12722 case ARM_BUILTIN_TINSRH:
12723 case ARM_BUILTIN_TINSRW:
12724 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12725 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12726 : CODE_FOR_iwmmxt_tinsrw);
12727 arg0 = TREE_VALUE (arglist);
12728 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12729 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12730 op0 = expand_normal (arg0);
12731 op1 = expand_normal (arg1);
12732 op2 = expand_normal (arg2);
12733 tmode = insn_data[icode].operand[0].mode;
12734 mode0 = insn_data[icode].operand[1].mode;
12735 mode1 = insn_data[icode].operand[2].mode;
12736 mode2 = insn_data[icode].operand[3].mode;
12738 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12739 op0 = copy_to_mode_reg (mode0, op0);
12740 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12741 op1 = copy_to_mode_reg (mode1, op1);
12742 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12744 /* @@@ better error message */
12745 error ("selector must be an immediate");
12746 return const0_rtx;
12748 if (target == 0
12749 || GET_MODE (target) != tmode
12750 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12751 target = gen_reg_rtx (tmode);
12752 pat = GEN_FCN (icode) (target, op0, op1, op2);
12753 if (! pat)
12754 return 0;
12755 emit_insn (pat);
12756 return target;
12758 case ARM_BUILTIN_SETWCX:
12759 arg0 = TREE_VALUE (arglist);
12760 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12761 op0 = force_reg (SImode, expand_normal (arg0));
12762 op1 = expand_normal (arg1);
12763 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12764 return 0;
12766 case ARM_BUILTIN_GETWCX:
12767 arg0 = TREE_VALUE (arglist);
12768 op0 = expand_normal (arg0);
12769 target = gen_reg_rtx (SImode);
12770 emit_insn (gen_iwmmxt_tmrc (target, op0));
12771 return target;
12773 case ARM_BUILTIN_WSHUFH:
12774 icode = CODE_FOR_iwmmxt_wshufh;
12775 arg0 = TREE_VALUE (arglist);
12776 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12777 op0 = expand_normal (arg0);
12778 op1 = expand_normal (arg1);
12779 tmode = insn_data[icode].operand[0].mode;
12780 mode1 = insn_data[icode].operand[1].mode;
12781 mode2 = insn_data[icode].operand[2].mode;
12783 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12784 op0 = copy_to_mode_reg (mode1, op0);
12785 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12787 /* @@@ better error message */
12788 error ("mask must be an immediate");
12789 return const0_rtx;
12791 if (target == 0
12792 || GET_MODE (target) != tmode
12793 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12794 target = gen_reg_rtx (tmode);
12795 pat = GEN_FCN (icode) (target, op0, op1);
12796 if (! pat)
12797 return 0;
12798 emit_insn (pat);
12799 return target;
12801 case ARM_BUILTIN_WSADB:
12802 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12803 case ARM_BUILTIN_WSADH:
12804 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12805 case ARM_BUILTIN_WSADBZ:
12806 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12807 case ARM_BUILTIN_WSADHZ:
12808 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12810 /* Several three-argument builtins. */
12811 case ARM_BUILTIN_WMACS:
12812 case ARM_BUILTIN_WMACU:
12813 case ARM_BUILTIN_WALIGN:
12814 case ARM_BUILTIN_TMIA:
12815 case ARM_BUILTIN_TMIAPH:
12816 case ARM_BUILTIN_TMIATT:
12817 case ARM_BUILTIN_TMIATB:
12818 case ARM_BUILTIN_TMIABT:
12819 case ARM_BUILTIN_TMIABB:
12820 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12821 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12822 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12823 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12824 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12825 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12826 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12827 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12828 : CODE_FOR_iwmmxt_walign);
12829 arg0 = TREE_VALUE (arglist);
12830 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12831 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12832 op0 = expand_normal (arg0);
12833 op1 = expand_normal (arg1);
12834 op2 = expand_normal (arg2);
12835 tmode = insn_data[icode].operand[0].mode;
12836 mode0 = insn_data[icode].operand[1].mode;
12837 mode1 = insn_data[icode].operand[2].mode;
12838 mode2 = insn_data[icode].operand[3].mode;
12840 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12841 op0 = copy_to_mode_reg (mode0, op0);
12842 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12843 op1 = copy_to_mode_reg (mode1, op1);
12844 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12845 op2 = copy_to_mode_reg (mode2, op2);
12846 if (target == 0
12847 || GET_MODE (target) != tmode
12848 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12849 target = gen_reg_rtx (tmode);
12850 pat = GEN_FCN (icode) (target, op0, op1, op2);
12851 if (! pat)
12852 return 0;
12853 emit_insn (pat);
12854 return target;
12856 case ARM_BUILTIN_WZERO:
12857 target = gen_reg_rtx (DImode);
12858 emit_insn (gen_iwmmxt_clrdi (target));
12859 return target;
12861 case ARM_BUILTIN_THREAD_POINTER:
12862 return arm_load_tp (target);
12864 default:
12865 break;
12868 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12869 if (d->code == (const enum arm_builtins) fcode)
12870 return arm_expand_binop_builtin (d->icode, arglist, target);
12872 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12873 if (d->code == (const enum arm_builtins) fcode)
12874 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12876 /* @@@ Should really do something sensible here. */
12877 return NULL_RTX;
12880 /* Return the number (counting from 0) of
12881 the least significant set bit in MASK. */
12883 inline static int
12884 number_of_first_bit_set (unsigned mask)
12886 int bit;
12888 for (bit = 0;
12889 (mask & (1 << bit)) == 0;
12890 ++bit)
12891 continue;
12893 return bit;
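/* For example, number_of_first_bit_set (0x28) is 3, since 0x28 is
   binary 101000 and bit 3 is the lowest bit set.  The loop would not
   terminate for a zero MASK; callers guarantee MASK is nonzero.  */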
12896 /* Emit code to push or pop registers to or from the stack. F is the
12897 assembly file. MASK is the registers to push or pop. PUSH is
12898 nonzero if we should push, and zero if we should pop. For debugging
12899 output, if pushing, adjust CFA_OFFSET by the amount of space added
12900 to the stack. REAL_REGS should have the same number of bits set as
12901 MASK, and will be used instead (in the same order) to describe which
12902 registers were saved - this is used to mark the save slots when we
12903 push high registers after moving them to low registers. */
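/* A hedged illustration (not output copied from a real build): pushing
   r4-r6 and lr in a prologue emits roughly

	.save	{r4, r5, r6, lr}
	push	{r4, r5, r6, lr}

   with the .save directive present only when ARM_EABI_UNWIND_TABLES.  */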
12904 static void
12905 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12906 unsigned long real_regs)
12908 int regno;
12909 int lo_mask = mask & 0xFF;
12910 int pushed_words = 0;
12912 gcc_assert (mask);
12914 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12916 /* Special case. Do not generate a POP PC statement here, do it in
12917 thumb_exit ().  */
12918 thumb_exit (f, -1);
12919 return;
12922 if (ARM_EABI_UNWIND_TABLES && push)
12924 fprintf (f, "\t.save\t{");
12925 for (regno = 0; regno < 15; regno++)
12927 if (real_regs & (1 << regno))
12929 if (real_regs & ((1 << regno) -1))
12930 fprintf (f, ", ");
12931 asm_fprintf (f, "%r", regno);
12934 fprintf (f, "}\n");
12937 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12939 /* Look at the low registers first. */
12940 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12942 if (lo_mask & 1)
12944 asm_fprintf (f, "%r", regno);
12946 if ((lo_mask & ~1) != 0)
12947 fprintf (f, ", ");
12949 pushed_words++;
12953 if (push && (mask & (1 << LR_REGNUM)))
12955 /* Catch pushing the LR. */
12956 if (mask & 0xFF)
12957 fprintf (f, ", ");
12959 asm_fprintf (f, "%r", LR_REGNUM);
12961 pushed_words++;
12963 else if (!push && (mask & (1 << PC_REGNUM)))
12965 /* Catch popping the PC. */
12966 if (TARGET_INTERWORK || TARGET_BACKTRACE
12967 || current_function_calls_eh_return)
12969 /* The PC is never popped directly; instead
12970 it is popped into r3 and then BX is used. */
12971 fprintf (f, "}\n");
12973 thumb_exit (f, -1);
12975 return;
12977 else
12979 if (mask & 0xFF)
12980 fprintf (f, ", ");
12982 asm_fprintf (f, "%r", PC_REGNUM);
12986 fprintf (f, "}\n");
12988 if (push && pushed_words && dwarf2out_do_frame ())
12990 char *l = dwarf2out_cfi_label ();
12991 int pushed_mask = real_regs;
12993 *cfa_offset += pushed_words * 4;
12994 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12996 pushed_words = 0;
12997 pushed_mask = real_regs;
12998 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
13000 if (pushed_mask & 1)
13001 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
13006 /* Generate code to return from a thumb function.
13007 If 'reg_containing_return_addr' is -1, then the return address is
13008 actually on the stack, at the stack pointer. */
13009 static void
13010 thumb_exit (FILE *f, int reg_containing_return_addr)
13012 unsigned regs_available_for_popping;
13013 unsigned regs_to_pop;
13014 int pops_needed;
13015 unsigned available;
13016 unsigned required;
13017 int mode;
13018 int size;
13019 int restore_a4 = FALSE;
13021 /* Compute the registers we need to pop. */
13022 regs_to_pop = 0;
13023 pops_needed = 0;
13025 if (reg_containing_return_addr == -1)
13027 regs_to_pop |= 1 << LR_REGNUM;
13028 ++pops_needed;
13031 if (TARGET_BACKTRACE)
13033 /* Restore the (ARM) frame pointer and stack pointer. */
13034 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
13035 pops_needed += 2;
13038 /* If there is nothing to pop then just emit the BX instruction and
13039 return. */
13040 if (pops_needed == 0)
13042 if (current_function_calls_eh_return)
13043 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13045 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13046 return;
13048 /* Otherwise if we are not supporting interworking and we have not created
13049 a backtrace structure and the function was not entered in ARM mode then
13050 just pop the return address straight into the PC. */
13051 else if (!TARGET_INTERWORK
13052 && !TARGET_BACKTRACE
13053 && !is_called_in_ARM_mode (current_function_decl)
13054 && !current_function_calls_eh_return)
13056 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
13057 return;
13060 /* Find out how many of the (return) argument registers we can corrupt. */
13061 regs_available_for_popping = 0;
13063 /* If returning via __builtin_eh_return, the bottom three registers
13064 all contain information needed for the return. */
13065 if (current_function_calls_eh_return)
13066 size = 12;
13067 else
13069 /* We can deduce the registers used from the function's
13070 return value.  This is more reliable than examining
13071 regs_ever_live[] because that will be set if the register is
13072 ever used in the function, not just if the register is used
13073 to hold a return value. */
13075 if (current_function_return_rtx != 0)
13076 mode = GET_MODE (current_function_return_rtx);
13077 else
13078 mode = DECL_MODE (DECL_RESULT (current_function_decl));
13080 size = GET_MODE_SIZE (mode);
13082 if (size == 0)
13084 /* In a void function we can use any argument register.
13085 In a function that returns a structure on the stack
13086 we can use the second and third argument registers. */
13087 if (mode == VOIDmode)
13088 regs_available_for_popping =
13089 (1 << ARG_REGISTER (1))
13090 | (1 << ARG_REGISTER (2))
13091 | (1 << ARG_REGISTER (3));
13092 else
13093 regs_available_for_popping =
13094 (1 << ARG_REGISTER (2))
13095 | (1 << ARG_REGISTER (3));
13097 else if (size <= 4)
13098 regs_available_for_popping =
13099 (1 << ARG_REGISTER (2))
13100 | (1 << ARG_REGISTER (3));
13101 else if (size <= 8)
13102 regs_available_for_popping =
13103 (1 << ARG_REGISTER (3));
13106 /* Match registers to be popped with registers into which we pop them. */
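/* The expression (x & - x) isolates the lowest set bit of x, so each
   iteration of the loop below pairs off one register to pop with one
   destination register and retires both from their masks.  */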
13107 for (available = regs_available_for_popping,
13108 required = regs_to_pop;
13109 required != 0 && available != 0;
13110 available &= ~(available & - available),
13111 required &= ~(required & - required))
13112 -- pops_needed;
13114 /* If we have any popping registers left over, remove them. */
13115 if (available > 0)
13116 regs_available_for_popping &= ~available;
13118 /* Otherwise if we need another popping register we can use
13119 the fourth argument register. */
13120 else if (pops_needed)
13122 /* If we have not found any free argument registers and
13123 reg a4 contains the return address, we must move it. */
13124 if (regs_available_for_popping == 0
13125 && reg_containing_return_addr == LAST_ARG_REGNUM)
13127 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13128 reg_containing_return_addr = LR_REGNUM;
13130 else if (size > 12)
13132 /* Register a4 is being used to hold part of the return value,
13133 but we have dire need of a free, low register. */
13134 restore_a4 = TRUE;
13136 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
13139 if (reg_containing_return_addr != LAST_ARG_REGNUM)
13141 /* The fourth argument register is available. */
13142 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
13144 --pops_needed;
13148 /* Pop as many registers as we can. */
13149 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13150 regs_available_for_popping);
13152 /* Process the registers we popped. */
13153 if (reg_containing_return_addr == -1)
13155 /* The return address was popped into the lowest numbered register. */
13156 regs_to_pop &= ~(1 << LR_REGNUM);
13158 reg_containing_return_addr =
13159 number_of_first_bit_set (regs_available_for_popping);
13161 /* Remove this register from the mask of available registers, so that
13162 the return address will not be corrupted by further pops. */
13163 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
13166 /* If we popped other registers then handle them here. */
13167 if (regs_available_for_popping)
13169 int frame_pointer;
13171 /* Work out which register currently contains the frame pointer. */
13172 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
13174 /* Move it into the correct place. */
13175 asm_fprintf (f, "\tmov\t%r, %r\n",
13176 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
13178 /* (Temporarily) remove it from the mask of popped registers. */
13179 regs_available_for_popping &= ~(1 << frame_pointer);
13180 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
13182 if (regs_available_for_popping)
13184 int stack_pointer;
13186 /* We popped the stack pointer as well,
13187 find the register that contains it. */
13188 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
13190 /* Move it into the stack register. */
13191 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
13193 /* At this point we have popped all necessary registers, so
13194 do not worry about restoring regs_available_for_popping
13195 to its correct value:
13197 assert (pops_needed == 0)
13198 assert (regs_available_for_popping == (1 << frame_pointer))
13199 assert (regs_to_pop == (1 << STACK_POINTER)) */
13201 else
13203 /* Since we have just moved the popped value into the frame
13204 pointer, the popping register is available for reuse, and
13205 we know that we still have the stack pointer left to pop. */
13206 regs_available_for_popping |= (1 << frame_pointer);
13210 /* If we still have registers left on the stack, but we no longer have
13211 any registers into which we can pop them, then we must move the return
13212 address into the link register and make available the register that
13213 contained it. */
13214 if (regs_available_for_popping == 0 && pops_needed > 0)
13216 regs_available_for_popping |= 1 << reg_containing_return_addr;
13218 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
13219 reg_containing_return_addr);
13221 reg_containing_return_addr = LR_REGNUM;
13224 /* If we have registers left on the stack then pop some more.
13225 We know that at most we will want to pop FP and SP. */
13226 if (pops_needed > 0)
13228 int popped_into;
13229 int move_to;
13231 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13232 regs_available_for_popping);
13234 /* We have popped either FP or SP.
13235 Move whichever one it is into the correct register. */
13236 popped_into = number_of_first_bit_set (regs_available_for_popping);
13237 move_to = number_of_first_bit_set (regs_to_pop);
13239 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
13241 regs_to_pop &= ~(1 << move_to);
13243 --pops_needed;
13246 /* If we still have not popped everything then we must have only
13247 had one register available to us and we are now popping the SP. */
13248 if (pops_needed > 0)
13250 int popped_into;
13252 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13253 regs_available_for_popping);
13255 popped_into = number_of_first_bit_set (regs_available_for_popping);
13257 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
13259 /* assert (regs_to_pop == (1 << STACK_POINTER))
13260 assert (pops_needed == 1) */
13264 /* If necessary restore the a4 register. */
13265 if (restore_a4)
13267 if (reg_containing_return_addr != LR_REGNUM)
13269 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13270 reg_containing_return_addr = LR_REGNUM;
13273 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13276 if (current_function_calls_eh_return)
13277 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13279 /* Return to caller. */
13280 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13284 void
13285 thumb_final_prescan_insn (rtx insn)
13287 if (flag_print_asm_name)
13288 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
13289 INSN_ADDRESSES (INSN_UID (insn)));
13293 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
13295 unsigned HOST_WIDE_INT mask = 0xff;
13296 int i;
13298 if (val == 0) /* XXX */
13299 return 0;
13301 for (i = 0; i < 25; i++)
13302 if ((val & (mask << i)) == val)
13303 return 1;
13305 return 0;
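/* For example, 0x00ff0000 is accepted (0xff shifted left by 16), while
   0x101 is rejected because its set bits span more than eight
   contiguous positions.  */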
13308 /* Returns nonzero if the current function contains,
13309 or might contain a far jump. */
13310 static int
13311 thumb_far_jump_used_p (void)
13313 rtx insn;
13315 /* This test is only important for leaf functions. */
13316 /* assert (!leaf_function_p ()); */
13318 /* If we have already decided that far jumps may be used,
13319 do not bother checking again, and always return true even if
13320 it turns out that they are not being used. Once we have made
13321 the decision that far jumps are present (and that hence the link
13322 register will be pushed onto the stack) we cannot go back on it. */
13323 if (cfun->machine->far_jump_used)
13324 return 1;
13326 /* If this function is not being called from the prologue/epilogue
13327 generation code then it must be being called from the
13328 INITIAL_ELIMINATION_OFFSET macro. */
13329 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
13331 /* In this case we know that we are being asked about the elimination
13332 of the arg pointer register. If that register is not being used,
13333 then there are no arguments on the stack, and we do not have to
13334 worry that a far jump might force the prologue to push the link
13335 register, changing the stack offsets. In this case we can just
13336 return false, since the presence of far jumps in the function will
13337 not affect stack offsets.
13339 If the arg pointer is live (or if it was live, but has now been
13340 eliminated and so set to dead) then we do have to test to see if
13341 the function might contain a far jump. This test can lead to some
13342 false negatives, since before reload is completed, the length of
13343 branch instructions is not known, so gcc defaults to returning their
13344 longest length, which in turn sets the far jump attribute to true.
13346 A false negative will not result in bad code being generated, but it
13347 will result in a needless push and pop of the link register. We
13348 hope that this does not occur too often.
13350 If we need doubleword stack alignment this could affect the other
13351 elimination offsets so we can't risk getting it wrong. */
13352 if (regs_ever_live [ARG_POINTER_REGNUM])
13353 cfun->machine->arg_pointer_live = 1;
13354 else if (!cfun->machine->arg_pointer_live)
13355 return 0;
13358 /* Check to see if the function contains a branch
13359 insn with the far jump attribute set. */
13360 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13362 if (GET_CODE (insn) == JUMP_INSN
13363 /* Ignore tablejump patterns. */
13364 && GET_CODE (PATTERN (insn)) != ADDR_VEC
13365 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
13366 && get_attr_far_jump (insn) == FAR_JUMP_YES
13369 /* Record the fact that we have decided that
13370 the function does use far jumps. */
13371 cfun->machine->far_jump_used = 1;
13372 return 1;
13376 return 0;
13379 /* Return nonzero if FUNC must be entered in ARM mode. */
13381 is_called_in_ARM_mode (tree func)
13383 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
13385 /* Ignore the problem of functions whose address is taken. */
13386 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
13387 return TRUE;
13389 #ifdef ARM_PE
13390 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
13391 #else
13392 return FALSE;
13393 #endif
13396 /* The bits which aren't usefully expanded as rtl. */
13397 const char *
13398 thumb_unexpanded_epilogue (void)
13400 int regno;
13401 unsigned long live_regs_mask = 0;
13402 int high_regs_pushed = 0;
13403 int had_to_push_lr;
13404 int size;
13406 if (return_used_this_function)
13407 return "";
13409 if (IS_NAKED (arm_current_func_type ()))
13410 return "";
13412 live_regs_mask = thumb_compute_save_reg_mask ();
13413 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13415 /* We can deduce the registers used from the function's return value.
13416 This is more reliable than examining regs_ever_live[] because that
13417 will be set if the register is ever used in the function, not just if
13418 the register is used to hold a return value. */
13419 size = arm_size_return_regs ();
13421 /* The prologue may have pushed some high registers to use as
13422 work registers. e.g. the testsuite file:
13423 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
13424 compiles to produce:
13425 push {r4, r5, r6, r7, lr}
13426 mov r7, r9
13427 mov r6, r8
13428 push {r6, r7}
13429 as part of the prologue. We have to undo that pushing here. */
13431 if (high_regs_pushed)
13433 unsigned long mask = live_regs_mask & 0xff;
13434 int next_hi_reg;
13436 /* The available low registers depend on the size of the value we are
13437 returning. */
13438 if (size <= 12)
13439 mask |= 1 << 3;
13440 if (size <= 8)
13441 mask |= 1 << 2;
13443 if (mask == 0)
13444 /* Oh dear! We have no low registers into which we can pop
13445 high registers! */
13446 internal_error
13447 ("no low registers available for popping high registers");
13449 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13450 if (live_regs_mask & (1 << next_hi_reg))
13451 break;
13453 while (high_regs_pushed)
13455 /* Find lo register(s) into which the high register(s) can
13456 be popped. */
13457 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13459 if (mask & (1 << regno))
13460 high_regs_pushed--;
13461 if (high_regs_pushed == 0)
13462 break;
13465 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13467 /* Pop the values into the low register(s). */
13468 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13470 /* Move the value(s) into the high registers. */
13471 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13473 if (mask & (1 << regno))
13475 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13476 regno);
13478 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13479 if (live_regs_mask & (1 << next_hi_reg))
13480 break;
13484 live_regs_mask &= ~0x0f00;
13487 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13488 live_regs_mask &= 0xff;
13490 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13492 /* Pop the return address into the PC. */
13493 if (had_to_push_lr)
13494 live_regs_mask |= 1 << PC_REGNUM;
13496 /* Either no argument registers were pushed or a backtrace
13497 structure was created which includes an adjusted stack
13498 pointer, so just pop everything. */
13499 if (live_regs_mask)
13500 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13501 live_regs_mask);
13503 /* We have either just popped the return address into the
13504 PC or it was kept in LR for the entire function. */
13505 if (!had_to_push_lr)
13506 thumb_exit (asm_out_file, LR_REGNUM);
13508 else
13510 /* Pop everything but the return address. */
13511 if (live_regs_mask)
13512 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13513 live_regs_mask);
13515 if (had_to_push_lr)
13517 if (size > 12)
13519 /* We have no free low regs, so save one. */
13520 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13521 LAST_ARG_REGNUM);
13524 /* Get the return address into a temporary register. */
13525 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13526 1 << LAST_ARG_REGNUM);
13528 if (size > 12)
13530 /* Move the return address to lr. */
13531 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13532 LAST_ARG_REGNUM);
13533 /* Restore the low register. */
13534 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13535 IP_REGNUM);
13536 regno = LR_REGNUM;
13538 else
13539 regno = LAST_ARG_REGNUM;
13541 else
13542 regno = LR_REGNUM;
13544 /* Remove the argument registers that were pushed onto the stack. */
13545 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13546 SP_REGNUM, SP_REGNUM,
13547 current_function_pretend_args_size);
13549 thumb_exit (asm_out_file, regno);
13552 return "";
13555 /* Functions to save and restore machine-specific function data. */
13556 static struct machine_function *
13557 arm_init_machine_status (void)
13559 struct machine_function *machine;
13560 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13562 #if ARM_FT_UNKNOWN != 0
13563 machine->func_type = ARM_FT_UNKNOWN;
13564 #endif
13565 return machine;
13568 /* Return an RTX indicating where the return address to the
13569 calling function can be found. */
13571 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13573 if (count != 0)
13574 return NULL_RTX;
13576 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13579 /* Do anything needed before RTL is emitted for each function. */
13580 void
13581 arm_init_expanders (void)
13583 /* Arrange to initialize and mark the machine per-function status. */
13584 init_machine_status = arm_init_machine_status;
13586 /* This is to stop the combine pass optimizing away the alignment
13587 adjustment of va_arg. */
13588 /* ??? It is claimed that this should not be necessary. */
13589 if (cfun)
13590 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13594 /* Like arm_compute_initial_elimination_offset. Simpler because there
13595 isn't an ABI specified frame pointer for Thumb. Instead, we set it
13596 to point at the base of the local variables after static stack
13597 space for a function has been allocated. */
13599 HOST_WIDE_INT
13600 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13602 arm_stack_offsets *offsets;
13604 offsets = arm_get_frame_offsets ();
13606 switch (from)
13608 case ARG_POINTER_REGNUM:
13609 switch (to)
13611 case STACK_POINTER_REGNUM:
13612 return offsets->outgoing_args - offsets->saved_args;
13614 case FRAME_POINTER_REGNUM:
13615 return offsets->soft_frame - offsets->saved_args;
13617 case ARM_HARD_FRAME_POINTER_REGNUM:
13618 return offsets->saved_regs - offsets->saved_args;
13620 case THUMB_HARD_FRAME_POINTER_REGNUM:
13621 return offsets->locals_base - offsets->saved_args;
13623 default:
13624 gcc_unreachable ();
13626 break;
13628 case FRAME_POINTER_REGNUM:
13629 switch (to)
13631 case STACK_POINTER_REGNUM:
13632 return offsets->outgoing_args - offsets->soft_frame;
13634 case ARM_HARD_FRAME_POINTER_REGNUM:
13635 return offsets->saved_regs - offsets->soft_frame;
13637 case THUMB_HARD_FRAME_POINTER_REGNUM:
13638 return offsets->locals_base - offsets->soft_frame;
13640 default:
13641 gcc_unreachable ();
13643 break;
13645 default:
13646 gcc_unreachable ();
13651 /* Generate the rest of a function's prologue. */
13652 void
13653 thumb_expand_prologue (void)
13655 rtx insn, dwarf;
13657 HOST_WIDE_INT amount;
13658 arm_stack_offsets *offsets;
13659 unsigned long func_type;
13660 int regno;
13661 unsigned long live_regs_mask;
13663 func_type = arm_current_func_type ();
13665 /* Naked functions don't have prologues. */
13666 if (IS_NAKED (func_type))
13667 return;
13669 if (IS_INTERRUPT (func_type))
13671 error ("interrupt service routines cannot be coded in Thumb mode");
13672 return;
13675 live_regs_mask = thumb_compute_save_reg_mask ();
13676 /* Load the pic register before setting the frame pointer,
13677 so we can use r7 as a temporary work register. */
13678 if (flag_pic && arm_pic_register != INVALID_REGNUM)
13679 arm_load_pic_register (live_regs_mask);
13681 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
13682 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13683 stack_pointer_rtx);
13685 offsets = arm_get_frame_offsets ();
13686 amount = offsets->outgoing_args - offsets->saved_regs;
13687 if (amount)
13689 if (amount < 512)
13691 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13692 GEN_INT (- amount)));
13693 RTX_FRAME_RELATED_P (insn) = 1;
13695 else
13697 rtx reg;
13699 /* The stack decrement is too big for an immediate value in a single
13700 insn. In theory we could issue multiple subtracts, but after
13701 three of them it becomes more space efficient to place the full
13702 value in the constant pool and load into a register. (Also the
13703 ARM debugger really likes to see only one stack decrement per
13704 function). So instead we look for a scratch register into which
13705 we can load the decrement, and then we subtract this from the
13706 stack pointer. Unfortunately, in Thumb mode the only available
13707 scratch registers are the argument registers, and we cannot use
13708 these as they may hold arguments to the function. Instead we
13709 attempt to locate a call preserved register which is used by this
13710 function. If we can find one, then we know that it will have
13711 been pushed at the start of the prologue and so we can corrupt
13712 it now. */
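/* Illustrative sketch (register and constant-pool label chosen
   arbitrarily): for a 4 KiB frame with r4 call-saved and live, the
   code below comes out roughly as

	ldr	r4, .LCn	@ .LCn: .word -4096
	add	sp, sp, r4

   r4 was pushed at the start of the prologue, so clobbering it
   here is safe.  */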
13713 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13714 if (live_regs_mask & (1 << regno)
13715 && !(frame_pointer_needed
13716 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13717 break;
13719 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13721 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13723 /* Choose an arbitrary, non-argument low register. */
13724 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13726 /* Save it by copying it into a high, scratch register. */
13727 emit_insn (gen_movsi (spare, reg));
13728 /* Add a USE to stop propagate_one_insn() from barfing. */
13729 emit_insn (gen_prologue_use (spare));
13731 /* Decrement the stack. */
13732 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13733 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13734 stack_pointer_rtx, reg));
13735 RTX_FRAME_RELATED_P (insn) = 1;
13736 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13737 plus_constant (stack_pointer_rtx,
13738 -amount));
13739 RTX_FRAME_RELATED_P (dwarf) = 1;
13740 REG_NOTES (insn)
13741 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13742 REG_NOTES (insn));
13744 /* Restore the low register's original value. */
13745 emit_insn (gen_movsi (reg, spare));
13747 /* Emit a USE of the restored scratch register, so that flow
13748 analysis will not consider the restore redundant. The
13749 register won't be used again in this function and isn't
13750 restored by the epilogue. */
13751 emit_insn (gen_prologue_use (reg));
13753 else
13755 reg = gen_rtx_REG (SImode, regno);
13757 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13759 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13760 stack_pointer_rtx, reg));
13761 RTX_FRAME_RELATED_P (insn) = 1;
13762 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13763 plus_constant (stack_pointer_rtx,
13764 -amount));
13765 RTX_FRAME_RELATED_P (dwarf) = 1;
13766 REG_NOTES (insn)
13767 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13768 REG_NOTES (insn));
13773 if (frame_pointer_needed)
13775 amount = offsets->outgoing_args - offsets->locals_base;
13777 if (amount < 1024)
13778 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13779 stack_pointer_rtx, GEN_INT (amount)));
13780 else
13782 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
13783 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13784 hard_frame_pointer_rtx,
13785 stack_pointer_rtx));
13786 dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
13787 plus_constant (stack_pointer_rtx, amount));
13788 RTX_FRAME_RELATED_P (dwarf) = 1;
13789 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13790 REG_NOTES (insn));
13793 RTX_FRAME_RELATED_P (insn) = 1;
13796 /* If we are profiling, make sure no instructions are scheduled before
13797 the call to mcount. Similarly if the user has requested no
13798 scheduling in the prologue. Similarly if we want non-call exceptions
13799 using the EABI unwinder, to prevent faulting instructions from being
13800 swapped with a stack adjustment. */
13801 if (current_function_profile || !TARGET_SCHED_PROLOG
13802 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
13803 emit_insn (gen_blockage ());
13805 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13806 if (live_regs_mask & 0xff)
13807 cfun->machine->lr_save_eliminated = 0;
13809 /* If the link register is being kept alive, with the return address in it,
13810 then make sure that it does not get reused by the ce2 pass. */
13811 if (cfun->machine->lr_save_eliminated)
13812 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13816 void
13817 thumb_expand_epilogue (void)
13819 HOST_WIDE_INT amount;
13820 arm_stack_offsets *offsets;
13821 int regno;
13823 /* Naked functions don't have epilogues. */
13824 if (IS_NAKED (arm_current_func_type ()))
13825 return;
13827 offsets = arm_get_frame_offsets ();
13828 amount = offsets->outgoing_args - offsets->saved_regs;
13830 if (frame_pointer_needed)
13832 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13833 amount = offsets->locals_base - offsets->saved_regs;
13836 if (amount)
13838 if (amount < 512)
13839 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13840 GEN_INT (amount)));
13841 else
13843 /* r3 is always free in the epilogue. */
13844 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13846 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13847 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13851 /* Emit a USE (stack_pointer_rtx), so that
13852 the stack adjustment will not be deleted. */
13853 emit_insn (gen_prologue_use (stack_pointer_rtx));
13855 if (current_function_profile || !TARGET_SCHED_PROLOG)
13856 emit_insn (gen_blockage ());
13858 /* Emit a clobber for each register that will be restored in the
13859 epilogue, so that flow2 will get register lifetimes correct. */
13860 for (regno = 0; regno < 13; regno++)
13861 if (regs_ever_live[regno] && !call_used_regs[regno])
13862 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13864 if (! regs_ever_live[LR_REGNUM])
13865 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13868 static void
13869 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13871 unsigned long live_regs_mask = 0;
13872 unsigned long l_mask;
13873 unsigned high_regs_pushed = 0;
13874 int cfa_offset = 0;
13875 int regno;
13877 if (IS_NAKED (arm_current_func_type ()))
13878 return;
13880 if (is_called_in_ARM_mode (current_function_decl))
13882 const char * name;
13884 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13885 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13886 == SYMBOL_REF);
13887 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13889 /* Generate code sequence to switch us into Thumb mode. */
13890 /* The .code 32 directive has already been emitted by
13891 ASM_DECLARE_FUNCTION_NAME. */
13892 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13893 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13895 /* Generate a label, so that the debugger will notice the
13896 change in instruction sets. This label is also used by
13897 the assembler to bypass the ARM code when this function
13898 is called from a Thumb encoded function elsewhere in the
13899 same file. Hence the definition of STUB_NAME here must
13900 agree with the definition in gas/config/tc-arm.c. */
13902 #define STUB_NAME ".real_start_of"
13904 fprintf (f, "\t.code\t16\n");
13905 #ifdef ARM_PE
13906 if (arm_dllexport_name_p (name))
13907 name = arm_strip_name_encoding (name);
13908 #endif
13909 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13910 fprintf (f, "\t.thumb_func\n");
13911 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13914 if (current_function_pretend_args_size)
13916 /* Output unwind directive for the stack adjustment. */
13917 if (ARM_EABI_UNWIND_TABLES)
13918 fprintf (f, "\t.pad #%d\n",
13919 current_function_pretend_args_size);
13921 if (cfun->machine->uses_anonymous_args)
13923 int num_pushes;
13925 fprintf (f, "\tpush\t{");
13927 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13929 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13930 regno <= LAST_ARG_REGNUM;
13931 regno++)
13932 asm_fprintf (f, "%r%s", regno,
13933 regno == LAST_ARG_REGNUM ? "" : ", ");
13935 fprintf (f, "}\n");
13937 else
13938 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13939 SP_REGNUM, SP_REGNUM,
13940 current_function_pretend_args_size);
13942 /* We don't need to record the stores for unwinding (would it
13943 help the debugger any if we did?), but record the change in
13944 the stack pointer. */
13945 if (dwarf2out_do_frame ())
13947 char *l = dwarf2out_cfi_label ();
13949 cfa_offset = cfa_offset + current_function_pretend_args_size;
13950 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13954 /* Get the registers we are going to push. */
13955 live_regs_mask = thumb_compute_save_reg_mask ();
13956 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13957 l_mask = live_regs_mask & 0x40ff;
13958 /* Then count how many other high registers will need to be pushed. */
13959 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13961 if (TARGET_BACKTRACE)
13963 unsigned offset;
13964 unsigned work_register;
13966 /* We have been asked to create a stack backtrace structure.
13967 The code looks like this:
13969 0 .align 2
13970 0 func:
13971 0 sub SP, #16 Reserve space for 4 registers.
13972 2 push {R7} Push low registers.
13973 4 add R7, SP, #20 Get the stack pointer before the push.
13974 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13975 8 mov R7, PC Get hold of the start of this code plus 12.
13976 10 str R7, [SP, #16] Store it.
13977 12 mov R7, FP Get hold of the current frame pointer.
13978 14 str R7, [SP, #4] Store it.
13979 16 mov R7, LR Get hold of the current return address.
13980 18 str R7, [SP, #12] Store it.
13981 20 add R7, SP, #16 Point at the start of the backtrace structure.
13982 22 mov FP, R7 Put this value into the frame pointer. */
13984 work_register = thumb_find_work_register (live_regs_mask);
13986 if (ARM_EABI_UNWIND_TABLES)
13987 asm_fprintf (f, "\t.pad #16\n");
13989 asm_fprintf
13990 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13991 SP_REGNUM, SP_REGNUM);
13993 if (dwarf2out_do_frame ())
13995 char *l = dwarf2out_cfi_label ();
13997 cfa_offset = cfa_offset + 16;
13998 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
14001 if (l_mask)
14003 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14004 offset = bit_count (l_mask) * UNITS_PER_WORD;
14006 else
14007 offset = 0;
14009 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14010 offset + 16 + current_function_pretend_args_size);
14012 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14013 offset + 4);
14015 /* Make sure that the instruction fetching the PC is in the right place
14016 to calculate "start of backtrace creation code + 12". */
14017 if (l_mask)
14019 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14020 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14021 offset + 12);
14022 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14023 ARM_HARD_FRAME_POINTER_REGNUM);
14024 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14025 offset);
14027 else
14029 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14030 ARM_HARD_FRAME_POINTER_REGNUM);
14031 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14032 offset);
14033 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14034 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14035 offset + 12);
14038 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
14039 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14040 offset + 8);
14041 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14042 offset + 12);
14043 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
14044 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
14046 /* Optimization: If we are not pushing any low registers but we are going
14047 to push some high registers then delay our first push. This will just
14048 be a push of LR and we can combine it with the push of the first high
14049 register. */
14050 else if ((l_mask & 0xff) != 0
14051 || (high_regs_pushed == 0 && l_mask))
14052 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14054 if (high_regs_pushed)
14056 unsigned pushable_regs;
14057 unsigned next_hi_reg;
14059 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
14060 if (live_regs_mask & (1 << next_hi_reg))
14061 break;
14063 pushable_regs = l_mask & 0xff;
14065 if (pushable_regs == 0)
14066 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
14068 while (high_regs_pushed > 0)
14070 unsigned long real_regs_mask = 0;
14072 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
14074 if (pushable_regs & (1 << regno))
14076 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
14078 high_regs_pushed --;
14079 real_regs_mask |= (1 << next_hi_reg);
14081 if (high_regs_pushed)
14083 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
14084 next_hi_reg --)
14085 if (live_regs_mask & (1 << next_hi_reg))
14086 break;
14088 else
14090 pushable_regs &= ~((1 << regno) - 1);
14091 break;
14096 /* If we had to find a work register and we have not yet
14097 saved the LR then add it to the list of regs to push. */
14098 if (l_mask == (1 << LR_REGNUM))
14100 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
14101 1, &cfa_offset,
14102 real_regs_mask | (1 << LR_REGNUM));
14103 l_mask = 0;
14105 else
14106 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
14111 /* Handle the case of a double word load into a low register from
14112 a computed memory address. The computed address may involve a
14113 register which is overwritten by the load. */
14114 const char *
14115 thumb_load_double_from_address (rtx *operands)
14117 rtx addr;
14118 rtx base;
14119 rtx offset;
14120 rtx arg1;
14121 rtx arg2;
14123 gcc_assert (GET_CODE (operands[0]) == REG);
14124 gcc_assert (GET_CODE (operands[1]) == MEM);
14126 /* Get the memory address. */
14127 addr = XEXP (operands[1], 0);
14129 /* Work out how the memory address is computed. */
14130 switch (GET_CODE (addr))
14132 case REG:
14133 operands[2] = adjust_address (operands[1], SImode, 4);
14135 if (REGNO (operands[0]) == REGNO (addr))
14137 output_asm_insn ("ldr\t%H0, %2", operands);
14138 output_asm_insn ("ldr\t%0, %1", operands);
14140 else
14142 output_asm_insn ("ldr\t%0, %1", operands);
14143 output_asm_insn ("ldr\t%H0, %2", operands);
14145 break;
14147 case CONST:
14148 /* Compute <address> + 4 for the high order load. */
14149 operands[2] = adjust_address (operands[1], SImode, 4);
14151 output_asm_insn ("ldr\t%0, %1", operands);
14152 output_asm_insn ("ldr\t%H0, %2", operands);
14153 break;
14155 case PLUS:
14156 arg1 = XEXP (addr, 0);
14157 arg2 = XEXP (addr, 1);
14159 if (CONSTANT_P (arg1))
14160 base = arg2, offset = arg1;
14161 else
14162 base = arg1, offset = arg2;
14164 gcc_assert (GET_CODE (base) == REG);
14166 /* Catch the case of <address> = <reg> + <reg>. */
14167 if (GET_CODE (offset) == REG)
14169 int reg_offset = REGNO (offset);
14170 int reg_base = REGNO (base);
14171 int reg_dest = REGNO (operands[0]);
14173 /* Add the base and offset registers together into the
14174 higher destination register. */
14175 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r\n",
14176 reg_dest + 1, reg_base, reg_offset);
14178 /* Load the lower destination register from the address in
14179 the higher destination register. */
14180 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]\n",
14181 reg_dest, reg_dest + 1);
14183 /* Load the higher destination register from its own address
14184 plus 4. */
14185 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]\n",
14186 reg_dest + 1, reg_dest + 1);
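/* For example (a sketch): a load of [r0 + r1] into r2/r3 comes
   out as

	add	r3, r0, r1
	ldr	r2, [r3, #0]
	ldr	r3, [r3, #4]

   The high half of the destination doubles as the address
   register, so the base and offset registers may safely be
   overwritten.  */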
14188 else
14190 /* Compute <address> + 4 for the high order load. */
14191 operands[2] = adjust_address (operands[1], SImode, 4);
14193 /* If the computed address is held in the low order register
14194 then load the high order register first, otherwise always
14195 load the low order register first. */
14196 if (REGNO (operands[0]) == REGNO (base))
14198 output_asm_insn ("ldr\t%H0, %2", operands);
14199 output_asm_insn ("ldr\t%0, %1", operands);
14201 else
14203 output_asm_insn ("ldr\t%0, %1", operands);
14204 output_asm_insn ("ldr\t%H0, %2", operands);
14207 break;
14209 case LABEL_REF:
14210 /* With no registers to worry about we can just load the value
14211 directly. */
14212 operands[2] = adjust_address (operands[1], SImode, 4);
14214 output_asm_insn ("ldr\t%H0, %2", operands);
14215 output_asm_insn ("ldr\t%0, %1", operands);
14216 break;
14218 default:
14219 gcc_unreachable ();
14222 return "";
14225 const char *
14226 thumb_output_move_mem_multiple (int n, rtx *operands)
14228 rtx tmp;
14230 switch (n)
14232 case 2:
14233 if (REGNO (operands[4]) > REGNO (operands[5]))
14235 tmp = operands[4];
14236 operands[4] = operands[5];
14237 operands[5] = tmp;
14239 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
14240 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
14241 break;
14243 case 3:
14244 if (REGNO (operands[4]) > REGNO (operands[5]))
14246 tmp = operands[4];
14247 operands[4] = operands[5];
14248 operands[5] = tmp;
14250 if (REGNO (operands[5]) > REGNO (operands[6]))
14252 tmp = operands[5];
14253 operands[5] = operands[6];
14254 operands[6] = tmp;
14256 if (REGNO (operands[4]) > REGNO (operands[5]))
14258 tmp = operands[4];
14259 operands[4] = operands[5];
14260 operands[5] = tmp;
14263 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
14264 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
14265 break;
14267 default:
14268 gcc_unreachable ();
14271 return "";
14274 /* Output a call-via instruction for thumb state. */
14275 const char *
14276 thumb_call_via_reg (rtx reg)
14278 int regno = REGNO (reg);
14279 rtx *labelp;
14281 gcc_assert (regno < LR_REGNUM);
14283 /* If we are in the normal text section we can use a single instance
14284 per compilation unit. If we are doing function sections, then we need
14285 an entry per section, since we can't rely on reachability. */
14286 if (in_section == text_section)
14288 thumb_call_reg_needed = 1;
14290 if (thumb_call_via_label[regno] == NULL)
14291 thumb_call_via_label[regno] = gen_label_rtx ();
14292 labelp = thumb_call_via_label + regno;
14294 else
14296 if (cfun->machine->call_via[regno] == NULL)
14297 cfun->machine->call_via[regno] = gen_label_rtx ();
14298 labelp = cfun->machine->call_via + regno;
14301 output_asm_insn ("bl\t%a0", labelp);
14302 return "";
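/* Sketch of the resulting code (the label name is illustrative):
   each call site becomes "bl .Lcall_via_r4", and arm_file_end (or
   the per-function table) later emits

	.Lcall_via_r4:
		bx	r4

   so all indirect calls through a given register share one stub.  */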
14305 /* Routines for generating rtl. */
14306 void
14307 thumb_expand_movmemqi (rtx *operands)
14309 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
14310 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
14311 HOST_WIDE_INT len = INTVAL (operands[2]);
14312 HOST_WIDE_INT offset = 0;
14314 while (len >= 12)
14316 emit_insn (gen_movmem12b (out, in, out, in));
14317 len -= 12;
14320 if (len >= 8)
14322 emit_insn (gen_movmem8b (out, in, out, in));
14323 len -= 8;
14326 if (len >= 4)
14328 rtx reg = gen_reg_rtx (SImode);
14329 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
14330 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
14331 len -= 4;
14332 offset += 4;
14335 if (len >= 2)
14337 rtx reg = gen_reg_rtx (HImode);
14338 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
14339 plus_constant (in, offset))));
14340 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
14341 reg));
14342 len -= 2;
14343 offset += 2;
14346 if (len)
14348 rtx reg = gen_reg_rtx (QImode);
14349 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
14350 plus_constant (in, offset))));
14351 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
14352 reg));
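/* For example (a sketch): a 27-byte copy expands to two 12-byte
   ldmia/stmia blocks followed by a halfword move and a byte move
   (12 + 12 + 2 + 1).  */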
14356 void
14357 thumb_reload_out_hi (rtx *operands)
14359 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
14362 /* Handle reading a half-word from memory during reload. */
14363 void
14364 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
14366 gcc_unreachable ();
14369 /* Return the length of a function name prefix
14370 that starts with the character 'c'. */
14371 static int
14372 arm_get_strip_length (int c)
14374 switch (c)
14376 ARM_NAME_ENCODING_LENGTHS
14377 default: return 0;
14381 /* Return a pointer to a function's name with any
14382 and all prefix encodings stripped from it. */
14383 const char *
14384 arm_strip_name_encoding (const char *name)
14386 int skip;
14388 while ((skip = arm_get_strip_length (* name)))
14389 name += skip;
14391 return name;
14394 /* If there is a '*' anywhere in the name's prefix, then
14395 emit the stripped name verbatim, otherwise prepend an
14396 underscore if leading underscores are being used. */
14397 void
14398 arm_asm_output_labelref (FILE *stream, const char *name)
14400 int skip;
14401 int verbatim = 0;
14403 while ((skip = arm_get_strip_length (* name)))
14405 verbatim |= (*name == '*');
14406 name += skip;
14409 if (verbatim)
14410 fputs (name, stream);
14411 else
14412 asm_fprintf (stream, "%U%s", name);
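/* Examples (assuming a USER_LABEL_PREFIX of "_"): an encoded name
   "*foo" is emitted verbatim as "foo", while a plain "foo" comes
   out as "_foo".  */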
14415 static void
14416 arm_file_end (void)
14418 int regno;
14420 if (NEED_INDICATE_EXEC_STACK)
14421 /* Add .note.GNU-stack. */
14422 file_end_indicate_exec_stack ();
14424 if (! thumb_call_reg_needed)
14425 return;
14427 switch_to_section (text_section);
14428 asm_fprintf (asm_out_file, "\t.code 16\n");
14429 ASM_OUTPUT_ALIGN (asm_out_file, 1);
14431 for (regno = 0; regno < LR_REGNUM; regno++)
14433 rtx label = thumb_call_via_label[regno];
14435 if (label != 0)
14437 targetm.asm_out.internal_label (asm_out_file, "L",
14438 CODE_LABEL_NUMBER (label));
14439 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
14444 rtx aof_pic_label;
14446 #ifdef AOF_ASSEMBLER
14447 /* Special functions only needed when producing AOF syntax assembler. */
14449 struct pic_chain
14451 struct pic_chain * next;
14452 const char * symname;
14455 static struct pic_chain * aof_pic_chain = NULL;
14457 rtx
14458 aof_pic_entry (rtx x)
14460 struct pic_chain ** chainp;
14461 int offset;
14463 if (aof_pic_label == NULL_RTX)
14465 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14468 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14469 offset += 4, chainp = &(*chainp)->next)
14470 if ((*chainp)->symname == XSTR (x, 0))
14471 return plus_constant (aof_pic_label, offset);
14473 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14474 (*chainp)->next = NULL;
14475 (*chainp)->symname = XSTR (x, 0);
14476 return plus_constant (aof_pic_label, offset);
14479 void
14480 aof_dump_pic_table (FILE *f)
14482 struct pic_chain * chain;
14484 if (aof_pic_chain == NULL)
14485 return;
14487 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14488 PIC_OFFSET_TABLE_REGNUM,
14489 PIC_OFFSET_TABLE_REGNUM);
14490 fputs ("|x$adcons|\n", f);
14492 for (chain = aof_pic_chain; chain; chain = chain->next)
14494 fputs ("\tDCD\t", f);
14495 assemble_name (f, chain->symname);
14496 fputs ("\n", f);
14500 int arm_text_section_count = 1;
14502 /* A get_unnamed_section callback for switching to the text section. */
14504 static void
14505 aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14507 fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
14508 arm_text_section_count++);
14509 if (flag_pic)
14510 fprintf (asm_out_file, ", PIC, REENTRANT");
14511 fprintf (asm_out_file, "\n");
14514 static int arm_data_section_count = 1;
14516 /* A get_unnamed_section callback for switching to the data section. */
14518 static void
14519 aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14521 fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
14522 arm_data_section_count++);
14525 /* Implement TARGET_ASM_INIT_SECTIONS.
14527 AOF Assembler syntax is a nightmare when it comes to areas, since once
14528 we change from one area to another, we can't go back again. Instead,
14529 we must create a new area with the same attributes and add the new output
14530 to that. Unfortunately, there is nothing we can do here to guarantee that
14531 two areas with the same attributes will be linked adjacently in the
14532 resulting executable, so we have to be careful not to do pc-relative
14533 addressing across such boundaries. */
14535 static void
14536 aof_asm_init_sections (void)
14538 text_section = get_unnamed_section (SECTION_CODE,
14539 aof_output_text_section_asm_op, NULL);
14540 data_section = get_unnamed_section (SECTION_WRITE,
14541 aof_output_data_section_asm_op, NULL);
14542 readonly_data_section = text_section;
14545 void
14546 zero_init_section (void)
14548 static int zero_init_count = 1;
14550 fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
14551 in_section = NULL;
14554 /* The AOF assembler is religiously strict about declarations of
14555 imported and exported symbols, so that it is impossible to declare
14556 a function as imported near the beginning of the file, and then to
14557 export it later on. It is, however, possible to delay the decision
14558 until all the functions in the file have been compiled. To get
14559 around this, we maintain a list of the imports and exports, and
14560 delete from it any that are subsequently defined. At the end of
14561 compilation we spit the remainder of the list out before the END
14562 directive. */
14564 struct import
14566 struct import * next;
14567 const char * name;
14570 static struct import * imports_list = NULL;
14572 void
14573 aof_add_import (const char *name)
14575 struct import * new;
14577 for (new = imports_list; new; new = new->next)
14578 if (new->name == name)
14579 return;
14581 new = (struct import *) xmalloc (sizeof (struct import));
14582 new->next = imports_list;
14583 imports_list = new;
14584 new->name = name;
14587 void
14588 aof_delete_import (const char *name)
14590 struct import ** old;
14592 for (old = &imports_list; *old; old = & (*old)->next)
14594 if ((*old)->name == name)
14596 *old = (*old)->next;
14597 return;
14602 int arm_main_function = 0;
14604 static void
14605 aof_dump_imports (FILE *f)
14607 /* The AOF assembler needs this to cause the startup code to be extracted
14608 from the library. Bringing in __main causes the whole thing to work
14609 automagically. */
14610 if (arm_main_function)
14612 switch_to_section (text_section);
14613 fputs ("\tIMPORT __main\n", f);
14614 fputs ("\tDCD __main\n", f);
14617 /* Now dump the remaining imports. */
14618 while (imports_list)
14620 fprintf (f, "\tIMPORT\t");
14621 assemble_name (f, imports_list->name);
14622 fputc ('\n', f);
14623 imports_list = imports_list->next;
14627 static void
14628 aof_globalize_label (FILE *stream, const char *name)
14630 default_globalize_label (stream, name);
14631 if (! strcmp (name, "main"))
14632 arm_main_function = 1;
14635 static void
14636 aof_file_start (void)
14638 fputs ("__r0\tRN\t0\n", asm_out_file);
14639 fputs ("__a1\tRN\t0\n", asm_out_file);
14640 fputs ("__a2\tRN\t1\n", asm_out_file);
14641 fputs ("__a3\tRN\t2\n", asm_out_file);
14642 fputs ("__a4\tRN\t3\n", asm_out_file);
14643 fputs ("__v1\tRN\t4\n", asm_out_file);
14644 fputs ("__v2\tRN\t5\n", asm_out_file);
14645 fputs ("__v3\tRN\t6\n", asm_out_file);
14646 fputs ("__v4\tRN\t7\n", asm_out_file);
14647 fputs ("__v5\tRN\t8\n", asm_out_file);
14648 fputs ("__v6\tRN\t9\n", asm_out_file);
14649 fputs ("__sl\tRN\t10\n", asm_out_file);
14650 fputs ("__fp\tRN\t11\n", asm_out_file);
14651 fputs ("__ip\tRN\t12\n", asm_out_file);
14652 fputs ("__sp\tRN\t13\n", asm_out_file);
14653 fputs ("__lr\tRN\t14\n", asm_out_file);
14654 fputs ("__pc\tRN\t15\n", asm_out_file);
14655 fputs ("__f0\tFN\t0\n", asm_out_file);
14656 fputs ("__f1\tFN\t1\n", asm_out_file);
14657 fputs ("__f2\tFN\t2\n", asm_out_file);
14658 fputs ("__f3\tFN\t3\n", asm_out_file);
14659 fputs ("__f4\tFN\t4\n", asm_out_file);
14660 fputs ("__f5\tFN\t5\n", asm_out_file);
14661 fputs ("__f6\tFN\t6\n", asm_out_file);
14662 fputs ("__f7\tFN\t7\n", asm_out_file);
14663 switch_to_section (text_section);
14666 static void
14667 aof_file_end (void)
14669 if (flag_pic)
14670 aof_dump_pic_table (asm_out_file);
14671 arm_file_end ();
14672 aof_dump_imports (asm_out_file);
14673 fputs ("\tEND\n", asm_out_file);
14675 #endif /* AOF_ASSEMBLER */
14677 #ifndef ARM_PE
14678 /* Symbols in the text segment can be accessed without indirecting via the
14679 constant pool; it may take an extra binary operation, but this is still
14680 faster than indirecting via memory. Don't do this when not optimizing,
14681 since we won't be calculating all of the offsets necessary to do this
14682 simplification. */
14684 static void
14685 arm_encode_section_info (tree decl, rtx rtl, int first)
14687 /* This doesn't work with AOF syntax, since the string table may be in
14688 a different AREA. */
14689 #ifndef AOF_ASSEMBLER
14690 if (optimize > 0 && TREE_CONSTANT (decl))
14691 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14692 #endif
14694 /* If we are referencing a function that is weak then encode a long call
14695 flag in the function name, otherwise if the function is static or
14696 known to be defined in this file then encode a short call flag. */
14697 if (first && DECL_P (decl))
14699 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14700 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14701 else if (! TREE_PUBLIC (decl))
14702 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14705 default_encode_section_info (decl, rtl, first);
14707 #endif /* !ARM_PE */
14709 static void
14710 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14712 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14713 && !strcmp (prefix, "L"))
14715 arm_ccfsm_state = 0;
14716 arm_target_insn = NULL;
14718 default_internal_label (stream, prefix, labelno);
14721 /* Output code to add DELTA to the first argument, and then jump
14722 to FUNCTION. Used for C++ multiple inheritance. */
14723 static void
14724 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14725 HOST_WIDE_INT delta,
14726 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14727 tree function)
14729 static int thunk_label = 0;
14730 char label[256];
14731 char labelpc[256];
14732 int mi_delta = delta;
14733 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14734 int shift = 0;
14735 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14736 ? 1 : 0);
14737 if (mi_delta < 0)
14738 mi_delta = - mi_delta;
14739 if (TARGET_THUMB)
14741 int labelno = thunk_label++;
14742 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14743 fputs ("\tldr\tr12, ", file);
14744 assemble_name (file, label);
14745 fputc ('\n', file);
14746 if (flag_pic)
14748 /* If we are generating PIC, the ldr instruction below loads
14749 "(target - 7) - .LTHUNKPCn" into r12. The pc reads as
14750 the address of the add + 8, so we have:
14752 r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
14753 = target + 1.
14755 Note that we have "+ 1" because some versions of GNU ld
14756 don't set the low bit of the result for R_ARM_REL32
14757 relocations against thumb function symbols. */
14758 ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
14759 assemble_name (file, labelpc);
14760 fputs (":\n", file);
14761 fputs ("\tadd\tr12, pc, r12\n", file);
14764 while (mi_delta != 0)
14766 if ((mi_delta & (3 << shift)) == 0)
14767 shift += 2;
14768 else
14770 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14771 mi_op, this_regno, this_regno,
14772 mi_delta & (0xff << shift));
14773 mi_delta &= ~(0xff << shift);
14774 shift += 8;
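/* For example (a sketch; the this pointer is assumed to be in r0):
   a delta of 0x1001 is applied as

	add	r0, r0, #1
	add	r0, r0, #4096

   each step consuming one 8-bit immediate field at the current
   SHIFT position.  */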
14777 if (TARGET_THUMB)
14779 fprintf (file, "\tbx\tr12\n");
14780 ASM_OUTPUT_ALIGN (file, 2);
14781 assemble_name (file, label);
14782 fputs (":\n", file);
14783 if (flag_pic)
14785 /* Output ".word .LTHUNKn-7-.LTHUNKPCn". */
14786 rtx tem = XEXP (DECL_RTL (function), 0);
14787 tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (-7));
14788 tem = gen_rtx_MINUS (GET_MODE (tem),
14789 tem,
14790 gen_rtx_SYMBOL_REF (Pmode,
14791 ggc_strdup (labelpc)));
14792 assemble_integer (tem, 4, BITS_PER_WORD, 1);
14794 else
14795 /* Output ".word .LTHUNKn". */
14796 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14798 else
14800 fputs ("\tb\t", file);
14801 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14802 if (NEED_PLT_RELOC)
14803 fputs ("(PLT)", file);
14804 fputc ('\n', file);
14808 int
14809 arm_emit_vector_const (FILE *file, rtx x)
14811 int i;
14812 const char * pattern;
14814 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14816 switch (GET_MODE (x))
14818 case V2SImode: pattern = "%08x"; break;
14819 case V4HImode: pattern = "%04x"; break;
14820 case V8QImode: pattern = "%02x"; break;
14821 default: gcc_unreachable ();
14824 fprintf (file, "0x");
14825 for (i = CONST_VECTOR_NUNITS (x); i--;)
14827 rtx element;
14829 element = CONST_VECTOR_ELT (x, i);
14830 fprintf (file, pattern, INTVAL (element));
14833 return 1;
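/* For example (a sketch): a V4HImode constant with elements
   {1, 2, 3, 4} is printed highest element first, giving
   "0x0004000300020001".  */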
14836 const char *
14837 arm_output_load_gr (rtx *operands)
14839 rtx reg;
14840 rtx offset;
14841 rtx wcgr;
14842 rtx sum;
14844 if (GET_CODE (operands [1]) != MEM
14845 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14846 || GET_CODE (reg = XEXP (sum, 0)) != REG
14847 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14848 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14849 return "wldrw%?\t%0, %1";
14851 /* Fix up an out-of-range load of a GR register. */
14852 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14853 wcgr = operands[0];
14854 operands[0] = reg;
14855 output_asm_insn ("ldr%?\t%0, %1", operands);
14857 operands[0] = wcgr;
14858 operands[1] = reg;
14859 output_asm_insn ("tmcr%?\t%0, %1", operands);
14860 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14862 return "";
14865 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14867 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14868 named arg and all anonymous args onto the stack.
14869 XXX I know the prologue shouldn't be pushing registers, but it is faster
14870 that way. */
14872 static void
14873 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14874 enum machine_mode mode ATTRIBUTE_UNUSED,
14875 tree type ATTRIBUTE_UNUSED,
14876 int *pretend_size,
14877 int second_time ATTRIBUTE_UNUSED)
14879 cfun->machine->uses_anonymous_args = 1;
14880 if (cum->nregs < NUM_ARG_REGS)
14881 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
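/* For example (a sketch, assuming the usual four ARM argument
   registers and 4-byte words): for "int f (int a, ...)" one
   register is named, so cum->nregs is 1 and *pretend_size becomes
   (4 - 1) * 4 = 12, making the prologue push r1-r3 alongside any
   stack-passed arguments.  */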
14884 /* Return nonzero if the CONSUMER instruction (a store) does not need
14885 PRODUCER's value to calculate the address. */
14887 int
14888 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14890 rtx value = PATTERN (producer);
14891 rtx addr = PATTERN (consumer);
14893 if (GET_CODE (value) == COND_EXEC)
14894 value = COND_EXEC_CODE (value);
14895 if (GET_CODE (value) == PARALLEL)
14896 value = XVECEXP (value, 0, 0);
14897 value = XEXP (value, 0);
14898 if (GET_CODE (addr) == COND_EXEC)
14899 addr = COND_EXEC_CODE (addr);
14900 if (GET_CODE (addr) == PARALLEL)
14901 addr = XVECEXP (addr, 0, 0);
14902 addr = XEXP (addr, 0);
14904 return !reg_overlap_mentioned_p (value, addr);
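/* For example (a sketch): if PRODUCER sets r0 and CONSUMER is
   "str r0, [r1, #4]", the address (r1 + 4) does not mention r0,
   so this returns nonzero; the store of r0 itself is a value
   dependency, not an address dependency.  */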
14907 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14908 have an early register shift value or amount dependency on the
14909 result of PRODUCER. */
14911 int
14912 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14914 rtx value = PATTERN (producer);
14915 rtx op = PATTERN (consumer);
14916 rtx early_op;
14918 if (GET_CODE (value) == COND_EXEC)
14919 value = COND_EXEC_CODE (value);
14920 if (GET_CODE (value) == PARALLEL)
14921 value = XVECEXP (value, 0, 0);
14922 value = XEXP (value, 0);
14923 if (GET_CODE (op) == COND_EXEC)
14924 op = COND_EXEC_CODE (op);
14925 if (GET_CODE (op) == PARALLEL)
14926 op = XVECEXP (op, 0, 0);
14927 op = XEXP (op, 1);
14929 early_op = XEXP (op, 0);
14930 /* This is either an actual independent shift, or a shift applied to
14931 the first operand of another operation. We want the whole shift
14932 operation. */
14933 if (GET_CODE (early_op) == REG)
14934 early_op = op;
14936 return !reg_overlap_mentioned_p (value, early_op);
14939 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14940 have an early register shift value dependency on the result of
14941 PRODUCER. */
14943 int
14944 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14946 rtx value = PATTERN (producer);
14947 rtx op = PATTERN (consumer);
14948 rtx early_op;
14950 if (GET_CODE (value) == COND_EXEC)
14951 value = COND_EXEC_CODE (value);
14952 if (GET_CODE (value) == PARALLEL)
14953 value = XVECEXP (value, 0, 0);
14954 value = XEXP (value, 0);
14955 if (GET_CODE (op) == COND_EXEC)
14956 op = COND_EXEC_CODE (op);
14957 if (GET_CODE (op) == PARALLEL)
14958 op = XVECEXP (op, 0, 0);
14959 op = XEXP (op, 1);
14961 early_op = XEXP (op, 0);
14963 /* This is either an actual independent shift, or a shift applied to
14964 the first operand of another operation. We want the value being
14965 shifted, in either case. */
14966 if (GET_CODE (early_op) != REG)
14967 early_op = XEXP (early_op, 0);
14969 return !reg_overlap_mentioned_p (value, early_op);
14972 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14973 have an early register mult dependency on the result of
14974 PRODUCER. */
14976 int
14977 arm_no_early_mul_dep (rtx producer, rtx consumer)
14979 rtx value = PATTERN (producer);
14980 rtx op = PATTERN (consumer);
14982 if (GET_CODE (value) == COND_EXEC)
14983 value = COND_EXEC_CODE (value);
14984 if (GET_CODE (value) == PARALLEL)
14985 value = XVECEXP (value, 0, 0);
14986 value = XEXP (value, 0);
14987 if (GET_CODE (op) == COND_EXEC)
14988 op = COND_EXEC_CODE (op);
14989 if (GET_CODE (op) == PARALLEL)
14990 op = XVECEXP (op, 0, 0);
14991 op = XEXP (op, 1);
14993 return (GET_CODE (op) == PLUS
14994 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14998 /* We can't rely on the caller doing the proper promotion when
14999 using APCS or ATPCS. */
15001 static bool
15002 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
15004 return !TARGET_AAPCS_BASED;
15008 /* AAPCS based ABIs use short enums by default. */
15010 static bool
15011 arm_default_short_enums (void)
15013 return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
15017 /* AAPCS requires that anonymous bitfields affect structure alignment. */
15019 static bool
15020 arm_align_anon_bitfield (void)
15022 return TARGET_AAPCS_BASED;
15026 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
15028 static tree
15029 arm_cxx_guard_type (void)
15031 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
15035 /* The EABI says test the least significant bit of a guard variable. */
15037 static bool
15038 arm_cxx_guard_mask_bit (void)
15040 return TARGET_AAPCS_BASED;
15044 /* The EABI specifies that all array cookies are 8 bytes long. */
15046 static tree
15047 arm_get_cookie_size (tree type)
15049 tree size;
15051 if (!TARGET_AAPCS_BASED)
15052 return default_cxx_get_cookie_size (type);
15054 size = build_int_cst (sizetype, 8);
15055 return size;
15059 /* The EABI says that array cookies should also contain the element size. */
15061 static bool
15062 arm_cookie_has_size (void)
15064 return TARGET_AAPCS_BASED;
15068 /* The EABI says constructors and destructors should return a pointer to
15069 the object constructed/destroyed. */
15071 static bool
15072 arm_cxx_cdtor_returns_this (void)
15074 return TARGET_AAPCS_BASED;
15077 /* The EABI says that an inline function may never be the key
15078 method. */
15080 static bool
15081 arm_cxx_key_method_may_be_inline (void)
15083 return !TARGET_AAPCS_BASED;
15086 static void
15087 arm_cxx_determine_class_data_visibility (tree decl)
15089 if (!TARGET_AAPCS_BASED)
15090 return;
15092 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
15093 is exported. However, on systems without dynamic vague linkage,
15094 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
15095 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
15096 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
15097 else
15098 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
15099 DECL_VISIBILITY_SPECIFIED (decl) = 1;
15102 static bool
15103 arm_cxx_class_data_always_comdat (void)
15105 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
15106 vague linkage if the class has no key function. */
15107 return !TARGET_AAPCS_BASED;
15111 /* The EABI says __aeabi_atexit should be used to register static
15112 destructors. */
15114 static bool
15115 arm_cxx_use_aeabi_atexit (void)
15117 return TARGET_AAPCS_BASED;
15121 void
15122 arm_set_return_address (rtx source, rtx scratch)
15124 arm_stack_offsets *offsets;
15125 HOST_WIDE_INT delta;
15126 rtx addr;
15127 unsigned long saved_regs;
15129 saved_regs = arm_compute_save_reg_mask ();
15131 if ((saved_regs & (1 << LR_REGNUM)) == 0)
15132 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15133 else
15135 if (frame_pointer_needed)
15136 addr = plus_constant (hard_frame_pointer_rtx, -4);
15137 else
15139 /* LR will be the first saved register. */
15140 offsets = arm_get_frame_offsets ();
15141 delta = offsets->outgoing_args - (offsets->frame + 4);
15144 if (delta >= 4096)
15146 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
15147 GEN_INT (delta & ~4095)));
15148 addr = scratch;
15149 delta &= 4095;
15151 else
15152 addr = stack_pointer_rtx;
15154 addr = plus_constant (addr, delta);
15156 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15161 void
15162 thumb_set_return_address (rtx source, rtx scratch)
15164 arm_stack_offsets *offsets;
15165 HOST_WIDE_INT delta;
15166 int reg;
15167 rtx addr;
15168 unsigned long mask;
15170 emit_insn (gen_rtx_USE (VOIDmode, source));
15172 mask = thumb_compute_save_reg_mask ();
15173 if (mask & (1 << LR_REGNUM))
15175 offsets = arm_get_frame_offsets ();
15177 /* Find the saved regs. */
15178 if (frame_pointer_needed)
15180 delta = offsets->soft_frame - offsets->saved_args;
15181 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
15183 else
15185 delta = offsets->outgoing_args - offsets->saved_args;
15186 reg = SP_REGNUM;
15188 /* Allow for the stack frame. */
15189 if (TARGET_BACKTRACE)
15190 delta -= 16;
15191 /* The link register is always the first saved register. */
15192 delta -= 4;
15194 /* Construct the address. */
15195 addr = gen_rtx_REG (SImode, reg);
15196 if ((reg != SP_REGNUM && delta >= 128)
15197 || delta >= 1024)
15199 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
15200 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
15201 addr = scratch;
15203 else
15204 addr = plus_constant (addr, delta);
15206 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15208 else
15209 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15212 /* Implements target hook vector_mode_supported_p. */
15213 bool
15214 arm_vector_mode_supported_p (enum machine_mode mode)
15216 if ((mode == V2SImode)
15217 || (mode == V4HImode)
15218 || (mode == V8QImode))
15219 return true;
15221 return false;
15224 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
15225 ARM insns and therefore guarantee that the shift count is modulo 256.
15226 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
15227 guarantee no particular behavior for out-of-range counts. */
15229 static unsigned HOST_WIDE_INT
15230 arm_shift_truncation_mask (enum machine_mode mode)
15232 return mode == SImode ? 255 : 0;
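/* For instance, the 255 mask means an SImode shift by a variable
   count of 257 may be treated as a shift by 1, while a count of 40
   really shifts by 40 (yielding zero for a left shift); DImode
   shift counts get no such guarantee.  */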
15236 /* Map internal gcc register numbers to DWARF2 register numbers. */
15238 unsigned int
15239 arm_dbx_register_number (unsigned int regno)
15241 if (regno < 16)
15242 return regno;
15244 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
15245 compatibility. The EABI defines them as registers 96-103. */
15246 if (IS_FPA_REGNUM (regno))
15247 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
15249 if (IS_VFP_REGNUM (regno))
15250 return 64 + regno - FIRST_VFP_REGNUM;
15252 if (IS_IWMMXT_GR_REGNUM (regno))
15253 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
15255 if (IS_IWMMXT_REGNUM (regno))
15256 return 112 + regno - FIRST_IWMMXT_REGNUM;
15258 gcc_unreachable ();
15262 #ifdef TARGET_UNWIND_INFO
15263 /* Emit unwind directives for a store-multiple instruction. This should
15264 only ever be generated by the function prologue code, so we expect it
15265 to have a particular form. */
15267 static void
15268 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
15270 int i;
15271 HOST_WIDE_INT offset;
15272 HOST_WIDE_INT nregs;
15273 int reg_size;
15274 unsigned reg;
15275 unsigned lastreg;
15276 rtx e;
15278 /* First insn will adjust the stack pointer. */
15279 e = XVECEXP (p, 0, 0);
15280 if (GET_CODE (e) != SET
15281 || GET_CODE (XEXP (e, 0)) != REG
15282 || REGNO (XEXP (e, 0)) != SP_REGNUM
15283 || GET_CODE (XEXP (e, 1)) != PLUS)
15284 abort ();
15286 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
15287 nregs = XVECLEN (p, 0) - 1;
15289 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
15290 if (reg < 16)
15292 /* The function prologue may also push pc, but does not annotate it,
15293 as it is never restored. We turn this into a stack pointer adjustment. */
15294 if (nregs * 4 == offset - 4)
15296 fprintf (asm_out_file, "\t.pad #4\n");
15297 offset -= 4;
15299 reg_size = 4;
15301 else if (IS_VFP_REGNUM (reg))
15303 /* VFP register saves (FSTMX format) use an additional word. */
15304 offset -= 4;
15305 reg_size = 8;
15307 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
15309 /* FPA registers are done differently. */
15310 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
15311 return;
15313 else
15314 /* Unknown register type. */
15315 abort ();
15317 /* If the stack increment doesn't match the size of the saved registers,
15318 something has gone horribly wrong. */
15319 if (offset != nregs * reg_size)
15320 abort ();
15322 fprintf (asm_out_file, "\t.save {");
15324 offset = 0;
15325 lastreg = 0;
15326 /* The remaining insns will describe the stores. */
15327 for (i = 1; i <= nregs; i++)
15329 /* Expect (set (mem <addr>) (reg)),
15330 where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
15331 e = XVECEXP (p, 0, i);
15332 if (GET_CODE (e) != SET
15333 || GET_CODE (XEXP (e, 0)) != MEM
15334 || GET_CODE (XEXP (e, 1)) != REG)
15335 abort ();
15337 reg = REGNO (XEXP (e, 1));
15338 if (reg < lastreg)
15339 abort ();
15341 if (i != 1)
15342 fprintf (asm_out_file, ", ");
15343 /* We can't use %r for VFP because we need to use the
15344 double-precision register names. */
15345 if (IS_VFP_REGNUM (reg))
15346 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
15347 else
15348 asm_fprintf (asm_out_file, "%r", reg);
15350 #ifdef ENABLE_CHECKING
15351 /* Check that the addresses are consecutive. */
15352 e = XEXP (XEXP (e, 0), 0);
15353 if (GET_CODE (e) == PLUS)
15355 offset += reg_size;
15356 if (GET_CODE (XEXP (e, 0)) != REG
15357 || REGNO (XEXP (e, 0)) != SP_REGNUM
15358 || GET_CODE (XEXP (e, 1)) != CONST_INT
15359 || offset != INTVAL (XEXP (e, 1)))
15360 abort ();
15362 else if (i != 1
15363 || GET_CODE (e) != REG
15364 || REGNO (e) != SP_REGNUM)
15365 abort ();
15366 #endif
15368 fprintf (asm_out_file, "}\n");
15371 /* Emit unwind directives for a SET. */
15373 static void
15374 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
15376 rtx e0;
15377 rtx e1;
15379 e0 = XEXP (p, 0);
15380 e1 = XEXP (p, 1);
15381 switch (GET_CODE (e0))
15383 case MEM:
15384 /* Pushing a single register. */
15385 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
15386 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
15387 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
15388 abort ();
15390 asm_fprintf (asm_out_file, "\t.save ");
15391 if (IS_VFP_REGNUM (REGNO (e1)))
15392 asm_fprintf (asm_out_file, "{d%d}\n",
15393 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
15394 else
15395 asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
15396 break;
15398 case REG:
15399 if (REGNO (e0) == SP_REGNUM)
15401 /* A stack adjustment: the constant is negative when allocating, hence the negated .pad operand. */
15402 if (GET_CODE (e1) != PLUS
15403 || GET_CODE (XEXP (e1, 0)) != REG
15404 || REGNO (XEXP (e1, 0)) != SP_REGNUM
15405 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15406 abort ();
15408 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
15409 -INTVAL (XEXP (e1, 1)));
15411 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
15413 HOST_WIDE_INT offset;
15414 unsigned reg;
15416 if (GET_CODE (e1) == PLUS)
15418 if (GET_CODE (XEXP (e1, 0)) != REG
15419 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15420 abort ();
15421 reg = REGNO (XEXP (e1, 0));
15422 offset = INTVAL (XEXP (e1, 1));
15423 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
15424 HARD_FRAME_POINTER_REGNUM, reg,
15425 INTVAL (XEXP (e1, 1)));
15427 else if (GET_CODE (e1) == REG)
15429 reg = REGNO (e1);
15430 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
15431 HARD_FRAME_POINTER_REGNUM, reg);
15433 else
15434 abort ();
15436 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
15438 /* Move from sp to reg. */
15439 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
15441 else if (GET_CODE (e1) == PLUS
15442 && GET_CODE (XEXP (e1, 0)) == REG
15443 && REGNO (XEXP (e1, 0)) == SP_REGNUM
15444 && GET_CODE (XEXP (e1, 1)) == CONST_INT)
15446 /* Set reg to offset from sp. */
15447 asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
15448 REGNO (e0), (int) INTVAL (XEXP (e1, 1)));
15450 else
15451 abort ();
15452 break;
15454 default:
15455 abort ();
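/* Rough correspondence between the SETs handled above and the
   directives emitted (a sketch; register numbers illustrative):

	(set (mem (pre_dec sp)) (reg r4))   ->  .save {r4}
	(set sp (plus sp (const_int -8)))   ->  .pad #8
	(set fp (plus sp (const_int 4)))    ->  .setfp fp, sp, #4
	(set (reg r4) sp)                   ->  .movsp r4  */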
15460 /* Emit unwind directives for the given insn. */
15462 static void
15463 arm_unwind_emit (FILE * asm_out_file, rtx insn)
15465 rtx pat;
15467 if (!ARM_EABI_UNWIND_TABLES)
15468 return;
15470 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
15471 return;
15473 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
15474 if (pat)
15475 pat = XEXP (pat, 0);
15476 else
15477 pat = PATTERN (insn);
15479 switch (GET_CODE (pat))
15481 case SET:
15482 arm_unwind_emit_set (asm_out_file, pat);
15483 break;
15485 case SEQUENCE:
15486 /* Store multiple. */
15487 arm_unwind_emit_stm (asm_out_file, pat);
15488 break;
15490 default:
15491 abort ();
15496 /* Output a reference from a function exception table to the type_info
15497 object X. The EABI specifies that the symbol should be relocated by
15498 an R_ARM_TARGET2 relocation. */
15500 static bool
15501 arm_output_ttype (rtx x)
15503 fputs ("\t.word\t", asm_out_file);
15504 output_addr_const (asm_out_file, x);
15505 /* Use special relocations for symbol references. */
15506 if (GET_CODE (x) != CONST_INT)
15507 fputs ("(TARGET2)", asm_out_file);
15508 fputc ('\n', asm_out_file);
15510 return TRUE;
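/* For example (a sketch): for a type_info symbol _ZTI1A this emits

	.word	_ZTI1A(TARGET2)

   leaving the choice of how TARGET2 resolves to the platform.  */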
15512 #endif /* TARGET_UNWIND_INFO */
15515 /* Output unwind directives for the start/end of a function. */
15517 void
15518 arm_output_fn_unwind (FILE * f, bool prologue)
15520 if (!ARM_EABI_UNWIND_TABLES)
15521 return;
15523 if (prologue)
15524 fputs ("\t.fnstart\n", f);
15525 else
15526 fputs ("\t.fnend\n", f);
15529 static bool
15530 arm_emit_tls_decoration (FILE *fp, rtx x)
15532 enum tls_reloc reloc;
15533 rtx val;
15535 val = XVECEXP (x, 0, 0);
15536 reloc = INTVAL (XVECEXP (x, 0, 1));
15538 output_addr_const (fp, val);
15540 switch (reloc)
15542 case TLS_GD32:
15543 fputs ("(tlsgd)", fp);
15544 break;
15545 case TLS_LDM32:
15546 fputs ("(tlsldm)", fp);
15547 break;
15548 case TLS_LDO32:
15549 fputs ("(tlsldo)", fp);
15550 break;
15551 case TLS_IE32:
15552 fputs ("(gottpoff)", fp);
15553 break;
15554 case TLS_LE32:
15555 fputs ("(tpoff)", fp);
15556 break;
15557 default:
15558 gcc_unreachable ();
15561 switch (reloc)
15563 case TLS_GD32:
15564 case TLS_LDM32:
15565 case TLS_IE32:
15566 fputs (" + (. - ", fp);
15567 output_addr_const (fp, XVECEXP (x, 0, 2));
15568 fputs (" - ", fp);
15569 output_addr_const (fp, XVECEXP (x, 0, 3));
15570 fputc (')', fp);
15571 break;
15572 default:
15573 break;
15576 return TRUE;
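/* The emitted operand looks like, e.g. (label names illustrative):

	x(tlsgd) + (. - .LPIC0 - .LTLS0)

   for a global-dynamic access; the trailing label arithmetic
   accounts for the PC-relative way the address is formed.  */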
15579 bool
15580 arm_output_addr_const_extra (FILE *fp, rtx x)
15582 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
15583 return arm_emit_tls_decoration (fp, x);
15584 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
15586 char label[256];
15587 int labelno = INTVAL (XVECEXP (x, 0, 0));
15589 ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
15590 assemble_name_raw (fp, label);
15592 return TRUE;
15594 else if (GET_CODE (x) == CONST_VECTOR)
15595 return arm_emit_vector_const (fp, x);
15597 return FALSE;
15600 #include "gt-arm.h"