/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
			     HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
					   int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
			       rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
				 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static rtx emit_set_insn (rtx, rtx);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);
static void arm_file_start (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
static void aof_asm_init_sections (void);
#endif
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
				   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
static bool arm_cannot_copy_insn_p (rtx);
static bool arm_tls_symbol_p (rtx x);

/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START arm_file_start

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef  TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_NARROW_VOLATILE_BITFIELD
#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef  TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef  TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef  TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef  TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef  TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef  TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

#undef  TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p

#ifdef HAVE_AS_TLS
#undef  TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef  TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE *asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* The default processor used if not overridden on the command line.  */
static enum processor_type arm_default_cpu = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Which thread pointer model to use.  */
enum arm_tp_type target_thread_pointer = TP_AUTO;

/* Used to parse the -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
					 media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
					 Note: ARM6 & 7 derivatives only.  */
#define FL_ARCH6K     (1 << 15)       /* Architecture rel 6 K extensions.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2     0
#define FL_FOR_ARCH3     FL_MODE32
#define FL_FOR_ARCH3M    (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4     (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T    (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5     (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T    (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E    (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE   (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ  FL_FOR_ARCH5TE
#define FL_FOR_ARCH6     (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J    FL_FOR_ARCH6
#define FL_FOR_ARCH6K    (FL_FOR_ARCH6 | FL_ARCH6K)
#define FL_FOR_ARCH6Z    FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK   FL_FOR_ARCH6K
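
/* For example, FL_FOR_ARCH5TE expands through the chain above to
   FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB:
   each architecture level inherits the capability bits of its
   predecessor and adds its own.  */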

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip supports the ARM 6K extensions.  */
int arm_arch6k = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
unsigned arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2
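
/* Illustrative sketch: a hypothetical command line such as
   "-march=armv5te -mtune=xscale" (assuming "xscale" is one of the core
   names listed in arm-cores.def) leaves arm_select[ARM_OPT_SET_CPU].string
   NULL, sets arm_select[ARM_OPT_SET_ARCH].string to "armv5te" and
   arm_select[ARM_OPT_SET_TUNE].string to "xscale"; arm_override_options
   below then looks each string up in the associated processors table.  */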

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};


/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",      FPUTYPE_FPA},
  {"fpe2",     FPUTYPE_FPA_EMU2},
  {"fpe3",     FPUTYPE_FPA_EMU3},
  {"maverick", FPUTYPE_MAVERICK},
  {"vfp",      FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,        /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,            /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,            /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,            /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,       /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP             /* FPUTYPE_VFP  */
};


struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",   ARM_FLOAT_ABI_SOFT},
  {"softfp", ARM_FLOAT_ABI_SOFTFP},
  {"hard",   ARM_FLOAT_ABI_HARD}
};


struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};


/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",    ARM_ABI_APCS},
  {"atpcs",       ARM_ABI_ATPCS},
  {"aapcs",       ARM_ABI_AAPCS},
  {"iwmmxt",      ARM_ABI_IWMMXT},
  {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
};

/* Supported TLS relocations.  */

enum tls_reloc {
  TLS_GD32,
  TLS_LDM32,
  TLS_LDO32,
  TLS_IE32,
  TLS_LE32
};

/* Emit an insn that's a simple single-set.  Both the operands must be known
   to be valid.  */
inline static rtx
emit_set_insn (rtx x, rtx y)
{
  return emit_insn (gen_rtx_SET (VOIDmode, x, y));
}
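
/* For example, emit_set_insn (reg, GEN_INT (0)) emits the single insn
   (set (reg) (const_int 0)); several routines below use it as shorthand
   for gen_rtx_SET followed by emit_insn.  */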

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
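
/* Worked example: for VALUE == 0x29 (binary 101001) the loop runs three
   times, clearing bits in the order 0x29 -> 0x28 -> 0x20 -> 0, so
   bit_count returns 3.  Each "value &= value - 1" step removes exactly
   one set bit, so the loop count equals the population count.  */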

/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");

  /* For SImode division the ABI provides div-without-mod routines,
     which are faster.  */
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
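
/* Rough illustration of the effect of the tables above: with BPABI in
   force, source such as

     long long q = a / b;   becomes a call to __aeabi_ldivmod
     unsigned r = x / y;    becomes a call to __aeabi_uidiv

   and the quotient comes back in r0 (SImode) or {r0, r1} (DImode),
   exactly where an ordinary division routine would leave it.  */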

/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              gcc_assert (best_fit);
              sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      arm_default_cpu = (enum processor_type) (sel - all_cores);
      if (arm_tune == arm_none)
        arm_tune = arm_default_cpu;
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking");
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic && TARGET_SINGLE_PIC_BASE)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* FPA and iWMMXt are incompatible because the insn encodings overlap.
     VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
     will ever exist.  GCC makes no attempt to support this combination.  */
  if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
    sorry ("iWMMXt and hardware floating point");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  if (target_thread_switch)
    {
      if (strcmp (target_thread_switch, "soft") == 0)
        target_thread_pointer = TP_SOFT;
      else if (strcmp (target_thread_switch, "auto") == 0)
        target_thread_pointer = TP_AUTO;
      else if (strcmp (target_thread_switch, "cp15") == 0)
        target_thread_pointer = TP_CP15;
      else
        error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
    }

  /* Use the cp15 method if it is available.  */
  if (target_thread_pointer == TP_AUTO)
    {
      if (arm_arch6k && !TARGET_THUMB)
        target_thread_pointer = TP_CP15;
      else
        target_thread_pointer = TP_SOFT;
    }

  if (TARGET_HARD_TP && TARGET_THUMB)
    error ("can not use -mtp=cp15 with -mthumb");

  /* Override the default structure alignment for AAPCS ABI.  */
  if (TARGET_AAPCS_BASED)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
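
/* For example, a user-level declaration such as

     void handler (void) __attribute__ ((interrupt ("IRQ")));

   reaches this function with ARGUMENT holding the STRING_CST "IRQ",
   which the table above maps to ARM_FT_ISR; a bare
   __attribute__ ((interrupt)) takes the NULL_TREE path and also
   defaults to ARM_FT_ISR.  */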

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && (TREE_NOTHROW (current_function_decl)
          || !(flag_unwind_tables
               || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ... */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic
          && arm_pic_register != INVALID_REGNUM
          && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
           && ((i & ~0xc000003f) == 0
               || (i & ~0xf000000f) == 0
               || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}
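
/* Examples: an ARM data-processing immediate is an 8-bit value rotated
   right by an even amount, so 0xff, 0x3fc (0xff << 2) and 0xff000000
   are all valid, as are wrap-around rotations such as 0xf000000f
   (0xff rotated right by 4, matched by the three masks above).
   0x101 is rejected: its set bits span nine positions, which no
   8-bit window can cover.  */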

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:		/* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
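
/* Examples: for PLUS, i == -1 is accepted because -i == 1 is a valid
   immediate (the addition can be emitted as "sub rd, rn, #1"); for AND,
   i == 0xffffff00 is accepted because ~i == 0xff is valid (the mask can
   be emitted as "bic rd, rn, #0xff").  */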
1659 /* Emit a sequence of insns to handle a large constant.
1660 CODE is the code of the operation required, it can be any of SET, PLUS,
1661 IOR, AND, XOR, MINUS;
1662 MODE is the mode in which the operation is being performed;
1663 VAL is the integer to operate on;
1664 SOURCE is the other operand (a register, or a null-pointer for SET);
1665 SUBTARGETS means it is safe to create scratch registers if that will
1666 either produce a simpler sequence, or we will want to cse the values.
1667 Return value is the number of insns emitted. */
1670 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1671 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1673 rtx cond;
1675 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1676 cond = COND_EXEC_TEST (PATTERN (insn));
1677 else
1678 cond = NULL_RTX;
1680 if (subtargets || code == SET
1681 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1682 && REGNO (target) != REGNO (source)))
1684 /* After arm_reorg has been called, we can't fix up expensive
1685 constants by pushing them into memory so we must synthesize
1686 them in-line, regardless of the cost. This is only likely to
1687 be more costly on chips that have load delay slots and we are
1688 compiling without running the scheduler (so no splitting
1689 occurred before the final instruction emission).
1691 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1693 if (!after_arm_reorg
1694 && !cond
1695 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1696 1, 0)
1697 > arm_constant_limit + (code != SET)))
1699 if (code == SET)
1701 /* Currently SET is the only monadic value for CODE; all
1702 the rest are dyadic. */
1703 emit_set_insn (target, GEN_INT (val));
1704 return 1;
1706 else
1708 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1710 emit_set_insn (temp, GEN_INT (val));
1711 /* For MINUS, the value is subtracted from, since we never
1712 have subtraction of a constant. */
1713 if (code == MINUS)
1714 emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
1715 else
1716 emit_set_insn (target,
1717 gen_rtx_fmt_ee (code, mode, source, temp));
1718 return 2;
1723 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1727 static int
1728 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1730 HOST_WIDE_INT temp1;
1731 int num_insns = 0;
1734 int end;
1736 if (i <= 0)
1737 i += 32;
1738 if (remainder & (3 << (i - 2)))
1740 end = i - 8;
1741 if (end < 0)
1742 end += 32;
1743 temp1 = remainder & ((0x0ff << end)
1744 | ((i < end) ? (0xff >> (32 - end)) : 0));
1745 remainder &= ~temp1;
1746 num_insns++;
1747 i -= 6;
1749 i -= 2;
1750 } while (remainder);
1751 return num_insns;
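/* As an example of the chunking counted above: 0x12345678 decomposes
   into the 2-bit-aligned byte fields 0x12000000, 0x00340000,
   0x00005600 and 0x00000078, each a valid immediate by itself, so
   synthesizing it costs four insns (a MOV of the first chunk, then
   one insn per remaining chunk).  */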
1754 /* Emit an instruction with the indicated PATTERN. If COND is
1755 non-NULL, conditionalize the execution of the instruction on COND
1756 being true. */
1758 static void
1759 emit_constant_insn (rtx cond, rtx pattern)
1761 if (cond)
1762 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1763 emit_insn (pattern);
1766 /* As above, but extra parameter GENERATE which, if clear, suppresses
1767 RTL generation. */
1769 static int
1770 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1771 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1772 int generate)
1774 int can_invert = 0;
1775 int can_negate = 0;
1776 int can_negate_initial = 0;
1777 int can_shift = 0;
1778 int i;
1779 int num_bits_set = 0;
1780 int set_sign_bit_copies = 0;
1781 int clear_sign_bit_copies = 0;
1782 int clear_zero_bit_copies = 0;
1783 int set_zero_bit_copies = 0;
1784 int insns = 0;
1785 unsigned HOST_WIDE_INT temp1, temp2;
1786 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1788 /* Find out which operations are safe for a given CODE. Also do a quick
1789 check for degenerate cases; these can occur when DImode operations
1790 are split. */
1791 switch (code)
1793 case SET:
1794 can_invert = 1;
1795 can_shift = 1;
1796 can_negate = 1;
1797 break;
1799 case PLUS:
1800 can_negate = 1;
1801 can_negate_initial = 1;
1802 break;
1804 case IOR:
1805 if (remainder == 0xffffffff)
1807 if (generate)
1808 emit_constant_insn (cond,
1809 gen_rtx_SET (VOIDmode, target,
1810 GEN_INT (ARM_SIGN_EXTEND (val))));
1811 return 1;
1813 if (remainder == 0)
1815 if (reload_completed && rtx_equal_p (target, source))
1816 return 0;
1817 if (generate)
1818 emit_constant_insn (cond,
1819 gen_rtx_SET (VOIDmode, target, source));
1820 return 1;
1822 break;
1824 case AND:
1825 if (remainder == 0)
1827 if (generate)
1828 emit_constant_insn (cond,
1829 gen_rtx_SET (VOIDmode, target, const0_rtx));
1830 return 1;
1832 if (remainder == 0xffffffff)
1834 if (reload_completed && rtx_equal_p (target, source))
1835 return 0;
1836 if (generate)
1837 emit_constant_insn (cond,
1838 gen_rtx_SET (VOIDmode, target, source));
1839 return 1;
1841 can_invert = 1;
1842 break;
1844 case XOR:
1845 if (remainder == 0)
1847 if (reload_completed && rtx_equal_p (target, source))
1848 return 0;
1849 if (generate)
1850 emit_constant_insn (cond,
1851 gen_rtx_SET (VOIDmode, target, source));
1852 return 1;
1855 /* We don't know how to handle other cases yet. */
1856 gcc_assert (remainder == 0xffffffff);
1858 if (generate)
1859 emit_constant_insn (cond,
1860 gen_rtx_SET (VOIDmode, target,
1861 gen_rtx_NOT (mode, source)));
1862 return 1;
1864 case MINUS:
1865 /* We treat MINUS as (val - source), since (source - val) is always
1866 passed as (source + (-val)). */
1867 if (remainder == 0)
1869 if (generate)
1870 emit_constant_insn (cond,
1871 gen_rtx_SET (VOIDmode, target,
1872 gen_rtx_NEG (mode, source)));
1873 return 1;
1875 if (const_ok_for_arm (val))
1877 if (generate)
1878 emit_constant_insn (cond,
1879 gen_rtx_SET (VOIDmode, target,
1880 gen_rtx_MINUS (mode, GEN_INT (val),
1881 source)));
1882 return 1;
1884 can_negate = 1;
1886 break;
1888 default:
1889 gcc_unreachable ();
1892 /* If we can do it in one insn get out quickly. */
1893 if (const_ok_for_arm (val)
1894 || (can_negate_initial && const_ok_for_arm (-val))
1895 || (can_invert && const_ok_for_arm (~val)))
1897 if (generate)
1898 emit_constant_insn (cond,
1899 gen_rtx_SET (VOIDmode, target,
1900 (source
1901 ? gen_rtx_fmt_ee (code, mode, source,
1902 GEN_INT (val))
1903 : GEN_INT (val))));
1904 return 1;
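/* For example, (and rd rn #0xfffffffe) uses a mask that is not
   directly encodable, but ~0xfffffffe == 1 is, so the AND emitted
   above can later be matched as "bic rd, rn, #1".  */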
1907 /* Calculate a few attributes that may be useful for specific
1908 optimizations. */
1909 for (i = 31; i >= 0; i--)
1911 if ((remainder & (1 << i)) == 0)
1912 clear_sign_bit_copies++;
1913 else
1914 break;
1917 for (i = 31; i >= 0; i--)
1919 if ((remainder & (1 << i)) != 0)
1920 set_sign_bit_copies++;
1921 else
1922 break;
1925 for (i = 0; i <= 31; i++)
1927 if ((remainder & (1 << i)) == 0)
1928 clear_zero_bit_copies++;
1929 else
1930 break;
1933 for (i = 0; i <= 31; i++)
1935 if ((remainder & (1 << i)) != 0)
1936 set_zero_bit_copies++;
1937 else
1938 break;
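/* For example, remainder == 0x0000fff0 yields clear_sign_bit_copies
   == 16, set_sign_bit_copies == 0, clear_zero_bit_copies == 4 and
   set_zero_bit_copies == 0.  */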
1941 switch (code)
1943 case SET:
1944 /* See if we can do this by sign_extending a constant that is known
1945 to be negative. This is a good way of doing it, since the shift
1946 may well merge into a subsequent insn. */
1947 if (set_sign_bit_copies > 1)
1949 if (const_ok_for_arm
1950 (temp1 = ARM_SIGN_EXTEND (remainder
1951 << (set_sign_bit_copies - 1))))
1953 if (generate)
1955 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1956 emit_constant_insn (cond,
1957 gen_rtx_SET (VOIDmode, new_src,
1958 GEN_INT (temp1)));
1959 emit_constant_insn (cond,
1960 gen_ashrsi3 (target, new_src,
1961 GEN_INT (set_sign_bit_copies - 1)));
1963 return 2;
1965 /* For an inverted constant, we will need to set the low bits,
1966 these will be shifted out of harm's way. */
1967 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1968 if (const_ok_for_arm (~temp1))
1970 if (generate)
1972 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1973 emit_constant_insn (cond,
1974 gen_rtx_SET (VOIDmode, new_src,
1975 GEN_INT (temp1)));
1976 emit_constant_insn (cond,
1977 gen_ashrsi3 (target, new_src,
1978 GEN_INT (set_sign_bit_copies - 1)));
1980 return 2;
1984 /* See if we can calculate the value as the difference between two
1985 valid immediates. */
1986 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1988 int topshift = clear_sign_bit_copies & ~1;
1990 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1991 & (0xff000000 >> topshift));
1993 /* If temp1 is zero, then that means the 9 most significant
1994 bits of remainder were 1 and we've caused it to overflow.
1995 When topshift is 0 we don't need to do anything since we
1996 can borrow from 'bit 32'. */
1997 if (temp1 == 0 && topshift != 0)
1998 temp1 = 0x80000000 >> (topshift - 1);
2000 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
2002 if (const_ok_for_arm (temp2))
2004 if (generate)
2006 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2007 emit_constant_insn (cond,
2008 gen_rtx_SET (VOIDmode, new_src,
2009 GEN_INT (temp1)));
2010 emit_constant_insn (cond,
2011 gen_addsi3 (target, new_src,
2012 GEN_INT (-temp2)));
2015 return 2;
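/* For example, remainder == 0x00ffff00 is not encodable directly,
   but it is the difference of two immediates that are, giving:

	mov	rd, #0x01000000
	sub	rd, rd, #0x00000100  */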
2019 /* See if we can generate this by setting the bottom (or the top)
2020 16 bits, and then shifting these into the other half of the
2021 word. We only look for the simplest cases, to do more would cost
2022 too much. Be careful, however, not to generate this when the
2023 alternative would take fewer insns. */
2024 if (val & 0xffff0000)
2026 temp1 = remainder & 0xffff0000;
2027 temp2 = remainder & 0x0000ffff;
2029 /* Overlaps outside this range are best done using other methods. */
2030 for (i = 9; i < 24; i++)
2032 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2033 && !const_ok_for_arm (temp2))
2035 rtx new_src = (subtargets
2036 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2037 : target);
2038 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2039 source, subtargets, generate);
2040 source = new_src;
2041 if (generate)
2042 emit_constant_insn
2043 (cond,
2044 gen_rtx_SET
2045 (VOIDmode, target,
2046 gen_rtx_IOR (mode,
2047 gen_rtx_ASHIFT (mode, source,
2048 GEN_INT (i)),
2049 source)));
2050 return insns + 1;
2054 /* Don't duplicate cases already considered. */
2055 for (i = 17; i < 24; i++)
2057 if (((temp1 | (temp1 >> i)) == remainder)
2058 && !const_ok_for_arm (temp1))
2060 rtx new_src = (subtargets
2061 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2062 : target);
2063 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
2064 source, subtargets, generate);
2065 source = new_src;
2066 if (generate)
2067 emit_constant_insn
2068 (cond,
2069 gen_rtx_SET (VOIDmode, target,
2070 gen_rtx_IOR
2071 (mode,
2072 gen_rtx_LSHIFTRT (mode, source,
2073 GEN_INT (i)),
2074 source)));
2075 return insns + 1;
2079 break;
2081 case IOR:
2082 case XOR:
2083 /* If we have IOR or XOR, and the constant can be loaded in a
2084 single instruction, and we can find a temporary to put it in,
2085 then this can be done in two instructions instead of 3-4. */
2086 if (subtargets
2087 /* TARGET can't be NULL if SUBTARGETS is 0. */
2088 || (reload_completed && !reg_mentioned_p (target, source)))
2090 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2092 if (generate)
2094 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2096 emit_constant_insn (cond,
2097 gen_rtx_SET (VOIDmode, sub,
2098 GEN_INT (val)));
2099 emit_constant_insn (cond,
2100 gen_rtx_SET (VOIDmode, target,
2101 gen_rtx_fmt_ee (code, mode,
2102 source, sub)));
2104 return 2;
2108 if (code == XOR)
2109 break;
2111 if (set_sign_bit_copies > 8
2112 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2114 if (generate)
2116 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2117 rtx shift = GEN_INT (set_sign_bit_copies);
2119 emit_constant_insn
2120 (cond,
2121 gen_rtx_SET (VOIDmode, sub,
2122 gen_rtx_NOT (mode,
2123 gen_rtx_ASHIFT (mode,
2124 source,
2125 shift))));
2126 emit_constant_insn
2127 (cond,
2128 gen_rtx_SET (VOIDmode, target,
2129 gen_rtx_NOT (mode,
2130 gen_rtx_LSHIFTRT (mode, sub,
2131 shift))));
2133 return 2;
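/* For example, with code == IOR and val == 0xfff00000 (twelve set
   sign-bit copies) neither val nor ~val is encodable, but the pair
   of MVN-with-shift insns

	mvn	rd, rn, asl #12
	mvn	rd, rd, lsr #12

   computes rn | 0xfff00000 in two instructions.  */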
2136 if (set_zero_bit_copies > 8
2137 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2139 if (generate)
2141 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2142 rtx shift = GEN_INT (set_zero_bit_copies);
2144 emit_constant_insn
2145 (cond,
2146 gen_rtx_SET (VOIDmode, sub,
2147 gen_rtx_NOT (mode,
2148 gen_rtx_LSHIFTRT (mode,
2149 source,
2150 shift))));
2151 emit_constant_insn
2152 (cond,
2153 gen_rtx_SET (VOIDmode, target,
2154 gen_rtx_NOT (mode,
2155 gen_rtx_ASHIFT (mode, sub,
2156 shift))));
2158 return 2;
2161 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2163 if (generate)
2165 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2166 emit_constant_insn (cond,
2167 gen_rtx_SET (VOIDmode, sub,
2168 gen_rtx_NOT (mode, source)));
2169 source = sub;
2170 if (subtargets)
2171 sub = gen_reg_rtx (mode);
2172 emit_constant_insn (cond,
2173 gen_rtx_SET (VOIDmode, sub,
2174 gen_rtx_AND (mode, source,
2175 GEN_INT (temp1))));
2176 emit_constant_insn (cond,
2177 gen_rtx_SET (VOIDmode, target,
2178 gen_rtx_NOT (mode, sub)));
2180 return 3;
2182 break;
2184 case AND:
2185 /* See if two shifts will do 2 or more insns' worth of work. */
2186 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2188 HOST_WIDE_INT shift_mask = ((0xffffffff
2189 << (32 - clear_sign_bit_copies))
2190 & 0xffffffff);
2192 if ((remainder | shift_mask) != 0xffffffff)
2194 if (generate)
2196 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2197 insns = arm_gen_constant (AND, mode, cond,
2198 remainder | shift_mask,
2199 new_src, source, subtargets, 1);
2200 source = new_src;
2202 else
2204 rtx targ = subtargets ? NULL_RTX : target;
2205 insns = arm_gen_constant (AND, mode, cond,
2206 remainder | shift_mask,
2207 targ, source, subtargets, 0);
2211 if (generate)
2213 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2214 rtx shift = GEN_INT (clear_sign_bit_copies);
2216 emit_insn (gen_ashlsi3 (new_src, source, shift));
2217 emit_insn (gen_lshrsi3 (target, new_src, shift));
2220 return insns + 2;
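/* For example, with code == AND and remainder == 0x0000ffff,
   clear_sign_bit_copies is 16 and remainder | shift_mask is all
   ones, so no helper AND is needed and the mask is applied purely
   with shifts:

	mov	rd, rn, asl #16
	mov	rd, rd, lsr #16  */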
2223 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2225 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2227 if ((remainder | shift_mask) != 0xffffffff)
2229 if (generate)
2231 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2233 insns = arm_gen_constant (AND, mode, cond,
2234 remainder | shift_mask,
2235 new_src, source, subtargets, 1);
2236 source = new_src;
2238 else
2240 rtx targ = subtargets ? NULL_RTX : target;
2242 insns = arm_gen_constant (AND, mode, cond,
2243 remainder | shift_mask,
2244 targ, source, subtargets, 0);
2248 if (generate)
2250 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2251 rtx shift = GEN_INT (clear_zero_bit_copies);
2253 emit_insn (gen_lshrsi3 (new_src, source, shift));
2254 emit_insn (gen_ashlsi3 (target, new_src, shift));
2257 return insns + 2;
2260 break;
2262 default:
2263 break;
2266 for (i = 0; i < 32; i++)
2267 if (remainder & (1 << i))
2268 num_bits_set++;
2270 if (code == AND || (can_invert && num_bits_set > 16))
2271 remainder = (~remainder) & 0xffffffff;
2272 else if (code == PLUS && num_bits_set > 16)
2273 remainder = (-remainder) & 0xffffffff;
2274 else
2276 can_invert = 0;
2277 can_negate = 0;
2280 /* Now try and find a way of doing the job in either two or three
2281 instructions.
2282 We start by looking for the largest block of zeros that is aligned on
2283 a 2-bit boundary; we then fill up the temps, wrapping around to the
2284 top of the word when we drop off the bottom.
2285 In the worst case this code should produce no more than four insns. */
2287 int best_start = 0;
2288 int best_consecutive_zeros = 0;
2290 for (i = 0; i < 32; i += 2)
2292 int consecutive_zeros = 0;
2294 if (!(remainder & (3 << i)))
2296 while ((i < 32) && !(remainder & (3 << i)))
2298 consecutive_zeros += 2;
2299 i += 2;
2301 if (consecutive_zeros > best_consecutive_zeros)
2303 best_consecutive_zeros = consecutive_zeros;
2304 best_start = i - consecutive_zeros;
2306 i -= 2;
2310 /* So long as it won't require any more insns to do so, it's
2311 desirable to emit a small constant (in bits 0...9) in the last
2312 insn. This way there is more chance that it can be combined with
2313 a later addressing insn to form a pre-indexed load or store
2314 operation. Consider:
2316 *((volatile int *)0xe0000100) = 1;
2317 *((volatile int *)0xe0000110) = 2;
2319 We want this to wind up as:
2321 mov rA, #0xe0000000
2322 mov rB, #1
2323 str rB, [rA, #0x100]
2324 mov rB, #2
2325 str rB, [rA, #0x110]
2327 rather than having to synthesize both large constants from scratch.
2329 Therefore, we calculate how many insns would be required to emit
2330 the constant starting from `best_start', and also starting from
2331 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2332 yield a shorter sequence, we may as well use zero. */
2333 if (best_start != 0
2334 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2335 && (count_insns_for_constant (remainder, 0) <=
2336 count_insns_for_constant (remainder, best_start)))
2337 best_start = 0;
2339 /* Now start emitting the insns. */
2340 i = best_start;
2343 int end;
2345 if (i <= 0)
2346 i += 32;
2347 if (remainder & (3 << (i - 2)))
2349 end = i - 8;
2350 if (end < 0)
2351 end += 32;
2352 temp1 = remainder & ((0x0ff << end)
2353 | ((i < end) ? (0xff >> (32 - end)) : 0));
2354 remainder &= ~temp1;
2356 if (generate)
2358 rtx new_src, temp1_rtx;
2360 if (code == SET || code == MINUS)
2362 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2363 if (can_invert && code != MINUS)
2364 temp1 = ~temp1;
2366 else
2368 if (remainder && subtargets)
2369 new_src = gen_reg_rtx (mode);
2370 else
2371 new_src = target;
2372 if (can_invert)
2373 temp1 = ~temp1;
2374 else if (can_negate)
2375 temp1 = -temp1;
2378 temp1 = trunc_int_for_mode (temp1, mode);
2379 temp1_rtx = GEN_INT (temp1);
2381 if (code == SET)
2383 else if (code == MINUS)
2384 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2385 else
2386 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2388 emit_constant_insn (cond,
2389 gen_rtx_SET (VOIDmode, new_src,
2390 temp1_rtx));
2391 source = new_src;
2394 if (code == SET)
2396 can_invert = 0;
2397 code = PLUS;
2399 else if (code == MINUS)
2400 code = PLUS;
2402 insns++;
2403 i -= 6;
2405 i -= 2;
2407 while (remainder);
2410 return insns;
2413 /* Canonicalize a comparison so that we are more likely to recognize it.
2414 This can be done for a few constant compares, where we can make the
2415 immediate value easier to load. */
2417 enum rtx_code
2418 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2419 rtx * op1)
2421 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2422 unsigned HOST_WIDE_INT maxval;
2423 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2425 switch (code)
2427 case EQ:
2428 case NE:
2429 return code;
2431 case GT:
2432 case LE:
2433 if (i != maxval
2434 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2436 *op1 = GEN_INT (i + 1);
2437 return code == GT ? GE : LT;
2439 break;
2441 case GE:
2442 case LT:
2443 if (i != ~maxval
2444 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2446 *op1 = GEN_INT (i - 1);
2447 return code == GE ? GT : LE;
2449 break;
2451 case GTU:
2452 case LEU:
2453 if (i != ~((unsigned HOST_WIDE_INT) 0)
2454 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2456 *op1 = GEN_INT (i + 1);
2457 return code == GTU ? GEU : LTU;
2459 break;
2461 case GEU:
2462 case LTU:
2463 if (i != 0
2464 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2466 *op1 = GEN_INT (i - 1);
2467 return code == GEU ? GTU : LEU;
2469 break;
2471 default:
2472 gcc_unreachable ();
2475 return code;
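/* For example, (GT reg #0x0fffffff) uses an immediate that no ARM
   data-processing insn can encode, but adding one gives
   (GE reg #0x10000000), whose constant is a valid rotated 8-bit
   immediate, so the comparison is canonicalized to that form.  */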
2479 /* Define how to find the value returned by a function. */
2482 arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2484 enum machine_mode mode;
2485 int unsignedp ATTRIBUTE_UNUSED;
2486 rtx r ATTRIBUTE_UNUSED;
2488 mode = TYPE_MODE (type);
2489 /* Promote integer types. */
2490 if (INTEGRAL_TYPE_P (type))
2491 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2493 /* Promote small structs returned in a register to full-word size
2494 for big-endian AAPCS. */
2495 if (arm_return_in_msb (type))
2497 HOST_WIDE_INT size = int_size_in_bytes (type);
2498 if (size % UNITS_PER_WORD != 0)
2500 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2501 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2505 return LIBCALL_VALUE(mode);
2508 /* Determine the amount of memory needed to store the possible return
2509 registers of an untyped call. */
2511 arm_apply_result_size (void)
2513 int size = 16;
2515 if (TARGET_ARM)
2517 if (TARGET_HARD_FLOAT_ABI)
2519 if (TARGET_FPA)
2520 size += 12;
2521 if (TARGET_MAVERICK)
2522 size += 8;
2524 if (TARGET_IWMMXT_ABI)
2525 size += 8;
2528 return size;
2531 /* Decide whether a type should be returned in memory (true)
2532 or in a register (false). This is called by the macro
2533 RETURN_IN_MEMORY. */
2535 arm_return_in_memory (tree type)
2537 HOST_WIDE_INT size;
2539 if (!AGGREGATE_TYPE_P (type)
2540 && (TREE_CODE (type) != VECTOR_TYPE)
2541 && !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2542 /* All simple types are returned in registers.
2543 For AAPCS, complex types are treated the same as aggregates. */
2544 return 0;
2546 size = int_size_in_bytes (type);
2548 if (arm_abi != ARM_ABI_APCS)
2550 /* ATPCS and later return aggregate types in memory only if they are
2551 larger than a word (or are variable size). */
2552 return (size < 0 || size > UNITS_PER_WORD);
2555 /* To maximize backwards compatibility with previous versions of gcc,
2556 return vectors up to 4 words in registers. */
2557 if (TREE_CODE (type) == VECTOR_TYPE)
2558 return (size < 0 || size > (4 * UNITS_PER_WORD));
2560 /* For the arm-wince targets we choose to be compatible with Microsoft's
2561 ARM and Thumb compilers, which always return aggregates in memory. */
2562 #ifndef ARM_WINCE
2563 /* All structures/unions bigger than one word are returned in memory.
2564 Also catch the case where int_size_in_bytes returns -1. In this case
2565 the aggregate is either huge or of variable size, and in either case
2566 we will want to return it via memory and not in a register. */
2567 if (size < 0 || size > UNITS_PER_WORD)
2568 return 1;
2570 if (TREE_CODE (type) == RECORD_TYPE)
2572 tree field;
2574 /* For a struct the APCS says that we only return in a register
2575 if the type is 'integer like' and every addressable element
2576 has an offset of zero. For practical purposes this means
2577 that the structure can have at most one non-bit-field element
2578 and that this element must be the first one in the structure. */
2580 /* Find the first field, ignoring non-FIELD_DECL things which will
2581 have been created by C++. */
2582 for (field = TYPE_FIELDS (type);
2583 field && TREE_CODE (field) != FIELD_DECL;
2584 field = TREE_CHAIN (field))
2585 continue;
2587 if (field == NULL)
2588 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2590 /* Check that the first field is valid for returning in a register. */
2592 /* ... Floats are not allowed */
2593 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2594 return 1;
2596 /* ... Aggregates that are not themselves valid for returning in
2597 a register are not allowed. */
2598 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2599 return 1;
2601 /* Now check the remaining fields, if any. Only bitfields are allowed,
2602 since they are not addressable. */
2603 for (field = TREE_CHAIN (field);
2604 field;
2605 field = TREE_CHAIN (field))
2607 if (TREE_CODE (field) != FIELD_DECL)
2608 continue;
2610 if (!DECL_BIT_FIELD_TYPE (field))
2611 return 1;
2614 return 0;
2617 if (TREE_CODE (type) == UNION_TYPE)
2619 tree field;
2621 /* Unions can be returned in registers if every element is
2622 integral, or can be returned in an integer register. */
2623 for (field = TYPE_FIELDS (type);
2624 field;
2625 field = TREE_CHAIN (field))
2627 if (TREE_CODE (field) != FIELD_DECL)
2628 continue;
2630 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2631 return 1;
2633 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2634 return 1;
2637 return 0;
2639 #endif /* not ARM_WINCE */
2641 /* Return all other types in memory. */
2642 return 1;
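/* For example, on a non-WinCE APCS target "struct { int x; }" is
   integer-like and comes back in r0, while "struct { int x, y; }"
   exceeds a word and is returned in memory; under the ATPCS/AAPCS
   branch above only the size test applies.  */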
2645 /* Indicate whether or not words of a double are in big-endian order. */
2648 arm_float_words_big_endian (void)
2650 if (TARGET_MAVERICK)
2651 return 0;
2653 /* For FPA, float words are always big-endian. For VFP, float words
2654 follow the memory system mode. */
2656 if (TARGET_FPA)
2658 return 1;
2661 if (TARGET_VFP)
2662 return (TARGET_BIG_END ? 1 : 0);
2664 return 1;
2667 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2668 for a call to a function whose data type is FNTYPE.
2669 For a library call, FNTYPE is NULL. */
2670 void
2671 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2672 rtx libname ATTRIBUTE_UNUSED,
2673 tree fndecl ATTRIBUTE_UNUSED)
2675 /* On the ARM, the offset starts at 0. */
2676 pcum->nregs = 0;
2677 pcum->iwmmxt_nregs = 0;
2678 pcum->can_split = true;
2680 pcum->call_cookie = CALL_NORMAL;
2682 if (TARGET_LONG_CALLS)
2683 pcum->call_cookie = CALL_LONG;
2685 /* Check for long call/short call attributes. The attributes
2686 override any command line option. */
2687 if (fntype)
2689 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2690 pcum->call_cookie = CALL_SHORT;
2691 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2692 pcum->call_cookie = CALL_LONG;
2695 /* Varargs vectors are treated the same as long long.
2696 named_count avoids having to change the way arm handles 'named'. */
2697 pcum->named_count = 0;
2698 pcum->nargs = 0;
2700 if (TARGET_REALLY_IWMMXT && fntype)
2702 tree fn_arg;
2704 for (fn_arg = TYPE_ARG_TYPES (fntype);
2705 fn_arg;
2706 fn_arg = TREE_CHAIN (fn_arg))
2707 pcum->named_count += 1;
2709 if (! pcum->named_count)
2710 pcum->named_count = INT_MAX;
2715 /* Return true if mode/type need doubleword alignment. */
2716 bool
2717 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2719 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2720 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2724 /* Determine where to put an argument to a function.
2725 Value is zero to push the argument on the stack,
2726 or a hard register in which to store the argument.
2728 MODE is the argument's machine mode.
2729 TYPE is the data type of the argument (as a tree).
2730 This is null for libcalls where that information may
2731 not be available.
2732 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2733 the preceding args and about the function being called.
2734 NAMED is nonzero if this argument is a named parameter
2735 (otherwise it is an extra parameter matching an ellipsis). */
2738 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2739 tree type, int named)
2741 int nregs;
2743 /* Varargs vectors are treated the same as long long.
2744 named_count avoids having to change the way arm handles 'named'. */
2745 if (TARGET_IWMMXT_ABI
2746 && arm_vector_mode_supported_p (mode)
2747 && pcum->named_count > pcum->nargs + 1)
2749 if (pcum->iwmmxt_nregs <= 9)
2750 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2751 else
2753 pcum->can_split = false;
2754 return NULL_RTX;
2758 /* Put doubleword aligned quantities in even register pairs. */
2759 if (pcum->nregs & 1
2760 && ARM_DOUBLEWORD_ALIGN
2761 && arm_needs_doubleword_align (mode, type))
2762 pcum->nregs++;
2764 if (mode == VOIDmode)
2765 /* Compute operand 2 of the call insn. */
2766 return GEN_INT (pcum->call_cookie);
2768 /* Only allow splitting an arg between regs and memory if all preceding
2769 args were allocated to regs. For args passed by reference we only count
2770 the reference pointer. */
2771 if (pcum->can_split)
2772 nregs = 1;
2773 else
2774 nregs = ARM_NUM_REGS2 (mode, type);
2776 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2777 return NULL_RTX;
2779 return gen_rtx_REG (mode, pcum->nregs);
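/* For example, with one argument register already used (nregs == 1),
   a DImode argument on an AAPCS target needs doubleword alignment,
   so it is bumped to the even pair r2/r3 and r1 is left unused.  */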
2782 static int
2783 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2784 tree type, bool named ATTRIBUTE_UNUSED)
2786 int nregs = pcum->nregs;
2788 if (arm_vector_mode_supported_p (mode))
2789 return 0;
2791 if (NUM_ARG_REGS > nregs
2792 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2793 && pcum->can_split)
2794 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2796 return 0;
2799 /* Variable sized types are passed by reference. This is a GCC
2800 extension to the ARM ABI. */
2802 static bool
2803 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2804 enum machine_mode mode ATTRIBUTE_UNUSED,
2805 tree type, bool named ATTRIBUTE_UNUSED)
2807 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2810 /* Encode the current state of the #pragma [no_]long_calls. */
2811 typedef enum
2813 OFF, /* No #pragma [no_]long_calls is in effect. */
2814 LONG, /* #pragma long_calls is in effect. */
2815 SHORT /* #pragma no_long_calls is in effect. */
2816 } arm_pragma_enum;
2818 static arm_pragma_enum arm_pragma_long_calls = OFF;
2820 void
2821 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2823 arm_pragma_long_calls = LONG;
2826 void
2827 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2829 arm_pragma_long_calls = SHORT;
2832 void
2833 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2835 arm_pragma_long_calls = OFF;
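/* From C source the corresponding pragmas are used like this
   (illustrative):

	#pragma long_calls
	void far_away (void);		-- gets the long_call attribute
	#pragma long_calls_off

   arm_set_default_type_attributes below applies the recorded state
   to each function type as it is defined.  */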
2838 /* Table of machine attributes. */
2839 const struct attribute_spec arm_attribute_table[] =
2841 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2842 /* Function calls made to this symbol must be done indirectly, because
2843 it may lie outside of the 26 bit addressing range of a normal function
2844 call. */
2845 { "long_call", 0, 0, false, true, true, NULL },
2846 /* Whereas these functions are always known to reside within the 26 bit
2847 addressing range. */
2848 { "short_call", 0, 0, false, true, true, NULL },
2849 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2850 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2851 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2852 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2853 #ifdef ARM_PE
2854 /* ARM/PE has three new attributes:
2855 interfacearm - ?
2856 dllexport - for exporting a function/variable that will live in a dll
2857 dllimport - for importing a function/variable from a dll
2859 Microsoft allows multiple declspecs in one __declspec, separating
2860 them with spaces. We do NOT support this. Instead, use __declspec
2861 multiple times. */
2863 { "dllimport", 0, 0, true, false, false, NULL },
2864 { "dllexport", 0, 0, true, false, false, NULL },
2865 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2866 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2867 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2868 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2869 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2870 #endif
2871 { NULL, 0, 0, false, false, false, NULL }
2874 /* Handle an attribute requiring a FUNCTION_DECL;
2875 arguments as in struct attribute_spec.handler. */
2876 static tree
2877 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2878 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2880 if (TREE_CODE (*node) != FUNCTION_DECL)
2882 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2883 IDENTIFIER_POINTER (name));
2884 *no_add_attrs = true;
2887 return NULL_TREE;
2890 /* Handle an "interrupt" or "isr" attribute;
2891 arguments as in struct attribute_spec.handler. */
2892 static tree
2893 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2894 bool *no_add_attrs)
2896 if (DECL_P (*node))
2898 if (TREE_CODE (*node) != FUNCTION_DECL)
2900 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2901 IDENTIFIER_POINTER (name));
2902 *no_add_attrs = true;
2904 /* FIXME: the argument, if any, is checked for type attributes;
2905 should it be checked for decl ones? */
2907 else
2909 if (TREE_CODE (*node) == FUNCTION_TYPE
2910 || TREE_CODE (*node) == METHOD_TYPE)
2912 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2914 warning (OPT_Wattributes, "%qs attribute ignored",
2915 IDENTIFIER_POINTER (name));
2916 *no_add_attrs = true;
2919 else if (TREE_CODE (*node) == POINTER_TYPE
2920 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2921 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2922 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2924 *node = build_variant_type_copy (*node);
2925 TREE_TYPE (*node) = build_type_attribute_variant
2926 (TREE_TYPE (*node),
2927 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2928 *no_add_attrs = true;
2930 else
2932 /* Possibly pass this attribute on from the type to a decl. */
2933 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2934 | (int) ATTR_FLAG_FUNCTION_NEXT
2935 | (int) ATTR_FLAG_ARRAY_NEXT))
2937 *no_add_attrs = true;
2938 return tree_cons (name, args, NULL_TREE);
2940 else
2942 warning (OPT_Wattributes, "%qs attribute ignored",
2943 IDENTIFIER_POINTER (name));
2948 return NULL_TREE;
2951 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2952 /* Handle the "notshared" attribute. This attribute is another way of
2953 requesting hidden visibility. ARM's compiler supports
2954 "__declspec(notshared)"; we support the same thing via an
2955 attribute. */
2957 static tree
2958 arm_handle_notshared_attribute (tree *node,
2959 tree name ATTRIBUTE_UNUSED,
2960 tree args ATTRIBUTE_UNUSED,
2961 int flags ATTRIBUTE_UNUSED,
2962 bool *no_add_attrs)
2964 tree decl = TYPE_NAME (*node);
2966 if (decl)
2968 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2969 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2970 *no_add_attrs = false;
2972 return NULL_TREE;
2974 #endif
2976 /* Return 0 if the attributes for two types are incompatible, 1 if they
2977 are compatible, and 2 if they are nearly compatible (which causes a
2978 warning to be generated). */
2979 static int
2980 arm_comp_type_attributes (tree type1, tree type2)
2982 int l1, l2, s1, s2;
2984 /* Check for mismatch of non-default calling convention. */
2985 if (TREE_CODE (type1) != FUNCTION_TYPE)
2986 return 1;
2988 /* Check for mismatched call attributes. */
2989 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2990 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2991 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2992 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2994 /* Only bother to check if an attribute is defined. */
2995 if (l1 | l2 | s1 | s2)
2997 /* If one type has an attribute, the other must have the same attribute. */
2998 if ((l1 != l2) || (s1 != s2))
2999 return 0;
3001 /* Disallow mixed attributes. */
3002 if ((l1 & s2) || (l2 & s1))
3003 return 0;
3006 /* Check for mismatched ISR attribute. */
3007 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
3008 if (! l1)
3009 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
3010 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
3011 if (! l2)
3012 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
3013 if (l1 != l2)
3014 return 0;
3016 return 1;
3019 /* Encode long_call or short_call attribute by prefixing
3020 symbol name in DECL with a special character FLAG. */
3021 void
3022 arm_encode_call_attribute (tree decl, int flag)
3024 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
3025 int len = strlen (str);
3026 char * newstr;
3028 /* Do not allow weak functions to be treated as short call. */
3029 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
3030 return;
3032 newstr = alloca (len + 2);
3033 newstr[0] = flag;
3034 strcpy (newstr + 1, str);
3036 newstr = (char *) ggc_alloc_string (newstr, len + 1);
3037 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
3040 /* Assigns default attributes to newly defined type. This is used to
3041 set short_call/long_call attributes for function types of
3042 functions defined inside corresponding #pragma scopes. */
3043 static void
3044 arm_set_default_type_attributes (tree type)
3046 /* Add __attribute__ ((long_call)) to all functions, when
3047 inside #pragma long_calls or __attribute__ ((short_call)),
3048 when inside #pragma no_long_calls. */
3049 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
3051 tree type_attr_list, attr_name;
3052 type_attr_list = TYPE_ATTRIBUTES (type);
3054 if (arm_pragma_long_calls == LONG)
3055 attr_name = get_identifier ("long_call");
3056 else if (arm_pragma_long_calls == SHORT)
3057 attr_name = get_identifier ("short_call");
3058 else
3059 return;
3061 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
3062 TYPE_ATTRIBUTES (type) = type_attr_list;
3066 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
3067 defined within the current compilation unit. If this cannot be
3068 determined, then 0 is returned. */
3069 static int
3070 current_file_function_operand (rtx sym_ref)
3072 /* This is a bit of a fib. A function will have a short call flag
3073 applied to its name if it has the short call attribute, or if it has
3074 already been defined within the current compilation unit. */
3075 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
3076 return 1;
3078 /* The current function is always defined within the current compilation
3079 unit. If it is a weak definition, however, then this may not be the real
3080 definition of the function, and so we have to say no. */
3081 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
3082 && !DECL_WEAK (current_function_decl))
3083 return 1;
3085 /* We cannot make the determination - default to returning 0. */
3086 return 0;
3089 /* Return nonzero if a 32 bit "long_call" should be generated for
3090 this call. We generate a long_call if the function:
3092 a. has an __attribute__((long_call))
3093 or b. is within the scope of a #pragma long_calls
3094 or c. the -mlong-calls command line switch has been specified
3095 and either:
3096 1. -ffunction-sections is in effect
3097 or 2. the current function has __attribute__ ((section))
3098 or 3. the target function has __attribute__ ((section))
3100 However we do not generate a long call if the function:
3102 d. has an __attribute__ ((short_call))
3103 or e. is inside the scope of a #pragma no_long_calls
3104 or f. is defined within the current compilation unit.
3106 This function will be called by C fragments contained in the machine
3107 description file. SYM_REF and CALL_COOKIE correspond to the matched
3108 rtl operands. CALL_SYMBOL is used to distinguish between
3109 two different callers of the function. It is set to 1 in the
3110 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3111 and "call_value" patterns. This is because of the difference in the
3112 SYM_REFs passed by these patterns. */
3114 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3116 if (!call_symbol)
3118 if (GET_CODE (sym_ref) != MEM)
3119 return 0;
3121 sym_ref = XEXP (sym_ref, 0);
3124 if (GET_CODE (sym_ref) != SYMBOL_REF)
3125 return 0;
3127 if (call_cookie & CALL_SHORT)
3128 return 0;
3130 if (TARGET_LONG_CALLS)
3132 if (flag_function_sections
3133 || DECL_SECTION_NAME (current_function_decl))
3134 /* c.3 is handled by the definition of the
3135 ARM_DECLARE_FUNCTION_SIZE macro. */
3136 return 1;
3139 if (current_file_function_operand (sym_ref))
3140 return 0;
3142 return (call_cookie & CALL_LONG)
3143 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3144 || TARGET_LONG_CALLS;
3147 /* Return nonzero if it is ok to make a tail-call to DECL. */
3148 static bool
3149 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3151 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3153 if (cfun->machine->sibcall_blocked)
3154 return false;
3156 /* Never tailcall something for which we have no decl, or if we
3157 are in Thumb mode. */
3158 if (decl == NULL || TARGET_THUMB)
3159 return false;
3161 /* Get the calling method. */
3162 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3163 call_type = CALL_SHORT;
3164 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3165 call_type = CALL_LONG;
3167 /* Cannot tail-call to long calls, since these are out of range of
3168 a branch instruction. However, if not compiling PIC, we know
3169 we can reach the symbol if it is in this compilation unit. */
3170 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3171 return false;
3173 /* If we are interworking and the function is not declared static
3174 then we can't tail-call it unless we know that it exists in this
3175 compilation unit (since it might be a Thumb routine). */
3176 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3177 return false;
3179 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3180 if (IS_INTERRUPT (arm_current_func_type ()))
3181 return false;
3183 /* Everything else is ok. */
3184 return true;
3188 /* Addressing mode support functions. */
3190 /* Return nonzero if X is a legitimate immediate operand when compiling
3191 for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
3193 legitimate_pic_operand_p (rtx x)
3195 if (GET_CODE (x) == SYMBOL_REF
3196 || (GET_CODE (x) == CONST
3197 && GET_CODE (XEXP (x, 0)) == PLUS
3198 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
3199 return 0;
3201 return 1;
3205 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3207 if (GET_CODE (orig) == SYMBOL_REF
3208 || GET_CODE (orig) == LABEL_REF)
3210 #ifndef AOF_ASSEMBLER
3211 rtx pic_ref, address;
3212 #endif
3213 rtx insn;
3214 int subregs = 0;
3216 /* If this function doesn't have a pic register, create one now.
3217 A lot of the logic here is made obscure by the fact that this
3218 routine gets called as part of the rtx cost estimation
3219 process. We don't want those calls to affect any assumptions
3220 about the real function; and further, we can't call
3221 entry_of_function() until we start the real expansion
3222 process. */
3223 if (!current_function_uses_pic_offset_table)
3225 gcc_assert (!no_new_pseudos);
3226 if (arm_pic_register != INVALID_REGNUM)
3228 cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
3230 /* Play games to avoid marking the function as needing pic
3231 if we are being called as part of the cost-estimation
3232 process. */
3233 if (current_ir_type () != IR_GIMPLE)
3234 current_function_uses_pic_offset_table = 1;
3236 else
3238 rtx seq;
3240 cfun->machine->pic_reg = gen_reg_rtx (Pmode);
3242 /* Play games to avoid marking the function as needing pic
3243 if we are being called as part of the cost-estimation
3244 process. */
3245 if (current_ir_type () != IR_GIMPLE)
3247 current_function_uses_pic_offset_table = 1;
3248 start_sequence ();
3250 arm_load_pic_register (0UL);
3252 seq = get_insns ();
3253 end_sequence ();
3254 emit_insn_after (seq, entry_of_function ());
3259 if (reg == 0)
3261 gcc_assert (!no_new_pseudos);
3262 reg = gen_reg_rtx (Pmode);
3264 subregs = 1;
3267 #ifdef AOF_ASSEMBLER
3268 /* The AOF assembler can generate relocations for these directly, and
3269 understands that the PIC register has to be added into the offset. */
3270 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3271 #else
3272 if (subregs)
3273 address = gen_reg_rtx (Pmode);
3274 else
3275 address = reg;
3277 if (TARGET_ARM)
3278 emit_insn (gen_pic_load_addr_arm (address, orig));
3279 else
3280 emit_insn (gen_pic_load_addr_thumb (address, orig));
3282 if ((GET_CODE (orig) == LABEL_REF
3283 || (GET_CODE (orig) == SYMBOL_REF
3284 && SYMBOL_REF_LOCAL_P (orig)))
3285 && NEED_GOT_RELOC)
3286 pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
3287 else
3289 pic_ref = gen_const_mem (Pmode,
3290 gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
3291 address));
3294 insn = emit_move_insn (reg, pic_ref);
3295 #endif
3296 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3297 by loop. */
3298 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3299 REG_NOTES (insn));
3300 return reg;
3302 else if (GET_CODE (orig) == CONST)
3304 rtx base, offset;
3306 if (GET_CODE (XEXP (orig, 0)) == PLUS
3307 && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
3308 return orig;
3310 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
3311 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
3312 return orig;
3314 if (reg == 0)
3316 gcc_assert (!no_new_pseudos);
3317 reg = gen_reg_rtx (Pmode);
3320 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3322 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3323 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3324 base == reg ? 0 : reg);
3326 if (GET_CODE (offset) == CONST_INT)
3328 /* The base register doesn't really matter; we only want to
3329 test the index for the appropriate mode. */
3330 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3332 gcc_assert (!no_new_pseudos);
3333 offset = force_reg (Pmode, offset);
3336 if (GET_CODE (offset) == CONST_INT)
3337 return plus_constant (base, INTVAL (offset));
3340 if (GET_MODE_SIZE (mode) > 4
3341 && (GET_MODE_CLASS (mode) == MODE_INT
3342 || TARGET_SOFT_FLOAT))
3344 emit_insn (gen_addsi3 (reg, base, offset));
3345 return reg;
3348 return gen_rtx_PLUS (Pmode, base, offset);
3351 return orig;
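/* Schematically, for a global symbol the code above expands to
   something like

	ldr	rT, <literal: sym(GOT)>		(pic_load_addr_arm/thumb)
	ldr	rD, [rPIC, rT]			(load the GOT slot)

   while a local symbol or label skips the GOT load and is simply
   formed as rPIC + rT.  (Illustrative; the exact sequences come
   from the patterns named above.)  */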
3355 /* Find a spare low register to use during the prolog of a function. */
3357 static int
3358 thumb_find_work_register (unsigned long pushed_regs_mask)
3360 int reg;
3362 /* Check the argument registers first as these are call-used. The
3363 register allocation order means that sometimes r3 might be used
3364 but earlier argument registers might not, so check them all. */
3365 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3366 if (!regs_ever_live[reg])
3367 return reg;
3369 /* Before going on to check the call-saved registers we can try a couple
3370 more ways of deducing that r3 is available. The first is when we are
3371 pushing anonymous arguments onto the stack and we have fewer than 4
3372 registers' worth of fixed arguments (*). In this case r3 will be part of
3373 the variable argument list and so we can be sure that it will be
3374 pushed right at the start of the function. Hence it will be available
3375 for the rest of the prologue.
3376 (*): ie current_function_pretend_args_size is greater than 0. */
3377 if (cfun->machine->uses_anonymous_args
3378 && current_function_pretend_args_size > 0)
3379 return LAST_ARG_REGNUM;
3381 /* The other case is when we have fixed arguments but fewer than 4
3382 registers' worth. In this case r3 might be used in the body of the function, but
3383 it is not being used to convey an argument into the function. In theory
3384 we could just check current_function_args_size to see how many bytes are
3385 being passed in argument registers, but it seems that it is unreliable.
3386 Sometimes it will have the value 0 when in fact arguments are being
3387 passed. (See testcase execute/20021111-1.c for an example). So we also
3388 check the args_info.nregs field as well. The problem with this field is
3389 that it makes no allowances for arguments that are passed to the
3390 function but which are not used. Hence we could miss an opportunity
3391 when a function has an unused argument in r3. But it is better to be
3392 safe than to be sorry. */
3393 if (! cfun->machine->uses_anonymous_args
3394 && current_function_args_size >= 0
3395 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3396 && cfun->args_info.nregs < 4)
3397 return LAST_ARG_REGNUM;
3399 /* Otherwise look for a call-saved register that is going to be pushed. */
3400 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3401 if (pushed_regs_mask & (1 << reg))
3402 return reg;
3404 /* Something went wrong - thumb_compute_save_reg_mask()
3405 should have arranged for a suitable register to be pushed. */
3406 gcc_unreachable ();
3409 static GTY(()) int pic_labelno;
3411 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3412 low register. */
3414 void
3415 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3417 #ifndef AOF_ASSEMBLER
3418 rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx;
3419 rtx global_offset_table;
3421 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3422 return;
3424 gcc_assert (flag_pic);
3426 /* We use an UNSPEC rather than a LABEL_REF because this label never appears
3427 in the code stream. */
3429 labelno = GEN_INT (pic_labelno++);
3430 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3431 l1 = gen_rtx_CONST (VOIDmode, l1);
3433 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3434 /* On the ARM the PC register contains 'dot + 8' at the time of the
3435 addition, on the Thumb it is 'dot + 4'. */
3436 pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
3437 if (GOT_PCREL)
3438 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3439 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3440 else
3441 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3443 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3445 if (TARGET_ARM)
3447 emit_insn (gen_pic_load_addr_arm (cfun->machine->pic_reg, pic_rtx));
3448 emit_insn (gen_pic_add_dot_plus_eight (cfun->machine->pic_reg,
3449 cfun->machine->pic_reg, labelno));
3451 else
3453 if (arm_pic_register != INVALID_REGNUM
3454 && REGNO (cfun->machine->pic_reg) > LAST_LO_REGNUM)
3456 /* We will have pushed the pic register, so we should always be
3457 able to find a work register. */
3458 pic_tmp = gen_rtx_REG (SImode,
3459 thumb_find_work_register (saved_regs));
3460 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3461 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3463 else
3464 emit_insn (gen_pic_load_addr_thumb (cfun->machine->pic_reg, pic_rtx));
3465 emit_insn (gen_pic_add_dot_plus_four (cfun->machine->pic_reg,
3466 cfun->machine->pic_reg, labelno));
3469 /* Need to emit this whether or not we obey regdecls,
3470 since setjmp/longjmp can cause life info to screw up. */
3471 emit_insn (gen_rtx_USE (VOIDmode, cfun->machine->pic_reg));
3472 #endif /* AOF_ASSEMBLER */
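/* The net effect in ELF assembly output is along these lines
   (illustrative, ARM state):

	ldr	rPIC, .Lgotoff
   .LPIC0:
	add	rPIC, pc, rPIC
	...
   .Lgotoff:
	.word	_GLOBAL_OFFSET_TABLE_ - (.LPIC0 + 8)

   where the "+ 8" is the 'dot + 8' PC bias noted above ('+ 4' for
   Thumb, hence pic_add_dot_plus_four).  */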
3476 /* Return nonzero if X is valid as an ARM state addressing register. */
3477 static int
3478 arm_address_register_rtx_p (rtx x, int strict_p)
3480 int regno;
3482 if (GET_CODE (x) != REG)
3483 return 0;
3485 regno = REGNO (x);
3487 if (strict_p)
3488 return ARM_REGNO_OK_FOR_BASE_P (regno);
3490 return (regno <= LAST_ARM_REGNUM
3491 || regno >= FIRST_PSEUDO_REGISTER
3492 || regno == FRAME_POINTER_REGNUM
3493 || regno == ARG_POINTER_REGNUM);
3496 /* Return TRUE if this rtx is the difference of a symbol and a label,
3497 and will reduce to a PC-relative relocation in the object file.
3498 Expressions like this can be left alone when generating PIC, rather
3499 than forced through the GOT. */
3500 static int
3501 pcrel_constant_p (rtx x)
3503 if (GET_CODE (x) == MINUS)
3504 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
3506 return FALSE;
3509 /* Return nonzero if X is a valid ARM state address operand. */
3511 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3512 int strict_p)
3514 bool use_ldrd;
3515 enum rtx_code code = GET_CODE (x);
3517 if (arm_address_register_rtx_p (x, strict_p))
3518 return 1;
3520 use_ldrd = (TARGET_LDRD
3521 && (mode == DImode
3522 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3524 if (code == POST_INC || code == PRE_DEC
3525 || ((code == PRE_INC || code == POST_DEC)
3526 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3527 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3529 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3530 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3531 && GET_CODE (XEXP (x, 1)) == PLUS
3532 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3534 rtx addend = XEXP (XEXP (x, 1), 1);
3536 /* Don't allow ldrd post increment by register because it's hard
3537 to fix up invalid register choices. */
3538 if (use_ldrd
3539 && GET_CODE (x) == POST_MODIFY
3540 && GET_CODE (addend) == REG)
3541 return 0;
3543 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3544 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3547 /* After reload, constants split into minipools will have addresses
3548 from a LABEL_REF. */
3549 else if (reload_completed
3550 && (code == LABEL_REF
3551 || (code == CONST
3552 && GET_CODE (XEXP (x, 0)) == PLUS
3553 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3554 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3555 return 1;
3557 else if (mode == TImode)
3558 return 0;
3560 else if (code == PLUS)
3562 rtx xop0 = XEXP (x, 0);
3563 rtx xop1 = XEXP (x, 1);
3565 return ((arm_address_register_rtx_p (xop0, strict_p)
3566 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3567 || (arm_address_register_rtx_p (xop1, strict_p)
3568 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3571 #if 0
3572 /* Reload currently can't handle MINUS, so disable this for now */
3573 else if (GET_CODE (x) == MINUS)
3575 rtx xop0 = XEXP (x, 0);
3576 rtx xop1 = XEXP (x, 1);
3578 return (arm_address_register_rtx_p (xop0, strict_p)
3579 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3581 #endif
3583 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3584 && code == SYMBOL_REF
3585 && CONSTANT_POOL_ADDRESS_P (x)
3586 && ! (flag_pic
3587 && symbol_mentioned_p (get_pool_constant (x))
3588 && ! pcrel_constant_p (get_pool_constant (x))))
3589 return 1;
3591 return 0;
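/* Examples of ARM state addresses accepted above (SImode):

	[r0]			base register
	[r0, #4]		base plus immediate index
	[r0, r1]		base plus register index
	[r0, r1, lsl #2]	base plus scaled register
	[r0], #4		post-increment

   TImode addresses and out-of-range offsets are rejected.  */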
3594 /* Return nonzero if INDEX is valid for an address index operand in
3595 ARM state. */
3596 static int
3597 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3598 int strict_p)
3600 HOST_WIDE_INT range;
3601 enum rtx_code code = GET_CODE (index);
3603 /* Standard coprocessor addressing modes. */
3604 if (TARGET_HARD_FLOAT
3605 && (TARGET_FPA || TARGET_MAVERICK)
3606 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3607 || (TARGET_MAVERICK && mode == DImode)))
3608 return (code == CONST_INT && INTVAL (index) < 1024
3609 && INTVAL (index) > -1024
3610 && (INTVAL (index) & 3) == 0);
3612 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3613 return (code == CONST_INT
3614 && INTVAL (index) < 1024
3615 && INTVAL (index) > -1024
3616 && (INTVAL (index) & 3) == 0);
3618 if (arm_address_register_rtx_p (index, strict_p)
3619 && (GET_MODE_SIZE (mode) <= 4))
3620 return 1;
3622 if (mode == DImode || mode == DFmode)
3624 if (code == CONST_INT)
3626 HOST_WIDE_INT val = INTVAL (index);
3628 if (TARGET_LDRD)
3629 return val > -256 && val < 256;
3630 else
3631 return val > -4096 && val < 4092;
3634 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3637 if (GET_MODE_SIZE (mode) <= 4
3638 && ! (arm_arch4
3639 && (mode == HImode
3640 || (mode == QImode && outer == SIGN_EXTEND))))
3642 if (code == MULT)
3644 rtx xiop0 = XEXP (index, 0);
3645 rtx xiop1 = XEXP (index, 1);
3647 return ((arm_address_register_rtx_p (xiop0, strict_p)
3648 && power_of_two_operand (xiop1, SImode))
3649 || (arm_address_register_rtx_p (xiop1, strict_p)
3650 && power_of_two_operand (xiop0, SImode)));
3652 else if (code == LSHIFTRT || code == ASHIFTRT
3653 || code == ASHIFT || code == ROTATERT)
3655 rtx op = XEXP (index, 1);
3657 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3658 && GET_CODE (op) == CONST_INT
3659 && INTVAL (op) > 0
3660 && INTVAL (op) <= 31);
3664 /* For ARM v4 we may be doing a sign-extend operation during the
3665 load. */
3666 if (arm_arch4)
3668 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3669 range = 256;
3670 else
3671 range = 4096;
3673 else
3674 range = (mode == HImode) ? 4095 : 4096;
3676 return (code == CONST_INT
3677 && INTVAL (index) < range
3678 && INTVAL (index) > -range);
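/* Concretely: word and unsigned byte accesses take immediate offsets
   in (-4096, 4096), e.g. "ldr r0, [r1, #4095]", while on ARMv4 the
   halfword and sign-extending loads use the miscellaneous load/store
   encoding and only reach (-256, 256), e.g. "ldrh r0, [r1, #255]".  */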
3681 /* Return nonzero if X is valid as a Thumb state base register. */
3682 static int
3683 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3685 int regno;
3687 if (GET_CODE (x) != REG)
3688 return 0;
3690 regno = REGNO (x);
3692 if (strict_p)
3693 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3695 return (regno <= LAST_LO_REGNUM
3696 || regno > LAST_VIRTUAL_REGISTER
3697 || regno == FRAME_POINTER_REGNUM
3698 || (GET_MODE_SIZE (mode) >= 4
3699 && (regno == STACK_POINTER_REGNUM
3700 || regno >= FIRST_PSEUDO_REGISTER
3701 || x == hard_frame_pointer_rtx
3702 || x == arg_pointer_rtx)));
3705 /* Return nonzero if x is a legitimate index register. This is the case
3706 for any base register that can access a QImode object. */
3707 inline static int
3708 thumb_index_register_rtx_p (rtx x, int strict_p)
3710 return thumb_base_register_rtx_p (x, QImode, strict_p);
3713 /* Return nonzero if x is a legitimate Thumb-state address.
3715 The AP may be eliminated to either the SP or the FP, so we use the
3716 least common denominator, e.g. SImode, and offsets from 0 to 64.
3718 ??? Verify whether the above is the right approach.
3720 ??? Also, the FP may be eliminated to the SP, so perhaps that
3721 needs special handling also.
3723 ??? Look at how the mips16 port solves this problem. It probably uses
3724 better ways to solve some of these problems.
3726 Although it is not incorrect, we don't accept QImode and HImode
3727 addresses based on the frame pointer or arg pointer until the
3728 reload pass starts. This is so that eliminating such addresses
3729 into stack based ones won't produce impossible code. */
3731 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3733 /* ??? Not clear if this is right. Experiment. */
3734 if (GET_MODE_SIZE (mode) < 4
3735 && !(reload_in_progress || reload_completed)
3736 && (reg_mentioned_p (frame_pointer_rtx, x)
3737 || reg_mentioned_p (arg_pointer_rtx, x)
3738 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3739 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3740 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3741 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3742 return 0;
3744 /* Accept any base register. SP only in SImode or larger. */
3745 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3746 return 1;
3748 /* This is PC relative data before arm_reorg runs. */
3749 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3750 && GET_CODE (x) == SYMBOL_REF
3751 && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
3752 return 1;
3754 /* This is PC relative data after arm_reorg runs. */
3755 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3756 && (GET_CODE (x) == LABEL_REF
3757 || (GET_CODE (x) == CONST
3758 && GET_CODE (XEXP (x, 0)) == PLUS
3759 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3760 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3761 return 1;
3763 /* Post-inc indexing only supported for SImode and larger. */
3764 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3765 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3766 return 1;
3768 else if (GET_CODE (x) == PLUS)
3770 /* REG+REG address can be any two index registers. */
3771 /* We disallow FRAME+REG addressing since we know that FRAME
3772 will be replaced with STACK, and SP relative addressing only
3773 permits SP+OFFSET. */
3774 if (GET_MODE_SIZE (mode) <= 4
3775 && XEXP (x, 0) != frame_pointer_rtx
3776 && XEXP (x, 1) != frame_pointer_rtx
3777 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3778 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3779 return 1;
3781 /* REG+const has 5-7 bit offset for non-SP registers. */
3782 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3783 || XEXP (x, 0) == arg_pointer_rtx)
3784 && GET_CODE (XEXP (x, 1)) == CONST_INT
3785 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3786 return 1;
3788 /* REG+const has 10 bit offset for SP, but only SImode and
3789 larger are supported. */
3790 /* ??? Should probably check for DI/DFmode overflow here
3791 just like GO_IF_LEGITIMATE_OFFSET does. */
3792 else if (GET_CODE (XEXP (x, 0)) == REG
3793 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3794 && GET_MODE_SIZE (mode) >= 4
3795 && GET_CODE (XEXP (x, 1)) == CONST_INT
3796 && INTVAL (XEXP (x, 1)) >= 0
3797 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3798 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3799 return 1;
3801 else if (GET_CODE (XEXP (x, 0)) == REG
3802 && (REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3803 || REGNO (XEXP (x, 0)) == ARG_POINTER_REGNUM
3804 || (REGNO (XEXP (x, 0)) >= FIRST_VIRTUAL_REGISTER
3805 && REGNO (XEXP (x, 0)) <= LAST_VIRTUAL_REGISTER))
3806 && GET_MODE_SIZE (mode) >= 4
3807 && GET_CODE (XEXP (x, 1)) == CONST_INT
3808 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3809 return 1;
3812 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3813 && GET_MODE_SIZE (mode) == 4
3814 && GET_CODE (x) == SYMBOL_REF
3815 && CONSTANT_POOL_ADDRESS_P (x)
3816 && ! (flag_pic
3817 && symbol_mentioned_p (get_pool_constant (x))
3818 && ! pcrel_constant_p (get_pool_constant (x))))
3819 return 1;
3821 return 0;
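/* Some illustrative addresses (a sketch; register choices are arbitrary):
   the cases above accept, for SImode,
       [r0, r1]        (two index registers)
       [r3, #64]       (REG+const within thumb_legitimate_offset_p)
       [sp, #1020]     (SP+const, word-aligned, SImode or larger)
   but reject, e.g., [sp, r1], and QImode/HImode offsets out of range.  */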
3824 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3825 instruction of mode MODE. */
3827 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3829 switch (GET_MODE_SIZE (mode))
3831 case 1:
3832 return val >= 0 && val < 32;
3834 case 2:
3835 return val >= 0 && val < 64 && (val & 1) == 0;
3837 default:
3838 return (val >= 0
3839 && (val + GET_MODE_SIZE (mode)) <= 128
3840 && (val & 3) == 0);
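/* Concretely: QImode allows offsets 0..31, HImode even offsets 0..62,
   and SImode or larger word-aligned offsets whose whole access fits
   below 128 bytes, e.g. 0..124 for a 4-byte load.  */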
3844 /* Build the SYMBOL_REF for __tls_get_addr. */
3846 static GTY(()) rtx tls_get_addr_libfunc;
3848 static rtx
3849 get_tls_get_addr (void)
3851 if (!tls_get_addr_libfunc)
3852 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
3853 return tls_get_addr_libfunc;
3856 static rtx
3857 arm_load_tp (rtx target)
3859 if (!target)
3860 target = gen_reg_rtx (SImode);
3862 if (TARGET_HARD_TP)
3864 /* Can return in any reg. */
3865 emit_insn (gen_load_tp_hard (target));
3867 else
3869 /* Always returned in r0. Immediately copy the result into a pseudo,
3870 otherwise other uses of r0 (e.g. setting up function arguments) may
3871 clobber the value. */
3873 rtx tmp;
3875 emit_insn (gen_load_tp_soft ());
3877 tmp = gen_rtx_REG (SImode, 0);
3878 emit_move_insn (target, tmp);
3880 return target;
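/* A sketch of the code this produces (the actual templates live in
   arm.md): with a hardware thread register,
       mrc p15, 0, <target>, c13, c0, 3
   otherwise a helper call, conventionally
       bl __aeabi_read_tp   @ result in r0
   followed by the r0-to-TARGET copy emitted above.  */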
3883 static rtx
3884 load_tls_operand (rtx x, rtx reg)
3886 rtx tmp;
3888 if (reg == NULL_RTX)
3889 reg = gen_reg_rtx (SImode);
3891 tmp = gen_rtx_CONST (SImode, x);
3893 emit_move_insn (reg, tmp);
3895 return reg;
3898 static rtx
3899 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
3901 rtx insns, label, labelno, sum;
3903 start_sequence ();
3905 labelno = GEN_INT (pic_labelno++);
3906 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3907 label = gen_rtx_CONST (VOIDmode, label);
3909 sum = gen_rtx_UNSPEC (Pmode,
3910 gen_rtvec (4, x, GEN_INT (reloc), label,
3911 GEN_INT (TARGET_ARM ? 8 : 4)),
3912 UNSPEC_TLS);
3913 reg = load_tls_operand (sum, reg);
3915 if (TARGET_ARM)
3916 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
3917 else
3918 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3920 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
3921 Pmode, 1, reg, Pmode);
3923 insns = get_insns ();
3924 end_sequence ();
3926 return insns;
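/* For TLS_GD32 in ARM state the sequence built here is roughly (a
   sketch, ignoring scheduling and exact label spelling):
       ldr   r0, .Lpool      @ x(tlsgd), biased by .Lpic + 8
   .Lpic:
       add   r0, pc, r0
       bl    __tls_get_addr
   with the returned address in r0; the 8 (4 for Thumb) matches the
   pc-read offset encoded in the UNSPEC_TLS above.  */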
3930 legitimize_tls_address (rtx x, rtx reg)
3932 rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
3933 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
3935 switch (model)
3937 case TLS_MODEL_GLOBAL_DYNAMIC:
3938 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
3939 dest = gen_reg_rtx (Pmode);
3940 emit_libcall_block (insns, dest, ret, x);
3941 return dest;
3943 case TLS_MODEL_LOCAL_DYNAMIC:
3944 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
3946 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3947 share the LDM result with other LD model accesses. */
3948 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
3949 UNSPEC_TLS);
3950 dest = gen_reg_rtx (Pmode);
3951 emit_libcall_block (insns, dest, ret, eqv);
3953 /* Load the addend. */
3954 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
3955 UNSPEC_TLS);
3956 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
3957 return gen_rtx_PLUS (Pmode, dest, addend);
3959 case TLS_MODEL_INITIAL_EXEC:
3960 labelno = GEN_INT (pic_labelno++);
3961 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3962 label = gen_rtx_CONST (VOIDmode, label);
3963 sum = gen_rtx_UNSPEC (Pmode,
3964 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
3965 GEN_INT (TARGET_ARM ? 8 : 4)),
3966 UNSPEC_TLS);
3967 reg = load_tls_operand (sum, reg);
3969 if (TARGET_ARM)
3970 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
3971 else
3973 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3974 emit_move_insn (reg, gen_const_mem (SImode, reg));
3977 tp = arm_load_tp (NULL_RTX);
3979 return gen_rtx_PLUS (Pmode, tp, reg);
3981 case TLS_MODEL_LOCAL_EXEC:
3982 tp = arm_load_tp (NULL_RTX);
3984 reg = gen_rtx_UNSPEC (Pmode,
3985 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
3986 UNSPEC_TLS);
3987 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
3989 return gen_rtx_PLUS (Pmode, tp, reg);
3991 default:
3992 abort ();
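/* E.g. the local-exec case above amounts to (sketch):
       mrc p15, 0, rT, c13, c0, 3   @ arm_load_tp
       ldr rO, .Lpool               @ x(tpoff)
       add rD, rT, rO
   i.e. the thread pointer plus a link-time-constant offset.  */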
3996 /* Try machine-dependent ways of modifying an illegitimate address
3997 to be legitimate. If we find one, return the new, valid address. */
3999 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4001 if (arm_tls_symbol_p (x))
4002 return legitimize_tls_address (x, NULL_RTX);
4004 if (GET_CODE (x) == PLUS)
4006 rtx xop0 = XEXP (x, 0);
4007 rtx xop1 = XEXP (x, 1);
4009 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
4010 xop0 = force_reg (SImode, xop0);
4012 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
4013 xop1 = force_reg (SImode, xop1);
4015 if (ARM_BASE_REGISTER_RTX_P (xop0)
4016 && GET_CODE (xop1) == CONST_INT)
4018 HOST_WIDE_INT n, low_n;
4019 rtx base_reg, val;
4020 n = INTVAL (xop1);
4022 /* VFP addressing modes actually allow greater offsets, but for
4023 now we just stick with the lowest common denominator. */
4024 if (mode == DImode
4025 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
4027 low_n = n & 0x0f;
4028 n &= ~0x0f;
4029 if (low_n > 4)
4031 n += 16;
4032 low_n -= 16;
4035 else
4037 low_n = ((mode) == TImode ? 0
4038 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
4039 n -= low_n;
4042 base_reg = gen_reg_rtx (SImode);
4043 val = force_operand (plus_constant (xop0, n), NULL_RTX);
4044 emit_move_insn (base_reg, val);
4045 x = plus_constant (base_reg, low_n);
4047 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4048 x = gen_rtx_PLUS (SImode, xop0, xop1);
4051 /* XXX We don't allow MINUS any more -- see comment in
4052 arm_legitimate_address_p (). */
4053 else if (GET_CODE (x) == MINUS)
4055 rtx xop0 = XEXP (x, 0);
4056 rtx xop1 = XEXP (x, 1);
4058 if (CONSTANT_P (xop0))
4059 xop0 = force_reg (SImode, xop0);
4061 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
4062 xop1 = force_reg (SImode, xop1);
4064 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4065 x = gen_rtx_MINUS (SImode, xop0, xop1);
4068 /* Make sure to take full advantage of the pre-indexed addressing mode
4069 with absolute addresses, which often allows the base register to be
4070 factorized across multiple adjacent memory references, and might
4071 even allow the minipool to be avoided entirely. */
4072 else if (GET_CODE (x) == CONST_INT && optimize > 0)
4074 unsigned int bits;
4075 HOST_WIDE_INT mask, base, index;
4076 rtx base_reg;
4078 /* ldr and ldrb can use a 12 bit index, ldrsb and the rest can only
4079 use an 8 bit index. So let's use a 12 bit index for SImode only and
4080 hope that arm_gen_constant will enable ldrb to use more bits. */
4081 bits = (mode == SImode) ? 12 : 8;
4082 mask = (1 << bits) - 1;
4083 base = INTVAL (x) & ~mask;
4084 index = INTVAL (x) & mask;
4085 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
4087 /* It'll most probably be more efficient to generate the base
4088 with more bits set and use a negative index instead. */
4089 base |= mask;
4090 index -= mask;
4092 base_reg = force_reg (SImode, GEN_INT (base));
4093 x = plus_constant (base_reg, index);
4096 if (flag_pic)
4098 /* We need to find and carefully transform any SYMBOL and LABEL
4099 references; so go back to the original address expression. */
4100 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4102 if (new_x != orig_x)
4103 x = new_x;
4106 return x;
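/* Worked example of the constant-splitting case above: an SImode load
   from absolute address 0x12345678 uses bits == 12, so base is
   0x12345000 and index is 0x678; bit_count (0x12345000) == 7 is not
   greater than (32 - 12) / 2 == 10, so the base stays positive and we
   get, in effect,
       ldr rB, =0x12345000
       ldr rD, [rB, #0x678]
   where rB can then be shared by neighbouring accesses.  */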
4110 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4111 to be legitimate. If we find one, return the new, valid address. */
4113 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4115 if (arm_tls_symbol_p (x))
4116 return legitimize_tls_address (x, NULL_RTX);
4118 if (GET_CODE (x) == PLUS
4119 && GET_CODE (XEXP (x, 1)) == CONST_INT
4120 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
4121 || INTVAL (XEXP (x, 1)) < 0))
4123 rtx xop0 = XEXP (x, 0);
4124 rtx xop1 = XEXP (x, 1);
4125 HOST_WIDE_INT offset = INTVAL (xop1);
4127 /* Try and fold the offset into a biasing of the base register and
4128 then offsetting that. Don't do this when optimizing for space
4129 since it can cause too many CSEs. */
4130 if (optimize_size && offset >= 0
4131 && offset < 256 + 31 * GET_MODE_SIZE (mode))
4133 HOST_WIDE_INT delta;
4135 if (offset >= 256)
4136 delta = offset - (256 - GET_MODE_SIZE (mode));
4137 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
4138 delta = 31 * GET_MODE_SIZE (mode);
4139 else
4140 delta = offset & (~31 * GET_MODE_SIZE (mode));
4142 xop0 = force_operand (plus_constant (xop0, offset - delta),
4143 NULL_RTX);
4144 x = plus_constant (xop0, delta);
4146 else if (offset < 0 && offset > -256)
4147 /* Small negative offsets are best done with a subtract before the
4148 dereference, forcing these into a register normally takes two
4149 instructions. */
4150 x = force_operand (x, NULL_RTX);
4151 else
4153 /* For the remaining cases, force the constant into a register. */
4154 xop1 = force_reg (SImode, xop1);
4155 x = gen_rtx_PLUS (SImode, xop0, xop1);
4158 else if (GET_CODE (x) == PLUS
4159 && s_register_operand (XEXP (x, 1), SImode)
4160 && !s_register_operand (XEXP (x, 0), SImode))
4162 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
4164 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
4167 if (flag_pic)
4169 /* We need to find and carefully transform any SYMBOL and LABEL
4170 references; so go back to the original address expression. */
4171 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4173 if (new_x != orig_x)
4174 x = new_x;
4177 return x;
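/* Worked example of the -Os folding above: an SImode load at r0 + 300
   (the ldr offset only reaches 124) has delta = 300 - 252 = 48, giving
   in effect
       mov r1, r0
       add r1, #252
       ldr r2, [r1, #48]
   rather than materializing #300 and using reg+reg addressing.  */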
4181 thumb_legitimize_reload_address (rtx *x_p,
4182 enum machine_mode mode,
4183 int opnum, int type,
4184 int ind_levels ATTRIBUTE_UNUSED)
4186 rtx x = *x_p;
4188 if (GET_CODE (x) == PLUS
4189 && GET_MODE_SIZE (mode) < 4
4190 && REG_P (XEXP (x, 0))
4191 && XEXP (x, 0) == stack_pointer_rtx
4192 && GET_CODE (XEXP (x, 1)) == CONST_INT
4193 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4195 rtx orig_x = x;
4197 x = copy_rtx (x);
4198 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4199 Pmode, VOIDmode, 0, 0, opnum, type);
4200 return x;
4203 /* If both registers are hi-regs, then it's better to reload the
4204 entire expression rather than each register individually. That
4205 only requires one reload register rather than two. */
4206 if (GET_CODE (x) == PLUS
4207 && REG_P (XEXP (x, 0))
4208 && REG_P (XEXP (x, 1))
4209 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
4210 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
4212 rtx orig_x = x;
4214 x = copy_rtx (x);
4215 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4216 Pmode, VOIDmode, 0, 0, opnum, type);
4217 return x;
4220 return NULL;
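/* E.g. for a QImode access at (plus (reg r8) (reg r9)), where neither
   hi reg can serve as a base, the whole sum is reloaded as one unit,
   so only a single low register is needed for the address.  */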
4223 /* Test for various thread-local symbols. */
4225 /* Return TRUE if X is a thread-local symbol. */
4227 static bool
4228 arm_tls_symbol_p (rtx x)
4230 if (! TARGET_HAVE_TLS)
4231 return false;
4233 if (GET_CODE (x) != SYMBOL_REF)
4234 return false;
4236 return SYMBOL_REF_TLS_MODEL (x) != 0;
4239 /* Helper for arm_tls_referenced_p. */
4241 static int
4242 arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4244 if (GET_CODE (*x) == SYMBOL_REF)
4245 return SYMBOL_REF_TLS_MODEL (*x) != 0;
4247 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
4248 TLS offsets, not real symbol references. */
4249 if (GET_CODE (*x) == UNSPEC
4250 && XINT (*x, 1) == UNSPEC_TLS)
4251 return -1;
4253 return 0;
4256 /* Return TRUE if X contains any TLS symbol references. */
4258 bool
4259 arm_tls_referenced_p (rtx x)
4261 if (! TARGET_HAVE_TLS)
4262 return false;
4264 return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
4267 #define REG_OR_SUBREG_REG(X) \
4268 (GET_CODE (X) == REG \
4269 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
4271 #define REG_OR_SUBREG_RTX(X) \
4272 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
4274 #ifndef COSTS_N_INSNS
4275 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
4276 #endif
4277 static inline int
4278 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
4280 enum machine_mode mode = GET_MODE (x);
4282 switch (code)
4284 case ASHIFT:
4285 case ASHIFTRT:
4286 case LSHIFTRT:
4287 case ROTATERT:
4288 case PLUS:
4289 case MINUS:
4290 case COMPARE:
4291 case NEG:
4292 case NOT:
4293 return COSTS_N_INSNS (1);
4295 case MULT:
4296 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4298 int cycles = 0;
4299 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
4301 while (i)
4303 i >>= 2;
4304 cycles++;
4306 return COSTS_N_INSNS (2) + cycles;
4308 return COSTS_N_INSNS (1) + 16;
4310 case SET:
4311 return (COSTS_N_INSNS (1)
4312 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
4313 + (GET_CODE (SET_DEST (x)) == MEM)));
4315 case CONST_INT:
4316 if (outer == SET)
4318 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
4319 return 0;
4320 if (thumb_shiftable_const (INTVAL (x)))
4321 return COSTS_N_INSNS (2);
4322 return COSTS_N_INSNS (3);
4324 else if ((outer == PLUS || outer == COMPARE)
4325 && INTVAL (x) < 256 && INTVAL (x) > -256)
4326 return 0;
4327 else if (outer == AND
4328 && INTVAL (x) < 256 && INTVAL (x) >= -256)
4329 return COSTS_N_INSNS (1);
4330 else if (outer == ASHIFT || outer == ASHIFTRT
4331 || outer == LSHIFTRT)
4332 return 0;
4333 return COSTS_N_INSNS (2);
4335 case CONST:
4336 case CONST_DOUBLE:
4337 case LABEL_REF:
4338 case SYMBOL_REF:
4339 return COSTS_N_INSNS (3);
4341 case UDIV:
4342 case UMOD:
4343 case DIV:
4344 case MOD:
4345 return 100;
4347 case TRUNCATE:
4348 return 99;
4350 case AND:
4351 case XOR:
4352 case IOR:
4353 /* XXX guess. */
4354 return 8;
4356 case MEM:
4357 /* XXX another guess. */
4358 /* Memory costs quite a lot for the first word, but subsequent words
4359 load at the equivalent of a single insn each. */
4360 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4361 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4362 ? 4 : 0));
4364 case IF_THEN_ELSE:
4365 /* XXX a guess. */
4366 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4367 return 14;
4368 return 2;
4370 case ZERO_EXTEND:
4371 /* XXX still guessing. */
4372 switch (GET_MODE (XEXP (x, 0)))
4374 case QImode:
4375 return (1 + (mode == DImode ? 4 : 0)
4376 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4378 case HImode:
4379 return (4 + (mode == DImode ? 4 : 0)
4380 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4382 case SImode:
4383 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4385 default:
4386 return 99;
4389 default:
4390 return 99;
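/* Worked example of the MULT case above: a multiply by 100 (0x64)
   shifts two bits per step, 0x64 -> 0x19 -> 0x6 -> 0x1 -> 0, so
   cycles == 4 and the cost is COSTS_N_INSNS (2) + 4.  */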
4395 /* Worker routine for arm_rtx_costs. */
4396 static inline int
4397 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4399 enum machine_mode mode = GET_MODE (x);
4400 enum rtx_code subcode;
4401 int extra_cost;
4403 switch (code)
4405 case MEM:
4406 /* Memory costs quite a lot for the first word, but subsequent words
4407 load at the equivalent of a single insn each. */
4408 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4409 + (GET_CODE (x) == SYMBOL_REF
4410 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4412 case DIV:
4413 case MOD:
4414 case UDIV:
4415 case UMOD:
4416 return optimize_size ? COSTS_N_INSNS (2) : 100;
4418 case ROTATE:
4419 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4420 return 4;
4421 /* Fall through */
4422 case ROTATERT:
4423 if (mode != SImode)
4424 return 8;
4425 /* Fall through */
4426 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4427 if (mode == DImode)
4428 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4429 + ((GET_CODE (XEXP (x, 0)) == REG
4430 || (GET_CODE (XEXP (x, 0)) == SUBREG
4431 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4432 ? 0 : 8));
4433 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4434 || (GET_CODE (XEXP (x, 0)) == SUBREG
4435 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4436 ? 0 : 4)
4437 + ((GET_CODE (XEXP (x, 1)) == REG
4438 || (GET_CODE (XEXP (x, 1)) == SUBREG
4439 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4440 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4441 ? 0 : 4));
4443 case MINUS:
4444 if (mode == DImode)
4445 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4446 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4447 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4448 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4449 ? 0 : 8));
4451 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4452 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4453 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4454 && arm_const_double_rtx (XEXP (x, 1))))
4455 ? 0 : 8)
4456 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4457 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4458 && arm_const_double_rtx (XEXP (x, 0))))
4459 ? 0 : 8));
4461 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4462 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4463 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4464 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4465 || subcode == ASHIFTRT || subcode == LSHIFTRT
4466 || subcode == ROTATE || subcode == ROTATERT
4467 || (subcode == MULT
4468 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4469 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4470 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4471 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4472 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4473 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4474 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4475 return 1;
4476 /* Fall through */
4478 case PLUS:
4479 if (GET_CODE (XEXP (x, 0)) == MULT)
4481 extra_cost = rtx_cost (XEXP (x, 0), code);
4482 if (!REG_OR_SUBREG_REG (XEXP (x, 1)))
4483 extra_cost += 4 * ARM_NUM_REGS (mode);
4484 return extra_cost;
4487 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4488 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4489 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4490 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4491 && arm_const_double_rtx (XEXP (x, 1))))
4492 ? 0 : 8));
4494 /* Fall through */
4495 case AND: case XOR: case IOR:
4496 extra_cost = 0;
4498 /* Normally the frame registers will be split into reg+const during
4499 reload, so it is a bad idea to combine them with other instructions,
4500 since then they might not be moved outside of loops. As a compromise
4501 we allow integration with ops that have a constant as their second
4502 operand. */
4503 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4504 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4505 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4506 || (REG_OR_SUBREG_REG (XEXP (x, 1))
4507 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
4508 extra_cost = 4;
4510 if (mode == DImode)
4511 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4512 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4513 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4514 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4515 ? 0 : 8));
4517 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4518 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4519 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4520 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4521 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4522 ? 0 : 4));
4524 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4525 return (1 + extra_cost
4526 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4527 || subcode == LSHIFTRT || subcode == ASHIFTRT
4528 || subcode == ROTATE || subcode == ROTATERT
4529 || (subcode == MULT
4530 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4531 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4532 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4533 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4534 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4535 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4536 ? 0 : 4));
4538 return 8;
4540 case MULT:
4541 /* This should have been handled by the CPU specific routines. */
4542 gcc_unreachable ();
4544 case TRUNCATE:
4545 if (arm_arch3m && mode == SImode
4546 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4547 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4548 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4549 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4550 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4551 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4552 return 8;
4553 return 99;
4555 case NEG:
4556 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4557 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4558 /* Fall through */
4559 case NOT:
4560 if (mode == DImode)
4561 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4563 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4565 case IF_THEN_ELSE:
4566 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4567 return 14;
4568 return 2;
4570 case COMPARE:
4571 return 1;
4573 case ABS:
4574 return 4 + (mode == DImode ? 4 : 0);
4576 case SIGN_EXTEND:
4577 if (GET_MODE (XEXP (x, 0)) == QImode)
4578 return (4 + (mode == DImode ? 4 : 0)
4579 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4580 /* Fall through */
4581 case ZERO_EXTEND:
4582 switch (GET_MODE (XEXP (x, 0)))
4584 case QImode:
4585 return (1 + (mode == DImode ? 4 : 0)
4586 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4588 case HImode:
4589 return (4 + (mode == DImode ? 4 : 0)
4590 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4592 case SImode:
4593 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4595 case V8QImode:
4596 case V4HImode:
4597 case V2SImode:
4598 case V4QImode:
4599 case V2HImode:
4600 return 1;
4602 default:
4603 gcc_unreachable ();
4605 gcc_unreachable ();
4607 case CONST_INT:
4608 if (const_ok_for_arm (INTVAL (x)))
4609 return outer == SET ? 2 : -1;
4610 else if (outer == AND
4611 && const_ok_for_arm (~INTVAL (x)))
4612 return -1;
4613 else if ((outer == COMPARE
4614 || outer == PLUS || outer == MINUS)
4615 && const_ok_for_arm (-INTVAL (x)))
4616 return -1;
4617 else
4618 return 5;
4620 case CONST:
4621 case LABEL_REF:
4622 case SYMBOL_REF:
4623 return 6;
4625 case CONST_DOUBLE:
4626 if (arm_const_double_rtx (x))
4627 return outer == SET ? 2 : -1;
4628 else if ((outer == COMPARE || outer == PLUS)
4629 && neg_const_double_rtx_ok_for_fpa (x))
4630 return -1;
4631 return 7;
4633 default:
4634 return 99;
4638 /* RTX costs when optimizing for size. */
4639 static bool
4640 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4642 enum machine_mode mode = GET_MODE (x);
4644 if (TARGET_THUMB)
4646 /* XXX TBD. For now, use the standard costs. */
4647 *total = thumb_rtx_costs (x, code, outer_code);
4648 return true;
4651 switch (code)
4653 case MEM:
4654 /* A memory access costs 1 insn if the mode is small, or the address is
4655 a single register; otherwise it costs one insn per word. */
4656 if (REG_P (XEXP (x, 0)))
4657 *total = COSTS_N_INSNS (1);
4658 else
4659 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4660 return true;
4662 case DIV:
4663 case MOD:
4664 case UDIV:
4665 case UMOD:
4666 /* Needs a libcall, so it costs about this. */
4667 *total = COSTS_N_INSNS (2);
4668 return false;
4670 case ROTATE:
4671 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4673 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4674 return true;
4676 /* Fall through */
4677 case ROTATERT:
4678 case ASHIFT:
4679 case LSHIFTRT:
4680 case ASHIFTRT:
4681 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4683 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4684 return true;
4686 else if (mode == SImode)
4688 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4689 /* Slightly disparage register shifts, but not by much. */
4690 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4691 *total += 1 + rtx_cost (XEXP (x, 1), code);
4692 return true;
4695 /* Needs a libcall. */
4696 *total = COSTS_N_INSNS (2);
4697 return false;
4699 case MINUS:
4700 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4702 *total = COSTS_N_INSNS (1);
4703 return false;
4706 if (mode == SImode)
4708 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4709 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4711 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4712 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4713 || subcode1 == ROTATE || subcode1 == ROTATERT
4714 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4715 || subcode1 == ASHIFTRT)
4717 /* It's just the cost of the two operands. */
4718 *total = 0;
4719 return false;
4722 *total = COSTS_N_INSNS (1);
4723 return false;
4726 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4727 return false;
4729 case PLUS:
4730 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4732 *total = COSTS_N_INSNS (1);
4733 return false;
4736 /* Fall through */
4737 case AND: case XOR: case IOR:
4738 if (mode == SImode)
4740 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4742 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4743 || subcode == LSHIFTRT || subcode == ASHIFTRT
4744 || (code == AND && subcode == NOT))
4746 /* It's just the cost of the two operands. */
4747 *total = 0;
4748 return false;
4752 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4753 return false;
4755 case MULT:
4756 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4757 return false;
4759 case NEG:
4760 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4761 *total = COSTS_N_INSNS (1);
4762 /* Fall through */
4763 case NOT:
4764 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4766 return false;
4768 case IF_THEN_ELSE:
4769 *total = 0;
4770 return false;
4772 case COMPARE:
4773 if (cc_register (XEXP (x, 0), VOIDmode))
4774 *total = 0;
4775 else
4776 *total = COSTS_N_INSNS (1);
4777 return false;
4779 case ABS:
4780 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4781 *total = COSTS_N_INSNS (1);
4782 else
4783 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4784 return false;
4786 case SIGN_EXTEND:
4787 *total = 0;
4788 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4790 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4791 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4793 if (mode == DImode)
4794 *total += COSTS_N_INSNS (1);
4795 return false;
4797 case ZERO_EXTEND:
4798 *total = 0;
4799 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4801 switch (GET_MODE (XEXP (x, 0)))
4803 case QImode:
4804 *total += COSTS_N_INSNS (1);
4805 break;
4807 case HImode:
4808 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4810 case SImode:
4811 break;
4813 default:
4814 *total += COSTS_N_INSNS (2);
4818 if (mode == DImode)
4819 *total += COSTS_N_INSNS (1);
4821 return false;
4823 case CONST_INT:
4824 if (const_ok_for_arm (INTVAL (x)))
4825 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4826 else if (const_ok_for_arm (~INTVAL (x)))
4827 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4828 else if (const_ok_for_arm (-INTVAL (x)))
4830 if (outer_code == COMPARE || outer_code == PLUS
4831 || outer_code == MINUS)
4832 *total = 0;
4833 else
4834 *total = COSTS_N_INSNS (1);
4836 else
4837 *total = COSTS_N_INSNS (2);
4838 return true;
4840 case CONST:
4841 case LABEL_REF:
4842 case SYMBOL_REF:
4843 *total = COSTS_N_INSNS (2);
4844 return true;
4846 case CONST_DOUBLE:
4847 *total = COSTS_N_INSNS (4);
4848 return true;
4850 default:
4851 if (mode != VOIDmode)
4852 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4853 else
4854 *total = COSTS_N_INSNS (4); /* Who knows? */
4855 return false;
4859 /* RTX costs for cores with a slow MUL implementation. */
4861 static bool
4862 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4864 enum machine_mode mode = GET_MODE (x);
4866 if (TARGET_THUMB)
4868 *total = thumb_rtx_costs (x, code, outer_code);
4869 return true;
4872 switch (code)
4874 case MULT:
4875 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4876 || mode == DImode)
4878 *total = 30;
4879 return true;
4882 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4884 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4885 & (unsigned HOST_WIDE_INT) 0xffffffff);
4886 int cost, const_ok = const_ok_for_arm (i);
4887 int j, booth_unit_size;
4889 /* Tune as appropriate. */
4890 cost = const_ok ? 4 : 8;
4891 booth_unit_size = 2;
4892 for (j = 0; i && j < 32; j += booth_unit_size)
4894 i >>= booth_unit_size;
4895 cost += 2;
4898 *total = cost;
4899 return true;
4902 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4903 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4904 return true;
4906 default:
4907 *total = arm_rtx_costs_1 (x, code, outer_code);
4908 return true;
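/* Worked example of the Booth loop above: a multiply by 0x55 retires
   two bits per iteration, 0x55 -> 0x15 -> 0x5 -> 0x1 -> 0, so the
   total is 4 (0x55 is const_ok_for_arm) + 4 * 2 == 12.  */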
4913 /* RTX cost for cores with a fast multiply unit (M variants). */
4915 static bool
4916 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4918 enum machine_mode mode = GET_MODE (x);
4920 if (TARGET_THUMB)
4922 *total = thumb_rtx_costs (x, code, outer_code);
4923 return true;
4926 switch (code)
4928 case MULT:
4929 /* There is no point basing this on the tuning, since it is always the
4930 fast variant if it exists at all. */
4931 if (mode == DImode
4932 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4933 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4934 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4936 *total = 8;
4937 return true;
4941 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4942 || mode == DImode)
4944 *total = 30;
4945 return true;
4948 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4950 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4951 & (unsigned HOST_WIDE_INT) 0xffffffff);
4952 int cost, const_ok = const_ok_for_arm (i);
4953 int j, booth_unit_size;
4955 /* Tune as appropriate. */
4956 cost = const_ok ? 4 : 8;
4957 booth_unit_size = 8;
4958 for (j = 0; i && j < 32; j += booth_unit_size)
4960 i >>= booth_unit_size;
4961 cost += 2;
4964 *total = cost;
4965 return true;
4968 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4969 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4970 return true;
4972 default:
4973 *total = arm_rtx_costs_1 (x, code, outer_code);
4974 return true;
4979 /* RTX cost for XScale CPUs. */
4981 static bool
4982 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4984 enum machine_mode mode = GET_MODE (x);
4986 if (TARGET_THUMB)
4988 *total = thumb_rtx_costs (x, code, outer_code);
4989 return true;
4992 switch (code)
4994 case MULT:
4995 /* There is no point basing this on the tuning, since it is always the
4996 fast variant if it exists at all. */
4997 if (mode == DImode
4998 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4999 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5000 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5002 *total = 8;
5003 return true;
5007 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5008 || mode == DImode)
5010 *total = 30;
5011 return true;
5014 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5016 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5017 & (unsigned HOST_WIDE_INT) 0xffffffff);
5018 int cost, const_ok = const_ok_for_arm (i);
5019 unsigned HOST_WIDE_INT masked_const;
5021 /* The cost will be related to two insns.
5022 First a load of the constant (MOV or LDR), then a multiply. */
5023 cost = 2;
5024 if (! const_ok)
5025 cost += 1; /* LDR is probably more expensive because
5026 of longer result latency. */
5027 masked_const = i & 0xffff8000;
5028 if (masked_const != 0 && masked_const != 0xffff8000)
5030 masked_const = i & 0xf8000000;
5031 if (masked_const == 0 || masked_const == 0xf8000000)
5032 cost += 1;
5033 else
5034 cost += 2;
5036 *total = cost;
5037 return true;
5040 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5041 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5042 return true;
5044 case COMPARE:
5045 /* A COMPARE of a MULT is slow on XScale; the muls instruction
5046 will stall until the multiplication is complete. */
5047 if (GET_CODE (XEXP (x, 0)) == MULT)
5048 *total = 4 + rtx_cost (XEXP (x, 0), code);
5049 else
5050 *total = arm_rtx_costs_1 (x, code, outer_code);
5051 return true;
5053 default:
5054 *total = arm_rtx_costs_1 (x, code, outer_code);
5055 return true;
5060 /* RTX costs for 9e (and later) cores. */
5062 static bool
5063 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
5065 enum machine_mode mode = GET_MODE (x);
5066 int nonreg_cost;
5067 int cost;
5069 if (TARGET_THUMB)
5071 switch (code)
5073 case MULT:
5074 *total = COSTS_N_INSNS (3);
5075 return true;
5077 default:
5078 *total = thumb_rtx_costs (x, code, outer_code);
5079 return true;
5083 switch (code)
5085 case MULT:
5086 /* There is no point basing this on the tuning, since it is always the
5087 fast variant if it exists at all. */
5088 if (mode == DImode
5089 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5090 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5091 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5093 *total = 3;
5094 return true;
5098 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5100 *total = 30;
5101 return true;
5103 if (mode == DImode)
5105 cost = 7;
5106 nonreg_cost = 8;
5108 else
5110 cost = 2;
5111 nonreg_cost = 4;
5115 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
5116 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
5117 return true;
5119 default:
5120 *total = arm_rtx_costs_1 (x, code, outer_code);
5121 return true;
5124 /* All address computations that can be done are free, but rtx cost returns
5125 the same for practically all of them. So we weight the different types
5126 of address here in the order (most preferred first):
5127 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
5128 static inline int
5129 arm_arm_address_cost (rtx x)
5131 enum rtx_code c = GET_CODE (x);
5133 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
5134 return 0;
5135 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
5136 return 10;
5138 if (c == PLUS || c == MINUS)
5140 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5141 return 2;
5143 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
5144 return 3;
5146 return 4;
5149 return 6;
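/* For instance: a POST_INC address costs 0, [r1, r2, lsl #2] costs 3,
   [r1, #8] costs 4, a plain register costs 6 and a constant-pool
   SYMBOL_REF costs 10, matching the preference order above.  */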
5152 static inline int
5153 arm_thumb_address_cost (rtx x)
5155 enum rtx_code c = GET_CODE (x);
5157 if (c == REG)
5158 return 1;
5159 if (c == PLUS
5160 && GET_CODE (XEXP (x, 0)) == REG
5161 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5162 return 1;
5164 return 2;
5167 static int
5168 arm_address_cost (rtx x)
5170 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
5173 static int
5174 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
5176 rtx i_pat, d_pat;
5178 /* Some true dependencies can have a higher cost depending
5179 on precisely how certain input operands are used. */
5180 if (arm_tune_xscale
5181 && REG_NOTE_KIND (link) == 0
5182 && recog_memoized (insn) >= 0
5183 && recog_memoized (dep) >= 0)
5185 int shift_opnum = get_attr_shift (insn);
5186 enum attr_type attr_type = get_attr_type (dep);
5188 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5189 operand for INSN. If we have a shifted input operand and the
5190 instruction we depend on is another ALU instruction, then we may
5191 have to account for an additional stall. */
5192 if (shift_opnum != 0
5193 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
5195 rtx shifted_operand;
5196 int opno;
5198 /* Get the shifted operand. */
5199 extract_insn (insn);
5200 shifted_operand = recog_data.operand[shift_opnum];
5202 /* Iterate over all the operands in DEP. If we write an operand
5203 that overlaps with SHIFTED_OPERAND, then we have to increase the
5204 cost of this dependency. */
5205 extract_insn (dep);
5206 preprocess_constraints ();
5207 for (opno = 0; opno < recog_data.n_operands; opno++)
5209 /* We can ignore strict inputs. */
5210 if (recog_data.operand_type[opno] == OP_IN)
5211 continue;
5213 if (reg_overlap_mentioned_p (recog_data.operand[opno],
5214 shifted_operand))
5215 return 2;
5220 /* XXX This is not strictly true for the FPA. */
5221 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
5222 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
5223 return 0;
5225 /* Call insns don't incur a stall, even if they follow a load. */
5226 if (REG_NOTE_KIND (link) == 0
5227 && GET_CODE (insn) == CALL_INSN)
5228 return 1;
5230 if ((i_pat = single_set (insn)) != NULL
5231 && GET_CODE (SET_SRC (i_pat)) == MEM
5232 && (d_pat = single_set (dep)) != NULL
5233 && GET_CODE (SET_DEST (d_pat)) == MEM)
5235 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
5236 /* This is a load after a store; there is no conflict if the load reads
5237 from a cached area. Assume that loads from the stack and from the
5238 constant pool are cached, and that others will miss. This is a
5239 hack. */
5241 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
5242 || reg_mentioned_p (stack_pointer_rtx, src_mem)
5243 || reg_mentioned_p (frame_pointer_rtx, src_mem)
5244 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
5245 return 1;
5248 return cost;
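/* Example of the XScale shifted-operand bump above (a sketch):
       add r0, r1, r2, lsl #1   @ DEP, an alu_shift insn writing r0
       add r3, r4, r0, lsl #2   @ INSN, r0 feeds the shifter
   here INSN sees r0 one cycle later than a plain ALU consumer would,
   so the dependency cost is raised to 2.  */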
5251 static int fp_consts_inited = 0;
5253 /* Only zero is valid for VFP. Other values are also valid for FPA. */
5254 static const char * const strings_fp[8] =
5256 "0", "1", "2", "3",
5257 "4", "5", "0.5", "10"
5260 static REAL_VALUE_TYPE values_fp[8];
5262 static void
5263 init_fp_table (void)
5265 int i;
5266 REAL_VALUE_TYPE r;
5268 if (TARGET_VFP)
5269 fp_consts_inited = 1;
5270 else
5271 fp_consts_inited = 8;
5273 for (i = 0; i < fp_consts_inited; i++)
5275 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
5276 values_fp[i] = r;
5280 /* Return TRUE if rtx X is a valid immediate FP constant. */
5282 arm_const_double_rtx (rtx x)
5284 REAL_VALUE_TYPE r;
5285 int i;
5287 if (!fp_consts_inited)
5288 init_fp_table ();
5290 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5291 if (REAL_VALUE_MINUS_ZERO (r))
5292 return 0;
5294 for (i = 0; i < fp_consts_inited; i++)
5295 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5296 return 1;
5298 return 0;
5301 /* Return TRUE if rtx X is a valid immediate FPA constant. */
5303 neg_const_double_rtx_ok_for_fpa (rtx x)
5305 REAL_VALUE_TYPE r;
5306 int i;
5308 if (!fp_consts_inited)
5309 init_fp_table ();
5311 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5312 r = REAL_VALUE_NEGATE (r);
5313 if (REAL_VALUE_MINUS_ZERO (r))
5314 return 0;
5316 for (i = 0; i < 8; i++)
5317 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5318 return 1;
5320 return 0;
5323 /* Predicates for `match_operand' and `match_operator'. */
5325 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5327 cirrus_memory_offset (rtx op)
5329 /* Reject eliminable registers. */
5330 if (! (reload_in_progress || reload_completed)
5331 && ( reg_mentioned_p (frame_pointer_rtx, op)
5332 || reg_mentioned_p (arg_pointer_rtx, op)
5333 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5334 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5335 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5336 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5337 return 0;
5339 if (GET_CODE (op) == MEM)
5341 rtx ind;
5343 ind = XEXP (op, 0);
5345 /* Match: (mem (reg)). */
5346 if (GET_CODE (ind) == REG)
5347 return 1;
5349 /* Match:
5350 (mem (plus (reg)
5351 (const))). */
5352 if (GET_CODE (ind) == PLUS
5353 && GET_CODE (XEXP (ind, 0)) == REG
5354 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5355 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
5356 return 1;
5359 return 0;
5362 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5363 WB is true if writeback address modes are allowed. */
5366 arm_coproc_mem_operand (rtx op, bool wb)
5368 rtx ind;
5370 /* Reject eliminable registers. */
5371 if (! (reload_in_progress || reload_completed)
5372 && ( reg_mentioned_p (frame_pointer_rtx, op)
5373 || reg_mentioned_p (arg_pointer_rtx, op)
5374 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5375 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5376 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5377 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5378 return FALSE;
5380 /* Constants are converted into offsets from labels. */
5381 if (GET_CODE (op) != MEM)
5382 return FALSE;
5384 ind = XEXP (op, 0);
5386 if (reload_completed
5387 && (GET_CODE (ind) == LABEL_REF
5388 || (GET_CODE (ind) == CONST
5389 && GET_CODE (XEXP (ind, 0)) == PLUS
5390 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5391 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5392 return TRUE;
5394 /* Match: (mem (reg)). */
5395 if (GET_CODE (ind) == REG)
5396 return arm_address_register_rtx_p (ind, 0);
5398 /* Auto-increment addressing modes. */
5399 if (wb
5400 && (GET_CODE (ind) == PRE_INC
5401 || GET_CODE (ind) == POST_INC
5402 || GET_CODE (ind) == PRE_DEC
5403 || GET_CODE (ind) == POST_DEC))
5404 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5406 if (wb
5407 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5408 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5409 && GET_CODE (XEXP (ind, 1)) == PLUS
5410 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5411 ind = XEXP (ind, 1);
5413 /* Match:
5414 (plus (reg)
5415 (const)). */
5416 if (GET_CODE (ind) == PLUS
5417 && GET_CODE (XEXP (ind, 0)) == REG
5418 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5419 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5420 && INTVAL (XEXP (ind, 1)) > -1024
5421 && INTVAL (XEXP (ind, 1)) < 1024
5422 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5423 return TRUE;
5425 return FALSE;
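/* So the accepted forms are those a VFP/coprocessor load or store can
   encode directly, e.g. (illustrative)
       [rn]                        (mem (reg))
       [rn, #off]                  off in -1020..1020, a multiple of 4
   plus the auto-increment and modify forms when WB is true.  */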
5428 /* Return true if X is a register that will be eliminated later on. */
5430 arm_eliminable_register (rtx x)
5432 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5433 || REGNO (x) == ARG_POINTER_REGNUM
5434 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5435 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5438 /* Return GENERAL_REGS if a scratch register required to reload x to/from
5439 VFP registers. Otherwise return NO_REGS. */
5441 enum reg_class
5442 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
5444 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
5445 return NO_REGS;
5447 return GENERAL_REGS;
5450 /* Values which must be returned in the most-significant end of the return
5451 register. */
5453 static bool
5454 arm_return_in_msb (tree valtype)
5456 return (TARGET_AAPCS_BASED
5457 && BYTES_BIG_ENDIAN
5458 && (AGGREGATE_TYPE_P (valtype)
5459 || TREE_CODE (valtype) == COMPLEX_TYPE));
5462 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5463 Used by the Cirrus Maverick code, which has to work around
5464 a hardware bug triggered by such instructions. */
5465 static bool
5466 arm_memory_load_p (rtx insn)
5468 rtx body, lhs, rhs;
5470 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5471 return false;
5473 body = PATTERN (insn);
5475 if (GET_CODE (body) != SET)
5476 return false;
5478 lhs = XEXP (body, 0);
5479 rhs = XEXP (body, 1);
5481 lhs = REG_OR_SUBREG_RTX (lhs);
5483 /* If the destination is not a general purpose
5484 register we do not have to worry. */
5485 if (GET_CODE (lhs) != REG
5486 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5487 return false;
5489 /* As well as loads from memory we also have to react
5490 to loads of invalid constants which will be turned
5491 into loads from the minipool. */
5492 return (GET_CODE (rhs) == MEM
5493 || GET_CODE (rhs) == SYMBOL_REF
5494 || note_invalid_constants (insn, -1, false));
5497 /* Return TRUE if INSN is a Cirrus instruction. */
5498 static bool
5499 arm_cirrus_insn_p (rtx insn)
5501 enum attr_cirrus attr;
5503 /* get_attr cannot accept USE or CLOBBER. */
5504 if (!insn
5505 || GET_CODE (insn) != INSN
5506 || GET_CODE (PATTERN (insn)) == USE
5507 || GET_CODE (PATTERN (insn)) == CLOBBER)
5508 return 0;
5510 attr = get_attr_cirrus (insn);
5512 return attr != CIRRUS_NOT;
5515 /* Cirrus reorg for invalid instruction combinations. */
5516 static void
5517 cirrus_reorg (rtx first)
5519 enum attr_cirrus attr;
5520 rtx body = PATTERN (first);
5521 rtx t;
5522 int nops;
5524 /* Any branch must be followed by 2 non Cirrus instructions. */
5525 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5527 nops = 0;
5528 t = next_nonnote_insn (first);
5530 if (arm_cirrus_insn_p (t))
5531 ++ nops;
5533 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5534 ++ nops;
5536 while (nops --)
5537 emit_insn_after (gen_nop (), first);
5539 return;
5542 /* (float (blah)) is in parallel with a clobber. */
5543 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5544 body = XVECEXP (body, 0, 0);
5546 if (GET_CODE (body) == SET)
5548 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5550 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5551 be followed by a non Cirrus insn. */
5552 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5554 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5555 emit_insn_after (gen_nop (), first);
5557 return;
5559 else if (arm_memory_load_p (first))
5561 unsigned int arm_regno;
5563 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5564 ldr/cfmv64hr combination where the Rd field is the same
5565 in both instructions must be split with a non Cirrus
5566 insn. Example:
5568 ldr r0, blah
5570 cfmvsr mvf0, r0. */
5572 /* Get Arm register number for ldr insn. */
5573 if (GET_CODE (lhs) == REG)
5574 arm_regno = REGNO (lhs);
5575 else
5577 gcc_assert (GET_CODE (rhs) == REG);
5578 arm_regno = REGNO (rhs);
5581 /* Next insn. */
5582 first = next_nonnote_insn (first);
5584 if (! arm_cirrus_insn_p (first))
5585 return;
5587 body = PATTERN (first);
5589 /* (float (blah)) is in parallel with a clobber. */
5590 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5591 body = XVECEXP (body, 0, 0);
5593 if (GET_CODE (body) == FLOAT)
5594 body = XEXP (body, 0);
5596 if (get_attr_cirrus (first) == CIRRUS_MOVE
5597 && GET_CODE (XEXP (body, 1)) == REG
5598 && arm_regno == REGNO (XEXP (body, 1)))
5599 emit_insn_after (gen_nop (), first);
5601 return;
5605 /* get_attr cannot accept USE or CLOBBER. */
5606 if (!first
5607 || GET_CODE (first) != INSN
5608 || GET_CODE (PATTERN (first)) == USE
5609 || GET_CODE (PATTERN (first)) == CLOBBER)
5610 return;
5612 attr = get_attr_cirrus (first);
5614 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5615 must be followed by a non-coprocessor instruction. */
5616 if (attr == CIRRUS_COMPARE)
5618 nops = 0;
5620 t = next_nonnote_insn (first);
5622 if (arm_cirrus_insn_p (t))
5623 ++ nops;
5625 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5626 ++ nops;
5628 while (nops --)
5629 emit_insn_after (gen_nop (), first);
5631 return;
5635 /* Return TRUE if X references a SYMBOL_REF. */
5637 symbol_mentioned_p (rtx x)
5639 const char * fmt;
5640 int i;
5642 if (GET_CODE (x) == SYMBOL_REF)
5643 return 1;
5645 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
5646 are constant offsets, not symbols. */
5647 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5648 return 0;
5650 fmt = GET_RTX_FORMAT (GET_CODE (x));
5652 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5654 if (fmt[i] == 'E')
5656 int j;
5658 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5659 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5660 return 1;
5662 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5663 return 1;
5666 return 0;
5669 /* Return TRUE if X references a LABEL_REF. */
5671 label_mentioned_p (rtx x)
5673 const char * fmt;
5674 int i;
5676 if (GET_CODE (x) == LABEL_REF)
5677 return 1;
5679 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
5680 instruction, but they are constant offsets, not symbols. */
5681 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5682 return 0;
5684 fmt = GET_RTX_FORMAT (GET_CODE (x));
5685 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5687 if (fmt[i] == 'E')
5689 int j;
5691 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5692 if (label_mentioned_p (XVECEXP (x, i, j)))
5693 return 1;
5695 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5696 return 1;
5699 return 0;
5703 tls_mentioned_p (rtx x)
5705 switch (GET_CODE (x))
5707 case CONST:
5708 return tls_mentioned_p (XEXP (x, 0));
5710 case UNSPEC:
5711 if (XINT (x, 1) == UNSPEC_TLS)
5712 return 1;
5714 default:
5715 return 0;
5719 /* Must not copy a SET whose source operand is PC-relative. */
5721 static bool
5722 arm_cannot_copy_insn_p (rtx insn)
5724 rtx pat = PATTERN (insn);
5726 if (GET_CODE (pat) == PARALLEL
5727 && GET_CODE (XVECEXP (pat, 0, 0)) == SET)
5729 rtx rhs = SET_SRC (XVECEXP (pat, 0, 0));
5731 if (GET_CODE (rhs) == UNSPEC
5732 && XINT (rhs, 1) == UNSPEC_PIC_BASE)
5733 return TRUE;
5735 if (GET_CODE (rhs) == MEM
5736 && GET_CODE (XEXP (rhs, 0)) == UNSPEC
5737 && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
5738 return TRUE;
5741 return FALSE;
5744 enum rtx_code
5745 minmax_code (rtx x)
5747 enum rtx_code code = GET_CODE (x);
5749 switch (code)
5751 case SMAX:
5752 return GE;
5753 case SMIN:
5754 return LE;
5755 case UMIN:
5756 return LEU;
5757 case UMAX:
5758 return GEU;
5759 default:
5760 gcc_unreachable ();
5764 /* Return 1 if memory locations are adjacent. */
5766 adjacent_mem_locations (rtx a, rtx b)
5768 /* We don't guarantee to preserve the order of these memory refs. */
5769 if (volatile_refs_p (a) || volatile_refs_p (b))
5770 return 0;
5772 if ((GET_CODE (XEXP (a, 0)) == REG
5773 || (GET_CODE (XEXP (a, 0)) == PLUS
5774 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5775 && (GET_CODE (XEXP (b, 0)) == REG
5776 || (GET_CODE (XEXP (b, 0)) == PLUS
5777 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5779 HOST_WIDE_INT val0 = 0, val1 = 0;
5780 rtx reg0, reg1;
5781 int val_diff;
5783 if (GET_CODE (XEXP (a, 0)) == PLUS)
5785 reg0 = XEXP (XEXP (a, 0), 0);
5786 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5788 else
5789 reg0 = XEXP (a, 0);
5791 if (GET_CODE (XEXP (b, 0)) == PLUS)
5793 reg1 = XEXP (XEXP (b, 0), 0);
5794 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5796 else
5797 reg1 = XEXP (b, 0);
5799 /* Don't accept any offset that will require multiple
5800 instructions to handle, since this would cause the
5801 arith_adjacentmem pattern to output an overlong sequence. */
5802 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5803 return 0;
5805 /* Don't allow an eliminable register: register elimination can make
5806 the offset too large. */
5807 if (arm_eliminable_register (reg0))
5808 return 0;
5810 val_diff = val1 - val0;
5812 if (arm_ld_sched)
5814 /* If the target has load delay slots, then there's no benefit
5815 to using an ldm instruction unless the offset is zero and
5816 we are optimizing for size. */
5817 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5818 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5819 && (val_diff == 4 || val_diff == -4));
5822 return ((REGNO (reg0) == REGNO (reg1))
5823 && (val_diff == 4 || val_diff == -4));
5826 return 0;
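/* For example [r4] and [r4, #4] are adjacent (val_diff == 4), whereas
   accesses off a not-yet-eliminated frame or arg pointer are rejected
   because elimination could push the offsets out of range.  */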
5830 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5831 HOST_WIDE_INT *load_offset)
5833 int unsorted_regs[4];
5834 HOST_WIDE_INT unsorted_offsets[4];
5835 int order[4];
5836 int base_reg = -1;
5837 int i;
5839 /* Can only handle 2, 3, or 4 insns at present,
5840 though could be easily extended if required. */
5841 gcc_assert (nops >= 2 && nops <= 4);
5843 /* Loop over the operands and check that the memory references are
5844 suitable (i.e. immediate offsets from the same base register). At
5845 the same time, extract the target register, and the memory
5846 offsets. */
5847 for (i = 0; i < nops; i++)
5849 rtx reg;
5850 rtx offset;
5852 /* Convert a subreg of a mem into the mem itself. */
5853 if (GET_CODE (operands[nops + i]) == SUBREG)
5854 operands[nops + i] = alter_subreg (operands + (nops + i));
5856 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5858 /* Don't reorder volatile memory references; it doesn't seem worth
5859 looking for the case where the order is ok anyway. */
5860 if (MEM_VOLATILE_P (operands[nops + i]))
5861 return 0;
5863 offset = const0_rtx;
5865 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5866 || (GET_CODE (reg) == SUBREG
5867 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5868 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5869 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5870 == REG)
5871 || (GET_CODE (reg) == SUBREG
5872 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5873 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5874 == CONST_INT)))
5876 if (i == 0)
5878 base_reg = REGNO (reg);
5879 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5880 ? REGNO (operands[i])
5881 : REGNO (SUBREG_REG (operands[i])));
5882 order[0] = 0;
5884 else
5886 if (base_reg != (int) REGNO (reg))
5887 /* Not addressed from the same base register. */
5888 return 0;
5890 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5891 ? REGNO (operands[i])
5892 : REGNO (SUBREG_REG (operands[i])));
5893 if (unsorted_regs[i] < unsorted_regs[order[0]])
5894 order[0] = i;
5897 /* If it isn't an integer register, or if it overwrites the
5898 base register but isn't the last insn in the list, then
5899 we can't do this. */
5900 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5901 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5902 return 0;
5904 unsorted_offsets[i] = INTVAL (offset);
5906 else
5907 /* Not a suitable memory address. */
5908 return 0;
5911 /* All the useful information has now been extracted from the
5912 operands into unsorted_regs and unsorted_offsets; additionally,
5913 order[0] has been set to the lowest numbered register in the
5914 list. Sort the registers into order, and check that the memory
5915 offsets are ascending and adjacent. */
5917 for (i = 1; i < nops; i++)
5919 int j;
5921 order[i] = order[i - 1];
5922 for (j = 0; j < nops; j++)
5923 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5924 && (order[i] == order[i - 1]
5925 || unsorted_regs[j] < unsorted_regs[order[i]]))
5926 order[i] = j;
5928 /* Have we found a suitable register? If not, one must be used more
5929 than once. */
5930 if (order[i] == order[i - 1])
5931 return 0;
5933 /* Is the memory address adjacent and ascending? */
5934 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5935 return 0;
5938 if (base)
5940 *base = base_reg;
5942 for (i = 0; i < nops; i++)
5943 regs[i] = unsorted_regs[order[i]];
5945 *load_offset = unsorted_offsets[order[0]];
5948 if (unsorted_offsets[order[0]] == 0)
5949 return 1; /* ldmia */
5951 if (unsorted_offsets[order[0]] == 4)
5952 return 2; /* ldmib */
5954 if (unsorted_offsets[order[nops - 1]] == 0)
5955 return 3; /* ldmda */
5957 if (unsorted_offsets[order[nops - 1]] == -4)
5958 return 4; /* ldmdb */
5960 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5961 if the offset isn't small enough. The reason 2 ldrs are faster
5962 is that these ARMs are able to do more than one cache access
5963 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5964 whilst the ARM8 has a double bandwidth cache. This means that
5965 these cores can do both an instruction fetch and a data fetch in
5966 a single cycle, so the trick of calculating the address into a
5967 scratch register (one of the result regs) and then doing a load
5968 multiple actually becomes slower (and no smaller in code size).
5969 That is the transformation
5971 ldr rd1, [rbase + offset]
5972 ldr rd2, [rbase + offset + 4]
5974 to
5976 add rd1, rbase, offset
5977 ldmia rd1, {rd1, rd2}
5979 produces worse code -- '3 cycles + any stalls on rd2' instead of
5980 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5981 access per cycle, the first sequence could never complete in less
5982 than 6 cycles, whereas the ldm sequence would only take 5 and
5983 would make better use of sequential accesses if not hitting the
5984 cache.
5986 We cheat here and test 'arm_ld_sched' which we currently know to
5987 only be true for the ARM8, ARM9 and StrongARM. If this ever
5988 changes, then the test below needs to be reworked. */
5989 if (nops == 2 && arm_ld_sched)
5990 return 0;
5992 /* Can't do it without setting up the offset, only do this if it takes
5993 no more than one insn. */
5994 return (const_ok_for_arm (unsorted_offsets[order[0]])
5995 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
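/* A worked example of the return values above (register numbers are
   illustrative, not from the original source): for the group

	ldr	r4, [r0, #4]	ldr	r5, [r0, #8]
	ldr	r6, [r0, #12]	ldr	r7, [r0, #16]

   the offsets are adjacent and ascending and the lowest is 4, so the
   result is 2 and the group becomes "ldmib r0, {r4-r7}".  If instead
   the lowest offset were 64, none of cases 1-4 would apply, but 64 is
   a valid ARM immediate, so the result would be 5: add the offset
   into a scratch register (the first destination) and use ldmia.  */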
5998 const char *
5999 emit_ldm_seq (rtx *operands, int nops)
6001 int regs[4];
6002 int base_reg;
6003 HOST_WIDE_INT offset;
6004 char buf[100];
6005 int i;
6007 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6009 case 1:
6010 strcpy (buf, "ldm%?ia\t");
6011 break;
6013 case 2:
6014 strcpy (buf, "ldm%?ib\t");
6015 break;
6017 case 3:
6018 strcpy (buf, "ldm%?da\t");
6019 break;
6021 case 4:
6022 strcpy (buf, "ldm%?db\t");
6023 break;
6025 case 5:
6026 if (offset >= 0)
6027 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6028 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6029 (long) offset);
6030 else
6031 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6032 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6033 (long) -offset);
6034 output_asm_insn (buf, operands);
6035 base_reg = regs[0];
6036 strcpy (buf, "ldm%?ia\t");
6037 break;
6039 default:
6040 gcc_unreachable ();
6043 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6044 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6046 for (i = 1; i < nops; i++)
6047 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6048 reg_names[regs[i]]);
6050 strcat (buf, "}\t%@ phole ldm");
6052 output_asm_insn (buf, operands);
6053 return "";
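/* For example (illustrative registers), a case-5 sequence with
   base_reg r0, regs = {r4, r5} and offset 8 is emitted as

	add	r4, r0, #8
	ldmia	r4, {r4, r5}	@ phole ldm

   reusing the first destination register as the rewritten base.  */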
6056 int
6057 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6058 HOST_WIDE_INT * load_offset)
6060 int unsorted_regs[4];
6061 HOST_WIDE_INT unsorted_offsets[4];
6062 int order[4];
6063 int base_reg = -1;
6064 int i;
6066 /* Can only handle 2, 3, or 4 insns at present, though could be easily
6067 extended if required. */
6068 gcc_assert (nops >= 2 && nops <= 4);
6070 /* Loop over the operands and check that the memory references are
6071 suitable (i.e. immediate offsets from the same base register). At
6072 the same time, extract the target register, and the memory
6073 offsets. */
6074 for (i = 0; i < nops; i++)
6076 rtx reg;
6077 rtx offset;
6079 /* Convert a subreg of a mem into the mem itself. */
6080 if (GET_CODE (operands[nops + i]) == SUBREG)
6081 operands[nops + i] = alter_subreg (operands + (nops + i));
6083 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6085 /* Don't reorder volatile memory references; it doesn't seem worth
6086 looking for the case where the order is ok anyway. */
6087 if (MEM_VOLATILE_P (operands[nops + i]))
6088 return 0;
6090 offset = const0_rtx;
6092 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6093 || (GET_CODE (reg) == SUBREG
6094 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6095 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6096 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6097 == REG)
6098 || (GET_CODE (reg) == SUBREG
6099 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6100 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6101 == CONST_INT)))
6103 if (i == 0)
6105 base_reg = REGNO (reg);
6106 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6107 ? REGNO (operands[i])
6108 : REGNO (SUBREG_REG (operands[i])));
6109 order[0] = 0;
6111 else
6113 if (base_reg != (int) REGNO (reg))
6114 /* Not addressed from the same base register. */
6115 return 0;
6117 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6118 ? REGNO (operands[i])
6119 : REGNO (SUBREG_REG (operands[i])));
6120 if (unsorted_regs[i] < unsorted_regs[order[0]])
6121 order[0] = i;
6124 /* If it isn't an integer register, then we can't do this. */
6125 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
6126 return 0;
6128 unsorted_offsets[i] = INTVAL (offset);
6130 else
6131 /* Not a suitable memory address. */
6132 return 0;
6135 /* All the useful information has now been extracted from the
6136 operands into unsorted_regs and unsorted_offsets; additionally,
6137 order[0] has been set to the lowest numbered register in the
6138 list. Sort the registers into order, and check that the memory
6139 offsets are ascending and adjacent. */
6141 for (i = 1; i < nops; i++)
6143 int j;
6145 order[i] = order[i - 1];
6146 for (j = 0; j < nops; j++)
6147 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6148 && (order[i] == order[i - 1]
6149 || unsorted_regs[j] < unsorted_regs[order[i]]))
6150 order[i] = j;
6152 /* Have we found a suitable register? If not, one must be used more
6153 than once. */
6154 if (order[i] == order[i - 1])
6155 return 0;
6157 /* Are the memory offsets adjacent and ascending? */
6158 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6159 return 0;
6162 if (base)
6164 *base = base_reg;
6166 for (i = 0; i < nops; i++)
6167 regs[i] = unsorted_regs[order[i]];
6169 *load_offset = unsorted_offsets[order[0]];
6172 if (unsorted_offsets[order[0]] == 0)
6173 return 1; /* stmia */
6175 if (unsorted_offsets[order[0]] == 4)
6176 return 2; /* stmib */
6178 if (unsorted_offsets[order[nops - 1]] == 0)
6179 return 3; /* stmda */
6181 if (unsorted_offsets[order[nops - 1]] == -4)
6182 return 4; /* stmdb */
6184 return 0;
6187 const char *
6188 emit_stm_seq (rtx *operands, int nops)
6190 int regs[4];
6191 int base_reg;
6192 HOST_WIDE_INT offset;
6193 char buf[100];
6194 int i;
6196 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6198 case 1:
6199 strcpy (buf, "stm%?ia\t");
6200 break;
6202 case 2:
6203 strcpy (buf, "stm%?ib\t");
6204 break;
6206 case 3:
6207 strcpy (buf, "stm%?da\t");
6208 break;
6210 case 4:
6211 strcpy (buf, "stm%?db\t");
6212 break;
6214 default:
6215 gcc_unreachable ();
6218 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6219 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6221 for (i = 1; i < nops; i++)
6222 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6223 reg_names[regs[i]]);
6225 strcat (buf, "}\t%@ phole stm");
6227 output_asm_insn (buf, operands);
6228 return "";
6231 /* Routines for use in generating RTL. */
6233 rtx
6234 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
6235 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6237 HOST_WIDE_INT offset = *offsetp;
6238 int i = 0, j;
6239 rtx result;
6240 int sign = up ? 1 : -1;
6241 rtx mem, addr;
6243 /* XScale has load-store double instructions, but they have stricter
6244 alignment requirements than load-store multiple, so we cannot
6245 use them.
6247 For XScale, ldm requires 2 + NREGS cycles to complete and blocks
6248 the pipeline until completion.
6250 NREGS CYCLES
6251 1 3
6252 2 4
6253 3 5
6254 4 6
6256 An ldr instruction takes 1-3 cycles, but does not block the
6257 pipeline.
6259 NREGS CYCLES
6260 1 1-3
6261 2 2-6
6262 3 3-9
6263 4 4-12
6265 In the best case, ldr will always win. However, the more ldr instructions
6266 we issue, the less likely we are to be able to schedule them well.
6267 Using ldr instructions also increases code size.
6269 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6270 for counts of 3 or 4 regs. */
6271 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6273 rtx seq;
6275 start_sequence ();
6277 for (i = 0; i < count; i++)
6279 addr = plus_constant (from, i * 4 * sign);
6280 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6281 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
6282 offset += 4 * sign;
6285 if (write_back)
6287 emit_move_insn (from, plus_constant (from, count * 4 * sign));
6288 *offsetp = offset;
6291 seq = get_insns ();
6292 end_sequence ();
6294 return seq;
6297 result = gen_rtx_PARALLEL (VOIDmode,
6298 rtvec_alloc (count + (write_back ? 1 : 0)));
6299 if (write_back)
6301 XVECEXP (result, 0, 0)
6302 = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
6303 i = 1;
6304 count++;
6307 for (j = 0; i < count; i++, j++)
6309 addr = plus_constant (from, j * 4 * sign);
6310 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6311 XVECEXP (result, 0, i)
6312 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
6313 offset += 4 * sign;
6316 if (write_back)
6317 *offsetp = offset;
6319 return result;
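/* Schematically (a sketch, not the exact emitted form), a call with
   base_regno == 4, count == 2, up and write_back set builds

	(parallel
	  [(set FROM (plus:SI FROM (const_int 8)))
	   (set (reg:SI 4) (mem:SI FROM))
	   (set (reg:SI 5) (mem:SI (plus:SI FROM (const_int 4))))])

   which the load-multiple patterns in arm.md can match, unless the
   XScale heuristic above has already split it into single loads.  */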
6322 rtx
6323 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
6324 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6326 HOST_WIDE_INT offset = *offsetp;
6327 int i = 0, j;
6328 rtx result;
6329 int sign = up ? 1 : -1;
6330 rtx mem, addr;
6332 /* See arm_gen_load_multiple for discussion of
6333 the pros/cons of ldm/stm usage for XScale. */
6334 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6336 rtx seq;
6338 start_sequence ();
6340 for (i = 0; i < count; i++)
6342 addr = plus_constant (to, i * 4 * sign);
6343 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6344 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6345 offset += 4 * sign;
6348 if (write_back)
6350 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6351 *offsetp = offset;
6354 seq = get_insns ();
6355 end_sequence ();
6357 return seq;
6360 result = gen_rtx_PARALLEL (VOIDmode,
6361 rtvec_alloc (count + (write_back ? 1 : 0)));
6362 if (write_back)
6364 XVECEXP (result, 0, 0)
6365 = gen_rtx_SET (VOIDmode, to,
6366 plus_constant (to, count * 4 * sign));
6367 i = 1;
6368 count++;
6371 for (j = 0; i < count; i++, j++)
6373 addr = plus_constant (to, j * 4 * sign);
6374 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6375 XVECEXP (result, 0, i)
6376 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6377 offset += 4 * sign;
6380 if (write_back)
6381 *offsetp = offset;
6383 return result;
6386 int
6387 arm_gen_movmemqi (rtx *operands)
6389 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6390 HOST_WIDE_INT srcoffset, dstoffset;
6391 int i;
6392 rtx src, dst, srcbase, dstbase;
6393 rtx part_bytes_reg = NULL;
6394 rtx mem;
6396 if (GET_CODE (operands[2]) != CONST_INT
6397 || GET_CODE (operands[3]) != CONST_INT
6398 || INTVAL (operands[2]) > 64
6399 || INTVAL (operands[3]) & 3)
6400 return 0;
6402 dstbase = operands[0];
6403 srcbase = operands[1];
6405 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
6406 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
6408 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6409 out_words_to_go = INTVAL (operands[2]) / 4;
6410 last_bytes = INTVAL (operands[2]) & 3;
6411 dstoffset = srcoffset = 0;
6413 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6414 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6416 for (i = 0; in_words_to_go >= 2; i += 4)
6418 if (in_words_to_go > 4)
6419 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6420 srcbase, &srcoffset));
6421 else
6422 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6423 FALSE, srcbase, &srcoffset));
6425 if (out_words_to_go)
6427 if (out_words_to_go > 4)
6428 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6429 dstbase, &dstoffset));
6430 else if (out_words_to_go != 1)
6431 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6432 dst, TRUE,
6433 (last_bytes == 0
6434 ? FALSE : TRUE),
6435 dstbase, &dstoffset));
6436 else
6438 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6439 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6440 if (last_bytes != 0)
6442 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6443 dstoffset += 4;
6448 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6449 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6452 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6453 if (out_words_to_go)
6455 rtx sreg;
6457 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6458 sreg = copy_to_reg (mem);
6460 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6461 emit_move_insn (mem, sreg);
6462 in_words_to_go--;
6464 gcc_assert (!in_words_to_go); /* Sanity check. */
6467 if (in_words_to_go)
6469 gcc_assert (in_words_to_go > 0);
6471 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6472 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6475 gcc_assert (!last_bytes || part_bytes_reg);
6477 if (BYTES_BIG_ENDIAN && last_bytes)
6479 rtx tmp = gen_reg_rtx (SImode);
6481 /* The bytes we want are in the top end of the word. */
6482 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6483 GEN_INT (8 * (4 - last_bytes))));
6484 part_bytes_reg = tmp;
6486 while (last_bytes)
6488 mem = adjust_automodify_address (dstbase, QImode,
6489 plus_constant (dst, last_bytes - 1),
6490 dstoffset + last_bytes - 1);
6491 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6493 if (--last_bytes)
6495 tmp = gen_reg_rtx (SImode);
6496 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6497 part_bytes_reg = tmp;
6502 else
6504 if (last_bytes > 1)
6506 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6507 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6508 last_bytes -= 2;
6509 if (last_bytes)
6511 rtx tmp = gen_reg_rtx (SImode);
6512 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6513 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6514 part_bytes_reg = tmp;
6515 dstoffset += 2;
6519 if (last_bytes)
6521 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6522 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6526 return 1;
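/* A worked example: an 11-byte, word-aligned copy gives
   in_words_to_go == 3, out_words_to_go == 2 and last_bytes == 3.
   The loop loads three words into r0-r2 and stores two of them back;
   the three residual bytes left in r2 are then written as a halfword
   followed by a byte on little-endian targets, or byte by byte from
   the top of the word on big-endian ones.  */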
6529 /* Select a dominance comparison mode if possible for a test of the general
6530 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6531 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6532 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6533 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6534 In all cases OP will be either EQ or NE, but we don't need to know which
6535 here. If we are unable to support a dominance comparison we return
6536 CC mode. This will then fail to match for the RTL expressions that
6537 generate this call. */
6538 enum machine_mode
6539 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6541 enum rtx_code cond1, cond2;
6542 int swapped = 0;
6544 /* Currently we will probably get the wrong result if the individual
6545 comparisons are not simple. This also ensures that it is safe to
6546 reverse a comparison if necessary. */
6547 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6548 != CCmode)
6549 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6550 != CCmode))
6551 return CCmode;
6553 /* The if_then_else variant of this tests the second condition if the
6554 first passes, but is true if the first fails. Reverse the first
6555 condition to get a true "inclusive-or" expression. */
6556 if (cond_or == DOM_CC_NX_OR_Y)
6557 cond1 = reverse_condition (cond1);
6559 /* If the comparisons are not equal, and one doesn't dominate the other,
6560 then we can't do this. */
6561 if (cond1 != cond2
6562 && !comparison_dominates_p (cond1, cond2)
6563 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6564 return CCmode;
6566 if (swapped)
6568 enum rtx_code temp = cond1;
6569 cond1 = cond2;
6570 cond2 = temp;
6573 switch (cond1)
6575 case EQ:
6576 if (cond_or == DOM_CC_X_AND_Y)
6577 return CC_DEQmode;
6579 switch (cond2)
6581 case EQ: return CC_DEQmode;
6582 case LE: return CC_DLEmode;
6583 case LEU: return CC_DLEUmode;
6584 case GE: return CC_DGEmode;
6585 case GEU: return CC_DGEUmode;
6586 default: gcc_unreachable ();
6589 case LT:
6590 if (cond_or == DOM_CC_X_AND_Y)
6591 return CC_DLTmode;
6593 switch (cond2)
6595 case LT:
6596 return CC_DLTmode;
6597 case LE:
6598 return CC_DLEmode;
6599 case NE:
6600 return CC_DNEmode;
6601 default:
6602 gcc_unreachable ();
6605 case GT:
6606 if (cond_or == DOM_CC_X_AND_Y)
6607 return CC_DGTmode;
6609 switch (cond2)
6611 case GT:
6612 return CC_DGTmode;
6613 case GE:
6614 return CC_DGEmode;
6615 case NE:
6616 return CC_DNEmode;
6617 default:
6618 gcc_unreachable ();
6621 case LTU:
6622 if (cond_or == DOM_CC_X_AND_Y)
6623 return CC_DLTUmode;
6625 switch (cond2)
6627 case LTU:
6628 return CC_DLTUmode;
6629 case LEU:
6630 return CC_DLEUmode;
6631 case NE:
6632 return CC_DNEmode;
6633 default:
6634 gcc_unreachable ();
6637 case GTU:
6638 if (cond_or == DOM_CC_X_AND_Y)
6639 return CC_DGTUmode;
6641 switch (cond2)
6643 case GTU:
6644 return CC_DGTUmode;
6645 case GEU:
6646 return CC_DGEUmode;
6647 case NE:
6648 return CC_DNEmode;
6649 default:
6650 gcc_unreachable ();
6653 /* The remaining cases only occur when both comparisons are the
6654 same. */
6655 case NE:
6656 gcc_assert (cond1 == cond2);
6657 return CC_DNEmode;
6659 case LE:
6660 gcc_assert (cond1 == cond2);
6661 return CC_DLEmode;
6663 case GE:
6664 gcc_assert (cond1 == cond2);
6665 return CC_DGEmode;
6667 case LEU:
6668 gcc_assert (cond1 == cond2);
6669 return CC_DLEUmode;
6671 case GEU:
6672 gcc_assert (cond1 == cond2);
6673 return CC_DGEUmode;
6675 default:
6676 gcc_unreachable ();
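/* For example, combining (lt:SI a b) with (le:SI c d) under
   DOM_CC_X_OR_Y gives cond1 == LT and cond2 == LE; LT dominates LE,
   so the result is CC_DLEmode.  Combining LT with GT dominates in
   neither direction, so CCmode is returned and the calling pattern
   simply fails to match.  */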
6680 enum machine_mode
6681 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6683 /* All floating point compares return CCFP if it is an equality
6684 comparison, and CCFPE otherwise. */
6685 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6687 switch (op)
6689 case EQ:
6690 case NE:
6691 case UNORDERED:
6692 case ORDERED:
6693 case UNLT:
6694 case UNLE:
6695 case UNGT:
6696 case UNGE:
6697 case UNEQ:
6698 case LTGT:
6699 return CCFPmode;
6701 case LT:
6702 case LE:
6703 case GT:
6704 case GE:
6705 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6706 return CCFPmode;
6707 return CCFPEmode;
6709 default:
6710 gcc_unreachable ();
6714 /* A compare with a shifted operand. Because of canonicalization, the
6715 comparison will have to be swapped when we emit the assembler. */
6716 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6717 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6718 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6719 || GET_CODE (x) == ROTATERT))
6720 return CC_SWPmode;
6722 /* This operation is performed swapped, but since we only rely on the Z
6723 flag we don't need an additional mode. */
6724 if (GET_MODE (y) == SImode && REG_P (y)
6725 && GET_CODE (x) == NEG
6726 && (op == EQ || op == NE))
6727 return CC_Zmode;
6729 /* This is a special case that is used by combine to allow a
6730 comparison of a shifted byte load to be split into a zero-extend
6731 followed by a comparison of the shifted integer (only valid for
6732 equalities and unsigned inequalities). */
6733 if (GET_MODE (x) == SImode
6734 && GET_CODE (x) == ASHIFT
6735 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6736 && GET_CODE (XEXP (x, 0)) == SUBREG
6737 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6738 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6739 && (op == EQ || op == NE
6740 || op == GEU || op == GTU || op == LTU || op == LEU)
6741 && GET_CODE (y) == CONST_INT)
6742 return CC_Zmode;
6744 /* A construct for a conditional compare, if the false arm contains
6745 0, then both conditions must be true, otherwise either condition
6746 must be true. Not all conditions are possible, so CCmode is
6747 returned if it can't be done. */
6748 if (GET_CODE (x) == IF_THEN_ELSE
6749 && (XEXP (x, 2) == const0_rtx
6750 || XEXP (x, 2) == const1_rtx)
6751 && COMPARISON_P (XEXP (x, 0))
6752 && COMPARISON_P (XEXP (x, 1)))
6753 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6754 INTVAL (XEXP (x, 2)));
6756 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6757 if (GET_CODE (x) == AND
6758 && COMPARISON_P (XEXP (x, 0))
6759 && COMPARISON_P (XEXP (x, 1)))
6760 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6761 DOM_CC_X_AND_Y);
6763 if (GET_CODE (x) == IOR
6764 && COMPARISON_P (XEXP (x, 0))
6765 && COMPARISON_P (XEXP (x, 1)))
6766 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6767 DOM_CC_X_OR_Y);
6769 /* An operation (on Thumb) where we want to test for a single bit.
6770 This is done by shifting that bit up into the top bit of a
6771 scratch register; we can then branch on the sign bit. */
6772 if (TARGET_THUMB
6773 && GET_MODE (x) == SImode
6774 && (op == EQ || op == NE)
6775 && GET_CODE (x) == ZERO_EXTRACT
6776 && XEXP (x, 1) == const1_rtx)
6777 return CC_Nmode;
6779 /* For an operation that sets the condition codes as a side effect, the
6780 V flag is not set correctly, so we can only use comparisons where
6781 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6782 instead.) */
6783 if (GET_MODE (x) == SImode
6784 && y == const0_rtx
6785 && (op == EQ || op == NE || op == LT || op == GE)
6786 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6787 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6788 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6789 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6790 || GET_CODE (x) == LSHIFTRT
6791 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6792 || GET_CODE (x) == ROTATERT
6793 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6794 return CC_NOOVmode;
6796 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6797 return CC_Zmode;
6799 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6800 && GET_CODE (x) == PLUS
6801 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6802 return CC_Cmode;
6804 return CCmode;
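/* Two examples of the selections above: a comparison whose first
   operand is (ashift:SI r1 (const_int 2)) and whose second is the
   register r0 gives CC_SWPmode, since the compare must be emitted
   the other way round as "cmp r0, r1, lsl #2"; and testing
   (plus:SI r0 r1) against zero for LT gives CC_NOOVmode, where the
   "mi" condition stands in for "lt".  */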
6807 /* X and Y are two things to compare using CODE. Emit the compare insn and
6808 return the rtx for the CC register in the proper mode. */
6810 rtx
6811 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6813 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6814 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6816 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
6818 return cc_reg;
6821 /* Generate a sequence of insns that will generate the correct return
6822 address mask depending on the physical architecture that the program
6823 is running on. */
6824 rtx
6825 arm_gen_return_addr_mask (void)
6827 rtx reg = gen_reg_rtx (Pmode);
6829 emit_insn (gen_return_addr_mask (reg));
6830 return reg;
6833 void
6834 arm_reload_in_hi (rtx *operands)
6836 rtx ref = operands[1];
6837 rtx base, scratch;
6838 HOST_WIDE_INT offset = 0;
6840 if (GET_CODE (ref) == SUBREG)
6842 offset = SUBREG_BYTE (ref);
6843 ref = SUBREG_REG (ref);
6846 if (GET_CODE (ref) == REG)
6848 /* We have a pseudo which has been spilt onto the stack; there
6849 are two cases here: the first where there is a simple
6850 stack-slot replacement and a second where the stack-slot is
6851 out of range, or is used as a subreg. */
6852 if (reg_equiv_mem[REGNO (ref)])
6854 ref = reg_equiv_mem[REGNO (ref)];
6855 base = find_replacement (&XEXP (ref, 0));
6857 else
6858 /* The slot is out of range, or was dressed up in a SUBREG. */
6859 base = reg_equiv_address[REGNO (ref)];
6861 else
6862 base = find_replacement (&XEXP (ref, 0));
6864 /* Handle the case where the address is too complex to be offset by 1. */
6865 if (GET_CODE (base) == MINUS
6866 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6868 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6870 emit_set_insn (base_plus, base);
6871 base = base_plus;
6873 else if (GET_CODE (base) == PLUS)
6875 /* The addend must be CONST_INT, or we would have dealt with it above. */
6876 HOST_WIDE_INT hi, lo;
6878 offset += INTVAL (XEXP (base, 1));
6879 base = XEXP (base, 0);
6881 /* Rework the address into a legal sequence of insns. */
6882 /* Valid range for lo is -4095 -> 4095 */
6883 lo = (offset >= 0
6884 ? (offset & 0xfff)
6885 : -((-offset) & 0xfff));
6887 /* Corner case: if lo is the max offset, then we would be out of range
6888 once we have added the additional 1 below, so bump the msb into the
6889 pre-loading insn(s). */
6890 if (lo == 4095)
6891 lo &= 0x7ff;
6893 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6894 ^ (HOST_WIDE_INT) 0x80000000)
6895 - (HOST_WIDE_INT) 0x80000000);
6897 gcc_assert (hi + lo == offset);
6899 if (hi != 0)
6901 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6903 /* Get the base address; addsi3 knows how to handle constants
6904 that require more than one insn. */
6905 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6906 base = base_plus;
6907 offset = lo;
6911 /* Operands[2] may overlap operands[0] (though it won't overlap
6912 operands[1]), that's why we asked for a DImode reg -- so we can
6913 use the bit that does not overlap. */
6914 if (REGNO (operands[2]) == REGNO (operands[0]))
6915 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6916 else
6917 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6919 emit_insn (gen_zero_extendqisi2 (scratch,
6920 gen_rtx_MEM (QImode,
6921 plus_constant (base,
6922 offset))));
6923 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6924 gen_rtx_MEM (QImode,
6925 plus_constant (base,
6926 offset + 1))));
6927 if (!BYTES_BIG_ENDIAN)
6928 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6929 gen_rtx_IOR (SImode,
6930 gen_rtx_ASHIFT
6931 (SImode,
6932 gen_rtx_SUBREG (SImode, operands[0], 0),
6933 GEN_INT (8)),
6934 scratch));
6935 else
6936 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6937 gen_rtx_IOR (SImode,
6938 gen_rtx_ASHIFT (SImode, scratch,
6939 GEN_INT (8)),
6940 gen_rtx_SUBREG (SImode, operands[0], 0)));
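/* Illustrative expansion (register numbers assumed, not from the
   original source): reloading a halfword from [r4, #10] on a
   little-endian target, with DImode scratch r2/r3 and destination r0,
   yields roughly

	ldrb	r2, [r4, #10]		@ low byte into the scratch
	ldrb	r0, [r4, #11]		@ high byte into the destination
	orr	r0, r2, r0, lsl #8	@ combine the two halves  */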
6943 /* Handle storing a half-word to memory during reload by synthesizing as two
6944 byte stores. Take care not to clobber the input values until after we
6945 have moved them somewhere safe. This code assumes that if the DImode
6946 scratch in operands[2] overlaps either the input value or output address
6947 in some way, then that value must die in this insn (we absolutely need
6948 two scratch registers for some corner cases). */
6949 void
6950 arm_reload_out_hi (rtx *operands)
6952 rtx ref = operands[0];
6953 rtx outval = operands[1];
6954 rtx base, scratch;
6955 HOST_WIDE_INT offset = 0;
6957 if (GET_CODE (ref) == SUBREG)
6959 offset = SUBREG_BYTE (ref);
6960 ref = SUBREG_REG (ref);
6963 if (GET_CODE (ref) == REG)
6965 /* We have a pseudo which has been spilt onto the stack; there
6966 are two cases here: the first where there is a simple
6967 stack-slot replacement and a second where the stack-slot is
6968 out of range, or is used as a subreg. */
6969 if (reg_equiv_mem[REGNO (ref)])
6971 ref = reg_equiv_mem[REGNO (ref)];
6972 base = find_replacement (&XEXP (ref, 0));
6974 else
6975 /* The slot is out of range, or was dressed up in a SUBREG. */
6976 base = reg_equiv_address[REGNO (ref)];
6978 else
6979 base = find_replacement (&XEXP (ref, 0));
6981 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6983 /* Handle the case where the address is too complex to be offset by 1. */
6984 if (GET_CODE (base) == MINUS
6985 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6987 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6989 /* Be careful not to destroy OUTVAL. */
6990 if (reg_overlap_mentioned_p (base_plus, outval))
6992 /* Updating base_plus might destroy outval, see if we can
6993 swap the scratch and base_plus. */
6994 if (!reg_overlap_mentioned_p (scratch, outval))
6996 rtx tmp = scratch;
6997 scratch = base_plus;
6998 base_plus = tmp;
7000 else
7002 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7004 /* Be conservative and copy OUTVAL into the scratch now,
7005 this should only be necessary if outval is a subreg
7006 of something larger than a word. */
7007 /* XXX Might this clobber base? I can't see how it can,
7008 since scratch is known to overlap with OUTVAL, and
7009 must be wider than a word. */
7010 emit_insn (gen_movhi (scratch_hi, outval));
7011 outval = scratch_hi;
7015 emit_set_insn (base_plus, base);
7016 base = base_plus;
7018 else if (GET_CODE (base) == PLUS)
7020 /* The addend must be CONST_INT, or we would have dealt with it above. */
7021 HOST_WIDE_INT hi, lo;
7023 offset += INTVAL (XEXP (base, 1));
7024 base = XEXP (base, 0);
7026 /* Rework the address into a legal sequence of insns. */
7027 /* Valid range for lo is -4095 -> 4095 */
7028 lo = (offset >= 0
7029 ? (offset & 0xfff)
7030 : -((-offset) & 0xfff));
7032 /* Corner case: if lo is the max offset, then we would be out of range
7033 once we have added the additional 1 below, so bump the msb into the
7034 pre-loading insn(s). */
7035 if (lo == 4095)
7036 lo &= 0x7ff;
7038 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7039 ^ (HOST_WIDE_INT) 0x80000000)
7040 - (HOST_WIDE_INT) 0x80000000);
7042 gcc_assert (hi + lo == offset);
7044 if (hi != 0)
7046 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7048 /* Be careful not to destroy OUTVAL. */
7049 if (reg_overlap_mentioned_p (base_plus, outval))
7051 /* Updating base_plus might destroy outval, see if we
7052 can swap the scratch and base_plus. */
7053 if (!reg_overlap_mentioned_p (scratch, outval))
7055 rtx tmp = scratch;
7056 scratch = base_plus;
7057 base_plus = tmp;
7059 else
7061 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7063 /* Be conservative and copy outval into scratch now,
7064 this should only be necessary if outval is a
7065 subreg of something larger than a word. */
7066 /* XXX Might this clobber base? I can't see how it
7067 can, since scratch is known to overlap with
7068 outval. */
7069 emit_insn (gen_movhi (scratch_hi, outval));
7070 outval = scratch_hi;
7074 /* Get the base address; addsi3 knows how to handle constants
7075 that require more than one insn. */
7076 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7077 base = base_plus;
7078 offset = lo;
7082 if (BYTES_BIG_ENDIAN)
7084 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7085 plus_constant (base, offset + 1)),
7086 gen_lowpart (QImode, outval)));
7087 emit_insn (gen_lshrsi3 (scratch,
7088 gen_rtx_SUBREG (SImode, outval, 0),
7089 GEN_INT (8)));
7090 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7091 gen_lowpart (QImode, scratch)));
7093 else
7095 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7096 gen_lowpart (QImode, outval)));
7097 emit_insn (gen_lshrsi3 (scratch,
7098 gen_rtx_SUBREG (SImode, outval, 0),
7099 GEN_INT (8)));
7100 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7101 plus_constant (base, offset + 1)),
7102 gen_lowpart (QImode, scratch)));
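/* Illustrative expansion for the little-endian branch above
   (registers assumed): storing r1 as a halfword at [r4, #10] with
   scratch r2 gives roughly

	strb	r1, [r4, #10]		@ low byte
	mov	r2, r1, lsr #8
	strb	r2, [r4, #11]		@ high byte

   the big-endian branch writes the same two bytes in the opposite
   order.  */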
7106 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
7107 (padded to the size of a word) should be passed in a register. */
7109 static bool
7110 arm_must_pass_in_stack (enum machine_mode mode, tree type)
7112 if (TARGET_AAPCS_BASED)
7113 return must_pass_in_stack_var_size (mode, type);
7114 else
7115 return must_pass_in_stack_var_size_or_pad (mode, type);
7119 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
7120 Return true if an argument passed on the stack should be padded upwards,
7121 i.e. if the least-significant byte has useful data.
7122 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
7123 aggregate types are placed in the lowest memory address. */
7125 bool
7126 arm_pad_arg_upward (enum machine_mode mode, tree type)
7128 if (!TARGET_AAPCS_BASED)
7129 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
7131 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
7132 return false;
7134 return true;
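/* For example, under AAPCS on a big-endian target a HImode argument
   passed on the stack is padded downwards (its useful data sits in
   the most significant end of the slot), whereas a two-byte
   aggregate is placed at the lowest address, i.e. padded upwards.  */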
7138 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
7139 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
7140 byte of the register has useful data, and return the opposite if the
7141 most significant byte does.
7142 For AAPCS, small aggregates and small complex types are always padded
7143 upwards. */
7145 bool
7146 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
7147 tree type, int first ATTRIBUTE_UNUSED)
7149 if (TARGET_AAPCS_BASED
7150 && BYTES_BIG_ENDIAN
7151 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
7152 && int_size_in_bytes (type) <= 4)
7153 return true;
7155 /* Otherwise, use default padding. */
7156 return !BYTES_BIG_ENDIAN;
7160 /* Print a symbolic form of X to the debug file, F. */
7161 static void
7162 arm_print_value (FILE *f, rtx x)
7164 switch (GET_CODE (x))
7166 case CONST_INT:
7167 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
7168 return;
7170 case CONST_DOUBLE:
7171 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
7172 return;
7174 case CONST_VECTOR:
7176 int i;
7178 fprintf (f, "<");
7179 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
7181 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
7182 if (i < (CONST_VECTOR_NUNITS (x) - 1))
7183 fputc (',', f);
7185 fprintf (f, ">");
7187 return;
7189 case CONST_STRING:
7190 fprintf (f, "\"%s\"", XSTR (x, 0));
7191 return;
7193 case SYMBOL_REF:
7194 fprintf (f, "`%s'", XSTR (x, 0));
7195 return;
7197 case LABEL_REF:
7198 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
7199 return;
7201 case CONST:
7202 arm_print_value (f, XEXP (x, 0));
7203 return;
7205 case PLUS:
7206 arm_print_value (f, XEXP (x, 0));
7207 fprintf (f, "+");
7208 arm_print_value (f, XEXP (x, 1));
7209 return;
7211 case PC:
7212 fprintf (f, "pc");
7213 return;
7215 default:
7216 fprintf (f, "????");
7217 return;
7221 /* Routines for manipulation of the constant pool. */
7223 /* Arm instructions cannot load a large constant directly into a
7224 register; they have to come from a pc-relative load. The constant
7225 must therefore be placed in the addressable range of the
7226 pc-relative load. Depending on the precise pc-relative load
7227 instruction, the range is somewhere between 256 bytes and 4k. This
7228 means that we often have to dump a constant inside a function, and
7229 generate code to branch around it.
7231 It is important to minimize this, since the branches will slow
7232 things down and make the code larger.
7234 Normally we can hide the table after an existing unconditional
7235 branch so that there is no interruption of the flow, but in the
7236 worst case the code looks like this:
7238 ldr rn, L1
7239 ...
7240 b L2
7241 align
7242 L1: .long value
7243 L2:
7244 ...
7246 ldr rn, L3
7247 ...
7248 b L4
7249 align
7250 L3: .long value
7251 L4:
7252 ...
7254 We fix this by performing a scan after scheduling, which notices
7255 which instructions need to have their operands fetched from the
7256 constant table and builds the table.
7258 The algorithm starts by building a table of all the constants that
7259 need fixing up and all the natural barriers in the function (places
7260 where a constant table can be dropped without breaking the flow).
7261 For each fixup we note how far the pc-relative replacement will be
7262 able to reach and the offset of the instruction into the function.
7264 Having built the table we then group the fixes together to form
7265 tables that are as large as possible (subject to addressing
7266 constraints) and emit each table of constants after the last
7267 barrier that is within range of all the instructions in the group.
7268 If a group does not contain a barrier, then we forcibly create one
7269 by inserting a jump instruction into the flow. Once the table has
7270 been inserted, the insns are then modified to reference the
7271 relevant entry in the pool.
7273 Possible enhancements to the algorithm (not implemented) are:
7275 1) For some processors and object formats, there may be benefit in
7276 aligning the pools to the start of cache lines; this alignment
7277 would need to be taken into account when calculating addressability
7278 of a pool. */
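/* To make the constraint concrete: an insn at address A whose
   pc-relative load can reach about 4k forwards gives rise to a fix
   with a forward range of roughly 4096, so its constant must be
   emitted at some address below about A + 4096; a group of fixes is
   then served by the last barrier that is in range of every member.  */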
7280 /* These typedefs are located at the start of this file, so that
7281 they can be used in the prototypes there. This comment is to
7282 remind readers of that fact so that the following structures
7283 can be understood more easily.
7285 typedef struct minipool_node Mnode;
7286 typedef struct minipool_fixup Mfix; */
7288 struct minipool_node
7290 /* Doubly linked chain of entries. */
7291 Mnode * next;
7292 Mnode * prev;
7293 /* The maximum offset into the code that this entry can be placed. While
7294 pushing fixes for forward references, all entries are sorted in order
7295 of increasing max_address. */
7296 HOST_WIDE_INT max_address;
7297 /* Similarly for an entry inserted for a backwards ref. */
7298 HOST_WIDE_INT min_address;
7299 /* The number of fixes referencing this entry. This can become zero
7300 if we "unpush" an entry. In this case we ignore the entry when we
7301 come to emit the code. */
7302 int refcount;
7303 /* The offset from the start of the minipool. */
7304 HOST_WIDE_INT offset;
7305 /* The value in the table. */
7306 rtx value;
7307 /* The mode of value. */
7308 enum machine_mode mode;
7309 /* The size of the value. With iWMMXt enabled
7310 sizes > 4 also imply an alignment of 8 bytes. */
7311 int fix_size;
7314 struct minipool_fixup
7316 Mfix * next;
7317 rtx insn;
7318 HOST_WIDE_INT address;
7319 rtx * loc;
7320 enum machine_mode mode;
7321 int fix_size;
7322 rtx value;
7323 Mnode * minipool;
7324 HOST_WIDE_INT forwards;
7325 HOST_WIDE_INT backwards;
7328 /* Fixes less than a word need padding out to a word boundary. */
7329 #define MINIPOOL_FIX_SIZE(mode) \
7330 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
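/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE
   (HImode) are both 4, since sub-word entries are padded out, while
   SImode gives 4 and DImode or DFmode give 8.  */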
7332 static Mnode * minipool_vector_head;
7333 static Mnode * minipool_vector_tail;
7334 static rtx minipool_vector_label;
7335 static int minipool_pad;
7337 /* The linked list of all minipool fixes required for this function. */
7338 Mfix * minipool_fix_head;
7339 Mfix * minipool_fix_tail;
7340 /* The fix entry for the current minipool, once it has been placed. */
7341 Mfix * minipool_barrier;
7343 /* Determines if INSN is the start of a jump table. Returns the end
7344 of the TABLE or NULL_RTX. */
7345 static rtx
7346 is_jump_table (rtx insn)
7348 rtx table;
7350 if (GET_CODE (insn) == JUMP_INSN
7351 && JUMP_LABEL (insn) != NULL
7352 && ((table = next_real_insn (JUMP_LABEL (insn)))
7353 == next_real_insn (insn))
7354 && table != NULL
7355 && GET_CODE (table) == JUMP_INSN
7356 && (GET_CODE (PATTERN (table)) == ADDR_VEC
7357 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
7358 return table;
7360 return NULL_RTX;
7363 #ifndef JUMP_TABLES_IN_TEXT_SECTION
7364 #define JUMP_TABLES_IN_TEXT_SECTION 0
7365 #endif
7367 static HOST_WIDE_INT
7368 get_jump_table_size (rtx insn)
7370 /* ADDR_VECs only take room if read-only data goes into the text
7371 section. */
7372 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
7374 rtx body = PATTERN (insn);
7375 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7377 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
7380 return 0;
7383 /* Move a minipool fix MP from its current location to before MAX_MP.
7384 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7385 constraints may need updating. */
7386 static Mnode *
7387 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7388 HOST_WIDE_INT max_address)
7390 /* The code below assumes these are different. */
7391 gcc_assert (mp != max_mp);
7393 if (max_mp == NULL)
7395 if (max_address < mp->max_address)
7396 mp->max_address = max_address;
7398 else
7400 if (max_address > max_mp->max_address - mp->fix_size)
7401 mp->max_address = max_mp->max_address - mp->fix_size;
7402 else
7403 mp->max_address = max_address;
7405 /* Unlink MP from its current position. Since max_mp is non-null,
7406 mp->prev must be non-null. */
7407 mp->prev->next = mp->next;
7408 if (mp->next != NULL)
7409 mp->next->prev = mp->prev;
7410 else
7411 minipool_vector_tail = mp->prev;
7413 /* Re-insert it before MAX_MP. */
7414 mp->next = max_mp;
7415 mp->prev = max_mp->prev;
7416 max_mp->prev = mp;
7418 if (mp->prev != NULL)
7419 mp->prev->next = mp;
7420 else
7421 minipool_vector_head = mp;
7424 /* Save the new entry. */
7425 max_mp = mp;
7427 /* Scan over the preceding entries and adjust their addresses as
7428 required. */
7429 while (mp->prev != NULL
7430 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7432 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7433 mp = mp->prev;
7436 return max_mp;
7439 /* Add a constant to the minipool for a forward reference. Returns the
7440 node added or NULL if the constant will not fit in this pool. */
7441 static Mnode *
7442 add_minipool_forward_ref (Mfix *fix)
7444 /* If set, max_mp is the first pool_entry that has a lower
7445 constraint than the one we are trying to add. */
7446 Mnode * max_mp = NULL;
7447 HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
7448 Mnode * mp;
7450 /* If the minipool starts before the end of FIX->INSN then this FIX
7451 cannot be placed into the current pool. Furthermore, adding the
7452 new constant pool entry may cause the pool to start FIX_SIZE bytes
7453 earlier. */
7454 if (minipool_vector_head &&
7455 (fix->address + get_attr_length (fix->insn)
7456 >= minipool_vector_head->max_address - fix->fix_size))
7457 return NULL;
7459 /* Scan the pool to see if a constant with the same value has
7460 already been added. While we are doing this, also note the
7461 location where we must insert the constant if it doesn't already
7462 exist. */
7463 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7465 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7466 && fix->mode == mp->mode
7467 && (GET_CODE (fix->value) != CODE_LABEL
7468 || (CODE_LABEL_NUMBER (fix->value)
7469 == CODE_LABEL_NUMBER (mp->value)))
7470 && rtx_equal_p (fix->value, mp->value))
7472 /* More than one fix references this entry. */
7473 mp->refcount++;
7474 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7477 /* Note the insertion point if necessary. */
7478 if (max_mp == NULL
7479 && mp->max_address > max_address)
7480 max_mp = mp;
7482 /* If we are inserting an 8-byte aligned quantity and
7483 we have not already found an insertion point, then
7484 make sure that all such 8-byte aligned quantities are
7485 placed at the start of the pool. */
7486 if (ARM_DOUBLEWORD_ALIGN
7487 && max_mp == NULL
7488 && fix->fix_size == 8
7489 && mp->fix_size != 8)
7491 max_mp = mp;
7492 max_address = mp->max_address;
7496 /* The value is not currently in the minipool, so we need to create
7497 a new entry for it. If MAX_MP is NULL, the entry will be put on
7498 the end of the list since the placement is less constrained than
7499 any existing entry. Otherwise, we insert the new fix before
7500 MAX_MP and, if necessary, adjust the constraints on the other
7501 entries. */
7502 mp = XNEW (Mnode);
7503 mp->fix_size = fix->fix_size;
7504 mp->mode = fix->mode;
7505 mp->value = fix->value;
7506 mp->refcount = 1;
7507 /* Not yet required for a backwards ref. */
7508 mp->min_address = -65536;
7510 if (max_mp == NULL)
7512 mp->max_address = max_address;
7513 mp->next = NULL;
7514 mp->prev = minipool_vector_tail;
7516 if (mp->prev == NULL)
7518 minipool_vector_head = mp;
7519 minipool_vector_label = gen_label_rtx ();
7521 else
7522 mp->prev->next = mp;
7524 minipool_vector_tail = mp;
7526 else
7528 if (max_address > max_mp->max_address - mp->fix_size)
7529 mp->max_address = max_mp->max_address - mp->fix_size;
7530 else
7531 mp->max_address = max_address;
7533 mp->next = max_mp;
7534 mp->prev = max_mp->prev;
7535 max_mp->prev = mp;
7536 if (mp->prev != NULL)
7537 mp->prev->next = mp;
7538 else
7539 minipool_vector_head = mp;
7542 /* Save the new entry. */
7543 max_mp = mp;
7545 /* Scan over the preceding entries and adjust their addresses as
7546 required. */
7547 while (mp->prev != NULL
7548 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7550 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7551 mp = mp->prev;
7554 return max_mp;
7557 static Mnode *
7558 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7559 HOST_WIDE_INT min_address)
7561 HOST_WIDE_INT offset;
7563 /* The code below assumes these are different. */
7564 gcc_assert (mp != min_mp);
7566 if (min_mp == NULL)
7568 if (min_address > mp->min_address)
7569 mp->min_address = min_address;
7571 else
7573 /* We will adjust this below if it is too loose. */
7574 mp->min_address = min_address;
7576 /* Unlink MP from its current position. Since min_mp is non-null,
7577 mp->next must be non-null. */
7578 mp->next->prev = mp->prev;
7579 if (mp->prev != NULL)
7580 mp->prev->next = mp->next;
7581 else
7582 minipool_vector_head = mp->next;
7584 /* Reinsert it after MIN_MP. */
7585 mp->prev = min_mp;
7586 mp->next = min_mp->next;
7587 min_mp->next = mp;
7588 if (mp->next != NULL)
7589 mp->next->prev = mp;
7590 else
7591 minipool_vector_tail = mp;
7594 min_mp = mp;
7596 offset = 0;
7597 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7599 mp->offset = offset;
7600 if (mp->refcount > 0)
7601 offset += mp->fix_size;
7603 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7604 mp->next->min_address = mp->min_address + mp->fix_size;
7607 return min_mp;
7610 /* Add a constant to the minipool for a backward reference. Returns the
7611 node added or NULL if the constant will not fit in this pool.
7613 Note that the code for insertion for a backwards reference can be
7614 somewhat confusing because the calculated offsets for each fix do
7615 not take into account the size of the pool (which is still under
7616 construction). */
7617 static Mnode *
7618 add_minipool_backward_ref (Mfix *fix)
7620 /* If set, min_mp is the last pool_entry that has a lower constraint
7621 than the one we are trying to add. */
7622 Mnode *min_mp = NULL;
7623 /* This can be negative, since it is only a constraint. */
7624 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7625 Mnode *mp;
7627 /* If we can't reach the current pool from this insn, or if we can't
7628 insert this entry at the end of the pool without pushing other
7629 fixes out of range, then we don't try. This ensures that we
7630 can't fail later on. */
7631 if (min_address >= minipool_barrier->address
7632 || (minipool_vector_tail->min_address + fix->fix_size
7633 >= minipool_barrier->address))
7634 return NULL;
7636 /* Scan the pool to see if a constant with the same value has
7637 already been added. While we are doing this, also note the
7638 location where we must insert the constant if it doesn't already
7639 exist. */
7640 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7642 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7643 && fix->mode == mp->mode
7644 && (GET_CODE (fix->value) != CODE_LABEL
7645 || (CODE_LABEL_NUMBER (fix->value)
7646 == CODE_LABEL_NUMBER (mp->value)))
7647 && rtx_equal_p (fix->value, mp->value)
7648 /* Check that there is enough slack to move this entry to the
7649 end of the table (this is conservative). */
7650 && (mp->max_address
7651 > (minipool_barrier->address
7652 + minipool_vector_tail->offset
7653 + minipool_vector_tail->fix_size)))
7655 mp->refcount++;
7656 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7659 if (min_mp != NULL)
7660 mp->min_address += fix->fix_size;
7661 else
7663 /* Note the insertion point if necessary. */
7664 if (mp->min_address < min_address)
7666 /* For now, we do not allow the insertion of 8-byte alignment
7667 requiring nodes anywhere but at the start of the pool. */
7668 if (ARM_DOUBLEWORD_ALIGN
7669 && fix->fix_size == 8 && mp->fix_size != 8)
7670 return NULL;
7671 else
7672 min_mp = mp;
7674 else if (mp->max_address
7675 < minipool_barrier->address + mp->offset + fix->fix_size)
7677 /* Inserting before this entry would push the fix beyond
7678 its maximum address (which can happen if we have
7679 re-located a forwards fix); force the new fix to come
7680 after it. */
7681 min_mp = mp;
7682 min_address = mp->min_address + fix->fix_size;
7684 /* If we are inserting an 8-byte aligned quantity and
7685 we have not already found an insertion point, then
7686 make sure that all such 8-byte aligned quantities are
7687 placed at the start of the pool. */
7688 else if (ARM_DOUBLEWORD_ALIGN
7689 && min_mp == NULL
7690 && fix->fix_size == 8
7691 && mp->fix_size < 8)
7693 min_mp = mp;
7694 min_address = mp->min_address + fix->fix_size;
7699 /* We need to create a new entry. */
7700 mp = XNEW (Mnode);
7701 mp->fix_size = fix->fix_size;
7702 mp->mode = fix->mode;
7703 mp->value = fix->value;
7704 mp->refcount = 1;
7705 mp->max_address = minipool_barrier->address + 65536;
7707 mp->min_address = min_address;
7709 if (min_mp == NULL)
7711 mp->prev = NULL;
7712 mp->next = minipool_vector_head;
7714 if (mp->next == NULL)
7716 minipool_vector_tail = mp;
7717 minipool_vector_label = gen_label_rtx ();
7719 else
7720 mp->next->prev = mp;
7722 minipool_vector_head = mp;
7724 else
7726 mp->next = min_mp->next;
7727 mp->prev = min_mp;
7728 min_mp->next = mp;
7730 if (mp->next != NULL)
7731 mp->next->prev = mp;
7732 else
7733 minipool_vector_tail = mp;
7736 /* Save the new entry. */
7737 min_mp = mp;
7739 if (mp->prev)
7740 mp = mp->prev;
7741 else
7742 mp->offset = 0;
7744 /* Scan over the following entries and adjust their offsets. */
7745 while (mp->next != NULL)
7747 if (mp->next->min_address < mp->min_address + mp->fix_size)
7748 mp->next->min_address = mp->min_address + mp->fix_size;
7750 if (mp->refcount)
7751 mp->next->offset = mp->offset + mp->fix_size;
7752 else
7753 mp->next->offset = mp->offset;
7755 mp = mp->next;
7758 return min_mp;
7761 static void
7762 assign_minipool_offsets (Mfix *barrier)
7764 HOST_WIDE_INT offset = 0;
7765 Mnode *mp;
7767 minipool_barrier = barrier;
7769 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7771 mp->offset = offset;
7773 if (mp->refcount > 0)
7774 offset += mp->fix_size;
7778 /* Output the literal table. */
7779 static void
7780 dump_minipool (rtx scan)
7782 Mnode * mp;
7783 Mnode * nmp;
7784 int align64 = 0;
7786 if (ARM_DOUBLEWORD_ALIGN)
7787 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7788 if (mp->refcount > 0 && mp->fix_size == 8)
7790 align64 = 1;
7791 break;
7794 if (dump_file)
7795 fprintf (dump_file,
7796 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7797 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7799 scan = emit_label_after (gen_label_rtx (), scan);
7800 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7801 scan = emit_label_after (minipool_vector_label, scan);
7803 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7805 if (mp->refcount > 0)
7807 if (dump_file)
7809 fprintf (dump_file,
7810 ";; Offset %u, min %ld, max %ld ",
7811 (unsigned) mp->offset, (unsigned long) mp->min_address,
7812 (unsigned long) mp->max_address);
7813 arm_print_value (dump_file, mp->value);
7814 fputc ('\n', dump_file);
7817 switch (mp->fix_size)
7819 #ifdef HAVE_consttable_1
7820 case 1:
7821 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7822 break;
7824 #endif
7825 #ifdef HAVE_consttable_2
7826 case 2:
7827 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7828 break;
7830 #endif
7831 #ifdef HAVE_consttable_4
7832 case 4:
7833 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7834 break;
7836 #endif
7837 #ifdef HAVE_consttable_8
7838 case 8:
7839 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7840 break;
7842 #endif
7843 default:
7844 gcc_unreachable ();
7848 nmp = mp->next;
7849 free (mp);
7852 minipool_vector_head = minipool_vector_tail = NULL;
7853 scan = emit_insn_after (gen_consttable_end (), scan);
7854 scan = emit_barrier_after (scan);
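/* The emitted layout, sketched for a pool holding one DImode and one
   SImode entry on an ARM_DOUBLEWORD_ALIGN target: an 8-byte
   alignment directive, the pool label, the 8-byte entry (kept at the
   head of the pool by the insertion code above), the 4-byte entry,
   and finally a barrier so that control flow never falls into the
   data.  */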
7857 /* Return the cost of forcibly inserting a barrier after INSN. */
7858 static int
7859 arm_barrier_cost (rtx insn)
7861 /* Basing the location of the pool on the loop depth is preferable,
7862 but at the moment, the basic block information seems to be
7863 corrupted by this stage of the compilation. */
7864 int base_cost = 50;
7865 rtx next = next_nonnote_insn (insn);
7867 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7868 base_cost -= 20;
7870 switch (GET_CODE (insn))
7872 case CODE_LABEL:
7873 /* It will always be better to place the table before the label, rather
7874 than after it. */
7875 return 50;
7877 case INSN:
7878 case CALL_INSN:
7879 return base_cost;
7881 case JUMP_INSN:
7882 return base_cost - 10;
7884 default:
7885 return base_cost + 10;
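/* For example, a plain INSN costs 50 and a JUMP_INSN 40, and either
   costs 20 less when the following insn is a CODE_LABEL; a
   CODE_LABEL itself is a flat 50.  Lower is cheaper, and the <=
   comparisons in create_fix_barrier below prefer the latest position
   with the minimal cost.  */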
7889 /* Find the best place in the insn stream in the range
7890 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7891 Create the barrier by inserting a jump and add a new fix entry for
7892 it. */
7893 static Mfix *
7894 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7896 HOST_WIDE_INT count = 0;
7897 rtx barrier;
7898 rtx from = fix->insn;
7899 /* The instruction after which we will insert the jump. */
7900 rtx selected = NULL;
7901 int selected_cost;
7902 /* The address at which the jump instruction will be placed. */
7903 HOST_WIDE_INT selected_address;
7904 Mfix * new_fix;
7905 HOST_WIDE_INT max_count = max_address - fix->address;
7906 rtx label = gen_label_rtx ();
7908 selected_cost = arm_barrier_cost (from);
7909 selected_address = fix->address;
7911 while (from && count < max_count)
7913 rtx tmp;
7914 int new_cost;
7916 /* This code shouldn't have been called if there was a natural barrier
7917 within range. */
7918 gcc_assert (GET_CODE (from) != BARRIER);
7920 /* Count the length of this insn. */
7921 count += get_attr_length (from);
7923 /* If there is a jump table, add its length. */
7924 tmp = is_jump_table (from);
7925 if (tmp != NULL)
7927 count += get_jump_table_size (tmp);
7929 /* Jump tables aren't in a basic block, so base the cost on
7930 the dispatch insn. If we select this location, we will
7931 still put the pool after the table. */
7932 new_cost = arm_barrier_cost (from);
7934 if (count < max_count
7935 && (!selected || new_cost <= selected_cost))
7937 selected = tmp;
7938 selected_cost = new_cost;
7939 selected_address = fix->address + count;
7942 /* Continue after the dispatch table. */
7943 from = NEXT_INSN (tmp);
7944 continue;
7947 new_cost = arm_barrier_cost (from);
7949 if (count < max_count
7950 && (!selected || new_cost <= selected_cost))
7952 selected = from;
7953 selected_cost = new_cost;
7954 selected_address = fix->address + count;
7957 from = NEXT_INSN (from);
7960 /* Make sure that we found a place to insert the jump. */
7961 gcc_assert (selected);
7963 /* Create a new JUMP_INSN that branches around a barrier. */
7964 from = emit_jump_insn_after (gen_jump (label), selected);
7965 JUMP_LABEL (from) = label;
7966 barrier = emit_barrier_after (from);
7967 emit_label_after (label, barrier);
7969 /* Create a minipool barrier entry for the new barrier. */
7970 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7971 new_fix->insn = barrier;
7972 new_fix->address = selected_address;
7973 new_fix->next = fix->next;
7974 fix->next = new_fix;
7976 return new_fix;
7979 /* Record that there is a natural barrier in the insn stream at
7980 ADDRESS. */
7981 static void
7982 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7984 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7986 fix->insn = insn;
7987 fix->address = address;
7989 fix->next = NULL;
7990 if (minipool_fix_head != NULL)
7991 minipool_fix_tail->next = fix;
7992 else
7993 minipool_fix_head = fix;
7995 minipool_fix_tail = fix;
7998 /* Record INSN, which will need fixing up to load a value from the
7999 minipool. ADDRESS is the offset of the insn from the start of the
8000 function; LOC is a pointer to the part of the insn which requires
8001 fixing; VALUE is the constant that must be loaded, which is of type
8002 MODE. */
8003 static void
8004 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
8005 enum machine_mode mode, rtx value)
8007 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
8009 #ifdef AOF_ASSEMBLER
8010 /* PIC symbol references need to be converted into offsets into the
8011 based area. */
8012 /* XXX This shouldn't be done here. */
8013 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
8014 value = aof_pic_entry (value);
8015 #endif /* AOF_ASSEMBLER */
8017 fix->insn = insn;
8018 fix->address = address;
8019 fix->loc = loc;
8020 fix->mode = mode;
8021 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
8022 fix->value = value;
8023 fix->forwards = get_attr_pool_range (insn);
8024 fix->backwards = get_attr_neg_pool_range (insn);
8025 fix->minipool = NULL;
8027 /* If an insn doesn't have a range defined for it, then it isn't
8028 expecting to be reworked by this code. Better to stop now than
8029 to generate duff assembly code. */
8030 gcc_assert (fix->forwards || fix->backwards);
8032 /* If an entry requires 8-byte alignment then assume all constant pools
8033 require 4 bytes of padding. Trying to do this later on a per-pool
8034 basis is awkward because existing pool entries have to be modified. */
8035 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
8036 minipool_pad = 4;
8038 if (dump_file)
8040 fprintf (dump_file,
8041 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
8042 GET_MODE_NAME (mode),
8043 INSN_UID (insn), (unsigned long) address,
8044 -1 * (long)fix->backwards, (long)fix->forwards);
8045 arm_print_value (dump_file, fix->value);
8046 fprintf (dump_file, "\n");
8049 /* Add it to the chain of fixes. */
8050 fix->next = NULL;
8052 if (minipool_fix_head != NULL)
8053 minipool_fix_tail->next = fix;
8054 else
8055 minipool_fix_head = fix;
8057 minipool_fix_tail = fix;
8060 /* Return the cost of synthesizing a 64-bit constant VAL inline.
8061 Returns the number of insns needed, or 99 if we don't know how to
8062 do it. */
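/* Worked example (illustrative, not from the original source): for the
   DImode constant 0x0000000100000001 each 32-bit half is the immediate
   1, which arm_gen_constant can set in a single instruction, so the
   total cost returned would be 2.  */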
8063 int
8064 arm_const_double_inline_cost (rtx val)
8066 rtx lowpart, highpart;
8067 enum machine_mode mode;
8069 mode = GET_MODE (val);
8071 if (mode == VOIDmode)
8072 mode = DImode;
8074 gcc_assert (GET_MODE_SIZE (mode) == 8);
8076 lowpart = gen_lowpart (SImode, val);
8077 highpart = gen_highpart_mode (SImode, mode, val);
8079 gcc_assert (GET_CODE (lowpart) == CONST_INT);
8080 gcc_assert (GET_CODE (highpart) == CONST_INT);
8082 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
8083 NULL_RTX, NULL_RTX, 0, 0)
8084 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
8085 NULL_RTX, NULL_RTX, 0, 0));
8088 /* Return true if it is worthwhile to split a 64-bit constant into two
8089 32-bit operations. This is the case if optimizing for size, or
8090 if we have load delay slots, or if one 32-bit part can be done with
8091 a single data operation. */
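/* Illustrative example (not from the original source): for the DImode
   constant 0x0000000100000000 the high part is 1, a valid ARM
   immediate, so the function returns true without having to examine
   the low part.  */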
8092 bool
8093 arm_const_double_by_parts (rtx val)
8095 enum machine_mode mode = GET_MODE (val);
8096 rtx part;
8098 if (optimize_size || arm_ld_sched)
8099 return true;
8101 if (mode == VOIDmode)
8102 mode = DImode;
8104 part = gen_highpart_mode (SImode, mode, val);
8106 gcc_assert (GET_CODE (part) == CONST_INT);
8108 if (const_ok_for_arm (INTVAL (part))
8109 || const_ok_for_arm (~INTVAL (part)))
8110 return true;
8112 part = gen_lowpart (SImode, val);
8114 gcc_assert (GET_CODE (part) == CONST_INT);
8116 if (const_ok_for_arm (INTVAL (part))
8117 || const_ok_for_arm (~INTVAL (part)))
8118 return true;
8120 return false;
8123 /* Scan INSN and note any of its operands that need fixing.
8124 If DO_PUSHES is false we do not actually push any of the fixups
8125 needed. The function returns TRUE if any fixups were needed/pushed.
8126 This is used by arm_memory_load_p() which needs to know about loads
8127 of constants that will be converted into minipool loads. */
8128 static bool
8129 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
8131 bool result = false;
8132 int opno;
8134 extract_insn (insn);
8136 if (!constrain_operands (1))
8137 fatal_insn_not_found (insn);
8139 if (recog_data.n_alternatives == 0)
8140 return false;
8142 /* Fill in recog_op_alt with information about the constraints of
8143 this insn. */
8144 preprocess_constraints ();
8146 for (opno = 0; opno < recog_data.n_operands; opno++)
8148 /* Things we need to fix can only occur in inputs. */
8149 if (recog_data.operand_type[opno] != OP_IN)
8150 continue;
8152 /* If this alternative is a memory reference, then any mention
8153 of constants in this alternative is really to fool reload
8154 into allowing us to accept one there. We need to fix them up
8155 now so that we output the right code. */
8156 if (recog_op_alt[opno][which_alternative].memory_ok)
8158 rtx op = recog_data.operand[opno];
8160 if (CONSTANT_P (op))
8162 if (do_pushes)
8163 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
8164 recog_data.operand_mode[opno], op);
8165 result = true;
8167 else if (GET_CODE (op) == MEM
8168 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
8169 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
8171 if (do_pushes)
8173 rtx cop = avoid_constant_pool_reference (op);
8175 /* Casting the address of something to a mode narrower
8176 than a word can cause avoid_constant_pool_reference()
8177 to return the pool reference itself. That's no good to
8178 us here. Lets just hope that we can use the
8179 constant pool value directly. */
8180 if (op == cop)
8181 cop = get_pool_constant (XEXP (op, 0));
8183 push_minipool_fix (insn, address,
8184 recog_data.operand_loc[opno],
8185 recog_data.operand_mode[opno], cop);
8188 result = true;
8193 return result;
8196 /* GCC puts the pool in the wrong place for ARM, since we can only
8197 load addresses a limited distance around the pc. We do some
8198 special munging to move the constant pool values to the correct
8199 point in the code. */
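/* Illustrative sketch (not from the original source): after this pass a
   constant load refers to a nearby minipool entry instead of the
   distant pool GCC would otherwise emit, roughly

       ldr     r0, .LP0            @ within ldr's pc-relative range
       b       .L1                 @ barrier: jump around the pool
   .LP0:
       .word   0x12345678
   .L1:

   All label names and the constant are invented for the example.  */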
8200 static void
8201 arm_reorg (void)
8203 rtx insn;
8204 HOST_WIDE_INT address = 0;
8205 Mfix * fix;
8207 minipool_fix_head = minipool_fix_tail = NULL;
8209 /* The first insn must always be a note, or the code below won't
8210 scan it properly. */
8211 insn = get_insns ();
8212 gcc_assert (GET_CODE (insn) == NOTE);
8213 minipool_pad = 0;
8215 /* Scan all the insns and record the operands that will need fixing. */
8216 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
8218 if (TARGET_CIRRUS_FIX_INVALID_INSNS
8219 && (arm_cirrus_insn_p (insn)
8220 || GET_CODE (insn) == JUMP_INSN
8221 || arm_memory_load_p (insn)))
8222 cirrus_reorg (insn);
8224 if (GET_CODE (insn) == BARRIER)
8225 push_minipool_barrier (insn, address);
8226 else if (INSN_P (insn))
8228 rtx table;
8230 note_invalid_constants (insn, address, true);
8231 address += get_attr_length (insn);
8233 /* If the insn is a vector jump, add the size of the table
8234 and skip the table. */
8235 if ((table = is_jump_table (insn)) != NULL)
8237 address += get_jump_table_size (table);
8238 insn = table;
8243 fix = minipool_fix_head;
8245 /* Now scan the fixups and perform the required changes. */
8246 while (fix)
8248 Mfix * ftmp;
8249 Mfix * fdel;
8250 Mfix * last_added_fix;
8251 Mfix * last_barrier = NULL;
8252 Mfix * this_fix;
8254 /* Skip any further barriers before the next fix. */
8255 while (fix && GET_CODE (fix->insn) == BARRIER)
8256 fix = fix->next;
8258 /* No more fixes. */
8259 if (fix == NULL)
8260 break;
8262 last_added_fix = NULL;
8264 for (ftmp = fix; ftmp; ftmp = ftmp->next)
8266 if (GET_CODE (ftmp->insn) == BARRIER)
8268 if (ftmp->address >= minipool_vector_head->max_address)
8269 break;
8271 last_barrier = ftmp;
8273 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
8274 break;
8276 last_added_fix = ftmp; /* Keep track of the last fix added. */
8279 /* If we found a barrier, drop back to that; any fixes that we
8280 could have reached but come after the barrier will now go in
8281 the next mini-pool. */
8282 if (last_barrier != NULL)
8284 /* Reduce the refcount for those fixes that won't go into this
8285 pool after all. */
8286 for (fdel = last_barrier->next;
8287 fdel && fdel != ftmp;
8288 fdel = fdel->next)
8290 fdel->minipool->refcount--;
8291 fdel->minipool = NULL;
8294 ftmp = last_barrier;
8296 else
8298 /* ftmp is the first fix that we can't fit into this pool and
8299 there are no natural barriers that we could use. Insert a
8300 new barrier in the code somewhere between the previous
8301 fix and this one, and arrange to jump around it. */
8302 HOST_WIDE_INT max_address;
8304 /* The last item on the list of fixes must be a barrier, so
8305 we can never run off the end of the list of fixes without
8306 last_barrier being set. */
8307 gcc_assert (ftmp);
8309 max_address = minipool_vector_head->max_address;
8310 /* Check that there isn't another fix that is in range that
8311 we couldn't fit into this pool because the pool was
8312 already too large: we need to put the pool before such an
8313 instruction. The pool itself may come just after the
8314 fix because create_fix_barrier also allows space for a
8315 jump instruction. */
8316 if (ftmp->address < max_address)
8317 max_address = ftmp->address + 1;
8319 last_barrier = create_fix_barrier (last_added_fix, max_address);
8322 assign_minipool_offsets (last_barrier);
8324 while (ftmp)
8326 if (GET_CODE (ftmp->insn) != BARRIER
8327 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
8328 == NULL))
8329 break;
8331 ftmp = ftmp->next;
8334 /* Scan over the fixes we have identified for this pool, fixing them
8335 up and adding the constants to the pool itself. */
8336 for (this_fix = fix; this_fix && ftmp != this_fix;
8337 this_fix = this_fix->next)
8338 if (GET_CODE (this_fix->insn) != BARRIER)
8340 rtx addr
8341 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
8342 minipool_vector_label),
8343 this_fix->minipool->offset);
8344 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
8347 dump_minipool (last_barrier->insn);
8348 fix = ftmp;
8351 /* From now on we must synthesize any constants that we can't handle
8352 directly. This can happen if the RTL gets split during final
8353 instruction generation. */
8354 after_arm_reorg = 1;
8356 /* Free the minipool memory. */
8357 obstack_free (&minipool_obstack, minipool_startobj);
8360 /* Routines to output assembly language. */
8362 /* If the rtx is the correct value then return the string of the number.
8363 In this way we can ensure that valid double constants are generated even
8364 when cross compiling. */
8365 const char *
8366 fp_immediate_constant (rtx x)
8368 REAL_VALUE_TYPE r;
8369 int i;
8371 if (!fp_consts_inited)
8372 init_fp_table ();
8374 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8375 for (i = 0; i < 8; i++)
8376 if (REAL_VALUES_EQUAL (r, values_fp[i]))
8377 return strings_fp[i];
8379 gcc_unreachable ();
8382 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
8383 static const char *
8384 fp_const_from_val (REAL_VALUE_TYPE *r)
8386 int i;
8388 if (!fp_consts_inited)
8389 init_fp_table ();
8391 for (i = 0; i < 8; i++)
8392 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
8393 return strings_fp[i];
8395 gcc_unreachable ();
8398 /* Output the operands of a LDM/STM instruction to STREAM.
8399 MASK is the ARM register set mask of which only bits 0-15 are important.
8400 REG is the base register, either the frame pointer or the stack pointer;
8401 INSTR is the possibly suffixed load or store instruction. */
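/* Example (illustrative): called as
   print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, mask) with r4, r5 and
   lr set in MASK, this prints

       ldmfd   sp!, {r4, r5, lr}
*/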
8403 static void
8404 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
8405 unsigned long mask)
8407 unsigned i;
8408 bool not_first = FALSE;
8410 fputc ('\t', stream);
8411 asm_fprintf (stream, instr, reg);
8412 fputs (", {", stream);
8414 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8415 if (mask & (1 << i))
8417 if (not_first)
8418 fprintf (stream, ", ");
8420 asm_fprintf (stream, "%r", i);
8421 not_first = TRUE;
8424 fprintf (stream, "}\n");
8428 /* Output a FLDMX instruction to STREAM.
8429 BASE is the register containing the address.
8430 REG and COUNT specify the register range.
8431 Extra registers may be added to avoid hardware bugs. */
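/* Illustrative example (not from the original source): BASE ip, REG 8
   and COUNT 2 on a pre-ARMv6 core is widened to three registers by the
   workaround below and prints

       fldmfdx ip!, {d8, d9, d10}
*/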
8433 static void
8434 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
8436 int i;
8438 /* Workaround ARM10 VFPr1 bug. */
8439 if (count == 2 && !arm_arch6)
8441 if (reg == 15)
8442 reg--;
8443 count++;
8446 fputc ('\t', stream);
8447 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
8449 for (i = reg; i < reg + count; i++)
8451 if (i > reg)
8452 fputs (", ", stream);
8453 asm_fprintf (stream, "d%d", i);
8455 fputs ("}\n", stream);
8460 /* Output the assembly for a store multiple. */
8462 const char *
8463 vfp_output_fstmx (rtx * operands)
8465 char pattern[100];
8466 int p;
8467 int base;
8468 int i;
8470 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8471 p = strlen (pattern);
8473 gcc_assert (GET_CODE (operands[1]) == REG);
8475 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8476 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8478 p += sprintf (&pattern[p], ", d%d", base + i);
8480 strcpy (&pattern[p], "}");
8482 output_asm_insn (pattern, operands);
8483 return "";
8487 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
8488 number of bytes pushed. */
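/* Illustrative example (not from the original source): saving two
   register pairs, say d8 and d9, on an ARMv6 core (so the ARM10
   workaround below is skipped) emits the equivalent of

       fstmfdx sp!, {d8, d9}

   and returns 2 * 8 + 4 = 20, the 16 bytes of register data plus the
   4-byte FSTMX format word.  */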
8490 static int
8491 vfp_emit_fstmx (int base_reg, int count)
8493 rtx par;
8494 rtx dwarf;
8495 rtx tmp, reg;
8496 int i;
8498 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8499 register pairs are stored by a store multiple insn. We avoid this
8500 by pushing an extra pair. */
8501 if (count == 2 && !arm_arch6)
8503 if (base_reg == LAST_VFP_REGNUM - 3)
8504 base_reg -= 2;
8505 count++;
8508 /* ??? The frame layout is implementation defined. We describe
8509 standard format 1 (equivalent to a FSTMD insn and an unused pad word).
8510 We really need some way of representing the whole block so that the
8511 unwinder can figure it out at runtime. */
8512 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8513 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8515 reg = gen_rtx_REG (DFmode, base_reg);
8516 base_reg += 2;
8518 XVECEXP (par, 0, 0)
8519 = gen_rtx_SET (VOIDmode,
8520 gen_frame_mem (BLKmode,
8521 gen_rtx_PRE_DEC (BLKmode,
8522 stack_pointer_rtx)),
8523 gen_rtx_UNSPEC (BLKmode,
8524 gen_rtvec (1, reg),
8525 UNSPEC_PUSH_MULT));
8527 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8528 plus_constant (stack_pointer_rtx, -(count * 8 + 4)));
8529 RTX_FRAME_RELATED_P (tmp) = 1;
8530 XVECEXP (dwarf, 0, 0) = tmp;
8532 tmp = gen_rtx_SET (VOIDmode,
8533 gen_frame_mem (DFmode, stack_pointer_rtx),
8534 reg);
8535 RTX_FRAME_RELATED_P (tmp) = 1;
8536 XVECEXP (dwarf, 0, 1) = tmp;
8538 for (i = 1; i < count; i++)
8540 reg = gen_rtx_REG (DFmode, base_reg);
8541 base_reg += 2;
8542 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8544 tmp = gen_rtx_SET (VOIDmode,
8545 gen_frame_mem (DFmode,
8546 plus_constant (stack_pointer_rtx,
8547 i * 8)),
8548 reg);
8549 RTX_FRAME_RELATED_P (tmp) = 1;
8550 XVECEXP (dwarf, 0, i + 1) = tmp;
8553 par = emit_insn (par);
8554 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8555 REG_NOTES (par));
8556 RTX_FRAME_RELATED_P (par) = 1;
8558 return count * 8 + 4;
8562 /* Output a 'call' insn. */
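/* Illustrative expansion (not from the original source): on a pre-v4t
   core without interworking, a call through r0 becomes

       mov     lr, pc
       mov     pc, r0

   while TARGET_INTERWORK or ARMv4T uses "bx r0" instead of the final
   mov.  */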
8563 const char *
8564 output_call (rtx *operands)
8566 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8568 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8569 if (REGNO (operands[0]) == LR_REGNUM)
8571 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8572 output_asm_insn ("mov%?\t%0, %|lr", operands);
8575 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8577 if (TARGET_INTERWORK || arm_arch4t)
8578 output_asm_insn ("bx%?\t%0", operands);
8579 else
8580 output_asm_insn ("mov%?\t%|pc, %0", operands);
8582 return "";
8585 /* Output a 'call' insn that is a reference in memory. */
8586 const char *
8587 output_call_mem (rtx *operands)
8589 if (TARGET_INTERWORK && !arm_arch5)
8591 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8592 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8593 output_asm_insn ("bx%?\t%|ip", operands);
8595 else if (regno_use_in (LR_REGNUM, operands[0]))
8597 /* LR is used in the memory address. We load the address in the
8598 first instruction. It's safe to use IP as the target of the
8599 load since the call will kill it anyway. */
8600 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8601 if (arm_arch5)
8602 output_asm_insn ("blx%?\t%|ip", operands);
8603 else
8605 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8606 if (arm_arch4t)
8607 output_asm_insn ("bx%?\t%|ip", operands);
8608 else
8609 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8612 else
8614 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8615 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8618 return "";
8622 /* Output a move from arm registers to an fpa register.
8623 OPERANDS[0] is an fpa register.
8624 OPERANDS[1] is the first register of an arm register pair. */
8625 const char *
8626 output_mov_long_double_fpa_from_arm (rtx *operands)
8628 int arm_reg0 = REGNO (operands[1]);
8629 rtx ops[3];
8631 gcc_assert (arm_reg0 != IP_REGNUM);
8633 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8634 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8635 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8637 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8638 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8640 return "";
8643 /* Output a move from an fpa register to arm registers.
8644 OPERANDS[0] is the first register of an arm register pair.
8645 OPERANDS[1] is an fpa register. */
8646 const char *
8647 output_mov_long_double_arm_from_fpa (rtx *operands)
8649 int arm_reg0 = REGNO (operands[0]);
8650 rtx ops[3];
8652 gcc_assert (arm_reg0 != IP_REGNUM);
8654 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8655 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8656 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8658 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8659 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8660 return "";
8663 /* Output a move from arm registers to arm registers of a long double.
8664 OPERANDS[0] is the destination.
8665 OPERANDS[1] is the source. */
8666 const char *
8667 output_mov_long_double_arm_from_arm (rtx *operands)
8669 /* We have to be careful here because the two might overlap. */
8670 int dest_start = REGNO (operands[0]);
8671 int src_start = REGNO (operands[1]);
8672 rtx ops[2];
8673 int i;
8675 if (dest_start < src_start)
8677 for (i = 0; i < 3; i++)
8679 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8680 ops[1] = gen_rtx_REG (SImode, src_start + i);
8681 output_asm_insn ("mov%?\t%0, %1", ops);
8684 else
8686 for (i = 2; i >= 0; i--)
8688 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8689 ops[1] = gen_rtx_REG (SImode, src_start + i);
8690 output_asm_insn ("mov%?\t%0, %1", ops);
8694 return "";
8698 /* Output a move from arm registers to an fpa register.
8699 OPERANDS[0] is an fpa register.
8700 OPERANDS[1] is the first register of an arm register pair. */
8701 const char *
8702 output_mov_double_fpa_from_arm (rtx *operands)
8704 int arm_reg0 = REGNO (operands[1]);
8705 rtx ops[2];
8707 gcc_assert (arm_reg0 != IP_REGNUM);
8709 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8710 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8711 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8712 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8713 return "";
8716 /* Output a move from an fpa register to arm registers.
8717 OPERANDS[0] is the first register of an arm register pair.
8718 OPERANDS[1] is an fpa register. */
8719 const char *
8720 output_mov_double_arm_from_fpa (rtx *operands)
8722 int arm_reg0 = REGNO (operands[0]);
8723 rtx ops[2];
8725 gcc_assert (arm_reg0 != IP_REGNUM);
8727 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8728 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8729 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8730 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8731 return "";
8734 /* Output a move between double words.
8735 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8736 or MEM<-REG and all MEMs must be offsettable addresses. */
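/* For example (illustrative): a DImode load from a plain register
   address, say r0/r1 <- [r2], takes the first case below and emits

       ldmia   r2, {r0, r1}
*/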
8737 const char *
8738 output_move_double (rtx *operands)
8740 enum rtx_code code0 = GET_CODE (operands[0]);
8741 enum rtx_code code1 = GET_CODE (operands[1]);
8742 rtx otherops[3];
8744 if (code0 == REG)
8746 int reg0 = REGNO (operands[0]);
8748 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8750 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8752 switch (GET_CODE (XEXP (operands[1], 0)))
8754 case REG:
8755 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8756 break;
8758 case PRE_INC:
8759 gcc_assert (TARGET_LDRD);
8760 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8761 break;
8763 case PRE_DEC:
8764 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8765 break;
8767 case POST_INC:
8768 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8769 break;
8771 case POST_DEC:
8772 gcc_assert (TARGET_LDRD);
8773 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8774 break;
8776 case PRE_MODIFY:
8777 case POST_MODIFY:
8778 otherops[0] = operands[0];
8779 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8780 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8782 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8784 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8786 /* Registers overlap so split out the increment. */
8787 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8788 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8790 else
8791 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8793 else
8795 /* We only allow constant increments, so this is safe. */
8796 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8798 break;
8800 case LABEL_REF:
8801 case CONST:
8802 output_asm_insn ("adr%?\t%0, %1", operands);
8803 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8804 break;
8806 default:
8807 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8808 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8810 otherops[0] = operands[0];
8811 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8812 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8814 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8816 if (GET_CODE (otherops[2]) == CONST_INT)
8818 switch ((int) INTVAL (otherops[2]))
8820 case -8:
8821 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8822 return "";
8823 case -4:
8824 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8825 return "";
8826 case 4:
8827 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8828 return "";
8831 if (TARGET_LDRD
8832 && (GET_CODE (otherops[2]) == REG
8833 || (GET_CODE (otherops[2]) == CONST_INT
8834 && INTVAL (otherops[2]) > -256
8835 && INTVAL (otherops[2]) < 256)))
8837 if (reg_overlap_mentioned_p (otherops[0],
8838 otherops[2]))
8840 /* Swap base and index registers over to
8841 avoid a conflict. */
8842 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8843 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8845 /* If both registers conflict, it will usually
8846 have been fixed by a splitter. */
8847 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8849 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8850 output_asm_insn ("ldr%?d\t%0, [%1]",
8851 otherops);
8853 else
8854 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8855 return "";
8858 if (GET_CODE (otherops[2]) == CONST_INT)
8860 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8861 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8862 else
8863 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8865 else
8866 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8868 else
8869 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8871 return "ldm%?ia\t%0, %M0";
8873 else
8875 otherops[1] = adjust_address (operands[1], SImode, 4);
8876 /* Take care of overlapping base/data reg. */
8877 if (reg_mentioned_p (operands[0], operands[1]))
8879 output_asm_insn ("ldr%?\t%0, %1", otherops);
8880 output_asm_insn ("ldr%?\t%0, %1", operands);
8882 else
8884 output_asm_insn ("ldr%?\t%0, %1", operands);
8885 output_asm_insn ("ldr%?\t%0, %1", otherops);
8890 else
8892 /* Constraints should ensure this. */
8893 gcc_assert (code0 == MEM && code1 == REG);
8894 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8896 switch (GET_CODE (XEXP (operands[0], 0)))
8898 case REG:
8899 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8900 break;
8902 case PRE_INC:
8903 gcc_assert (TARGET_LDRD);
8904 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8905 break;
8907 case PRE_DEC:
8908 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8909 break;
8911 case POST_INC:
8912 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8913 break;
8915 case POST_DEC:
8916 gcc_assert (TARGET_LDRD);
8917 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8918 break;
8920 case PRE_MODIFY:
8921 case POST_MODIFY:
8922 otherops[0] = operands[1];
8923 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8924 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8926 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8927 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8928 else
8929 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8930 break;
8932 case PLUS:
8933 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8934 if (GET_CODE (otherops[2]) == CONST_INT)
8936 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8938 case -8:
8939 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8940 return "";
8942 case -4:
8943 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8944 return "";
8946 case 4:
8947 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8948 return "";
8951 if (TARGET_LDRD
8952 && (GET_CODE (otherops[2]) == REG
8953 || (GET_CODE (otherops[2]) == CONST_INT
8954 && INTVAL (otherops[2]) > -256
8955 && INTVAL (otherops[2]) < 256)))
8957 otherops[0] = operands[1];
8958 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8959 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8960 return "";
8962 /* Fall through */
8964 default:
8965 otherops[0] = adjust_address (operands[0], SImode, 4);
8966 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8967 output_asm_insn ("str%?\t%1, %0", operands);
8968 output_asm_insn ("str%?\t%1, %0", otherops);
8972 return "";
8975 /* Output an ADD r, s, #n where n may be too big for one instruction.
8976 If adding zero to one register, output nothing. */
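/* Example (illustrative, not from the original source): adding
   #0x10001 cannot be encoded in one instruction, so it is split as

       add     r0, r1, #1
       add     r0, r0, #65536
*/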
8977 const char *
8978 output_add_immediate (rtx *operands)
8980 HOST_WIDE_INT n = INTVAL (operands[2]);
8982 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8984 if (n < 0)
8985 output_multi_immediate (operands,
8986 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8987 -n);
8988 else
8989 output_multi_immediate (operands,
8990 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8991 n);
8994 return "";
8997 /* Output a multiple immediate operation.
8998 OPERANDS is the vector of operands referred to in the output patterns.
8999 INSTR1 is the output pattern to use for the first constant.
9000 INSTR2 is the output pattern to use for subsequent constants.
9001 IMMED_OP is the index of the constant slot in OPERANDS.
9002 N is the constant value. */
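/* Illustrative note (not from the original source): the loop below
   scans the value two bits at a time and peels off an 8-bit chunk at
   each non-zero position, mirroring the ARM immediate encoding of an
   8-bit value rotated by an even amount.  For instance 0x00ff00ff is
   emitted as the two immediates #255 and #16711680.  */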
9003 static const char *
9004 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
9005 int immed_op, HOST_WIDE_INT n)
9007 #if HOST_BITS_PER_WIDE_INT > 32
9008 n &= 0xffffffff;
9009 #endif
9011 if (n == 0)
9013 /* Quick and easy output. */
9014 operands[immed_op] = const0_rtx;
9015 output_asm_insn (instr1, operands);
9017 else
9019 int i;
9020 const char * instr = instr1;
9022 /* Note that n is never zero here (which would give no output). */
9023 for (i = 0; i < 32; i += 2)
9025 if (n & (3 << i))
9027 operands[immed_op] = GEN_INT (n & (255 << i));
9028 output_asm_insn (instr, operands);
9029 instr = instr2;
9030 i += 6;
9035 return "";
9038 /* Return the appropriate ARM instruction for the operation code.
9039 The returned result should not be overwritten. OP is the rtx of the
9040 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
9041 was shifted. */
9042 const char *
9043 arithmetic_instr (rtx op, int shift_first_arg)
9045 switch (GET_CODE (op))
9047 case PLUS:
9048 return "add";
9050 case MINUS:
9051 return shift_first_arg ? "rsb" : "sub";
9053 case IOR:
9054 return "orr";
9056 case XOR:
9057 return "eor";
9059 case AND:
9060 return "and";
9062 default:
9063 gcc_unreachable ();
9067 /* Ensure valid constant shifts and return the appropriate shift mnemonic
9068 for the operation code. The returned result should not be overwritten.
9069 OP is the rtx code of the shift.
9070 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
9071 constant amount if the shift is by a constant. */
9072 static const char *
9073 shift_op (rtx op, HOST_WIDE_INT *amountp)
9075 const char * mnem;
9076 enum rtx_code code = GET_CODE (op);
9078 switch (GET_CODE (XEXP (op, 1)))
9080 case REG:
9081 case SUBREG:
9082 *amountp = -1;
9083 break;
9085 case CONST_INT:
9086 *amountp = INTVAL (XEXP (op, 1));
9087 break;
9089 default:
9090 gcc_unreachable ();
9093 switch (code)
9095 case ASHIFT:
9096 mnem = "asl";
9097 break;
9099 case ASHIFTRT:
9100 mnem = "asr";
9101 break;
9103 case LSHIFTRT:
9104 mnem = "lsr";
9105 break;
9107 case ROTATE:
9108 gcc_assert (*amountp != -1);
9109 *amountp = 32 - *amountp;
9111 /* Fall through. */
9113 case ROTATERT:
9114 mnem = "ror";
9115 break;
9117 case MULT:
9118 /* We never have to worry about the amount being other than a
9119 power of 2, since this case can never be reloaded from a reg. */
9120 gcc_assert (*amountp != -1);
9121 *amountp = int_log2 (*amountp);
9122 return "asl";
9124 default:
9125 gcc_unreachable ();
9128 if (*amountp != -1)
9130 /* This is not 100% correct, but follows from the desire to merge
9131 multiplication by a power of 2 with the recognizer for a
9132 shift. >=32 is not a valid shift for "asl", so we must try to
9133 output a shift that produces the correct arithmetical result.
9134 Using lsr #32 is identical except for the fact that the carry bit
9135 is not set correctly if we set the flags; but we never use the
9136 carry bit from such an operation, so we can ignore that. */
9137 if (code == ROTATERT)
9138 /* Rotate is just modulo 32. */
9139 *amountp &= 31;
9140 else if (*amountp != (*amountp & 31))
9142 if (code == ASHIFT)
9143 mnem = "lsr";
9144 *amountp = 32;
9147 /* Shifts of 0 are no-ops. */
9148 if (*amountp == 0)
9149 return NULL;
9152 return mnem;
9155 /* Obtain the shift count from POWER, which must be a power of two. */
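/* E.g. int_log2 (8) == 3: the loop below simply scans upward for the
   lowest set bit.  */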
9157 static HOST_WIDE_INT
9158 int_log2 (HOST_WIDE_INT power)
9160 HOST_WIDE_INT shift = 0;
9162 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
9164 gcc_assert (shift <= 31);
9165 shift++;
9168 return shift;
9171 /* Output a .ascii pseudo-op, keeping track of lengths. This is
9172 because /bin/as is horribly restrictive. The judgement about
9173 whether or not each character is 'printable' (and can be output as
9174 is) or not (and must be printed with an octal escape) must be made
9175 with reference to the *host* character set -- the situation is
9176 similar to that discussed in the comments above pp_c_char in
9177 c-pretty-print.c. */
9179 #define MAX_ASCII_LEN 51
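/* Example output (illustrative): the four input bytes 'a', '"', 'b',
   '\n' are emitted as

       .ascii "a\"b\012"
*/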
9181 void
9182 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
9184 int i;
9185 int len_so_far = 0;
9187 fputs ("\t.ascii\t\"", stream);
9189 for (i = 0; i < len; i++)
9191 int c = p[i];
9193 if (len_so_far >= MAX_ASCII_LEN)
9195 fputs ("\"\n\t.ascii\t\"", stream);
9196 len_so_far = 0;
9199 if (ISPRINT (c))
9201 if (c == '\\' || c == '\"')
9203 putc ('\\', stream);
9204 len_so_far++;
9206 putc (c, stream);
9207 len_so_far++;
9209 else
9211 fprintf (stream, "\\%03o", c);
9212 len_so_far += 4;
9216 fputs ("\"\n", stream);
9219 /* Compute the register save mask for registers 0 through 12
9220 inclusive. This code is used by arm_compute_save_reg_mask. */
9222 static unsigned long
9223 arm_compute_save_reg0_reg12_mask (void)
9225 unsigned long func_type = arm_current_func_type ();
9226 unsigned long save_reg_mask = 0;
9227 unsigned int reg;
9229 if (IS_INTERRUPT (func_type))
9231 unsigned int max_reg;
9232 /* Interrupt functions must not corrupt any registers,
9233 even call clobbered ones. If this is a leaf function
9234 we can just examine the registers used by the RTL, but
9235 otherwise we have to assume that whatever function is
9236 called might clobber anything, and so we have to save
9237 all the call-clobbered registers as well. */
9238 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
9239 /* FIQ handlers have registers r8 - r12 banked, so
9240 we only need to check r0 - r7. Normal ISRs only
9241 bank r14 and r15, so we must check up to r12.
9242 r13 is the stack pointer which is always preserved,
9243 so we do not need to consider it here. */
9244 max_reg = 7;
9245 else
9246 max_reg = 12;
9248 for (reg = 0; reg <= max_reg; reg++)
9249 if (regs_ever_live[reg]
9250 || (! current_function_is_leaf && call_used_regs [reg]))
9251 save_reg_mask |= (1 << reg);
9253 /* Also save the pic base register if necessary. */
9254 if (flag_pic
9255 && !TARGET_SINGLE_PIC_BASE
9256 && arm_pic_register != INVALID_REGNUM
9257 && current_function_uses_pic_offset_table)
9258 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9260 else
9262 /* In the normal case we only need to save those registers
9263 which are call saved and which are used by this function. */
9264 for (reg = 0; reg <= 10; reg++)
9265 if (regs_ever_live[reg] && ! call_used_regs [reg])
9266 save_reg_mask |= (1 << reg);
9268 /* Handle the frame pointer as a special case. */
9269 if (! TARGET_APCS_FRAME
9270 && ! frame_pointer_needed
9271 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
9272 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9273 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9275 /* If we aren't loading the PIC register,
9276 don't stack it even though it may be live. */
9277 if (flag_pic
9278 && !TARGET_SINGLE_PIC_BASE
9279 && arm_pic_register != INVALID_REGNUM
9280 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
9281 || current_function_uses_pic_offset_table))
9282 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9285 /* Save registers so the exception handler can modify them. */
9286 if (current_function_calls_eh_return)
9288 unsigned int i;
9290 for (i = 0; ; i++)
9292 reg = EH_RETURN_DATA_REGNO (i);
9293 if (reg == INVALID_REGNUM)
9294 break;
9295 save_reg_mask |= 1 << reg;
9299 return save_reg_mask;
9302 /* Compute a bit mask of which registers need to be
9303 saved on the stack for the current function. */
9305 static unsigned long
9306 arm_compute_save_reg_mask (void)
9308 unsigned int save_reg_mask = 0;
9309 unsigned long func_type = arm_current_func_type ();
9311 if (IS_NAKED (func_type))
9312 /* This should never really happen. */
9313 return 0;
9315 /* If we are creating a stack frame, then we must save the frame pointer,
9316 IP (which will hold the old stack pointer), LR and the PC. */
9317 if (frame_pointer_needed)
9318 save_reg_mask |=
9319 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9320 | (1 << IP_REGNUM)
9321 | (1 << LR_REGNUM)
9322 | (1 << PC_REGNUM);
9324 /* Volatile functions do not return, so there
9325 is no need to save any other registers. */
9326 if (IS_VOLATILE (func_type))
9327 return save_reg_mask;
9329 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9331 /* Decide if we need to save the link register.
9332 Interrupt routines have their own banked link register,
9333 so they never need to save it.
9334 Otherwise if we do not use the link register we do not need to save
9335 it. If we are pushing other registers onto the stack however, we
9336 can save an instruction in the epilogue by pushing the link register
9337 now and then popping it back into the PC. This incurs extra memory
9338 accesses though, so we only do it when optimizing for size, and only
9339 if we know that we will not need a fancy return sequence. */
9340 if (regs_ever_live [LR_REGNUM]
9341 || (save_reg_mask
9342 && optimize_size
9343 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9344 && !current_function_calls_eh_return))
9345 save_reg_mask |= 1 << LR_REGNUM;
9347 if (cfun->machine->lr_save_eliminated)
9348 save_reg_mask &= ~ (1 << LR_REGNUM);
9350 if (TARGET_REALLY_IWMMXT
9351 && ((bit_count (save_reg_mask)
9352 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
9354 unsigned int reg;
9356 /* The total number of registers that are going to be pushed
9357 onto the stack is odd. We need to ensure that the stack
9358 is 64-bit aligned before we start to save iWMMXt registers,
9359 and also before we start to create locals. (A local variable
9360 might be a double or long long which we will load/store using
9361 an iWMMXt instruction). Therefore we need to push another
9362 ARM register, so that the stack will be 64-bit aligned. We
9363 try to avoid using the arg registers (r0 - r3) as they might be
9364 used to pass values in a tail call. */
9365 for (reg = 4; reg <= 12; reg++)
9366 if ((save_reg_mask & (1 << reg)) == 0)
9367 break;
9369 if (reg <= 12)
9370 save_reg_mask |= (1 << reg);
9371 else
9373 cfun->machine->sibcall_blocked = 1;
9374 save_reg_mask |= (1 << 3);
9378 return save_reg_mask;
9382 /* Compute a bit mask of which registers need to be
9383 saved on the stack for the current function. */
9384 static unsigned long
9385 thumb_compute_save_reg_mask (void)
9387 unsigned long mask;
9388 unsigned reg;
9390 mask = 0;
9391 for (reg = 0; reg < 12; reg ++)
9392 if (regs_ever_live[reg] && !call_used_regs[reg])
9393 mask |= 1 << reg;
9395 if (flag_pic
9396 && !TARGET_SINGLE_PIC_BASE
9397 && arm_pic_register != INVALID_REGNUM
9398 && current_function_uses_pic_offset_table)
9399 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9401 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
9402 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
9403 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
9405 /* LR will also be pushed if any lo regs are pushed. */
9406 if (mask & 0xff || thumb_force_lr_save ())
9407 mask |= (1 << LR_REGNUM);
9409 /* Make sure we have a low work register if we need one.
9410 We will need one if we are going to push a high register,
9411 but we are not currently intending to push a low register. */
9412 if ((mask & 0xff) == 0
9413 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9415 /* Use thumb_find_work_register to choose which register
9416 we will use. If the register is live then we will
9417 have to push it. Use LAST_LO_REGNUM as our fallback
9418 choice for the register to select. */
9419 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9421 if (! call_used_regs[reg])
9422 mask |= 1 << reg;
9425 return mask;
9429 /* Return the number of bytes required to save VFP registers. */
9430 static int
9431 arm_get_vfp_saved_size (void)
9433 unsigned int regno;
9434 int count;
9435 int saved;
9437 saved = 0;
9438 /* Space for saved VFP registers. */
9439 if (TARGET_HARD_FLOAT && TARGET_VFP)
9441 count = 0;
9442 for (regno = FIRST_VFP_REGNUM;
9443 regno < LAST_VFP_REGNUM;
9444 regno += 2)
9446 if ((!regs_ever_live[regno] || call_used_regs[regno])
9447 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9449 if (count > 0)
9451 /* Workaround ARM10 VFPr1 bug. */
9452 if (count == 2 && !arm_arch6)
9453 count++;
9454 saved += count * 8 + 4;
9456 count = 0;
9458 else
9459 count++;
9461 if (count > 0)
9463 if (count == 2 && !arm_arch6)
9464 count++;
9465 saved += count * 8 + 4;
9468 return saved;
9472 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9473 everything bar the final return instruction. */
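/* Illustrative example (not from the original source): for a normal,
   unconditional return where r4, fp and lr were saved, the code below
   would emit

       ldmfd   sp!, {r4, fp, pc}

   loading the saved lr straight into the pc.  */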
9474 const char *
9475 output_return_instruction (rtx operand, int really_return, int reverse)
9477 char conditional[10];
9478 char instr[100];
9479 unsigned reg;
9480 unsigned long live_regs_mask;
9481 unsigned long func_type;
9482 arm_stack_offsets *offsets;
9484 func_type = arm_current_func_type ();
9486 if (IS_NAKED (func_type))
9487 return "";
9489 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9491 /* If this function was declared non-returning, and we have
9492 found a tail call, then we have to trust that the called
9493 function won't return. */
9494 if (really_return)
9496 rtx ops[2];
9498 /* Otherwise, trap an attempted return by aborting. */
9499 ops[0] = operand;
9500 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9501 : "abort");
9502 assemble_external_libcall (ops[1]);
9503 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9506 return "";
9509 gcc_assert (!current_function_calls_alloca || really_return);
9511 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9513 return_used_this_function = 1;
9515 live_regs_mask = arm_compute_save_reg_mask ();
9517 if (live_regs_mask)
9519 const char * return_reg;
9521 /* If we do not have any special requirements for function exit
9522 (e.g. interworking, or ISR) then we can load the return address
9523 directly into the PC. Otherwise we must load it into LR. */
9524 if (really_return
9525 && ! TARGET_INTERWORK)
9526 return_reg = reg_names[PC_REGNUM];
9527 else
9528 return_reg = reg_names[LR_REGNUM];
9530 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9532 /* There are three possible reasons for the IP register
9533 being saved. 1) a stack frame was created, in which case
9534 IP contains the old stack pointer, or 2) an ISR routine
9535 corrupted it, or 3) it was saved to align the stack on
9536 iWMMXt. In case 1, restore IP into SP, otherwise just
9537 restore IP. */
9538 if (frame_pointer_needed)
9540 live_regs_mask &= ~ (1 << IP_REGNUM);
9541 live_regs_mask |= (1 << SP_REGNUM);
9543 else
9544 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9547 /* On some ARM architectures it is faster to use LDR rather than
9548 LDM to load a single register. On other architectures, the
9549 cost is the same. In 26 bit mode, or for exception handlers,
9550 we have to use LDM to load the PC so that the CPSR is also
9551 restored. */
9552 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9553 if (live_regs_mask == (1U << reg))
9554 break;
9556 if (reg <= LAST_ARM_REGNUM
9557 && (reg != LR_REGNUM
9558 || ! really_return
9559 || ! IS_INTERRUPT (func_type)))
9561 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9562 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9564 else
9566 char *p;
9567 int first = 1;
9569 /* Generate the load multiple instruction to restore the
9570 registers. Note we can get here, even if
9571 frame_pointer_needed is true, but only if sp already
9572 points to the base of the saved core registers. */
9573 if (live_regs_mask & (1 << SP_REGNUM))
9575 unsigned HOST_WIDE_INT stack_adjust;
9577 offsets = arm_get_frame_offsets ();
9578 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9579 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9581 if (stack_adjust && arm_arch5)
9582 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9583 else
9585 /* If we can't use ldmib (SA110 bug),
9586 then try to pop r3 instead. */
9587 if (stack_adjust)
9588 live_regs_mask |= 1 << 3;
9589 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9592 else
9593 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9595 p = instr + strlen (instr);
9597 for (reg = 0; reg <= SP_REGNUM; reg++)
9598 if (live_regs_mask & (1 << reg))
9600 int l = strlen (reg_names[reg]);
9602 if (first)
9603 first = 0;
9604 else
9606 memcpy (p, ", ", 2);
9607 p += 2;
9610 memcpy (p, "%|", 2);
9611 memcpy (p + 2, reg_names[reg], l);
9612 p += l + 2;
9615 if (live_regs_mask & (1 << LR_REGNUM))
9617 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9618 /* If returning from an interrupt, restore the CPSR. */
9619 if (IS_INTERRUPT (func_type))
9620 strcat (p, "^");
9622 else
9623 strcpy (p, "}");
9626 output_asm_insn (instr, & operand);
9628 /* See if we need to generate an extra instruction to
9629 perform the actual function return. */
9630 if (really_return
9631 && func_type != ARM_FT_INTERWORKED
9632 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9634 /* The return has already been handled
9635 by loading the LR into the PC. */
9636 really_return = 0;
9640 if (really_return)
9642 switch ((int) ARM_FUNC_TYPE (func_type))
9644 case ARM_FT_ISR:
9645 case ARM_FT_FIQ:
9646 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9647 break;
9649 case ARM_FT_INTERWORKED:
9650 sprintf (instr, "bx%s\t%%|lr", conditional);
9651 break;
9653 case ARM_FT_EXCEPTION:
9654 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9655 break;
9657 default:
9658 /* Use bx if it's available. */
9659 if (arm_arch5 || arm_arch4t)
9660 sprintf (instr, "bx%s\t%%|lr", conditional);
9661 else
9662 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9663 break;
9666 output_asm_insn (instr, & operand);
9669 return "";
9672 /* Write the function name into the code section, directly preceding
9673 the function prologue.
9675 Code will be output similar to this:
9676 t0
9677 .ascii "arm_poke_function_name", 0
9678 .align
9679 t1
9680 .word 0xff000000 + (t1 - t0)
9681 arm_poke_function_name
9682 mov ip, sp
9683 stmfd sp!, {fp, ip, lr, pc}
9684 sub fp, ip, #4
9686 When performing a stack backtrace, code can inspect the value
9687 of 'pc' stored at 'fp' + 0. If the trace function then looks
9688 at location pc - 12 and the top 8 bits are set, then we know
9689 that there is a function name embedded immediately preceding this
9690 location, whose length is ((pc[-3]) & 0x00ffffff).
9692 We assume that pc is declared as a pointer to an unsigned long.
9694 It is of no benefit to output the function name if we are assembling
9695 a leaf function. These function types will not contain a stack
9696 backtrace structure, therefore it is not possible to determine the
9697 function name. */
9698 void
9699 arm_poke_function_name (FILE *stream, const char *name)
9701 unsigned long alignlength;
9702 unsigned long length;
9703 rtx x;
9705 length = strlen (name) + 1;
9706 alignlength = ROUND_UP_WORD (length);
9708 ASM_OUTPUT_ASCII (stream, name, length);
9709 ASM_OUTPUT_ALIGN (stream, 2);
9710 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9711 assemble_aligned_integer (UNITS_PER_WORD, x);
9714 /* Place some comments into the assembler stream
9715 describing the current function. */
9716 static void
9717 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9719 unsigned long func_type;
9721 if (!TARGET_ARM)
9723 thumb_output_function_prologue (f, frame_size);
9724 return;
9727 /* Sanity check. */
9728 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9730 func_type = arm_current_func_type ();
9732 switch ((int) ARM_FUNC_TYPE (func_type))
9734 default:
9735 case ARM_FT_NORMAL:
9736 break;
9737 case ARM_FT_INTERWORKED:
9738 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9739 break;
9740 case ARM_FT_ISR:
9741 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9742 break;
9743 case ARM_FT_FIQ:
9744 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9745 break;
9746 case ARM_FT_EXCEPTION:
9747 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9748 break;
9751 if (IS_NAKED (func_type))
9752 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9754 if (IS_VOLATILE (func_type))
9755 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9757 if (IS_NESTED (func_type))
9758 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9760 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9761 current_function_args_size,
9762 current_function_pretend_args_size, frame_size);
9764 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9765 frame_pointer_needed,
9766 cfun->machine->uses_anonymous_args);
9768 if (cfun->machine->lr_save_eliminated)
9769 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9771 if (current_function_calls_eh_return)
9772 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9774 #ifdef AOF_ASSEMBLER
9775 if (flag_pic)
9776 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9777 #endif
9779 return_used_this_function = 0;
9782 const char *
9783 arm_output_epilogue (rtx sibling)
9785 int reg;
9786 unsigned long saved_regs_mask;
9787 unsigned long func_type;
9788 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9789 frame that is $fp + 4 for a non-variadic function. */
9790 int floats_offset = 0;
9791 rtx operands[3];
9792 FILE * f = asm_out_file;
9793 unsigned int lrm_count = 0;
9794 int really_return = (sibling == NULL);
9795 int start_reg;
9796 arm_stack_offsets *offsets;
9798 /* If we have already generated the return instruction
9799 then it is futile to generate anything else. */
9800 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9801 return "";
9803 func_type = arm_current_func_type ();
9805 if (IS_NAKED (func_type))
9806 /* Naked functions don't have epilogues. */
9807 return "";
9809 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9811 rtx op;
9813 /* A volatile function should never return. Call abort. */
9814 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9815 assemble_external_libcall (op);
9816 output_asm_insn ("bl\t%a0", &op);
9818 return "";
9821 /* If we are throwing an exception, then we really must be doing a
9822 return, so we can't tail-call. */
9823 gcc_assert (!current_function_calls_eh_return || really_return);
9825 offsets = arm_get_frame_offsets ();
9826 saved_regs_mask = arm_compute_save_reg_mask ();
9828 if (TARGET_IWMMXT)
9829 lrm_count = bit_count (saved_regs_mask);
9831 floats_offset = offsets->saved_args;
9832 /* Compute how far away the floats will be. */
9833 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9834 if (saved_regs_mask & (1 << reg))
9835 floats_offset += 4;
9837 if (frame_pointer_needed)
9839 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9840 int vfp_offset = offsets->frame;
9842 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9844 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9845 if (regs_ever_live[reg] && !call_used_regs[reg])
9847 floats_offset += 12;
9848 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9849 reg, FP_REGNUM, floats_offset - vfp_offset);
9852 else
9854 start_reg = LAST_FPA_REGNUM;
9856 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9858 if (regs_ever_live[reg] && !call_used_regs[reg])
9860 floats_offset += 12;
9862 /* We can't unstack more than four registers at once. */
9863 if (start_reg - reg == 3)
9865 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9866 reg, FP_REGNUM, floats_offset - vfp_offset);
9867 start_reg = reg - 1;
9870 else
9872 if (reg != start_reg)
9873 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9874 reg + 1, start_reg - reg,
9875 FP_REGNUM, floats_offset - vfp_offset);
9876 start_reg = reg - 1;
9880 /* Just in case the last register checked also needs unstacking. */
9881 if (reg != start_reg)
9882 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9883 reg + 1, start_reg - reg,
9884 FP_REGNUM, floats_offset - vfp_offset);
9887 if (TARGET_HARD_FLOAT && TARGET_VFP)
9889 int saved_size;
9891 /* The fldmx insn does not have base+offset addressing modes,
9892 so we use IP to hold the address. */
9893 saved_size = arm_get_vfp_saved_size ();
9895 if (saved_size > 0)
9897 floats_offset += saved_size;
9898 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9899 FP_REGNUM, floats_offset - vfp_offset);
9901 start_reg = FIRST_VFP_REGNUM;
9902 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9904 if ((!regs_ever_live[reg] || call_used_regs[reg])
9905 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9907 if (start_reg != reg)
9908 arm_output_fldmx (f, IP_REGNUM,
9909 (start_reg - FIRST_VFP_REGNUM) / 2,
9910 (reg - start_reg) / 2);
9911 start_reg = reg + 2;
9914 if (start_reg != reg)
9915 arm_output_fldmx (f, IP_REGNUM,
9916 (start_reg - FIRST_VFP_REGNUM) / 2,
9917 (reg - start_reg) / 2);
9920 if (TARGET_IWMMXT)
9922 /* The frame pointer is guaranteed to be non-double-word aligned.
9923 This is because it is set to (old_stack_pointer - 4) and the
9924 old_stack_pointer was double word aligned. Thus the offset to
9925 the iWMMXt registers to be loaded must also be non-double-word
9926 sized, so that the resultant address *is* double-word aligned.
9927 We can ignore floats_offset since that was already included in
9928 the live_regs_mask. */
9929 lrm_count += (lrm_count % 2 ? 2 : 1);
9931 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9932 if (regs_ever_live[reg] && !call_used_regs[reg])
9934 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9935 reg, FP_REGNUM, lrm_count * 4);
9936 lrm_count += 2;
9940 /* saved_regs_mask should contain the IP, which at the time of stack
9941 frame generation actually contains the old stack pointer. So a
9942 quick way to unwind the stack is just to pop the IP register directly
9943 into the stack pointer. */
9944 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9945 saved_regs_mask &= ~ (1 << IP_REGNUM);
9946 saved_regs_mask |= (1 << SP_REGNUM);
9948 /* There are two registers left in saved_regs_mask - LR and PC. We
9949 only need to restore the LR register (the return address), but to
9950 save time we can load it directly into the PC, unless we need a
9951 special function exit sequence, or we are not really returning. */
9952 if (really_return
9953 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9954 && !current_function_calls_eh_return)
9955 /* Delete the LR from the register mask, so that the LR on
9956 the stack is loaded into the PC in the register mask. */
9957 saved_regs_mask &= ~ (1 << LR_REGNUM);
9958 else
9959 saved_regs_mask &= ~ (1 << PC_REGNUM);
9961 /* We must use SP as the base register, because SP is one of the
9962 registers being restored. If an interrupt or page fault
9963 happens in the ldm instruction, the SP might or might not
9964 have been restored. That would be bad, as then SP will no
9965 longer indicate the safe area of stack, and we can get stack
9966 corruption. Using SP as the base register means that it will
9967 be reset correctly to the original value, should an interrupt
9968 occur. If the stack pointer already points at the right
9969 place, then omit the subtraction. */
9970 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9971 || current_function_calls_alloca)
9972 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9973 4 * bit_count (saved_regs_mask));
9974 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9976 if (IS_INTERRUPT (func_type))
9977 /* Interrupt handlers will have pushed the
9978 IP onto the stack, so restore it now. */
9979 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9981 else
9983 /* Restore stack pointer if necessary. */
9984 if (offsets->outgoing_args != offsets->saved_regs)
9986 operands[0] = operands[1] = stack_pointer_rtx;
9987 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9988 output_add_immediate (operands);
9991 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9993 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9994 if (regs_ever_live[reg] && !call_used_regs[reg])
9995 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9996 reg, SP_REGNUM);
9998 else
10000 start_reg = FIRST_FPA_REGNUM;
10002 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
10004 if (regs_ever_live[reg] && !call_used_regs[reg])
10006 if (reg - start_reg == 3)
10008 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
10009 start_reg, SP_REGNUM);
10010 start_reg = reg + 1;
10013 else
10015 if (reg != start_reg)
10016 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10017 start_reg, reg - start_reg,
10018 SP_REGNUM);
10020 start_reg = reg + 1;
10024 /* Just in case the last register checked also needs unstacking. */
10025 if (reg != start_reg)
10026 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10027 start_reg, reg - start_reg, SP_REGNUM);
10030 if (TARGET_HARD_FLOAT && TARGET_VFP)
10032 start_reg = FIRST_VFP_REGNUM;
10033 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10035 if ((!regs_ever_live[reg] || call_used_regs[reg])
10036 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10038 if (start_reg != reg)
10039 arm_output_fldmx (f, SP_REGNUM,
10040 (start_reg - FIRST_VFP_REGNUM) / 2,
10041 (reg - start_reg) / 2);
10042 start_reg = reg + 2;
10045 if (start_reg != reg)
10046 arm_output_fldmx (f, SP_REGNUM,
10047 (start_reg - FIRST_VFP_REGNUM) / 2,
10048 (reg - start_reg) / 2);
10050 if (TARGET_IWMMXT)
10051 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10052 if (regs_ever_live[reg] && !call_used_regs[reg])
10053 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
10055 /* If we can, restore the LR into the PC. */
10056 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
10057 && really_return
10058 && current_function_pretend_args_size == 0
10059 && saved_regs_mask & (1 << LR_REGNUM)
10060 && !current_function_calls_eh_return)
10062 saved_regs_mask &= ~ (1 << LR_REGNUM);
10063 saved_regs_mask |= (1 << PC_REGNUM);
10066   /* Load the registers off the stack.  If we only have one register
10067      to load, use the LDR instruction - it is faster.  */
10068 if (saved_regs_mask == (1 << LR_REGNUM))
10070 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
10072 else if (saved_regs_mask)
10074 if (saved_regs_mask & (1 << SP_REGNUM))
10075 /* Note - write back to the stack register is not enabled
10076 (i.e. "ldmfd sp!..."). We know that the stack pointer is
10077 in the list of registers and if we add writeback the
10078 instruction becomes UNPREDICTABLE. */
10079 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
10080 else
10081 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
10084 if (current_function_pretend_args_size)
10086 /* Unwind the pre-pushed regs. */
10087 operands[0] = operands[1] = stack_pointer_rtx;
10088 operands[2] = GEN_INT (current_function_pretend_args_size);
10089 output_add_immediate (operands);
10093 /* We may have already restored PC directly from the stack. */
10094 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
10095 return "";
10097 /* Stack adjustment for exception handler. */
10098 if (current_function_calls_eh_return)
10099 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
10100 ARM_EH_STACKADJ_REGNUM);
10102 /* Generate the return instruction. */
10103 switch ((int) ARM_FUNC_TYPE (func_type))
10105 case ARM_FT_ISR:
10106 case ARM_FT_FIQ:
10107 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
10108 break;
10110 case ARM_FT_EXCEPTION:
10111 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10112 break;
10114 case ARM_FT_INTERWORKED:
10115 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10116 break;
10118 default:
10119 if (arm_arch5 || arm_arch4t)
10120 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10121 else
10122 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10123 break;
10126 return "";
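/* For illustration only (the register usage is hypothetical): depending on
   the function type, the switch above emits one of the following returns:

	subs	pc, lr, #4	@ ISR/FIQ; also restores CPSR from SPSR
	movs	pc, lr		@ exception handler
	bx	lr		@ interworked, or normal on ARMv4T/v5
	mov	pc, lr		@ normal return on pre-v4T cores  */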
10129 static void
10130 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10131 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
10133 arm_stack_offsets *offsets;
10135 if (TARGET_THUMB)
10137 int regno;
10139 /* Emit any call-via-reg trampolines that are needed for v4t support
10140 of call_reg and call_value_reg type insns. */
10141 for (regno = 0; regno < LR_REGNUM; regno++)
10143 rtx label = cfun->machine->call_via[regno];
10145 if (label != NULL)
10147 switch_to_section (function_section (current_function_decl));
10148 targetm.asm_out.internal_label (asm_out_file, "L",
10149 CODE_LABEL_NUMBER (label));
10150 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
10154 /* ??? Probably not safe to set this here, since it assumes that a
10155 function will be emitted as assembly immediately after we generate
10156 RTL for it. This does not happen for inline functions. */
10157 return_used_this_function = 0;
10159 else
10161 /* We need to take into account any stack-frame rounding. */
10162 offsets = arm_get_frame_offsets ();
10164 gcc_assert (!use_return_insn (FALSE, NULL)
10165 || !return_used_this_function
10166 || offsets->saved_regs == offsets->outgoing_args
10167 || frame_pointer_needed);
10169 /* Reset the ARM-specific per-function variables. */
10170 after_arm_reorg = 0;
10174 /* Generate and emit an insn that we will recognize as a push_multi.
10175 Unfortunately, since this insn does not reflect very well the actual
10176 semantics of the operation, we need to annotate the insn for the benefit
10177 of DWARF2 frame unwind information. */
10178 static rtx
10179 emit_multi_reg_push (unsigned long mask)
10181 int num_regs = 0;
10182 int num_dwarf_regs;
10183 int i, j;
10184 rtx par;
10185 rtx dwarf;
10186 int dwarf_par_index;
10187 rtx tmp, reg;
10189 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10190 if (mask & (1 << i))
10191 num_regs++;
10193 gcc_assert (num_regs && num_regs <= 16);
10195 /* We don't record the PC in the dwarf frame information. */
10196 num_dwarf_regs = num_regs;
10197 if (mask & (1 << PC_REGNUM))
10198 num_dwarf_regs--;
10200 /* For the body of the insn we are going to generate an UNSPEC in
10201 parallel with several USEs. This allows the insn to be recognized
10202 by the push_multi pattern in the arm.md file. The insn looks
10203 something like this:
10205 (parallel [
10206 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
10207 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
10208 (use (reg:SI 11 fp))
10209 (use (reg:SI 12 ip))
10210 (use (reg:SI 14 lr))
10211 (use (reg:SI 15 pc))
10214 For the frame note however, we try to be more explicit and actually
10215 show each register being stored into the stack frame, plus a (single)
10216 decrement of the stack pointer. We do it this way in order to be
10217 friendly to the stack unwinding code, which only wants to see a single
10218 stack decrement per instruction. The RTL we generate for the note looks
10219 something like this:
10221 (sequence [
10222 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
10223 (set (mem:SI (reg:SI sp)) (reg:SI r4))
10224 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
10225 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
10226 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
10229      This sequence is used both by the code to support stack unwinding for
10230      exception handlers and the code to generate dwarf2 frame debugging.  */
10232 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10233 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10234 dwarf_par_index = 1;
10236 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10238 if (mask & (1 << i))
10240 reg = gen_rtx_REG (SImode, i);
10242 XVECEXP (par, 0, 0)
10243 = gen_rtx_SET (VOIDmode,
10244 gen_frame_mem (BLKmode,
10245 gen_rtx_PRE_DEC (BLKmode,
10246 stack_pointer_rtx)),
10247 gen_rtx_UNSPEC (BLKmode,
10248 gen_rtvec (1, reg),
10249 UNSPEC_PUSH_MULT));
10251 if (i != PC_REGNUM)
10253 tmp = gen_rtx_SET (VOIDmode,
10254 gen_frame_mem (SImode, stack_pointer_rtx),
10255 reg);
10256 RTX_FRAME_RELATED_P (tmp) = 1;
10257 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10258 dwarf_par_index++;
10261 break;
10265 for (j = 1, i++; j < num_regs; i++)
10267 if (mask & (1 << i))
10269 reg = gen_rtx_REG (SImode, i);
10271 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10273 if (i != PC_REGNUM)
10276 	      tmp = gen_rtx_SET (VOIDmode,
10277 gen_frame_mem (SImode,
10278 plus_constant (stack_pointer_rtx,
10279 4 * j)),
10280 reg);
10281 RTX_FRAME_RELATED_P (tmp) = 1;
10282 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10285 j++;
10289 par = emit_insn (par);
10291 tmp = gen_rtx_SET (VOIDmode,
10292 stack_pointer_rtx,
10293 plus_constant (stack_pointer_rtx, -4 * num_regs));
10294 RTX_FRAME_RELATED_P (tmp) = 1;
10295 XVECEXP (dwarf, 0, 0) = tmp;
10297 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10298 REG_NOTES (par));
10299 return par;
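/* A usage sketch (the register choice is hypothetical): the prologue code
   later in this file might do

	emit_multi_reg_push ((1 << 4) | (1 << LR_REGNUM));

   which the push_multi pattern prints as something like
   "stmfd	sp!, {r4, lr}", while the attached REG_FRAME_RELATED_EXPR
   note describes the same operation as a single SP decrement followed by
   individual register stores.  */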
10302 /* Calculate the size of the return value that is passed in registers. */
10303 static int
10304 arm_size_return_regs (void)
10306 enum machine_mode mode;
10308 if (current_function_return_rtx != 0)
10309 mode = GET_MODE (current_function_return_rtx);
10310 else
10311 mode = DECL_MODE (DECL_RESULT (current_function_decl));
10313 return GET_MODE_SIZE (mode);
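/* For example, a function returning an SImode value in r0 yields 4 here,
   while a DImode return spread over r0/r1 yields 8.  */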
10316 static rtx
10317 emit_sfm (int base_reg, int count)
10319 rtx par;
10320 rtx dwarf;
10321 rtx tmp, reg;
10322 int i;
10324 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10325 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10327 reg = gen_rtx_REG (XFmode, base_reg++);
10329 XVECEXP (par, 0, 0)
10330 = gen_rtx_SET (VOIDmode,
10331 gen_frame_mem (BLKmode,
10332 gen_rtx_PRE_DEC (BLKmode,
10333 stack_pointer_rtx)),
10334 gen_rtx_UNSPEC (BLKmode,
10335 gen_rtvec (1, reg),
10336 UNSPEC_PUSH_MULT));
10337 tmp = gen_rtx_SET (VOIDmode,
10338 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
10339 RTX_FRAME_RELATED_P (tmp) = 1;
10340 XVECEXP (dwarf, 0, 1) = tmp;
10342 for (i = 1; i < count; i++)
10344 reg = gen_rtx_REG (XFmode, base_reg++);
10345 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10347 tmp = gen_rtx_SET (VOIDmode,
10348 gen_frame_mem (XFmode,
10349 plus_constant (stack_pointer_rtx,
10350 i * 12)),
10351 reg);
10352 RTX_FRAME_RELATED_P (tmp) = 1;
10353 XVECEXP (dwarf, 0, i + 1) = tmp;
10356 tmp = gen_rtx_SET (VOIDmode,
10357 stack_pointer_rtx,
10358 plus_constant (stack_pointer_rtx, -12 * count));
10360 RTX_FRAME_RELATED_P (tmp) = 1;
10361 XVECEXP (dwarf, 0, 0) = tmp;
10363 par = emit_insn (par);
10364 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10365 REG_NOTES (par));
10366 return par;
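/* A usage sketch (the operand values are hypothetical):
   emit_sfm (FIRST_FPA_REGNUM, 2) builds the RTL to save two consecutive
   FPA registers, which the matching pattern prints as something like
   "sfmfd	f0, 2, [sp]!".  Each FPA register occupies 12 bytes, hence
   the -12 * count stack adjustment recorded in the dwarf note.  */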
10370 /* Return true if the current function needs to save/restore LR. */
10372 static bool
10373 thumb_force_lr_save (void)
10375 return !cfun->machine->lr_save_eliminated
10376 && (!leaf_function_p ()
10377 || thumb_far_jump_used_p ()
10378 || regs_ever_live [LR_REGNUM]);
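/* Informally: a leaf function that contains no far jumps and never
   clobbers LR can leave the return address in the register and skip the
   save/restore entirely.  */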
10382 /* Compute the distance from register FROM to register TO.
10383 These can be the arg pointer (26), the soft frame pointer (25),
10384 the stack pointer (13) or the hard frame pointer (11).
10385 In thumb mode r7 is used as the soft frame pointer, if needed.
10386 Typical stack layout looks like this:
10388        old stack pointer -> |    |
10389                              ----
10390                             |    | \
10391                             |    |   saved arguments for
10392                             |    |   vararg functions
10393                             |    | /
10394                               --
10395    hard FP & arg pointer -> |    | \
10396                             |    |   stack
10397                             |    |   frame
10398                             |    | /
10399                               --
10400                             |    | \
10401                             |    |   call saved
10402                             |    |   registers
10403       soft frame pointer -> |    | /
10404                               --
10405                             |    | \
10406                             |    |   local
10407                             |    |   variables
10408      locals base pointer -> |    | /
10409                               --
10410                             |    | \
10411                             |    |   outgoing
10412                             |    |   arguments
10413    current stack pointer -> |    | /
10416 For a given function some or all of these stack components
10417 may not be needed, giving rise to the possibility of
10418 eliminating some of the registers.
10420 The values returned by this function must reflect the behavior
10421 of arm_expand_prologue() and arm_compute_save_reg_mask().
10423 The sign of the number returned reflects the direction of stack
10424 growth, so the values are positive for all eliminations except
10425 from the soft frame pointer to the hard frame pointer.
10427 SFP may point just inside the local variables block to ensure correct
10428 alignment. */
10431 /* Calculate stack offsets. These are used to calculate register elimination
10432 offsets and in prologue/epilogue code. */
10434 static arm_stack_offsets *
10435 arm_get_frame_offsets (void)
10437 struct arm_stack_offsets *offsets;
10438 unsigned long func_type;
10439 int leaf;
10440 int saved;
10441 HOST_WIDE_INT frame_size;
10443 offsets = &cfun->machine->stack_offsets;
10445 /* We need to know if we are a leaf function. Unfortunately, it
10446 is possible to be called after start_sequence has been called,
10447 which causes get_insns to return the insns for the sequence,
10448 not the function, which will cause leaf_function_p to return
10449 the incorrect result.
10451      We only need to know about leaf functions once reload has completed, and the
10452 frame size cannot be changed after that time, so we can safely
10453 use the cached value. */
10455 if (reload_completed)
10456 return offsets;
10458   /* Initially this is the size of the local variables.  It will be translated
10459      into an offset once we have determined the size of preceding data.  */
10460 frame_size = ROUND_UP_WORD (get_frame_size ());
10462 leaf = leaf_function_p ();
10464 /* Space for variadic functions. */
10465 offsets->saved_args = current_function_pretend_args_size;
10467 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10469 if (TARGET_ARM)
10471 unsigned int regno;
10473 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10475 /* We know that SP will be doubleword aligned on entry, and we must
10476 preserve that condition at any subroutine call. We also require the
10477 soft frame pointer to be doubleword aligned. */
10479 if (TARGET_REALLY_IWMMXT)
10481 /* Check for the call-saved iWMMXt registers. */
10482 for (regno = FIRST_IWMMXT_REGNUM;
10483 regno <= LAST_IWMMXT_REGNUM;
10484 regno++)
10485 if (regs_ever_live [regno] && ! call_used_regs [regno])
10486 saved += 8;
10489 func_type = arm_current_func_type ();
10490 if (! IS_VOLATILE (func_type))
10492 /* Space for saved FPA registers. */
10493 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10494 if (regs_ever_live[regno] && ! call_used_regs[regno])
10495 saved += 12;
10497 /* Space for saved VFP registers. */
10498 if (TARGET_HARD_FLOAT && TARGET_VFP)
10499 saved += arm_get_vfp_saved_size ();
10502 else /* TARGET_THUMB */
10504 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10505 if (TARGET_BACKTRACE)
10506 saved += 16;
10509 /* Saved registers include the stack frame. */
10510 offsets->saved_regs = offsets->saved_args + saved;
10511 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10512 /* A leaf function does not need any stack alignment if it has nothing
10513 on the stack. */
10514 if (leaf && frame_size == 0)
10516 offsets->outgoing_args = offsets->soft_frame;
10517 return offsets;
10520 /* Ensure SFP has the correct alignment. */
10521 if (ARM_DOUBLEWORD_ALIGN
10522 && (offsets->soft_frame & 7))
10523 offsets->soft_frame += 4;
10525 offsets->locals_base = offsets->soft_frame + frame_size;
10526 offsets->outgoing_args = (offsets->locals_base
10527 + current_function_outgoing_args_size);
10529 if (ARM_DOUBLEWORD_ALIGN)
10531 /* Ensure SP remains doubleword aligned. */
10532 if (offsets->outgoing_args & 7)
10533 offsets->outgoing_args += 4;
10534 gcc_assert (!(offsets->outgoing_args & 7));
10537 return offsets;
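/* A worked example (the numbers are hypothetical, and assume an empty
   caller-interworking slot): an ARM function with no pretend args that
   saves {r4, r5, r6, lr} (saved = 16) and has 8 bytes of locals and no
   outgoing arguments gets saved_args = 0, saved_regs = 16, soft_frame = 16
   (already doubleword aligned), locals_base = 24 and outgoing_args = 24,
   which satisfies the alignment assertion above.  */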
10541 /* Calculate the relative offsets for the different stack pointers. Positive
10542 offsets are in the direction of stack growth. */
10544 HOST_WIDE_INT
10545 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10547 arm_stack_offsets *offsets;
10549 offsets = arm_get_frame_offsets ();
10551 /* OK, now we have enough information to compute the distances.
10552 There must be an entry in these switch tables for each pair
10553 of registers in ELIMINABLE_REGS, even if some of the entries
10554 seem to be redundant or useless. */
10555 switch (from)
10557 case ARG_POINTER_REGNUM:
10558 switch (to)
10560 case THUMB_HARD_FRAME_POINTER_REGNUM:
10561 return 0;
10563 case FRAME_POINTER_REGNUM:
10564 /* This is the reverse of the soft frame pointer
10565 to hard frame pointer elimination below. */
10566 return offsets->soft_frame - offsets->saved_args;
10568 case ARM_HARD_FRAME_POINTER_REGNUM:
10569 /* If there is no stack frame then the hard
10570 frame pointer and the arg pointer coincide. */
10571 if (offsets->frame == offsets->saved_regs)
10572 return 0;
10573 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10574 return (frame_pointer_needed
10575 && cfun->static_chain_decl != NULL
10576 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10578 case STACK_POINTER_REGNUM:
10579 /* If nothing has been pushed on the stack at all
10580 then this will return -4. This *is* correct! */
10581 return offsets->outgoing_args - (offsets->saved_args + 4);
10583 default:
10584 gcc_unreachable ();
10586 gcc_unreachable ();
10588 case FRAME_POINTER_REGNUM:
10589 switch (to)
10591 case THUMB_HARD_FRAME_POINTER_REGNUM:
10592 return 0;
10594 case ARM_HARD_FRAME_POINTER_REGNUM:
10595 /* The hard frame pointer points to the top entry in the
10596 stack frame. The soft frame pointer to the bottom entry
10597 in the stack frame. If there is no stack frame at all,
10598 then they are identical. */
10600 return offsets->frame - offsets->soft_frame;
10602 case STACK_POINTER_REGNUM:
10603 return offsets->outgoing_args - offsets->soft_frame;
10605 default:
10606 gcc_unreachable ();
10608 gcc_unreachable ();
10610 default:
10611 /* You cannot eliminate from the stack pointer.
10612 In theory you could eliminate from the hard frame
10613 pointer to the stack pointer, but this will never
10614 happen, since if a stack frame is not needed the
10615 hard frame pointer will never be used. */
10616 gcc_unreachable ();
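/* Continuing the hypothetical example above: eliminating the arg pointer
   to the stack pointer yields outgoing_args - (saved_args + 4)
   = 24 - 4 = 20, while eliminating the soft frame pointer to the stack
   pointer yields outgoing_args - soft_frame = 24 - 16 = 8.  */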
10621 /* Generate the prologue instructions for entry into an ARM function. */
10622 void
10623 arm_expand_prologue (void)
10625 int reg;
10626 rtx amount;
10627 rtx insn;
10628 rtx ip_rtx;
10629 unsigned long live_regs_mask;
10630 unsigned long func_type;
10631 int fp_offset = 0;
10632 int saved_pretend_args = 0;
10633 int saved_regs = 0;
10634 unsigned HOST_WIDE_INT args_to_push;
10635 arm_stack_offsets *offsets;
10637 func_type = arm_current_func_type ();
10639 /* Naked functions don't have prologues. */
10640 if (IS_NAKED (func_type))
10641 return;
10643   /* Make a copy of current_function_pretend_args_size, as we may need to modify it locally.  */
10644 args_to_push = current_function_pretend_args_size;
10646   /* Compute which registers we will have to save onto the stack.  */
10647 live_regs_mask = arm_compute_save_reg_mask ();
10649 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10651 if (frame_pointer_needed)
10653 if (IS_INTERRUPT (func_type))
10655 	  /* Interrupt functions must not corrupt any registers.
10656 	     Creating a frame pointer, however, corrupts the IP
10657 	     register, so we must push it first.  */
10658 insn = emit_multi_reg_push (1 << IP_REGNUM);
10660 /* Do not set RTX_FRAME_RELATED_P on this insn.
10661 The dwarf stack unwinding code only wants to see one
10662 stack decrement per function, and this is not it. If
10663 this instruction is labeled as being part of the frame
10664 creation sequence then dwarf2out_frame_debug_expr will
10665 die when it encounters the assignment of IP to FP
10666 later on, since the use of SP here establishes SP as
10667 the CFA register and not IP.
10669 Anyway this instruction is not really part of the stack
10670 frame creation although it is part of the prologue. */
10672 else if (IS_NESTED (func_type))
10674 	  /* The static chain register is the same as the IP register
10675 	     used as a scratch register during stack frame creation.
10676 	     To get around this we need to find somewhere to store IP
10677 	     whilst the frame is being created.  We try the following
10678 	     places in order:
10680 1. The last argument register.
10681 2. A slot on the stack above the frame. (This only
10682 works if the function is not a varargs function).
10683 3. Register r3, after pushing the argument registers
10684 onto the stack.
10686 Note - we only need to tell the dwarf2 backend about the SP
10687 adjustment in the second variant; the static chain register
10688 doesn't need to be unwound, as it doesn't contain a value
10689 inherited from the caller. */
10691 if (regs_ever_live[3] == 0)
10692 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10693 else if (args_to_push == 0)
10695 rtx dwarf;
10697 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10698 insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
10699 fp_offset = 4;
10701 /* Just tell the dwarf backend that we adjusted SP. */
10702 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10703 plus_constant (stack_pointer_rtx,
10704 -fp_offset));
10705 RTX_FRAME_RELATED_P (insn) = 1;
10706 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10707 dwarf, REG_NOTES (insn));
10709 else
10711 /* Store the args on the stack. */
10712 if (cfun->machine->uses_anonymous_args)
10713 insn = emit_multi_reg_push
10714 ((0xf0 >> (args_to_push / 4)) & 0xf);
10715 else
10716 insn = emit_insn
10717 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10718 GEN_INT (- args_to_push)));
10720 RTX_FRAME_RELATED_P (insn) = 1;
10722 saved_pretend_args = 1;
10723 fp_offset = args_to_push;
10724 args_to_push = 0;
10726 /* Now reuse r3 to preserve IP. */
10727 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10731 insn = emit_set_insn (ip_rtx,
10732 plus_constant (stack_pointer_rtx, fp_offset));
10733 RTX_FRAME_RELATED_P (insn) = 1;
10736 if (args_to_push)
10738 /* Push the argument registers, or reserve space for them. */
10739 if (cfun->machine->uses_anonymous_args)
10740 insn = emit_multi_reg_push
10741 ((0xf0 >> (args_to_push / 4)) & 0xf);
10742 else
10743 insn = emit_insn
10744 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10745 GEN_INT (- args_to_push)));
10746 RTX_FRAME_RELATED_P (insn) = 1;
10749   /* If this is an interrupt service routine, and the link register
10750      is going to be pushed, and we are not creating a stack frame
10751      (which would involve an extra push of IP and a pop in the epilogue),
10752      then subtracting four from LR now will mean that the function return
10753      can be done with a single instruction.  */
10754 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10755 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10756 && ! frame_pointer_needed)
10758 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
10760 emit_set_insn (lr, plus_constant (lr, -4));
10763 if (live_regs_mask)
10765 insn = emit_multi_reg_push (live_regs_mask);
10766 saved_regs += bit_count (live_regs_mask) * 4;
10767 RTX_FRAME_RELATED_P (insn) = 1;
10770 if (TARGET_IWMMXT)
10771 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10772 if (regs_ever_live[reg] && ! call_used_regs [reg])
10774 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10775 insn = gen_frame_mem (V2SImode, insn);
10776 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
10777 RTX_FRAME_RELATED_P (insn) = 1;
10778 saved_regs += 8;
10781 if (! IS_VOLATILE (func_type))
10783 int start_reg;
10785 /* Save any floating point call-saved registers used by this
10786 function. */
10787 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10789 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10790 if (regs_ever_live[reg] && !call_used_regs[reg])
10792 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10793 insn = gen_frame_mem (XFmode, insn);
10794 insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
10795 RTX_FRAME_RELATED_P (insn) = 1;
10796 saved_regs += 12;
10799 else
10801 start_reg = LAST_FPA_REGNUM;
10803 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10805 if (regs_ever_live[reg] && !call_used_regs[reg])
10807 if (start_reg - reg == 3)
10809 insn = emit_sfm (reg, 4);
10810 RTX_FRAME_RELATED_P (insn) = 1;
10811 saved_regs += 48;
10812 start_reg = reg - 1;
10815 else
10817 if (start_reg != reg)
10819 insn = emit_sfm (reg + 1, start_reg - reg);
10820 RTX_FRAME_RELATED_P (insn) = 1;
10821 saved_regs += (start_reg - reg) * 12;
10823 start_reg = reg - 1;
10827 if (start_reg != reg)
10829 insn = emit_sfm (reg + 1, start_reg - reg);
10830 saved_regs += (start_reg - reg) * 12;
10831 RTX_FRAME_RELATED_P (insn) = 1;
10834 if (TARGET_HARD_FLOAT && TARGET_VFP)
10836 start_reg = FIRST_VFP_REGNUM;
10838 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10840 if ((!regs_ever_live[reg] || call_used_regs[reg])
10841 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10843 if (start_reg != reg)
10844 saved_regs += vfp_emit_fstmx (start_reg,
10845 (reg - start_reg) / 2);
10846 start_reg = reg + 2;
10849 if (start_reg != reg)
10850 saved_regs += vfp_emit_fstmx (start_reg,
10851 (reg - start_reg) / 2);
10855 if (frame_pointer_needed)
10857 /* Create the new frame pointer. */
10858 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10859 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10860 RTX_FRAME_RELATED_P (insn) = 1;
10862 if (IS_NESTED (func_type))
10864 /* Recover the static chain register. */
10865 if (regs_ever_live [3] == 0
10866 || saved_pretend_args)
10867 insn = gen_rtx_REG (SImode, 3);
10868 else /* if (current_function_pretend_args_size == 0) */
10870 insn = plus_constant (hard_frame_pointer_rtx, 4);
10871 insn = gen_frame_mem (SImode, insn);
10874 emit_set_insn (ip_rtx, insn);
10875 /* Add a USE to stop propagate_one_insn() from barfing. */
10876 emit_insn (gen_prologue_use (ip_rtx));
10880 offsets = arm_get_frame_offsets ();
10881 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10883 /* This add can produce multiple insns for a large constant, so we
10884 need to get tricky. */
10885 rtx last = get_last_insn ();
10887 amount = GEN_INT (offsets->saved_args + saved_regs
10888 - offsets->outgoing_args);
10890 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10891 amount));
10892       do
10894 	  last = last ? NEXT_INSN (last) : get_insns ();
10895 	  RTX_FRAME_RELATED_P (last) = 1;
10897       while (last != insn);
10899 /* If the frame pointer is needed, emit a special barrier that
10900 will prevent the scheduler from moving stores to the frame
10901 before the stack adjustment. */
10902 if (frame_pointer_needed)
10903 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10904 hard_frame_pointer_rtx));
10908 if (flag_pic && arm_pic_register != INVALID_REGNUM)
10909 arm_load_pic_register (0UL);
10911 /* If we are profiling, make sure no instructions are scheduled before
10912 the call to mcount. Similarly if the user has requested no
10913 scheduling in the prolog. Similarly if we want non-call exceptions
10914 using the EABI unwinder, to prevent faulting instructions from being
10915 swapped with a stack adjustment. */
10916 if (current_function_profile || !TARGET_SCHED_PROLOG
10917 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
10918 emit_insn (gen_blockage ());
10920 /* If the link register is being kept alive, with the return address in it,
10921 then make sure that it does not get reused by the ce2 pass. */
10922 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10924 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10925 cfun->machine->lr_save_eliminated = 1;
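/* For illustration only (the register set and frame size are
   hypothetical): for a normal function that needs a frame pointer, saves
   r4 and lr, and has 8 bytes of locals, the code above emits roughly

	mov	ip, sp
	stmfd	sp!, {r4, fp, ip, lr}
	sub	fp, ip, #4
	sub	sp, sp, #8

   with the stores and the stack decrements flagged RTX_FRAME_RELATED_P so
   that the dwarf2 unwinder can describe the frame.  */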
10929 /* If CODE is 'd', then X is a condition operand and the instruction
10930    should only be executed if the condition is true.
10931    If CODE is 'D', then X is a condition operand and the instruction
10932 should only be executed if the condition is false: however, if the mode
10933 of the comparison is CCFPEmode, then always execute the instruction -- we
10934 do this because in these circumstances !GE does not necessarily imply LT;
10935 in these cases the instruction pattern will take care to make sure that
10936 an instruction containing %d will follow, thereby undoing the effects of
10937 doing this instruction unconditionally.
10938 If CODE is 'N' then X is a floating point operand that must be negated
10939 before output.
10940 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10941 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
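/* A few illustrative expansions (the operands are hypothetical): with X
   being (const_int 5), '%B' prints -6 (the bitwise inverse); for a DImode
   value held in r0/r1 on a little-endian target, '%Q' prints r0 while
   '%R' and '%H' print r1; '%M' applied to r4 holding a DImode value
   prints "{r4-r5}".  */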
10942 void
10943 arm_print_operand (FILE *stream, rtx x, int code)
10945 switch (code)
10947 case '@':
10948 fputs (ASM_COMMENT_START, stream);
10949 return;
10951 case '_':
10952 fputs (user_label_prefix, stream);
10953 return;
10955 case '|':
10956 fputs (REGISTER_PREFIX, stream);
10957 return;
10959 case '?':
10960 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10962 if (TARGET_THUMB)
10964 output_operand_lossage ("predicated Thumb instruction");
10965 break;
10967 if (current_insn_predicate != NULL)
10969 output_operand_lossage
10970 ("predicated instruction in conditional sequence");
10971 break;
10974 fputs (arm_condition_codes[arm_current_cc], stream);
10976 else if (current_insn_predicate)
10978 enum arm_cond_code code;
10980 if (TARGET_THUMB)
10982 output_operand_lossage ("predicated Thumb instruction");
10983 break;
10986 code = get_arm_condition_code (current_insn_predicate);
10987 fputs (arm_condition_codes[code], stream);
10989 return;
10991 case 'N':
10993 REAL_VALUE_TYPE r;
10994 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10995 r = REAL_VALUE_NEGATE (r);
10996 fprintf (stream, "%s", fp_const_from_val (&r));
10998 return;
11000 case 'B':
11001 if (GET_CODE (x) == CONST_INT)
11003 HOST_WIDE_INT val;
11004 val = ARM_SIGN_EXTEND (~INTVAL (x));
11005 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
11007 else
11009 putc ('~', stream);
11010 output_addr_const (stream, x);
11012 return;
11014 case 'i':
11015 fprintf (stream, "%s", arithmetic_instr (x, 1));
11016 return;
11018 /* Truncate Cirrus shift counts. */
11019 case 's':
11020 if (GET_CODE (x) == CONST_INT)
11022 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
11023 return;
11025 arm_print_operand (stream, x, 0);
11026 return;
11028 case 'I':
11029 fprintf (stream, "%s", arithmetic_instr (x, 0));
11030 return;
11032 case 'S':
11034 HOST_WIDE_INT val;
11035 const char *shift;
11037 if (!shift_operator (x, SImode))
11039 output_operand_lossage ("invalid shift operand");
11040 break;
11043 shift = shift_op (x, &val);
11045 if (shift)
11047 fprintf (stream, ", %s ", shift);
11048 if (val == -1)
11049 arm_print_operand (stream, XEXP (x, 1), 0);
11050 else
11051 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
11054 return;
11056 /* An explanation of the 'Q', 'R' and 'H' register operands:
11058 In a pair of registers containing a DI or DF value the 'Q'
11059 operand returns the register number of the register containing
11060 the least significant part of the value. The 'R' operand returns
11061 the register number of the register containing the most
11062 significant part of the value.
11064 The 'H' operand returns the higher of the two register numbers.
11065 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
11066 same as the 'Q' operand, since the most significant part of the
11067 value is held in the lower number register. The reverse is true
11068 on systems where WORDS_BIG_ENDIAN is false.
11070 The purpose of these operands is to distinguish between cases
11071 where the endian-ness of the values is important (for example
11072 when they are added together), and cases where the endian-ness
11073 is irrelevant, but the order of register operations is important.
11074 For example when loading a value from memory into a register
11075 pair, the endian-ness does not matter. Provided that the value
11076 from the lower memory address is put into the lower numbered
11077 register, and the value from the higher address is put into the
11078 higher numbered register, the load will work regardless of whether
11079 the value being loaded is big-wordian or little-wordian. The
11080 order of the two register loads can matter however, if the address
11081 of the memory location is actually held in one of the registers
11082 being overwritten by the load. */
11083 case 'Q':
11084 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11086 output_operand_lossage ("invalid operand for code '%c'", code);
11087 return;
11090 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
11091 return;
11093 case 'R':
11094 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11096 output_operand_lossage ("invalid operand for code '%c'", code);
11097 return;
11100 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
11101 return;
11103 case 'H':
11104 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11106 output_operand_lossage ("invalid operand for code '%c'", code);
11107 return;
11110 asm_fprintf (stream, "%r", REGNO (x) + 1);
11111 return;
11113 case 'm':
11114 asm_fprintf (stream, "%r",
11115 GET_CODE (XEXP (x, 0)) == REG
11116 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
11117 return;
11119 case 'M':
11120 asm_fprintf (stream, "{%r-%r}",
11121 REGNO (x),
11122 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
11123 return;
11125 case 'd':
11126 /* CONST_TRUE_RTX means always -- that's the default. */
11127 if (x == const_true_rtx)
11128 return;
11130 if (!COMPARISON_P (x))
11132 output_operand_lossage ("invalid operand for code '%c'", code);
11133 return;
11136 fputs (arm_condition_codes[get_arm_condition_code (x)],
11137 stream);
11138 return;
11140 case 'D':
11141 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11142 want to do that. */
11143 if (x == const_true_rtx)
11145 	  output_operand_lossage ("instruction never executed");
11146 return;
11148 if (!COMPARISON_P (x))
11150 output_operand_lossage ("invalid operand for code '%c'", code);
11151 return;
11154 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
11155 (get_arm_condition_code (x))],
11156 stream);
11157 return;
11159 /* Cirrus registers can be accessed in a variety of ways:
11160 single floating point (f)
11161 double floating point (d)
11162 32bit integer (fx)
11163 64bit integer (dx). */
11164 case 'W': /* Cirrus register in F mode. */
11165 case 'X': /* Cirrus register in D mode. */
11166 case 'Y': /* Cirrus register in FX mode. */
11167 case 'Z': /* Cirrus register in DX mode. */
11168 gcc_assert (GET_CODE (x) == REG
11169 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
11171 fprintf (stream, "mv%s%s",
11172 code == 'W' ? "f"
11173 : code == 'X' ? "d"
11174 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
11176 return;
11178     /* Print a Cirrus register, in the mode determined by the operand's machine mode.  */
11179 case 'V':
11181 int mode = GET_MODE (x);
11183 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
11185 output_operand_lossage ("invalid operand for code '%c'", code);
11186 return;
11189 fprintf (stream, "mv%s%s",
11190 mode == DFmode ? "d"
11191 : mode == SImode ? "fx"
11192 : mode == DImode ? "dx"
11193 : "f", reg_names[REGNO (x)] + 2);
11195 return;
11198 case 'U':
11199 if (GET_CODE (x) != REG
11200 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
11201 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
11202 /* Bad value for wCG register number. */
11204 output_operand_lossage ("invalid operand for code '%c'", code);
11205 return;
11208 else
11209 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
11210 return;
11212 /* Print an iWMMXt control register name. */
11213 case 'w':
11214 if (GET_CODE (x) != CONST_INT
11215 || INTVAL (x) < 0
11216 || INTVAL (x) >= 16)
11217 /* Bad value for wC register number. */
11219 output_operand_lossage ("invalid operand for code '%c'", code);
11220 return;
11223 else
11225 static const char * wc_reg_names [16] =
11227 "wCID", "wCon", "wCSSF", "wCASF",
11228 "wC4", "wC5", "wC6", "wC7",
11229 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
11230 "wC12", "wC13", "wC14", "wC15"
11233 fprintf (stream, wc_reg_names [INTVAL (x)]);
11235 return;
11237 /* Print a VFP double precision register name. */
11238 case 'P':
11240 int mode = GET_MODE (x);
11241 int num;
11243 if (mode != DImode && mode != DFmode)
11245 output_operand_lossage ("invalid operand for code '%c'", code);
11246 return;
11249 if (GET_CODE (x) != REG
11250 || !IS_VFP_REGNUM (REGNO (x)))
11252 output_operand_lossage ("invalid operand for code '%c'", code);
11253 return;
11256 num = REGNO(x) - FIRST_VFP_REGNUM;
11257 if (num & 1)
11259 output_operand_lossage ("invalid operand for code '%c'", code);
11260 return;
11263 fprintf (stream, "d%d", num >> 1);
11265 return;
11267 default:
11268 if (x == 0)
11270 output_operand_lossage ("missing operand");
11271 return;
11274 switch (GET_CODE (x))
11276 case REG:
11277 asm_fprintf (stream, "%r", REGNO (x));
11278 break;
11280 case MEM:
11281 output_memory_reference_mode = GET_MODE (x);
11282 output_address (XEXP (x, 0));
11283 break;
11285 case CONST_DOUBLE:
11286 fprintf (stream, "#%s", fp_immediate_constant (x));
11287 break;
11289 default:
11290 gcc_assert (GET_CODE (x) != NEG);
11291 fputc ('#', stream);
11292 output_addr_const (stream, x);
11293 break;
11298 #ifndef AOF_ASSEMBLER
11299 /* Target hook for assembling integer objects. The ARM version needs to
11300 handle word-sized values specially. */
11301 static bool
11302 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
11304 if (size == UNITS_PER_WORD && aligned_p)
11306 fputs ("\t.word\t", asm_out_file);
11307 output_addr_const (asm_out_file, x);
11309 /* Mark symbols as position independent. We only do this in the
11310 .text segment, not in the .data segment. */
11311 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
11312 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
11314 if (GET_CODE (x) == SYMBOL_REF
11315 && (CONSTANT_POOL_ADDRESS_P (x)
11316 || SYMBOL_REF_LOCAL_P (x)))
11317 fputs ("(GOTOFF)", asm_out_file);
11318 else if (GET_CODE (x) == LABEL_REF)
11319 fputs ("(GOTOFF)", asm_out_file);
11320 else
11321 fputs ("(GOT)", asm_out_file);
11323 fputc ('\n', asm_out_file);
11324 return true;
11327 if (arm_vector_mode_supported_p (GET_MODE (x)))
11329 int i, units;
11331 gcc_assert (GET_CODE (x) == CONST_VECTOR);
11333 units = CONST_VECTOR_NUNITS (x);
11335 switch (GET_MODE (x))
11337 case V2SImode: size = 4; break;
11338 case V4HImode: size = 2; break;
11339 case V8QImode: size = 1; break;
11340 default:
11341 gcc_unreachable ();
11344 for (i = 0; i < units; i++)
11346 rtx elt;
11348 elt = CONST_VECTOR_ELT (x, i);
11349 assemble_integer
11350 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
11353 return true;
11356 return default_assemble_integer (x, size, aligned_p);
11360 /* Add a function to the list of static constructors. */
11362 static void
11363 arm_elf_asm_constructor (rtx symbol, int priority)
11365 section *s;
11367 if (!TARGET_AAPCS_BASED)
11369 default_named_section_asm_out_constructor (symbol, priority);
11370 return;
11373 /* Put these in the .init_array section, using a special relocation. */
11374 if (priority != DEFAULT_INIT_PRIORITY)
11376 char buf[18];
11377 sprintf (buf, ".init_array.%.5u", priority);
11378 s = get_section (buf, SECTION_WRITE, NULL_TREE);
11380 else
11381 s = ctors_section;
11383 switch_to_section (s);
11384 assemble_align (POINTER_SIZE);
11385 fputs ("\t.word\t", asm_out_file);
11386 output_addr_const (asm_out_file, symbol);
11387 fputs ("(target1)\n", asm_out_file);
11389 #endif
11391 /* A finite state machine takes care of noticing whether or not instructions
11392    can be conditionally executed, and thus decreases execution time and code
11393    size by deleting branch instructions.  The fsm is controlled by
11394    final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE.  */
11396 /* The states of the fsm controlling condition codes are:
11397 0: normal, do nothing special
11398 1: make ASM_OUTPUT_OPCODE not output this instruction
11399 2: make ASM_OUTPUT_OPCODE not output this instruction
11400 3: make instructions conditional
11401 4: make instructions conditional
11403 State transitions (state->state by whom under condition):
11404 0 -> 1 final_prescan_insn if the `target' is a label
11405 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
11406 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
11407 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
11408 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
11409 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
11410 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
11411 (the target insn is arm_target_insn).
11413 If the jump clobbers the conditions then we use states 2 and 4.
11415 A similar thing can be done with conditional return insns.
11417 XXX In case the `target' is an unconditional branch, this conditionalising
11418 of the instructions always reduces code size, but not always execution
11419 time. But then, I want to reduce the code size to somewhere near what
11420 /bin/cc produces. */
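/* A sketch of the transformation this enables (registers hypothetical):
   a conditional branch over two instructions

	cmp	r0, #0			cmp	r0, #0
	beq	.L1		==>	movne	r1, #1
	mov	r1, #1			addne	r2, r2, #1
	add	r2, r2, #1
   .L1:				   .L1:

   becomes two conditionally executed instructions and no branch.  */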
11422 /* Returns the index of the ARM condition code string in
11423 `arm_condition_codes'. COMPARISON should be an rtx like
11424 `(eq (...) (...))'. */
11425 static enum arm_cond_code
11426 get_arm_condition_code (rtx comparison)
11428 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11429 int code;
11430 enum rtx_code comp_code = GET_CODE (comparison);
11432 if (GET_MODE_CLASS (mode) != MODE_CC)
11433 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11434 XEXP (comparison, 1));
11436 switch (mode)
11438 case CC_DNEmode: code = ARM_NE; goto dominance;
11439 case CC_DEQmode: code = ARM_EQ; goto dominance;
11440 case CC_DGEmode: code = ARM_GE; goto dominance;
11441 case CC_DGTmode: code = ARM_GT; goto dominance;
11442 case CC_DLEmode: code = ARM_LE; goto dominance;
11443 case CC_DLTmode: code = ARM_LT; goto dominance;
11444 case CC_DGEUmode: code = ARM_CS; goto dominance;
11445 case CC_DGTUmode: code = ARM_HI; goto dominance;
11446 case CC_DLEUmode: code = ARM_LS; goto dominance;
11447 case CC_DLTUmode: code = ARM_CC;
11449 dominance:
11450 gcc_assert (comp_code == EQ || comp_code == NE);
11452 if (comp_code == EQ)
11453 return ARM_INVERSE_CONDITION_CODE (code);
11454 return code;
11456 case CC_NOOVmode:
11457 switch (comp_code)
11459 case NE: return ARM_NE;
11460 case EQ: return ARM_EQ;
11461 case GE: return ARM_PL;
11462 case LT: return ARM_MI;
11463 default: gcc_unreachable ();
11466 case CC_Zmode:
11467 switch (comp_code)
11469 case NE: return ARM_NE;
11470 case EQ: return ARM_EQ;
11471 default: gcc_unreachable ();
11474 case CC_Nmode:
11475 switch (comp_code)
11477 case NE: return ARM_MI;
11478 case EQ: return ARM_PL;
11479 default: gcc_unreachable ();
11482 case CCFPEmode:
11483 case CCFPmode:
11484 /* These encodings assume that AC=1 in the FPA system control
11485 byte. This allows us to handle all cases except UNEQ and
11486 LTGT. */
11487 switch (comp_code)
11489 case GE: return ARM_GE;
11490 case GT: return ARM_GT;
11491 case LE: return ARM_LS;
11492 case LT: return ARM_MI;
11493 case NE: return ARM_NE;
11494 case EQ: return ARM_EQ;
11495 case ORDERED: return ARM_VC;
11496 case UNORDERED: return ARM_VS;
11497 case UNLT: return ARM_LT;
11498 case UNLE: return ARM_LE;
11499 case UNGT: return ARM_HI;
11500 case UNGE: return ARM_PL;
11501 /* UNEQ and LTGT do not have a representation. */
11502 case UNEQ: /* Fall through. */
11503 case LTGT: /* Fall through. */
11504 default: gcc_unreachable ();
11507 case CC_SWPmode:
11508 switch (comp_code)
11510 case NE: return ARM_NE;
11511 case EQ: return ARM_EQ;
11512 case GE: return ARM_LE;
11513 case GT: return ARM_LT;
11514 case LE: return ARM_GE;
11515 case LT: return ARM_GT;
11516 case GEU: return ARM_LS;
11517 case GTU: return ARM_CC;
11518 case LEU: return ARM_CS;
11519 case LTU: return ARM_HI;
11520 default: gcc_unreachable ();
11523 case CC_Cmode:
11524 switch (comp_code)
11526 case LTU: return ARM_CS;
11527 case GEU: return ARM_CC;
11528 default: gcc_unreachable ();
11531 case CCmode:
11532 switch (comp_code)
11534 case NE: return ARM_NE;
11535 case EQ: return ARM_EQ;
11536 case GE: return ARM_GE;
11537 case GT: return ARM_GT;
11538 case LE: return ARM_LE;
11539 case LT: return ARM_LT;
11540 case GEU: return ARM_CS;
11541 case GTU: return ARM_HI;
11542 case LEU: return ARM_LS;
11543 case LTU: return ARM_CC;
11544 default: gcc_unreachable ();
11547 default: gcc_unreachable ();
11551 void
11552 arm_final_prescan_insn (rtx insn)
11554 /* BODY will hold the body of INSN. */
11555 rtx body = PATTERN (insn);
11557 /* This will be 1 if trying to repeat the trick, and things need to be
11558 reversed if it appears to fail. */
11559 int reverse = 0;
11561   /* JUMP_CLOBBERS of one implies that the condition codes are clobbered
11562      if the branch is taken, even if the rtl suggests otherwise.  It also
11563      means that we have to grub around within the jump expression to find
11564      out what the conditions are when the jump isn't taken.  */
11565 int jump_clobbers = 0;
11567 /* If we start with a return insn, we only succeed if we find another one. */
11568 int seeking_return = 0;
11570 /* START_INSN will hold the insn from where we start looking. This is the
11571 first insn after the following code_label if REVERSE is true. */
11572 rtx start_insn = insn;
11574 /* If in state 4, check if the target branch is reached, in order to
11575 change back to state 0. */
11576 if (arm_ccfsm_state == 4)
11578 if (insn == arm_target_insn)
11580 arm_target_insn = NULL;
11581 arm_ccfsm_state = 0;
11583 return;
11586 /* If in state 3, it is possible to repeat the trick, if this insn is an
11587 unconditional branch to a label, and immediately following this branch
11588 is the previous target label which is only used once, and the label this
11589 branch jumps to is not too far off. */
11590 if (arm_ccfsm_state == 3)
11592 if (simplejump_p (insn))
11594 start_insn = next_nonnote_insn (start_insn);
11595 if (GET_CODE (start_insn) == BARRIER)
11597 /* XXX Isn't this always a barrier? */
11598 start_insn = next_nonnote_insn (start_insn);
11600 if (GET_CODE (start_insn) == CODE_LABEL
11601 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11602 && LABEL_NUSES (start_insn) == 1)
11603 reverse = TRUE;
11604 else
11605 return;
11607 else if (GET_CODE (body) == RETURN)
11609 start_insn = next_nonnote_insn (start_insn);
11610 if (GET_CODE (start_insn) == BARRIER)
11611 start_insn = next_nonnote_insn (start_insn);
11612 if (GET_CODE (start_insn) == CODE_LABEL
11613 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11614 && LABEL_NUSES (start_insn) == 1)
11616 reverse = TRUE;
11617 seeking_return = 1;
11619 else
11620 return;
11622 else
11623 return;
11626 gcc_assert (!arm_ccfsm_state || reverse);
11627 if (GET_CODE (insn) != JUMP_INSN)
11628 return;
11630   /* This jump might be paralleled with a clobber of the condition codes;
11631      the jump should always come first.  */
11632 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11633 body = XVECEXP (body, 0, 0);
11635 if (reverse
11636 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11637 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11639 int insns_skipped;
11640 int fail = FALSE, succeed = FALSE;
11641 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11642 int then_not_else = TRUE;
11643 rtx this_insn = start_insn, label = 0;
11645 /* If the jump cannot be done with one instruction, we cannot
11646 conditionally execute the instruction in the inverse case. */
11647 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11649 jump_clobbers = 1;
11650 return;
11653 /* Register the insn jumped to. */
11654 if (reverse)
11656 if (!seeking_return)
11657 label = XEXP (SET_SRC (body), 0);
11659 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11660 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11661 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11663 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11664 then_not_else = FALSE;
11666 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11667 seeking_return = 1;
11668 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11670 seeking_return = 1;
11671 then_not_else = FALSE;
11673 else
11674 gcc_unreachable ();
11676 /* See how many insns this branch skips, and what kind of insns. If all
11677 insns are okay, and the label or unconditional branch to the same
11678 label is not too far away, succeed. */
11679 for (insns_skipped = 0;
11680 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11682 rtx scanbody;
11684 this_insn = next_nonnote_insn (this_insn);
11685 if (!this_insn)
11686 break;
11688 switch (GET_CODE (this_insn))
11690 case CODE_LABEL:
11691 /* Succeed if it is the target label, otherwise fail since
11692 control falls in from somewhere else. */
11693 if (this_insn == label)
11695 if (jump_clobbers)
11697 arm_ccfsm_state = 2;
11698 this_insn = next_nonnote_insn (this_insn);
11700 else
11701 arm_ccfsm_state = 1;
11702 succeed = TRUE;
11704 else
11705 fail = TRUE;
11706 break;
11708 case BARRIER:
11709 /* Succeed if the following insn is the target label.
11710 Otherwise fail.
11711 If return insns are used then the last insn in a function
11712 will be a barrier. */
11713 this_insn = next_nonnote_insn (this_insn);
11714 if (this_insn && this_insn == label)
11716 if (jump_clobbers)
11718 arm_ccfsm_state = 2;
11719 this_insn = next_nonnote_insn (this_insn);
11721 else
11722 arm_ccfsm_state = 1;
11723 succeed = TRUE;
11725 else
11726 fail = TRUE;
11727 break;
11729 case CALL_INSN:
11730 /* The AAPCS says that conditional calls should not be
11731 used since they make interworking inefficient (the
11732 linker can't transform BL<cond> into BLX). That's
11733 only a problem if the machine has BLX. */
11734 if (arm_arch5)
11736 fail = TRUE;
11737 break;
11740 /* Succeed if the following insn is the target label, or
11741 if the following two insns are a barrier and the
11742 target label. */
11743 this_insn = next_nonnote_insn (this_insn);
11744 if (this_insn && GET_CODE (this_insn) == BARRIER)
11745 this_insn = next_nonnote_insn (this_insn);
11747 if (this_insn && this_insn == label
11748 && insns_skipped < max_insns_skipped)
11750 if (jump_clobbers)
11752 arm_ccfsm_state = 2;
11753 this_insn = next_nonnote_insn (this_insn);
11755 else
11756 arm_ccfsm_state = 1;
11757 succeed = TRUE;
11759 else
11760 fail = TRUE;
11761 break;
11763 case JUMP_INSN:
11764 /* If this is an unconditional branch to the same label, succeed.
11765 If it is to another label, do nothing. If it is conditional,
11766 fail. */
11767 /* XXX Probably, the tests for SET and the PC are
11768 unnecessary. */
11770 scanbody = PATTERN (this_insn);
11771 if (GET_CODE (scanbody) == SET
11772 && GET_CODE (SET_DEST (scanbody)) == PC)
11774 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11775 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11777 arm_ccfsm_state = 2;
11778 succeed = TRUE;
11780 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11781 fail = TRUE;
11783 /* Fail if a conditional return is undesirable (e.g. on a
11784 StrongARM), but still allow this if optimizing for size. */
11785 else if (GET_CODE (scanbody) == RETURN
11786 && !use_return_insn (TRUE, NULL)
11787 && !optimize_size)
11788 fail = TRUE;
11789 else if (GET_CODE (scanbody) == RETURN
11790 && seeking_return)
11792 arm_ccfsm_state = 2;
11793 succeed = TRUE;
11795 else if (GET_CODE (scanbody) == PARALLEL)
11797 switch (get_attr_conds (this_insn))
11799 case CONDS_NOCOND:
11800 break;
11801 default:
11802 fail = TRUE;
11803 break;
11806 else
11807 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11809 break;
11811 case INSN:
11812 /* Instructions using or affecting the condition codes make it
11813 fail. */
11814 scanbody = PATTERN (this_insn);
11815 if (!(GET_CODE (scanbody) == SET
11816 || GET_CODE (scanbody) == PARALLEL)
11817 || get_attr_conds (this_insn) != CONDS_NOCOND)
11818 fail = TRUE;
11820 	      /* A conditional Cirrus instruction must be followed by
11821 		 a non-Cirrus instruction.  However, since we
11822 		 conditionalize instructions in this function, and since
11823 		 by the time we get here we can't add instructions
11824 		 (nops), because shorten_branches () has already been
11825 		 called, we will disable conditionalizing Cirrus
11826 		 instructions to be safe.  */
11827 if (GET_CODE (scanbody) != USE
11828 && GET_CODE (scanbody) != CLOBBER
11829 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11830 fail = TRUE;
11831 break;
11833 default:
11834 break;
11837 if (succeed)
11839 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11840 arm_target_label = CODE_LABEL_NUMBER (label);
11841 else
11843 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11845 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11847 this_insn = next_nonnote_insn (this_insn);
11848 gcc_assert (!this_insn
11849 || (GET_CODE (this_insn) != BARRIER
11850 && GET_CODE (this_insn) != CODE_LABEL));
11852 if (!this_insn)
11854 	      /* Oh, dear!  We ran off the end...  Give up.  */
11855 recog (PATTERN (insn), insn, NULL);
11856 arm_ccfsm_state = 0;
11857 arm_target_insn = NULL;
11858 return;
11860 arm_target_insn = this_insn;
11862 if (jump_clobbers)
11864 gcc_assert (!reverse);
11865 arm_current_cc =
11866 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11867 0), 0), 1));
11868 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11869 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11870 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11871 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11873 else
11875 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11876 what it was. */
11877 if (!reverse)
11878 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11879 0));
11882 if (reverse || then_not_else)
11883 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11886 /* Restore recog_data (getting the attributes of other insns can
11887 destroy this array, but final.c assumes that it remains intact
11888 across this call; since the insn has been recognized already we
11889      call recog directly).  */
11890 recog (PATTERN (insn), insn, NULL);
11894 /* Returns true if REGNO is a valid register
11895 for holding a quantity of type MODE. */
11896 int
11897 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11899 if (GET_MODE_CLASS (mode) == MODE_CC)
11900 return (regno == CC_REGNUM
11901 || (TARGET_HARD_FLOAT && TARGET_VFP
11902 && regno == VFPCC_REGNUM));
11904 if (TARGET_THUMB)
11905 /* For the Thumb we only allow values bigger than SImode in
11906 registers 0 - 6, so that there is always a second low
11907 register available to hold the upper part of the value.
11908        We probably ought to ensure that the register is the
11909 start of an even numbered register pair. */
11910 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11912 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
11913 && IS_CIRRUS_REGNUM (regno))
11914 /* We have outlawed SI values in Cirrus registers because they
11915 reside in the lower 32 bits, but SF values reside in the
11916 upper 32 bits. This causes gcc all sorts of grief. We can't
11917 even split the registers into pairs because Cirrus SI values
11918 get sign extended to 64bits-- aldyh. */
11919 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11921 if (TARGET_HARD_FLOAT && TARGET_VFP
11922 && IS_VFP_REGNUM (regno))
11924 if (mode == SFmode || mode == SImode)
11925 return TRUE;
11927 /* DFmode values are only valid in even register pairs. */
11928 if (mode == DFmode)
11929 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11930 return FALSE;
11933 if (TARGET_REALLY_IWMMXT)
11935 if (IS_IWMMXT_GR_REGNUM (regno))
11936 return mode == SImode;
11938 if (IS_IWMMXT_REGNUM (regno))
11939 return VALID_IWMMXT_REG_MODE (mode);
11942 /* We allow any value to be stored in the general registers.
11943 Restrict doubleword quantities to even register pairs so that we can
11944 use ldrd. */
11945 if (regno <= LAST_ARM_REGNUM)
11946 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11948 if (regno == FRAME_POINTER_REGNUM
11949 || regno == ARG_POINTER_REGNUM)
11950 /* We only allow integers in the fake hard registers. */
11951 return GET_MODE_CLASS (mode) == MODE_INT;
11953 /* The only registers left are the FPA registers
11954 which we only allow to hold FP values. */
11955 return (TARGET_HARD_FLOAT && TARGET_FPA
11956 && GET_MODE_CLASS (mode) == MODE_FLOAT
11957 && regno >= FIRST_FPA_REGNUM
11958 && regno <= LAST_FPA_REGNUM);
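/* For example (hypothetical queries): SImode is valid in any core
   register; DImode starting at r0 is accepted, but DImode starting at an
   odd register such as r1 is rejected when TARGET_LDRD, so that ldrd/strd
   can always be used on doubleword values.  */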
11961 int
11962 arm_regno_class (int regno)
11964 if (TARGET_THUMB)
11966 if (regno == STACK_POINTER_REGNUM)
11967 return STACK_REG;
11968 if (regno == CC_REGNUM)
11969 return CC_REG;
11970 if (regno < 8)
11971 return LO_REGS;
11972 return HI_REGS;
11975 if ( regno <= LAST_ARM_REGNUM
11976 || regno == FRAME_POINTER_REGNUM
11977 || regno == ARG_POINTER_REGNUM)
11978 return GENERAL_REGS;
11980 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11981 return NO_REGS;
11983 if (IS_CIRRUS_REGNUM (regno))
11984 return CIRRUS_REGS;
11986 if (IS_VFP_REGNUM (regno))
11987 return VFP_REGS;
11989 if (IS_IWMMXT_REGNUM (regno))
11990 return IWMMXT_REGS;
11992 if (IS_IWMMXT_GR_REGNUM (regno))
11993 return IWMMXT_GR_REGS;
11995 return FPA_REGS;
11998 /* Handle a special case when computing the offset
11999 of an argument from the frame pointer. */
12000 int
12001 arm_debugger_arg_offset (int value, rtx addr)
12003 rtx insn;
12005 /* We are only interested if dbxout_parms() failed to compute the offset. */
12006 if (value != 0)
12007 return 0;
12009 /* We can only cope with the case where the address is held in a register. */
12010 if (GET_CODE (addr) != REG)
12011 return 0;
12013 /* If we are using the frame pointer to point at the argument, then
12014 an offset of 0 is correct. */
12015 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
12016 return 0;
12018 /* If we are using the stack pointer to point at the
12019 argument, then an offset of 0 is correct. */
12020 if ((TARGET_THUMB || !frame_pointer_needed)
12021 && REGNO (addr) == SP_REGNUM)
12022 return 0;
12024 /* Oh dear. The argument is pointed to by a register rather
12025 than being held in a register, or being stored at a known
12026 offset from the frame pointer. Since GDB only understands
12027 those two kinds of argument we must translate the address
12028 held in the register into an offset from the frame pointer.
12029 We do this by searching through the insns for the function
12030 looking to see where this register gets its value. If the
12031 register is initialized from the frame pointer plus an offset
12032 then we are in luck and we can continue, otherwise we give up.
12034 This code is exercised by producing debugging information
12035 for a function with arguments like this:
12037 double func (double a, double b, int c, double d) {return d;}
12039 Without this code the stab for parameter 'd' will be set to
12040 an offset of 0 from the frame pointer, rather than 8. */
12042 /* The if() statement says:
12044 If the insn is a normal instruction
12045 and if the insn is setting the value in a register
12046 and if the register being set is the register holding the address of the argument
12047        and if the address is computed by an addition
12048 that involves adding to a register
12049 which is the frame pointer
12050 a constant integer
12052 then... */
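  /* In RTL terms, we are looking for an insn of the form:

        (set (reg Rn) (plus (reg hard-frame-pointer) (const_int OFFSET)))

     where Rn is the register holding the address of the argument.  */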
12054 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12056 if ( GET_CODE (insn) == INSN
12057 && GET_CODE (PATTERN (insn)) == SET
12058 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
12059 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
12060 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
12061 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
12062 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
12065 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
12067 break;
12071 if (value == 0)
12073 debug_rtx (addr);
12074 warning (0, "unable to compute real location of stacked parameter");
12075 value = 8; /* XXX magic hack */
12078 return value;
12081 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
12082 do \
12084 if ((MASK) & insn_flags) \
12085 add_builtin_function ((NAME), (TYPE), (CODE), \
12086 BUILT_IN_MD, NULL, NULL_TREE); \
12088 while (0)
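/* For example, the call below

      def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void,
                    ARM_BUILTIN_WZERO);

   registers __builtin_arm_wzero only when the selected CPU sets
   FL_IWMMXT in insn_flags.  */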
12090 struct builtin_description
12092 const unsigned int mask;
12093 const enum insn_code icode;
12094 const char * const name;
12095 const enum arm_builtins code;
12096 const enum rtx_code comparison;
12097 const unsigned int flag;
12100 static const struct builtin_description bdesc_2arg[] =
12102 #define IWMMXT_BUILTIN(code, string, builtin) \
12103 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
12104 ARM_BUILTIN_##builtin, 0, 0 },
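/* So, for instance, IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB) expands
   to the initializer

      { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
        ARM_BUILTIN_WADDB, 0, 0 },

   pairing the insn pattern with its user-visible builtin name.  */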
12106 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
12107 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
12108 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
12109 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
12110 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
12111 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
12112 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
12113 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
12114 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
12115 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
12116 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
12117 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
12118 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
12119 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
12120 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
12121 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
12122 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
12123 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
12124 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
12125 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
12126 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
12127 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
12128 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
12129 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
12130 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
12131 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
12132 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
12133 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
12134 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
12135 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
12136 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
12137 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
12138 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
12139 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
12140 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
12141 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
12142 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
12143 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
12144 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
12145 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
12146 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
12147 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
12148 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
12149 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
12150 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
12151 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
12152 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
12153 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
12154 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
12155 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
12156 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
12157 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
12158 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
12159 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
12160 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
12161 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
12162 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
12163 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
12165 #define IWMMXT_BUILTIN2(code, builtin) \
12166 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
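/* These entries have a NULL name, so the generic two-operand loop in
   arm_init_iwmmxt_builtins skips them; they are instead registered
   explicitly below via def_mbuiltin with more precise types.  */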
12168 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
12169 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
12170 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
12171 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
12172 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
12173 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
12174 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
12175 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
12176 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
12177 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
12178 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
12179 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
12180 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
12181 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
12182 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
12183 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
12184 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
12185 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
12186 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
12187 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
12188 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
12189 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
12190 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
12191 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
12192 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
12193 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
12194 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
12195 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
12196 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
12197 IWMMXT_BUILTIN2 (rordi3, WRORDI)
12198 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
12199 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
12202 static const struct builtin_description bdesc_1arg[] =
12204 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
12205 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
12206 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
12207 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
12208 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
12209 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
12210 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
12211 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
12212 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
12213 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
12214 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
12215 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
12216 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
12217 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
12218 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
12219 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
12220 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
12221 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
12224 /* Set up all the iWMMXt builtins. This is
12225 not called if TARGET_IWMMXT is zero. */
12227 static void
12228 arm_init_iwmmxt_builtins (void)
12230 const struct builtin_description * d;
12231 size_t i;
12232 tree endlink = void_list_node;
12234 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
12235 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
12236 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
12238 tree int_ftype_int
12239 = build_function_type (integer_type_node,
12240 tree_cons (NULL_TREE, integer_type_node, endlink));
12241 tree v8qi_ftype_v8qi_v8qi_int
12242 = build_function_type (V8QI_type_node,
12243 tree_cons (NULL_TREE, V8QI_type_node,
12244 tree_cons (NULL_TREE, V8QI_type_node,
12245 tree_cons (NULL_TREE,
12246 integer_type_node,
12247 endlink))));
12248 tree v4hi_ftype_v4hi_int
12249 = build_function_type (V4HI_type_node,
12250 tree_cons (NULL_TREE, V4HI_type_node,
12251 tree_cons (NULL_TREE, integer_type_node,
12252 endlink)));
12253 tree v2si_ftype_v2si_int
12254 = build_function_type (V2SI_type_node,
12255 tree_cons (NULL_TREE, V2SI_type_node,
12256 tree_cons (NULL_TREE, integer_type_node,
12257 endlink)));
12258 tree v2si_ftype_di_di
12259 = build_function_type (V2SI_type_node,
12260 tree_cons (NULL_TREE, long_long_integer_type_node,
12261 tree_cons (NULL_TREE, long_long_integer_type_node,
12262 endlink)));
12263 tree di_ftype_di_int
12264 = build_function_type (long_long_integer_type_node,
12265 tree_cons (NULL_TREE, long_long_integer_type_node,
12266 tree_cons (NULL_TREE, integer_type_node,
12267 endlink)));
12268 tree di_ftype_di_int_int
12269 = build_function_type (long_long_integer_type_node,
12270 tree_cons (NULL_TREE, long_long_integer_type_node,
12271 tree_cons (NULL_TREE, integer_type_node,
12272 tree_cons (NULL_TREE,
12273 integer_type_node,
12274 endlink))));
12275 tree int_ftype_v8qi
12276 = build_function_type (integer_type_node,
12277 tree_cons (NULL_TREE, V8QI_type_node,
12278 endlink));
12279 tree int_ftype_v4hi
12280 = build_function_type (integer_type_node,
12281 tree_cons (NULL_TREE, V4HI_type_node,
12282 endlink));
12283 tree int_ftype_v2si
12284 = build_function_type (integer_type_node,
12285 tree_cons (NULL_TREE, V2SI_type_node,
12286 endlink));
12287 tree int_ftype_v8qi_int
12288 = build_function_type (integer_type_node,
12289 tree_cons (NULL_TREE, V8QI_type_node,
12290 tree_cons (NULL_TREE, integer_type_node,
12291 endlink)));
12292 tree int_ftype_v4hi_int
12293 = build_function_type (integer_type_node,
12294 tree_cons (NULL_TREE, V4HI_type_node,
12295 tree_cons (NULL_TREE, integer_type_node,
12296 endlink)));
12297 tree int_ftype_v2si_int
12298 = build_function_type (integer_type_node,
12299 tree_cons (NULL_TREE, V2SI_type_node,
12300 tree_cons (NULL_TREE, integer_type_node,
12301 endlink)));
12302 tree v8qi_ftype_v8qi_int_int
12303 = build_function_type (V8QI_type_node,
12304 tree_cons (NULL_TREE, V8QI_type_node,
12305 tree_cons (NULL_TREE, integer_type_node,
12306 tree_cons (NULL_TREE,
12307 integer_type_node,
12308 endlink))));
12309 tree v4hi_ftype_v4hi_int_int
12310 = build_function_type (V4HI_type_node,
12311 tree_cons (NULL_TREE, V4HI_type_node,
12312 tree_cons (NULL_TREE, integer_type_node,
12313 tree_cons (NULL_TREE,
12314 integer_type_node,
12315 endlink))));
12316 tree v2si_ftype_v2si_int_int
12317 = build_function_type (V2SI_type_node,
12318 tree_cons (NULL_TREE, V2SI_type_node,
12319 tree_cons (NULL_TREE, integer_type_node,
12320 tree_cons (NULL_TREE,
12321 integer_type_node,
12322 endlink))));
12323 /* Miscellaneous. */
12324 tree v8qi_ftype_v4hi_v4hi
12325 = build_function_type (V8QI_type_node,
12326 tree_cons (NULL_TREE, V4HI_type_node,
12327 tree_cons (NULL_TREE, V4HI_type_node,
12328 endlink)));
12329 tree v4hi_ftype_v2si_v2si
12330 = build_function_type (V4HI_type_node,
12331 tree_cons (NULL_TREE, V2SI_type_node,
12332 tree_cons (NULL_TREE, V2SI_type_node,
12333 endlink)));
12334 tree v2si_ftype_v4hi_v4hi
12335 = build_function_type (V2SI_type_node,
12336 tree_cons (NULL_TREE, V4HI_type_node,
12337 tree_cons (NULL_TREE, V4HI_type_node,
12338 endlink)));
12339 tree v2si_ftype_v8qi_v8qi
12340 = build_function_type (V2SI_type_node,
12341 tree_cons (NULL_TREE, V8QI_type_node,
12342 tree_cons (NULL_TREE, V8QI_type_node,
12343 endlink)));
12344 tree v4hi_ftype_v4hi_di
12345 = build_function_type (V4HI_type_node,
12346 tree_cons (NULL_TREE, V4HI_type_node,
12347 tree_cons (NULL_TREE,
12348 long_long_integer_type_node,
12349 endlink)));
12350 tree v2si_ftype_v2si_di
12351 = build_function_type (V2SI_type_node,
12352 tree_cons (NULL_TREE, V2SI_type_node,
12353 tree_cons (NULL_TREE,
12354 long_long_integer_type_node,
12355 endlink)));
12356 tree void_ftype_int_int
12357 = build_function_type (void_type_node,
12358 tree_cons (NULL_TREE, integer_type_node,
12359 tree_cons (NULL_TREE, integer_type_node,
12360 endlink)));
12361 tree di_ftype_void
12362 = build_function_type (long_long_unsigned_type_node, endlink);
12363 tree di_ftype_v8qi
12364 = build_function_type (long_long_integer_type_node,
12365 tree_cons (NULL_TREE, V8QI_type_node,
12366 endlink));
12367 tree di_ftype_v4hi
12368 = build_function_type (long_long_integer_type_node,
12369 tree_cons (NULL_TREE, V4HI_type_node,
12370 endlink));
12371 tree di_ftype_v2si
12372 = build_function_type (long_long_integer_type_node,
12373 tree_cons (NULL_TREE, V2SI_type_node,
12374 endlink));
12375 tree v2si_ftype_v4hi
12376 = build_function_type (V2SI_type_node,
12377 tree_cons (NULL_TREE, V4HI_type_node,
12378 endlink));
12379 tree v4hi_ftype_v8qi
12380 = build_function_type (V4HI_type_node,
12381 tree_cons (NULL_TREE, V8QI_type_node,
12382 endlink));
12384 tree di_ftype_di_v4hi_v4hi
12385 = build_function_type (long_long_unsigned_type_node,
12386 tree_cons (NULL_TREE,
12387 long_long_unsigned_type_node,
12388 tree_cons (NULL_TREE, V4HI_type_node,
12389 tree_cons (NULL_TREE,
12390 V4HI_type_node,
12391 endlink))));
12393 tree di_ftype_v4hi_v4hi
12394 = build_function_type (long_long_unsigned_type_node,
12395 tree_cons (NULL_TREE, V4HI_type_node,
12396 tree_cons (NULL_TREE, V4HI_type_node,
12397 endlink)));
12399 /* Normal vector binops. */
12400 tree v8qi_ftype_v8qi_v8qi
12401 = build_function_type (V8QI_type_node,
12402 tree_cons (NULL_TREE, V8QI_type_node,
12403 tree_cons (NULL_TREE, V8QI_type_node,
12404 endlink)));
12405 tree v4hi_ftype_v4hi_v4hi
12406 = build_function_type (V4HI_type_node,
12407 tree_cons (NULL_TREE, V4HI_type_node,
12408 tree_cons (NULL_TREE, V4HI_type_node,
12409 endlink)));
12410 tree v2si_ftype_v2si_v2si
12411 = build_function_type (V2SI_type_node,
12412 tree_cons (NULL_TREE, V2SI_type_node,
12413 tree_cons (NULL_TREE, V2SI_type_node,
12414 endlink)));
12415 tree di_ftype_di_di
12416 = build_function_type (long_long_unsigned_type_node,
12417 tree_cons (NULL_TREE, long_long_unsigned_type_node,
12418 tree_cons (NULL_TREE,
12419 long_long_unsigned_type_node,
12420 endlink)));
12422 /* Add all builtins that are more or less simple operations on two
12423 operands. */
12424 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12426 /* Use one of the operands; the target can have a different mode for
12427 mask-generating compares. */
12428 enum machine_mode mode;
12429 tree type;
12431 if (d->name == 0)
12432 continue;
12434 mode = insn_data[d->icode].operand[1].mode;
12436 switch (mode)
12438 case V8QImode:
12439 type = v8qi_ftype_v8qi_v8qi;
12440 break;
12441 case V4HImode:
12442 type = v4hi_ftype_v4hi_v4hi;
12443 break;
12444 case V2SImode:
12445 type = v2si_ftype_v2si_v2si;
12446 break;
12447 case DImode:
12448 type = di_ftype_di_di;
12449 break;
12451 default:
12452 gcc_unreachable ();
12455 def_mbuiltin (d->mask, d->name, type, d->code);
12458   /* Add the remaining iWMMXt builtins with somewhat more complicated types.  */
12459 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12460 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12461 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12463 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12464 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12465 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12466 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12467 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12468 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12470 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12471 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12472 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12473 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12474 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12475 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12477 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12478 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12479 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12480 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12481 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12482 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12484 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12485 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12486 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12487 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12488 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12489 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12491 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12493 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12494 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12495 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12496 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12498 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12499 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12500 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12501 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12502 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12503 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12504 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12505 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12506 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12508 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12509 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12510 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12512 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12513 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12514 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12516 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12517 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12518 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12519 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12520 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12521 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12523 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12524 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12525 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12526 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12527 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12528 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12529 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12530 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12531 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12532 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12533 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12534 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12536 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12537 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12538 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12539 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12541 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12542 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12543 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12544 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12545 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12546 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12547 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
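/* A sketch of user-level usage (illustrative only; assumes an
   iWMMXt-enabled target and GCC vector extensions):

      typedef short v4hi __attribute__ ((vector_size (8)));

      v4hi
      add4 (v4hi a, v4hi b)
      {
        return __builtin_arm_waddh (a, b);
      }

   The call expands through bdesc_2arg to the addv4hi3 pattern.  */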
12550 static void
12551 arm_init_tls_builtins (void)
12553 tree ftype;
12554 tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
12555 tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
12557 ftype = build_function_type (ptr_type_node, void_list_node);
12558 add_builtin_function ("__builtin_thread_pointer", ftype,
12559 ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
12560 NULL, const_nothrow);
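/* Illustrative use (hypothetical snippet): a TLS runtime can obtain
   the thread pointer with

      void *tp = __builtin_thread_pointer ();

   which arm_expand_builtin lowers via arm_load_tp.  */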
12563 static void
12564 arm_init_builtins (void)
12566 arm_init_tls_builtins ();
12568 if (TARGET_REALLY_IWMMXT)
12569 arm_init_iwmmxt_builtins ();
12572 /* Errors in the source file can cause expand_expr to return const0_rtx
12573 where we expect a vector. To avoid crashing, use one of the vector
12574 clear instructions. */
12576 static rtx
12577 safe_vector_operand (rtx x, enum machine_mode mode)
12579 if (x != const0_rtx)
12580 return x;
12581 x = gen_reg_rtx (mode);
12583 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12584 : gen_rtx_SUBREG (DImode, x, 0)));
12585 return x;
12588 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12590 static rtx
12591 arm_expand_binop_builtin (enum insn_code icode,
12592 tree arglist, rtx target)
12594 rtx pat;
12595 tree arg0 = TREE_VALUE (arglist);
12596 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12597 rtx op0 = expand_normal (arg0);
12598 rtx op1 = expand_normal (arg1);
12599 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12600 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12601 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12603 if (VECTOR_MODE_P (mode0))
12604 op0 = safe_vector_operand (op0, mode0);
12605 if (VECTOR_MODE_P (mode1))
12606 op1 = safe_vector_operand (op1, mode1);
12608 if (! target
12609 || GET_MODE (target) != tmode
12610 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12611 target = gen_reg_rtx (tmode);
12613 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12615 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12616 op0 = copy_to_mode_reg (mode0, op0);
12617 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12618 op1 = copy_to_mode_reg (mode1, op1);
12620 pat = GEN_FCN (icode) (target, op0, op1);
12621 if (! pat)
12622 return 0;
12623 emit_insn (pat);
12624 return target;
12627 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12629 static rtx
12630 arm_expand_unop_builtin (enum insn_code icode,
12631 tree arglist, rtx target, int do_load)
12633 rtx pat;
12634 tree arg0 = TREE_VALUE (arglist);
12635 rtx op0 = expand_normal (arg0);
12636 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12637 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12639 if (! target
12640 || GET_MODE (target) != tmode
12641 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12642 target = gen_reg_rtx (tmode);
12643 if (do_load)
12644 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12645 else
12647 if (VECTOR_MODE_P (mode0))
12648 op0 = safe_vector_operand (op0, mode0);
12650 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12651 op0 = copy_to_mode_reg (mode0, op0);
12654 pat = GEN_FCN (icode) (target, op0);
12655 if (! pat)
12656 return 0;
12657 emit_insn (pat);
12658 return target;
12661 /* Expand an expression EXP that calls a built-in function,
12662 with result going to TARGET if that's convenient
12663 (and in mode MODE if that's convenient).
12664 SUBTARGET may be used as the target for computing one of EXP's operands.
12665 IGNORE is nonzero if the value is to be ignored. */
12667 static rtx
12668 arm_expand_builtin (tree exp,
12669 rtx target,
12670 rtx subtarget ATTRIBUTE_UNUSED,
12671 enum machine_mode mode ATTRIBUTE_UNUSED,
12672 int ignore ATTRIBUTE_UNUSED)
12674 const struct builtin_description * d;
12675 enum insn_code icode;
12676 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12677 tree arglist = TREE_OPERAND (exp, 1);
12678 tree arg0;
12679 tree arg1;
12680 tree arg2;
12681 rtx op0;
12682 rtx op1;
12683 rtx op2;
12684 rtx pat;
12685 int fcode = DECL_FUNCTION_CODE (fndecl);
12686 size_t i;
12687 enum machine_mode tmode;
12688 enum machine_mode mode0;
12689 enum machine_mode mode1;
12690 enum machine_mode mode2;
12692 switch (fcode)
12694 case ARM_BUILTIN_TEXTRMSB:
12695 case ARM_BUILTIN_TEXTRMUB:
12696 case ARM_BUILTIN_TEXTRMSH:
12697 case ARM_BUILTIN_TEXTRMUH:
12698 case ARM_BUILTIN_TEXTRMSW:
12699 case ARM_BUILTIN_TEXTRMUW:
12700 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12701 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12702 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12703 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12704 : CODE_FOR_iwmmxt_textrmw);
12706 arg0 = TREE_VALUE (arglist);
12707 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12708 op0 = expand_normal (arg0);
12709 op1 = expand_normal (arg1);
12710 tmode = insn_data[icode].operand[0].mode;
12711 mode0 = insn_data[icode].operand[1].mode;
12712 mode1 = insn_data[icode].operand[2].mode;
12714 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12715 op0 = copy_to_mode_reg (mode0, op0);
12716 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12718 /* @@@ better error message */
12719 error ("selector must be an immediate");
12720 return gen_reg_rtx (tmode);
12722 if (target == 0
12723 || GET_MODE (target) != tmode
12724 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12725 target = gen_reg_rtx (tmode);
12726 pat = GEN_FCN (icode) (target, op0, op1);
12727 if (! pat)
12728 return 0;
12729 emit_insn (pat);
12730 return target;
12732 case ARM_BUILTIN_TINSRB:
12733 case ARM_BUILTIN_TINSRH:
12734 case ARM_BUILTIN_TINSRW:
12735 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12736 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12737 : CODE_FOR_iwmmxt_tinsrw);
12738 arg0 = TREE_VALUE (arglist);
12739 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12740 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12741 op0 = expand_normal (arg0);
12742 op1 = expand_normal (arg1);
12743 op2 = expand_normal (arg2);
12744 tmode = insn_data[icode].operand[0].mode;
12745 mode0 = insn_data[icode].operand[1].mode;
12746 mode1 = insn_data[icode].operand[2].mode;
12747 mode2 = insn_data[icode].operand[3].mode;
12749 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12750 op0 = copy_to_mode_reg (mode0, op0);
12751 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12752 op1 = copy_to_mode_reg (mode1, op1);
12753 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12755 /* @@@ better error message */
12756 error ("selector must be an immediate");
12757 return const0_rtx;
12759 if (target == 0
12760 || GET_MODE (target) != tmode
12761 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12762 target = gen_reg_rtx (tmode);
12763 pat = GEN_FCN (icode) (target, op0, op1, op2);
12764 if (! pat)
12765 return 0;
12766 emit_insn (pat);
12767 return target;
12769 case ARM_BUILTIN_SETWCX:
12770 arg0 = TREE_VALUE (arglist);
12771 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12772 op0 = force_reg (SImode, expand_normal (arg0));
12773 op1 = expand_normal (arg1);
12774 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12775 return 0;
12777 case ARM_BUILTIN_GETWCX:
12778 arg0 = TREE_VALUE (arglist);
12779 op0 = expand_normal (arg0);
12780 target = gen_reg_rtx (SImode);
12781 emit_insn (gen_iwmmxt_tmrc (target, op0));
12782 return target;
12784 case ARM_BUILTIN_WSHUFH:
12785 icode = CODE_FOR_iwmmxt_wshufh;
12786 arg0 = TREE_VALUE (arglist);
12787 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12788 op0 = expand_normal (arg0);
12789 op1 = expand_normal (arg1);
12790 tmode = insn_data[icode].operand[0].mode;
12791 mode1 = insn_data[icode].operand[1].mode;
12792 mode2 = insn_data[icode].operand[2].mode;
12794 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12795 op0 = copy_to_mode_reg (mode1, op0);
12796 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12798 /* @@@ better error message */
12799 error ("mask must be an immediate");
12800 return const0_rtx;
12802 if (target == 0
12803 || GET_MODE (target) != tmode
12804 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12805 target = gen_reg_rtx (tmode);
12806 pat = GEN_FCN (icode) (target, op0, op1);
12807 if (! pat)
12808 return 0;
12809 emit_insn (pat);
12810 return target;
12812 case ARM_BUILTIN_WSADB:
12813 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12814 case ARM_BUILTIN_WSADH:
12815 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12816 case ARM_BUILTIN_WSADBZ:
12817 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12818 case ARM_BUILTIN_WSADHZ:
12819 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12821 /* Several three-argument builtins. */
12822 case ARM_BUILTIN_WMACS:
12823 case ARM_BUILTIN_WMACU:
12824 case ARM_BUILTIN_WALIGN:
12825 case ARM_BUILTIN_TMIA:
12826 case ARM_BUILTIN_TMIAPH:
12827 case ARM_BUILTIN_TMIATT:
12828 case ARM_BUILTIN_TMIATB:
12829 case ARM_BUILTIN_TMIABT:
12830 case ARM_BUILTIN_TMIABB:
12831 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12832 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12833 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12834 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12835 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12836 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12837 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12838 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12839 : CODE_FOR_iwmmxt_walign);
12840 arg0 = TREE_VALUE (arglist);
12841 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12842 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12843 op0 = expand_normal (arg0);
12844 op1 = expand_normal (arg1);
12845 op2 = expand_normal (arg2);
12846 tmode = insn_data[icode].operand[0].mode;
12847 mode0 = insn_data[icode].operand[1].mode;
12848 mode1 = insn_data[icode].operand[2].mode;
12849 mode2 = insn_data[icode].operand[3].mode;
12851 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12852 op0 = copy_to_mode_reg (mode0, op0);
12853 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12854 op1 = copy_to_mode_reg (mode1, op1);
12855 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12856 op2 = copy_to_mode_reg (mode2, op2);
12857 if (target == 0
12858 || GET_MODE (target) != tmode
12859 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12860 target = gen_reg_rtx (tmode);
12861 pat = GEN_FCN (icode) (target, op0, op1, op2);
12862 if (! pat)
12863 return 0;
12864 emit_insn (pat);
12865 return target;
12867 case ARM_BUILTIN_WZERO:
12868 target = gen_reg_rtx (DImode);
12869 emit_insn (gen_iwmmxt_clrdi (target));
12870 return target;
12872 case ARM_BUILTIN_THREAD_POINTER:
12873 return arm_load_tp (target);
12875 default:
12876 break;
12879 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12880 if (d->code == (const enum arm_builtins) fcode)
12881 return arm_expand_binop_builtin (d->icode, arglist, target);
12883 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12884 if (d->code == (const enum arm_builtins) fcode)
12885 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12887 /* @@@ Should really do something sensible here. */
12888 return NULL_RTX;
12891 /* Return the number (counting from 0) of
12892 the least significant set bit in MASK. */
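/* MASK must be nonzero or the loop below will not terminate; for
   example, number_of_first_bit_set (0x18) returns 3.  */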
12894 inline static int
12895 number_of_first_bit_set (unsigned mask)
12897 int bit;
12899 for (bit = 0;
12900 (mask & (1 << bit)) == 0;
12901 ++bit)
12902 continue;
12904 return bit;
12907 /* Emit code to push or pop registers to or from the stack. F is the
12908 assembly file. MASK is the registers to push or pop. PUSH is
12909 nonzero if we should push, and zero if we should pop. For debugging
12910 output, if pushing, adjust CFA_OFFSET by the amount of space added
12911 to the stack. REAL_REGS should have the same number of bits set as
12912 MASK, and will be used instead (in the same order) to describe which
12913 registers were saved - this is used to mark the save slots when we
12914 push high registers after moving them to low registers. */
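/* For example, pushing r4, r5 and lr produces

      push {r4, r5, lr}

   preceded, when ARM_EABI_UNWIND_TABLES, by a matching
   .save {r4, r5, lr} directive.  */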
12915 static void
12916 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12917 unsigned long real_regs)
12919 int regno;
12920 int lo_mask = mask & 0xFF;
12921 int pushed_words = 0;
12923 gcc_assert (mask);
12925 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12927       /* Special case.  Do not generate a POP PC statement here; do it in
12928          thumb_exit().  */
12929 thumb_exit (f, -1);
12930 return;
12933 if (ARM_EABI_UNWIND_TABLES && push)
12935 fprintf (f, "\t.save\t{");
12936 for (regno = 0; regno < 15; regno++)
12938 if (real_regs & (1 << regno))
12940 if (real_regs & ((1 << regno) -1))
12941 fprintf (f, ", ");
12942 asm_fprintf (f, "%r", regno);
12945 fprintf (f, "}\n");
12948 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12950 /* Look at the low registers first. */
12951 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12953 if (lo_mask & 1)
12955 asm_fprintf (f, "%r", regno);
12957 if ((lo_mask & ~1) != 0)
12958 fprintf (f, ", ");
12960 pushed_words++;
12964 if (push && (mask & (1 << LR_REGNUM)))
12966 /* Catch pushing the LR. */
12967 if (mask & 0xFF)
12968 fprintf (f, ", ");
12970 asm_fprintf (f, "%r", LR_REGNUM);
12972 pushed_words++;
12974 else if (!push && (mask & (1 << PC_REGNUM)))
12976 /* Catch popping the PC. */
12977 if (TARGET_INTERWORK || TARGET_BACKTRACE
12978 || current_function_calls_eh_return)
12980           /* The PC is never popped directly; instead
12981 it is popped into r3 and then BX is used. */
12982 fprintf (f, "}\n");
12984 thumb_exit (f, -1);
12986 return;
12988 else
12990 if (mask & 0xFF)
12991 fprintf (f, ", ");
12993 asm_fprintf (f, "%r", PC_REGNUM);
12997 fprintf (f, "}\n");
12999 if (push && pushed_words && dwarf2out_do_frame ())
13001 char *l = dwarf2out_cfi_label ();
13002 int pushed_mask = real_regs;
13004 *cfa_offset += pushed_words * 4;
13005 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
13007 pushed_words = 0;
13008 pushed_mask = real_regs;
13009 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
13011 if (pushed_mask & 1)
13012 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
13017 /* Generate code to return from a thumb function.
13018 If 'reg_containing_return_addr' is -1, then the return address is
13019 actually on the stack, at the stack pointer. */
13020 static void
13021 thumb_exit (FILE *f, int reg_containing_return_addr)
13023 unsigned regs_available_for_popping;
13024 unsigned regs_to_pop;
13025 int pops_needed;
13026 unsigned available;
13027 unsigned required;
13028 int mode;
13029 int size;
13030 int restore_a4 = FALSE;
13032 /* Compute the registers we need to pop. */
13033 regs_to_pop = 0;
13034 pops_needed = 0;
13036 if (reg_containing_return_addr == -1)
13038 regs_to_pop |= 1 << LR_REGNUM;
13039 ++pops_needed;
13042 if (TARGET_BACKTRACE)
13044 /* Restore the (ARM) frame pointer and stack pointer. */
13045 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
13046 pops_needed += 2;
13049 /* If there is nothing to pop then just emit the BX instruction and
13050 return. */
13051 if (pops_needed == 0)
13053 if (current_function_calls_eh_return)
13054 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13056 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13057 return;
13059 /* Otherwise if we are not supporting interworking and we have not created
13060 a backtrace structure and the function was not entered in ARM mode then
13061 just pop the return address straight into the PC. */
13062 else if (!TARGET_INTERWORK
13063 && !TARGET_BACKTRACE
13064 && !is_called_in_ARM_mode (current_function_decl)
13065 && !current_function_calls_eh_return)
13067 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
13068 return;
13071 /* Find out how many of the (return) argument registers we can corrupt. */
13072 regs_available_for_popping = 0;
13074 /* If returning via __builtin_eh_return, the bottom three registers
13075 all contain information needed for the return. */
13076 if (current_function_calls_eh_return)
13077 size = 12;
13078 else
13080       /* Try to deduce the registers used from the function's
13081          return value.  This is more reliable than examining
13082 regs_ever_live[] because that will be set if the register is
13083 ever used in the function, not just if the register is used
13084 to hold a return value. */
13086 if (current_function_return_rtx != 0)
13087 mode = GET_MODE (current_function_return_rtx);
13088 else
13089 mode = DECL_MODE (DECL_RESULT (current_function_decl));
13091 size = GET_MODE_SIZE (mode);
13093 if (size == 0)
13095 /* In a void function we can use any argument register.
13096 In a function that returns a structure on the stack
13097 we can use the second and third argument registers. */
13098 if (mode == VOIDmode)
13099 regs_available_for_popping =
13100 (1 << ARG_REGISTER (1))
13101 | (1 << ARG_REGISTER (2))
13102 | (1 << ARG_REGISTER (3));
13103 else
13104 regs_available_for_popping =
13105 (1 << ARG_REGISTER (2))
13106 | (1 << ARG_REGISTER (3));
13108 else if (size <= 4)
13109 regs_available_for_popping =
13110 (1 << ARG_REGISTER (2))
13111 | (1 << ARG_REGISTER (3));
13112 else if (size <= 8)
13113 regs_available_for_popping =
13114 (1 << ARG_REGISTER (3));
13117 /* Match registers to be popped with registers into which we pop them. */
13118 for (available = regs_available_for_popping,
13119 required = regs_to_pop;
13120 required != 0 && available != 0;
13121 available &= ~(available & - available),
13122 required &= ~(required & - required))
13123 -- pops_needed;
13125 /* If we have any popping registers left over, remove them. */
13126 if (available > 0)
13127 regs_available_for_popping &= ~available;
13129 /* Otherwise if we need another popping register we can use
13130 the fourth argument register. */
13131 else if (pops_needed)
13133 /* If we have not found any free argument registers and
13134 reg a4 contains the return address, we must move it. */
13135 if (regs_available_for_popping == 0
13136 && reg_containing_return_addr == LAST_ARG_REGNUM)
13138 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13139 reg_containing_return_addr = LR_REGNUM;
13141 else if (size > 12)
13143 /* Register a4 is being used to hold part of the return value,
13144 but we have dire need of a free, low register. */
13145 restore_a4 = TRUE;
13147           asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
13150 if (reg_containing_return_addr != LAST_ARG_REGNUM)
13152 /* The fourth argument register is available. */
13153 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
13155 --pops_needed;
13159 /* Pop as many registers as we can. */
13160 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13161 regs_available_for_popping);
13163 /* Process the registers we popped. */
13164 if (reg_containing_return_addr == -1)
13166 /* The return address was popped into the lowest numbered register. */
13167 regs_to_pop &= ~(1 << LR_REGNUM);
13169 reg_containing_return_addr =
13170 number_of_first_bit_set (regs_available_for_popping);
13172       /* Remove this register from the mask of available registers, so that
13173 the return address will not be corrupted by further pops. */
13174 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
13177 /* If we popped other registers then handle them here. */
13178 if (regs_available_for_popping)
13180 int frame_pointer;
13182 /* Work out which register currently contains the frame pointer. */
13183 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
13185 /* Move it into the correct place. */
13186 asm_fprintf (f, "\tmov\t%r, %r\n",
13187 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
13189 /* (Temporarily) remove it from the mask of popped registers. */
13190 regs_available_for_popping &= ~(1 << frame_pointer);
13191 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
13193 if (regs_available_for_popping)
13195 int stack_pointer;
13197           /* We popped the stack pointer as well;
13198 find the register that contains it. */
13199 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
13201 /* Move it into the stack register. */
13202 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
13204 /* At this point we have popped all necessary registers, so
13205 do not worry about restoring regs_available_for_popping
13206 to its correct value:
13208 assert (pops_needed == 0)
13209 assert (regs_available_for_popping == (1 << frame_pointer))
13210 assert (regs_to_pop == (1 << STACK_POINTER)) */
13212 else
13214           /* Since we have just moved the popped value into the frame
13215 pointer, the popping register is available for reuse, and
13216 we know that we still have the stack pointer left to pop. */
13217 regs_available_for_popping |= (1 << frame_pointer);
13221 /* If we still have registers left on the stack, but we no longer have
13222 any registers into which we can pop them, then we must move the return
13223 address into the link register and make available the register that
13224 contained it. */
13225 if (regs_available_for_popping == 0 && pops_needed > 0)
13227 regs_available_for_popping |= 1 << reg_containing_return_addr;
13229 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
13230 reg_containing_return_addr);
13232 reg_containing_return_addr = LR_REGNUM;
13235 /* If we have registers left on the stack then pop some more.
13236 We know that at most we will want to pop FP and SP. */
13237 if (pops_needed > 0)
13239 int popped_into;
13240 int move_to;
13242 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13243 regs_available_for_popping);
13245 /* We have popped either FP or SP.
13246 Move whichever one it is into the correct register. */
13247 popped_into = number_of_first_bit_set (regs_available_for_popping);
13248 move_to = number_of_first_bit_set (regs_to_pop);
13250 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
13252 regs_to_pop &= ~(1 << move_to);
13254 --pops_needed;
13257 /* If we still have not popped everything then we must have only
13258 had one register available to us and we are now popping the SP. */
13259 if (pops_needed > 0)
13261 int popped_into;
13263 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13264 regs_available_for_popping);
13266 popped_into = number_of_first_bit_set (regs_available_for_popping);
13268 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
13270       /* assert (regs_to_pop == (1 << STACK_POINTER))
13271          assert (pops_needed == 1) */
13275 /* If necessary restore the a4 register. */
13276 if (restore_a4)
13278 if (reg_containing_return_addr != LR_REGNUM)
13280 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13281 reg_containing_return_addr = LR_REGNUM;
13284 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13287 if (current_function_calls_eh_return)
13288 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13290 /* Return to caller. */
13291 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13295 void
13296 thumb_final_prescan_insn (rtx insn)
13298 if (flag_print_asm_name)
13299 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
13300 INSN_ADDRESSES (INSN_UID (insn)));
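/* Return nonzero if VAL can be expressed as an 8-bit constant shifted
   left, e.g. 0xFF000000 (0xFF << 24); 0x101 fails because no single
   8-bit window covers both set bits.  */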
13304 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
13306 unsigned HOST_WIDE_INT mask = 0xff;
13307 int i;
13309 if (val == 0) /* XXX */
13310 return 0;
13312 for (i = 0; i < 25; i++)
13313 if ((val & (mask << i)) == val)
13314 return 1;
13316 return 0;
13319 /* Returns nonzero if the current function contains,
13320 or might contain a far jump. */
13321 static int
13322 thumb_far_jump_used_p (void)
13324 rtx insn;
13326 /* This test is only important for leaf functions. */
13327 /* assert (!leaf_function_p ()); */
13329 /* If we have already decided that far jumps may be used,
13330 do not bother checking again, and always return true even if
13331 it turns out that they are not being used. Once we have made
13332 the decision that far jumps are present (and that hence the link
13333 register will be pushed onto the stack) we cannot go back on it. */
13334 if (cfun->machine->far_jump_used)
13335 return 1;
13337 /* If this function is not being called from the prologue/epilogue
13338 generation code then it must be being called from the
13339 INITIAL_ELIMINATION_OFFSET macro. */
13340 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
13342 /* In this case we know that we are being asked about the elimination
13343 of the arg pointer register. If that register is not being used,
13344 then there are no arguments on the stack, and we do not have to
13345 worry that a far jump might force the prologue to push the link
13346 register, changing the stack offsets. In this case we can just
13347 return false, since the presence of far jumps in the function will
13348 not affect stack offsets.
13350 If the arg pointer is live (or if it was live, but has now been
13351 eliminated and so set to dead) then we do have to test to see if
13352 the function might contain a far jump. This test can lead to some
13353          false negatives, since before reload is completed the length of
13354 branch instructions is not known, so gcc defaults to returning their
13355 longest length, which in turn sets the far jump attribute to true.
13357 A false negative will not result in bad code being generated, but it
13358 will result in a needless push and pop of the link register. We
13359 hope that this does not occur too often.
13361 If we need doubleword stack alignment this could affect the other
13362 elimination offsets so we can't risk getting it wrong. */
13363 if (regs_ever_live [ARG_POINTER_REGNUM])
13364 cfun->machine->arg_pointer_live = 1;
13365 else if (!cfun->machine->arg_pointer_live)
13366 return 0;
13369 /* Check to see if the function contains a branch
13370 insn with the far jump attribute set. */
13371 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13373 if (GET_CODE (insn) == JUMP_INSN
13374 /* Ignore tablejump patterns. */
13375 && GET_CODE (PATTERN (insn)) != ADDR_VEC
13376 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
13377 && get_attr_far_jump (insn) == FAR_JUMP_YES
13380 /* Record the fact that we have decided that
13381 the function does use far jumps. */
13382 cfun->machine->far_jump_used = 1;
13383 return 1;
13387 return 0;
13390 /* Return nonzero if FUNC must be entered in ARM mode. */
13392 is_called_in_ARM_mode (tree func)
13394 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
13396 /* Ignore the problem about functions whose address is taken. */
13397 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
13398 return TRUE;
13400 #ifdef ARM_PE
13401 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
13402 #else
13403 return FALSE;
13404 #endif
13407 /* The bits which aren't usefully expanded as rtl. */
13408 const char *
13409 thumb_unexpanded_epilogue (void)
13411 int regno;
13412 unsigned long live_regs_mask = 0;
13413 int high_regs_pushed = 0;
13414 int had_to_push_lr;
13415 int size;
13417 if (return_used_this_function)
13418 return "";
13420 if (IS_NAKED (arm_current_func_type ()))
13421 return "";
13423 live_regs_mask = thumb_compute_save_reg_mask ();
13424 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13426   /* Try to deduce the registers used from the function's return value.
13427      This is more reliable than examining regs_ever_live[] because that
13428 will be set if the register is ever used in the function, not just if
13429 the register is used to hold a return value. */
13430 size = arm_size_return_regs ();
13432   /* The prologue may have pushed some high registers to use as
13433      work registers, e.g. the testsuite file
13434      gcc/testsuite/gcc.c-torture/execute/complex-2.c
13435 compiles to produce:
13436 push {r4, r5, r6, r7, lr}
13437 mov r7, r9
13438 mov r6, r8
13439 push {r6, r7}
13440      as part of the prologue.  We have to undo that pushing here.  */
13442 if (high_regs_pushed)
13444 unsigned long mask = live_regs_mask & 0xff;
13445 int next_hi_reg;
13447 /* The available low registers depend on the size of the value we are
13448 returning. */
13449 if (size <= 12)
13450 mask |= 1 << 3;
13451 if (size <= 8)
13452 mask |= 1 << 2;
13454 if (mask == 0)
13455 /* Oh dear! We have no low registers into which we can pop
13456 high registers! */
13457 internal_error
13458 ("no low registers available for popping high registers");
13460 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13461 if (live_regs_mask & (1 << next_hi_reg))
13462 break;
13464 while (high_regs_pushed)
13466 /* Find lo register(s) into which the high register(s) can
13467 be popped. */
13468 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13470 if (mask & (1 << regno))
13471 high_regs_pushed--;
13472 if (high_regs_pushed == 0)
13473 break;
13476 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13478 /* Pop the values into the low register(s). */
13479 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13481 /* Move the value(s) into the high registers. */
13482 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13484 if (mask & (1 << regno))
13486 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13487 regno);
13489 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13490 if (live_regs_mask & (1 << next_hi_reg))
13491 break;
13495 live_regs_mask &= ~0x0f00;
13498 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13499 live_regs_mask &= 0xff;
13501 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13503 /* Pop the return address into the PC. */
13504 if (had_to_push_lr)
13505 live_regs_mask |= 1 << PC_REGNUM;
13507 /* Either no argument registers were pushed or a backtrace
13508 structure was created which includes an adjusted stack
13509 pointer, so just pop everything. */
13510 if (live_regs_mask)
13511 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13512 live_regs_mask);
13514 /* We have either just popped the return address into the
13515          PC or it was kept in LR for the entire function.  */
13516 if (!had_to_push_lr)
13517 thumb_exit (asm_out_file, LR_REGNUM);
13519 else
13521 /* Pop everything but the return address. */
13522 if (live_regs_mask)
13523 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13524 live_regs_mask);
13526 if (had_to_push_lr)
13528 if (size > 12)
13530 /* We have no free low regs, so save one. */
13531 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13532 LAST_ARG_REGNUM);
13535 /* Get the return address into a temporary register. */
13536 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13537 1 << LAST_ARG_REGNUM);
13539 if (size > 12)
13541 /* Move the return address to lr. */
13542 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13543 LAST_ARG_REGNUM);
13544 /* Restore the low register. */
13545 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13546 IP_REGNUM);
13547 regno = LR_REGNUM;
13549 else
13550 regno = LAST_ARG_REGNUM;
13552 else
13553 regno = LR_REGNUM;
13555 /* Remove the argument registers that were pushed onto the stack. */
13556 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13557 SP_REGNUM, SP_REGNUM,
13558 current_function_pretend_args_size);
13560 thumb_exit (asm_out_file, regno);
13563 return "";
13566 /* Functions to save and restore machine-specific function data. */
13567 static struct machine_function *
13568 arm_init_machine_status (void)
13570 struct machine_function *machine;
13571 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13573 #if ARM_FT_UNKNOWN != 0
13574 machine->func_type = ARM_FT_UNKNOWN;
13575 #endif
13576 return machine;
13579 /* Return an RTX indicating where the return address to the
13580 calling function can be found. */
13582 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13584 if (count != 0)
13585 return NULL_RTX;
13587 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13590 /* Do anything needed before RTL is emitted for each function. */
13591 void
13592 arm_init_expanders (void)
13594 /* Arrange to initialize and mark the machine per-function status. */
13595 init_machine_status = arm_init_machine_status;
13597 /* This is to stop the combine pass optimizing away the alignment
13598 adjustment of va_arg. */
13599 /* ??? It is claimed that this should not be necessary. */
13600 if (cfun)
13601 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13605 /* Like arm_compute_initial_elimination_offset.  Simpler because there
13606 isn't an ABI specified frame pointer for Thumb. Instead, we set it
13607 to point at the base of the local variables after static stack
13608 space for a function has been allocated. */
13610 HOST_WIDE_INT
13611 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13613 arm_stack_offsets *offsets;
13615 offsets = arm_get_frame_offsets ();
13617 switch (from)
13619 case ARG_POINTER_REGNUM:
13620 switch (to)
13622 case STACK_POINTER_REGNUM:
13623 return offsets->outgoing_args - offsets->saved_args;
13625 case FRAME_POINTER_REGNUM:
13626 return offsets->soft_frame - offsets->saved_args;
13628 case ARM_HARD_FRAME_POINTER_REGNUM:
13629 return offsets->saved_regs - offsets->saved_args;
13631 case THUMB_HARD_FRAME_POINTER_REGNUM:
13632 return offsets->locals_base - offsets->saved_args;
13634 default:
13635 gcc_unreachable ();
13637 break;
13639 case FRAME_POINTER_REGNUM:
13640 switch (to)
13642 case STACK_POINTER_REGNUM:
13643 return offsets->outgoing_args - offsets->soft_frame;
13645 case ARM_HARD_FRAME_POINTER_REGNUM:
13646 return offsets->saved_regs - offsets->soft_frame;
13648 case THUMB_HARD_FRAME_POINTER_REGNUM:
13649 return offsets->locals_base - offsets->soft_frame;
13651 default:
13652 gcc_unreachable ();
13654 break;
13656 default:
13657 gcc_unreachable ();
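/* Worked example with made-up frame offsets: if saved_args == 0,
   saved_regs == 16, soft_frame == 24 and outgoing_args == 40, then
   eliminating ARG_POINTER_REGNUM to STACK_POINTER_REGNUM returns
   40 - 0 = 40, while eliminating it to FRAME_POINTER_REGNUM returns
   24 - 0 = 24.  */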
13662 /* Generate the rest of a function's prologue. */
13663 void
13664 thumb_expand_prologue (void)
13666 rtx insn, dwarf;
13668 HOST_WIDE_INT amount;
13669 arm_stack_offsets *offsets;
13670 unsigned long func_type;
13671 int regno;
13672 unsigned long live_regs_mask;
13674 func_type = arm_current_func_type ();
13676 /* Naked functions don't have prologues. */
13677 if (IS_NAKED (func_type))
13678 return;
13680 if (IS_INTERRUPT (func_type))
13682 error ("interrupt Service Routines cannot be coded in Thumb mode");
13683 return;
13686 live_regs_mask = thumb_compute_save_reg_mask ();
13687 /* Load the pic register before setting the frame pointer,
13688 so we can use r7 as a temporary work register. */
13689 if (flag_pic && arm_pic_register != INVALID_REGNUM)
13690 arm_load_pic_register (live_regs_mask);
13692 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
13693 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13694 stack_pointer_rtx);
13696 offsets = arm_get_frame_offsets ();
13697 amount = offsets->outgoing_args - offsets->saved_regs;
13698 if (amount)
13700 if (amount < 512)
13702 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13703 GEN_INT (- amount)));
13704 RTX_FRAME_RELATED_P (insn) = 1;
13706 else
13708 rtx reg;
13710 /* The stack decrement is too big for an immediate value in a single
13711 insn. In theory we could issue multiple subtracts, but after
13712 three of them it becomes more space efficient to place the full
13713 value in the constant pool and load into a register. (Also the
13714 ARM debugger really likes to see only one stack decrement per
13715 function). So instead we look for a scratch register into which
13716 we can load the decrement, and then we subtract this from the
13717 stack pointer. Unfortunately on the thumb the only available
13718 scratch registers are the argument registers, and we cannot use
13719 these as they may hold arguments to the function. Instead we
13720 attempt to locate a call preserved register which is used by this
13721 function. If we can find one, then we know that it will have
13722 been pushed at the start of the prologue and so we can corrupt
13723 it now. */
13724 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13725 if (live_regs_mask & (1 << regno)
13726 && !(frame_pointer_needed
13727 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13728 break;
13730 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13732 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13734 /* Choose an arbitrary, non-argument low register. */
13735 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13737 /* Save it by copying it into a high, scratch register. */
13738 emit_insn (gen_movsi (spare, reg));
13739 /* Add a USE to stop propagate_one_insn() from barfing. */
13740 emit_insn (gen_prologue_use (spare));
13742 /* Decrement the stack. */
13743 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13744 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13745 stack_pointer_rtx, reg));
13746 RTX_FRAME_RELATED_P (insn) = 1;
13747 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13748 plus_constant (stack_pointer_rtx,
13749 -amount));
13750 RTX_FRAME_RELATED_P (dwarf) = 1;
13751 REG_NOTES (insn)
13752 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13753 REG_NOTES (insn));
13755 /* Restore the low register's original value. */
13756 emit_insn (gen_movsi (reg, spare));
13758 /* Emit a USE of the restored scratch register, so that flow
13759 analysis will not consider the restore redundant. The
13760 register won't be used again in this function and isn't
13761 restored by the epilogue. */
13762 emit_insn (gen_prologue_use (reg));
13764 else
13766 reg = gen_rtx_REG (SImode, regno);
13768 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13770 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13771 stack_pointer_rtx, reg));
13772 RTX_FRAME_RELATED_P (insn) = 1;
13773 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13774 plus_constant (stack_pointer_rtx,
13775 -amount));
13776 RTX_FRAME_RELATED_P (dwarf) = 1;
13777 REG_NOTES (insn)
13778 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13779 REG_NOTES (insn));
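/* Hedged illustration of the fallback above: with amount == 1024 and
   r4 live in this function, gen_movsi typically materializes the
   constant from the literal pool, so the emitted sequence resembles:

        ldr     r4, .LCn        @ .LCn: .word -1024
        add     sp, sp, r4

   together with a REG_FRAME_RELATED_EXPR note describing
   sp = sp - 1024 for the unwinder.  */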
13784 if (frame_pointer_needed)
13786 amount = offsets->outgoing_args - offsets->locals_base;
13788 if (amount < 1024)
13789 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13790 stack_pointer_rtx, GEN_INT (amount)));
13791 else
13793 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
13794 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13795 hard_frame_pointer_rtx,
13796 stack_pointer_rtx));
13797 dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
13798 plus_constant (stack_pointer_rtx, amount));
13799 RTX_FRAME_RELATED_P (dwarf) = 1;
13800 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13801 REG_NOTES (insn));
13804 RTX_FRAME_RELATED_P (insn) = 1;
13807 /* If we are profiling, make sure no instructions are scheduled before
13808 the call to mcount. Similarly if the user has requested no
13809 scheduling in the prologue. Similarly if we want non-call exceptions
13810 using the EABI unwinder, to prevent faulting instructions from being
13811 swapped with a stack adjustment. */
13812 if (current_function_profile || !TARGET_SCHED_PROLOG
13813 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
13814 emit_insn (gen_blockage ());
13816 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13817 if (live_regs_mask & 0xff)
13818 cfun->machine->lr_save_eliminated = 0;
13820 /* If the link register is being kept alive, with the return address in it,
13821 then make sure that it does not get reused by the ce2 pass. */
13822 if (cfun->machine->lr_save_eliminated)
13823 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13827 void
13828 thumb_expand_epilogue (void)
13830 HOST_WIDE_INT amount;
13831 arm_stack_offsets *offsets;
13832 int regno;
13834 /* Naked functions don't have epilogues. */
13835 if (IS_NAKED (arm_current_func_type ()))
13836 return;
13838 offsets = arm_get_frame_offsets ();
13839 amount = offsets->outgoing_args - offsets->saved_regs;
13841 if (frame_pointer_needed)
13843 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13844 amount = offsets->locals_base - offsets->saved_regs;
13847 if (amount)
13849 if (amount < 512)
13850 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13851 GEN_INT (amount)));
13852 else
13854 /* r3 is always free in the epilogue. */
13855 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13857 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13858 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13862 /* Emit a USE (stack_pointer_rtx), so that
13863 the stack adjustment will not be deleted. */
13864 emit_insn (gen_prologue_use (stack_pointer_rtx));
13866 if (current_function_profile || !TARGET_SCHED_PROLOG)
13867 emit_insn (gen_blockage ());
13869 /* Emit a clobber for each insn that will be restored in the epilogue,
13870 so that flow2 will get register lifetimes correct. */
13871 for (regno = 0; regno < 13; regno++)
13872 if (regs_ever_live[regno] && !call_used_regs[regno])
13873 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13875 if (! regs_ever_live[LR_REGNUM])
13876 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13879 static void
13880 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13882 unsigned long live_regs_mask = 0;
13883 unsigned long l_mask;
13884 unsigned high_regs_pushed = 0;
13885 int cfa_offset = 0;
13886 int regno;
13888 if (IS_NAKED (arm_current_func_type ()))
13889 return;
13891 if (is_called_in_ARM_mode (current_function_decl))
13893 const char * name;
13895 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13896 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13897 == SYMBOL_REF);
13898 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13900 /* Generate code sequence to switch us into Thumb mode. */
13901 /* The .code 32 directive has already been emitted by
13902 ASM_DECLARE_FUNCTION_NAME. */
13903 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13904 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13906 /* Generate a label, so that the debugger will notice the
13907 change in instruction sets. This label is also used by
13908 the assembler to bypass the ARM code when this function
13909 is called from a Thumb encoded function elsewhere in the
13910 same file. Hence the definition of STUB_NAME here must
13911 agree with the definition in gas/config/tc-arm.c. */
13913 #define STUB_NAME ".real_start_of"
13915 fprintf (f, "\t.code\t16\n");
13916 #ifdef ARM_PE
13917 if (arm_dllexport_name_p (name))
13918 name = arm_strip_name_encoding (name);
13919 #endif
13920 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13921 fprintf (f, "\t.thumb_func\n");
13922 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13925 if (current_function_pretend_args_size)
13927 /* Output unwind directive for the stack adjustment. */
13928 if (ARM_EABI_UNWIND_TABLES)
13929 fprintf (f, "\t.pad #%d\n",
13930 current_function_pretend_args_size);
13932 if (cfun->machine->uses_anonymous_args)
13934 int num_pushes;
13936 fprintf (f, "\tpush\t{");
13938 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13940 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13941 regno <= LAST_ARG_REGNUM;
13942 regno++)
13943 asm_fprintf (f, "%r%s", regno,
13944 regno == LAST_ARG_REGNUM ? "" : ", ");
13946 fprintf (f, "}\n");
13948 else
13949 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13950 SP_REGNUM, SP_REGNUM,
13951 current_function_pretend_args_size);
13953 /* We don't need to record the stores for unwinding (would it
13954 help the debugger any if we did?), but record the change in
13955 the stack pointer. */
13956 if (dwarf2out_do_frame ())
13958 char *l = dwarf2out_cfi_label ();
13960 cfa_offset = cfa_offset + current_function_pretend_args_size;
13961 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13965 /* Get the registers we are going to push. */
13966 live_regs_mask = thumb_compute_save_reg_mask ();
13967 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13968 l_mask = live_regs_mask & 0x40ff;
13969 /* Then count how many other high registers will need to be pushed. */
13970 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13972 if (TARGET_BACKTRACE)
13974 unsigned offset;
13975 unsigned work_register;
13977 /* We have been asked to create a stack backtrace structure.
13978 The code looks like this:
13980 0 .align 2
13981 0 func:
13982 0 sub SP, #16 Reserve space for 4 registers.
13983 2 push {R7} Push low registers.
13984 4 add R7, SP, #20 Get the stack pointer before the push.
13985 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13986 8 mov R7, PC Get hold of the start of this code plus 12.
13987 10 str R7, [SP, #16] Store it.
13988 12 mov R7, FP Get hold of the current frame pointer.
13989 14 str R7, [SP, #4] Store it.
13990 16 mov R7, LR Get hold of the current return address.
13991 18 str R7, [SP, #12] Store it.
13992 20 add R7, SP, #16 Point at the start of the backtrace structure.
13993 22 mov FP, R7 Put this value into the frame pointer. */
13995 work_register = thumb_find_work_register (live_regs_mask);
13997 if (ARM_EABI_UNWIND_TABLES)
13998 asm_fprintf (f, "\t.pad #16\n");
14000 asm_fprintf
14001 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
14002 SP_REGNUM, SP_REGNUM);
14004 if (dwarf2out_do_frame ())
14006 char *l = dwarf2out_cfi_label ();
14008 cfa_offset = cfa_offset + 16;
14009 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
14012 if (l_mask)
14014 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14015 offset = bit_count (l_mask) * UNITS_PER_WORD;
14017 else
14018 offset = 0;
14020 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14021 offset + 16 + current_function_pretend_args_size);
14023 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14024 offset + 4);
14026 /* Make sure that the instruction fetching the PC is in the right place
14027 to calculate "start of backtrace creation code + 12". */
14028 if (l_mask)
14030 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14031 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14032 offset + 12);
14033 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14034 ARM_HARD_FRAME_POINTER_REGNUM);
14035 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14036 offset);
14038 else
14040 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14041 ARM_HARD_FRAME_POINTER_REGNUM);
14042 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14043 offset);
14044 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14045 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14046 offset + 12);
14049 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
14050 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14051 offset + 8);
14052 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14053 offset + 12);
14054 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
14055 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
14057 /* Optimization: If we are not pushing any low registers but we are going
14058 to push some high registers then delay our first push. This will just
14059 be a push of LR and we can combine it with the push of the first high
14060 register. */
14061 else if ((l_mask & 0xff) != 0
14062 || (high_regs_pushed == 0 && l_mask))
14063 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14065 if (high_regs_pushed)
14067 unsigned pushable_regs;
14068 unsigned next_hi_reg;
14070 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
14071 if (live_regs_mask & (1 << next_hi_reg))
14072 break;
14074 pushable_regs = l_mask & 0xff;
14076 if (pushable_regs == 0)
14077 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
14079 while (high_regs_pushed > 0)
14081 unsigned long real_regs_mask = 0;
14083 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
14085 if (pushable_regs & (1 << regno))
14087 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
14089 high_regs_pushed --;
14090 real_regs_mask |= (1 << next_hi_reg);
14092 if (high_regs_pushed)
14094 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
14095 next_hi_reg --)
14096 if (live_regs_mask & (1 << next_hi_reg))
14097 break;
14099 else
14101 pushable_regs &= ~((1 << regno) - 1);
14102 break;
14107 /* If we had to find a work register and we have not yet
14108 saved the LR then add it to the list of regs to push. */
14109 if (l_mask == (1 << LR_REGNUM))
14111 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
14112 1, &cfa_offset,
14113 real_regs_mask | (1 << LR_REGNUM));
14114 l_mask = 0;
14116 else
14117 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
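/* Hedged example of the high-register shuffle above: to save r8 when
   only r7 is available as a shuttle register, the prologue emits

        mov     r7, r8
        push    {r7}

   and real_regs_mask records the push as a save of r8.  */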
14122 /* Handle the case of a double word load into a low register from
14123 a computed memory address. The computed address may involve a
14124 register which is overwritten by the load. */
14125 const char *
14126 thumb_load_double_from_address (rtx *operands)
14128 rtx addr;
14129 rtx base;
14130 rtx offset;
14131 rtx arg1;
14132 rtx arg2;
14134 gcc_assert (GET_CODE (operands[0]) == REG);
14135 gcc_assert (GET_CODE (operands[1]) == MEM);
14137 /* Get the memory address. */
14138 addr = XEXP (operands[1], 0);
14140 /* Work out how the memory address is computed. */
14141 switch (GET_CODE (addr))
14143 case REG:
14144 operands[2] = adjust_address (operands[1], SImode, 4);
14146 if (REGNO (operands[0]) == REGNO (addr))
14148 output_asm_insn ("ldr\t%H0, %2", operands);
14149 output_asm_insn ("ldr\t%0, %1", operands);
14151 else
14153 output_asm_insn ("ldr\t%0, %1", operands);
14154 output_asm_insn ("ldr\t%H0, %2", operands);
14156 break;
14158 case CONST:
14159 /* Compute <address> + 4 for the high order load. */
14160 operands[2] = adjust_address (operands[1], SImode, 4);
14162 output_asm_insn ("ldr\t%0, %1", operands);
14163 output_asm_insn ("ldr\t%H0, %2", operands);
14164 break;
14166 case PLUS:
14167 arg1 = XEXP (addr, 0);
14168 arg2 = XEXP (addr, 1);
14170 if (CONSTANT_P (arg1))
14171 base = arg2, offset = arg1;
14172 else
14173 base = arg1, offset = arg2;
14175 gcc_assert (GET_CODE (base) == REG);
14177 /* Catch the case of <address> = <reg> + <reg> */
14178 if (GET_CODE (offset) == REG)
14180 int reg_offset = REGNO (offset);
14181 int reg_base = REGNO (base);
14182 int reg_dest = REGNO (operands[0]);
14184 /* Add the base and offset registers together into the
14185 higher destination register. */
14186 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
14187 reg_dest + 1, reg_base, reg_offset);
14189 /* Load the lower destination register from the address in
14190 the higher destination register. */
14191 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
14192 reg_dest, reg_dest + 1);
14194 /* Load the higher destination register from its own address
14195 plus 4. */
14196 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
14197 reg_dest + 1, reg_dest + 1);
14199 else
14201 /* Compute <address> + 4 for the high order load. */
14202 operands[2] = adjust_address (operands[1], SImode, 4);
14204 /* If the computed address is held in the low order register
14205 then load the high order register first, otherwise always
14206 load the low order register first. */
14207 if (REGNO (operands[0]) == REGNO (base))
14209 output_asm_insn ("ldr\t%H0, %2", operands);
14210 output_asm_insn ("ldr\t%0, %1", operands);
14212 else
14214 output_asm_insn ("ldr\t%0, %1", operands);
14215 output_asm_insn ("ldr\t%H0, %2", operands);
14218 break;
14220 case LABEL_REF:
14221 /* With no registers to worry about we can just load the value
14222 directly. */
14223 operands[2] = adjust_address (operands[1], SImode, 4);
14225 output_asm_insn ("ldr\t%H0, %2", operands);
14226 output_asm_insn ("ldr\t%0, %1", operands);
14227 break;
14229 default:
14230 gcc_unreachable ();
14233 return "";
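/* Hedged sketch of the register-plus-register case above: loading a
   doubleword at r1 + r2 into r0/r1 (so reg_dest == 0) produces

        add     r1, r1, r2      @ high destination = base + offset
        ldr     r0, [r1, #0]    @ low word; address still live in r1
        ldr     r1, [r1, #4]    @ high word clobbers the address last

   which guarantees the computed address survives until both words
   have been fetched.  */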
14236 const char *
14237 thumb_output_move_mem_multiple (int n, rtx *operands)
14239 rtx tmp;
14241 switch (n)
14243 case 2:
14244 if (REGNO (operands[4]) > REGNO (operands[5]))
14246 tmp = operands[4];
14247 operands[4] = operands[5];
14248 operands[5] = tmp;
14250 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
14251 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
14252 break;
14254 case 3:
14255 if (REGNO (operands[4]) > REGNO (operands[5]))
14257 tmp = operands[4];
14258 operands[4] = operands[5];
14259 operands[5] = tmp;
14261 if (REGNO (operands[5]) > REGNO (operands[6]))
14263 tmp = operands[5];
14264 operands[5] = operands[6];
14265 operands[6] = tmp;
14267 if (REGNO (operands[4]) > REGNO (operands[5]))
14269 tmp = operands[4];
14270 operands[4] = operands[5];
14271 operands[5] = tmp;
14274 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
14275 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
14276 break;
14278 default:
14279 gcc_unreachable ();
14282 return "";
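/* Worked example: for n == 3 with scratch operands {r5, r3, r4}, the
   conditional swaps above sort them into {r3, r4, r5}, so we emit

        ldmia   r1!, {r3, r4, r5}
        stmia   r0!, {r3, r4, r5}

   keeping the register lists in the ascending order that ldmia and
   stmia require.  */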
14285 /* Output a call-via instruction for thumb state. */
14286 const char *
14287 thumb_call_via_reg (rtx reg)
14289 int regno = REGNO (reg);
14290 rtx *labelp;
14292 gcc_assert (regno < LR_REGNUM);
14294 /* If we are in the normal text section we can use a single instance
14295 per compilation unit. If we are doing function sections, then we need
14296 an entry per section, since we can't rely on reachability. */
14297 if (in_section == text_section)
14299 thumb_call_reg_needed = 1;
14301 if (thumb_call_via_label[regno] == NULL)
14302 thumb_call_via_label[regno] = gen_label_rtx ();
14303 labelp = thumb_call_via_label + regno;
14305 else
14307 if (cfun->machine->call_via[regno] == NULL)
14308 cfun->machine->call_via[regno] = gen_label_rtx ();
14309 labelp = cfun->machine->call_via + regno;
14312 output_asm_insn ("bl\t%a0", labelp);
14313 return "";
14316 /* Routines for generating rtl. */
14317 void
14318 thumb_expand_movmemqi (rtx *operands)
14320 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
14321 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
14322 HOST_WIDE_INT len = INTVAL (operands[2]);
14323 HOST_WIDE_INT offset = 0;
14325 while (len >= 12)
14327 emit_insn (gen_movmem12b (out, in, out, in));
14328 len -= 12;
14331 if (len >= 8)
14333 emit_insn (gen_movmem8b (out, in, out, in));
14334 len -= 8;
14337 if (len >= 4)
14339 rtx reg = gen_reg_rtx (SImode);
14340 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
14341 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
14342 len -= 4;
14343 offset += 4;
14346 if (len >= 2)
14348 rtx reg = gen_reg_rtx (HImode);
14349 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
14350 plus_constant (in, offset))));
14351 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
14352 reg));
14353 len -= 2;
14354 offset += 2;
14357 if (len)
14359 rtx reg = gen_reg_rtx (QImode);
14360 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
14361 plus_constant (in, offset))));
14362 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
14363 reg));
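/* Worked decomposition, for illustration: a 15-byte copy emits one
   12-byte block move (gen_movmem12b post-increments both pointers),
   then a halfword load/store pair at offset 0 and a byte pair at
   offset 2: 15 = 12 + 2 + 1.  */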
14367 void
14368 thumb_reload_out_hi (rtx *operands)
14370 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
14373 /* Handle reading a half-word from memory during reload. */
14374 void
14375 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
14377 gcc_unreachable ();
14380 /* Return the length of a function name prefix
14381 that starts with the character 'c'. */
14382 static int
14383 arm_get_strip_length (int c)
14385 switch (c)
14387 ARM_NAME_ENCODING_LENGTHS
14388 default: return 0;
14392 /* Return a pointer to a function's name with any
14393 and all prefix encodings stripped from it. */
14394 const char *
14395 arm_strip_name_encoding (const char *name)
14397 int skip;
14399 while ((skip = arm_get_strip_length (* name)))
14400 name += skip;
14402 return name;
14405 /* If there is a '*' anywhere in the name's prefix, then
14406 emit the stripped name verbatim, otherwise prepend an
14407 underscore if leading underscores are being used. */
14408 void
14409 arm_asm_output_labelref (FILE *stream, const char *name)
14411 int skip;
14412 int verbatim = 0;
14414 while ((skip = arm_get_strip_length (* name)))
14416 verbatim |= (*name == '*');
14417 name += skip;
14420 if (verbatim)
14421 fputs (name, stream);
14422 else
14423 asm_fprintf (stream, "%U%s", name);
14426 static void
14427 arm_file_start (void)
14429 int val;
14431 if (TARGET_BPABI)
14433 const char *fpu_name;
14434 if (arm_select[0].string)
14435 asm_fprintf (asm_out_file, "\t.cpu %s\n", arm_select[0].string);
14436 else if (arm_select[1].string)
14437 asm_fprintf (asm_out_file, "\t.arch %s\n", arm_select[1].string);
14438 else
14439 asm_fprintf (asm_out_file, "\t.cpu %s\n",
14440 all_cores[arm_default_cpu].name);
14442 if (TARGET_SOFT_FLOAT)
14444 if (TARGET_VFP)
14445 fpu_name = "softvfp";
14446 else
14447 fpu_name = "softfpa";
14449 else
14451 switch (arm_fpu_arch)
14453 case FPUTYPE_FPA:
14454 fpu_name = "fpa";
14455 break;
14456 case FPUTYPE_FPA_EMU2:
14457 fpu_name = "fpe2";
14458 break;
14459 case FPUTYPE_FPA_EMU3:
14460 fpu_name = "fpe3";
14461 break;
14462 case FPUTYPE_MAVERICK:
14463 fpu_name = "maverick";
14464 break;
14465 case FPUTYPE_VFP:
14466 if (TARGET_HARD_FLOAT)
14467 asm_fprintf (asm_out_file, "\t.eabi_attribute 27, 3\n");
14468 if (TARGET_HARD_FLOAT_ABI)
14469 asm_fprintf (asm_out_file, "\t.eabi_attribute 28, 1\n");
14470 fpu_name = "vfp";
14471 break;
14472 default:
14473 abort ();
14476 asm_fprintf (asm_out_file, "\t.fpu %s\n", fpu_name);
14478 /* Some of these attributes only apply when the corresponding features
14479 are used. However we don't have any easy way of figuring this out.
14480 Conservatively record the setting that would have been used. */
14482 /* Tag_ABI_PCS_wchar_t. */
14483 asm_fprintf (asm_out_file, "\t.eabi_attribute 18, %d\n",
14484 (int)WCHAR_TYPE_SIZE / BITS_PER_UNIT);
14486 /* Tag_ABI_FP_rounding. */
14487 if (flag_rounding_math)
14488 asm_fprintf (asm_out_file, "\t.eabi_attribute 19, 1\n");
14489 if (!flag_unsafe_math_optimizations)
14491 /* Tag_ABI_FP_denormal. */
14492 asm_fprintf (asm_out_file, "\t.eabi_attribute 20, 1\n");
14493 /* Tag_ABI_FP_exceptions. */
14494 asm_fprintf (asm_out_file, "\t.eabi_attribute 21, 1\n");
14496 /* Tag_ABI_FP_user_exceptions. */
14497 if (flag_signaling_nans)
14498 asm_fprintf (asm_out_file, "\t.eabi_attribute 22, 1\n");
14499 /* Tag_ABI_FP_number_model. */
14500 asm_fprintf (asm_out_file, "\t.eabi_attribute 23, %d\n",
14501 flag_finite_math_only ? 1 : 3);
14503 /* Tag_ABI_align8_needed. */
14504 asm_fprintf (asm_out_file, "\t.eabi_attribute 24, 1\n");
14505 /* Tag_ABI_align8_preserved. */
14506 asm_fprintf (asm_out_file, "\t.eabi_attribute 25, 1\n");
14507 /* Tag_ABI_enum_size. */
14508 asm_fprintf (asm_out_file, "\t.eabi_attribute 26, %d\n",
14509 flag_short_enums ? 1 : 2);
14511 /* Tag_ABI_optimization_goals. */
14512 if (optimize_size)
14513 val = 4;
14514 else if (optimize >= 2)
14515 val = 2;
14516 else if (optimize)
14517 val = 1;
14518 else
14519 val = 6;
14520 asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
14522 default_file_start ();
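/* Hedged example of the output produced above for a hypothetical
   EABI target compiled with -O2 and -fshort-enums, using soft-float
   VFP and 4-byte wchar_t:

        .cpu arm926ej-s
        .fpu softvfp
        .eabi_attribute 18, 4   @ Tag_ABI_PCS_wchar_t
        .eabi_attribute 20, 1   @ Tag_ABI_FP_denormal
        .eabi_attribute 21, 1   @ Tag_ABI_FP_exceptions
        .eabi_attribute 23, 3   @ Tag_ABI_FP_number_model
        .eabi_attribute 24, 1   @ Tag_ABI_align8_needed
        .eabi_attribute 25, 1   @ Tag_ABI_align8_preserved
        .eabi_attribute 26, 1   @ Tag_ABI_enum_size
        .eabi_attribute 30, 2   @ Tag_ABI_optimization_goals  */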
14525 static void
14526 arm_file_end (void)
14528 int regno;
14530 if (NEED_INDICATE_EXEC_STACK)
14531 /* Add .note.GNU-stack. */
14532 file_end_indicate_exec_stack ();
14534 if (! thumb_call_reg_needed)
14535 return;
14537 switch_to_section (text_section);
14538 asm_fprintf (asm_out_file, "\t.code 16\n");
14539 ASM_OUTPUT_ALIGN (asm_out_file, 1);
14541 for (regno = 0; regno < LR_REGNUM; regno++)
14543 rtx label = thumb_call_via_label[regno];
14545 if (label != 0)
14547 targetm.asm_out.internal_label (asm_out_file, "L",
14548 CODE_LABEL_NUMBER (label));
14549 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
14554 rtx aof_pic_label;
14556 #ifdef AOF_ASSEMBLER
14557 /* Special functions only needed when producing AOF syntax assembler. */
14559 struct pic_chain
14561 struct pic_chain * next;
14562 const char * symname;
14565 static struct pic_chain * aof_pic_chain = NULL;
14568 aof_pic_entry (rtx x)
14570 struct pic_chain ** chainp;
14571 int offset;
14573 if (aof_pic_label == NULL_RTX)
14575 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14578 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14579 offset += 4, chainp = &(*chainp)->next)
14580 if ((*chainp)->symname == XSTR (x, 0))
14581 return plus_constant (aof_pic_label, offset);
14583 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14584 (*chainp)->next = NULL;
14585 (*chainp)->symname = XSTR (x, 0);
14586 return plus_constant (aof_pic_label, offset);
14589 void
14590 aof_dump_pic_table (FILE *f)
14592 struct pic_chain * chain;
14594 if (aof_pic_chain == NULL)
14595 return;
14597 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14598 PIC_OFFSET_TABLE_REGNUM,
14599 PIC_OFFSET_TABLE_REGNUM);
14600 fputs ("|x$adcons|\n", f);
14602 for (chain = aof_pic_chain; chain; chain = chain->next)
14604 fputs ("\tDCD\t", f);
14605 assemble_name (f, chain->symname);
14606 fputs ("\n", f);
14610 int arm_text_section_count = 1;
14612 /* A get_unnamed_section callback for switching to the text section. */
14614 static void
14615 aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14617 fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
14618 arm_text_section_count++);
14619 if (flag_pic)
14620 fprintf (asm_out_file, ", PIC, REENTRANT");
14621 fprintf (asm_out_file, "\n");
14624 static int arm_data_section_count = 1;
14626 /* A get_unnamed_section callback for switching to the data section. */
14628 static void
14629 aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14631 fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
14632 arm_data_section_count++);
14635 /* Implement TARGET_ASM_INIT_SECTIONS.
14637 AOF Assembler syntax is a nightmare when it comes to areas, since once
14638 we change from one area to another, we can't go back again. Instead,
14639 we must create a new area with the same attributes and add the new output
14640 to that. Unfortunately, there is nothing we can do here to guarantee that
14641 two areas with the same attributes will be linked adjacently in the
14642 resulting executable, so we have to be careful not to do pc-relative
14643 addressing across such boundaries. */
14645 static void
14646 aof_asm_init_sections (void)
14648 text_section = get_unnamed_section (SECTION_CODE,
14649 aof_output_text_section_asm_op, NULL);
14650 data_section = get_unnamed_section (SECTION_WRITE,
14651 aof_output_data_section_asm_op, NULL);
14652 readonly_data_section = text_section;
14655 void
14656 zero_init_section (void)
14658 static int zero_init_count = 1;
14660 fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
14661 in_section = NULL;
14664 /* The AOF assembler is religiously strict about declarations of
14665 imported and exported symbols, so that it is impossible to declare
14666 a function as imported near the beginning of the file, and then to
14667 export it later on. It is, however, possible to delay the decision
14668 until all the functions in the file have been compiled. To get
14669 around this, we maintain a list of the imports and exports, and
14670 delete from it any that are subsequently defined. At the end of
14671 compilation we spit the remainder of the list out before the END
14672 directive. */
14674 struct import
14676 struct import * next;
14677 const char * name;
14680 static struct import * imports_list = NULL;
14682 void
14683 aof_add_import (const char *name)
14685 struct import * new;
14687 for (new = imports_list; new; new = new->next)
14688 if (new->name == name)
14689 return;
14691 new = (struct import *) xmalloc (sizeof (struct import));
14692 new->next = imports_list;
14693 imports_list = new;
14694 new->name = name;
14697 void
14698 aof_delete_import (const char *name)
14700 struct import ** old;
14702 for (old = &imports_list; *old; old = & (*old)->next)
14704 if ((*old)->name == name)
14706 *old = (*old)->next;
14707 return;
14712 int arm_main_function = 0;
14714 static void
14715 aof_dump_imports (FILE *f)
14717 /* The AOF assembler needs this to cause the startup code to be extracted
14718 from the library. Bringing in __main causes the whole thing to work
14719 automagically. */
14720 if (arm_main_function)
14722 switch_to_section (text_section);
14723 fputs ("\tIMPORT __main\n", f);
14724 fputs ("\tDCD __main\n", f);
14727 /* Now dump the remaining imports. */
14728 while (imports_list)
14730 fprintf (f, "\tIMPORT\t");
14731 assemble_name (f, imports_list->name);
14732 fputc ('\n', f);
14733 imports_list = imports_list->next;
14737 static void
14738 aof_globalize_label (FILE *stream, const char *name)
14740 default_globalize_label (stream, name);
14741 if (! strcmp (name, "main"))
14742 arm_main_function = 1;
14745 static void
14746 aof_file_start (void)
14748 fputs ("__r0\tRN\t0\n", asm_out_file);
14749 fputs ("__a1\tRN\t0\n", asm_out_file);
14750 fputs ("__a2\tRN\t1\n", asm_out_file);
14751 fputs ("__a3\tRN\t2\n", asm_out_file);
14752 fputs ("__a4\tRN\t3\n", asm_out_file);
14753 fputs ("__v1\tRN\t4\n", asm_out_file);
14754 fputs ("__v2\tRN\t5\n", asm_out_file);
14755 fputs ("__v3\tRN\t6\n", asm_out_file);
14756 fputs ("__v4\tRN\t7\n", asm_out_file);
14757 fputs ("__v5\tRN\t8\n", asm_out_file);
14758 fputs ("__v6\tRN\t9\n", asm_out_file);
14759 fputs ("__sl\tRN\t10\n", asm_out_file);
14760 fputs ("__fp\tRN\t11\n", asm_out_file);
14761 fputs ("__ip\tRN\t12\n", asm_out_file);
14762 fputs ("__sp\tRN\t13\n", asm_out_file);
14763 fputs ("__lr\tRN\t14\n", asm_out_file);
14764 fputs ("__pc\tRN\t15\n", asm_out_file);
14765 fputs ("__f0\tFN\t0\n", asm_out_file);
14766 fputs ("__f1\tFN\t1\n", asm_out_file);
14767 fputs ("__f2\tFN\t2\n", asm_out_file);
14768 fputs ("__f3\tFN\t3\n", asm_out_file);
14769 fputs ("__f4\tFN\t4\n", asm_out_file);
14770 fputs ("__f5\tFN\t5\n", asm_out_file);
14771 fputs ("__f6\tFN\t6\n", asm_out_file);
14772 fputs ("__f7\tFN\t7\n", asm_out_file);
14773 switch_to_section (text_section);
14776 static void
14777 aof_file_end (void)
14779 if (flag_pic)
14780 aof_dump_pic_table (asm_out_file);
14781 arm_file_end ();
14782 aof_dump_imports (asm_out_file);
14783 fputs ("\tEND\n", asm_out_file);
14785 #endif /* AOF_ASSEMBLER */
14787 #ifndef ARM_PE
14788 /* Symbols in the text segment can be accessed without indirecting via the
14789 constant pool; it may take an extra binary operation, but this is still
14790 faster than indirecting via memory. Don't do this when not optimizing,
14791 since we won't be calculating all of the offsets necessary to do this
14792 simplification. */
14794 static void
14795 arm_encode_section_info (tree decl, rtx rtl, int first)
14797 /* This doesn't work with AOF syntax, since the string table may be in
14798 a different AREA. */
14799 #ifndef AOF_ASSEMBLER
14800 if (optimize > 0 && TREE_CONSTANT (decl))
14801 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14802 #endif
14804 /* If we are referencing a function that is weak then encode a long call
14805 flag in the function name, otherwise if the function is static or
14806 known to be defined in this file then encode a short call flag. */
14807 if (first && DECL_P (decl))
14809 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14810 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14811 else if (! TREE_PUBLIC (decl))
14812 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14815 default_encode_section_info (decl, rtl, first);
14817 #endif /* !ARM_PE */
14819 static void
14820 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14822 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14823 && !strcmp (prefix, "L"))
14825 arm_ccfsm_state = 0;
14826 arm_target_insn = NULL;
14828 default_internal_label (stream, prefix, labelno);
14831 /* Output code to add DELTA to the first argument, and then jump
14832 to FUNCTION. Used for C++ multiple inheritance. */
14833 static void
14834 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14835 HOST_WIDE_INT delta,
14836 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14837 tree function)
14839 static int thunk_label = 0;
14840 char label[256];
14841 char labelpc[256];
14842 int mi_delta = delta;
14843 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14844 int shift = 0;
14845 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14846 ? 1 : 0);
14847 if (mi_delta < 0)
14848 mi_delta = - mi_delta;
14849 if (TARGET_THUMB)
14851 int labelno = thunk_label++;
14852 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14853 fputs ("\tldr\tr12, ", file);
14854 assemble_name (file, label);
14855 fputc ('\n', file);
14856 if (flag_pic)
14858 /* If we are generating PIC, the ldr instruction below loads
14859 "(target - 7) - .LTHUNKPCn" into r12. The pc reads as
14860 the address of the add + 8, so we have:
14862 r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
14863 = target + 1.
14865 Note that we have "+ 1" because some versions of GNU ld
14866 don't set the low bit of the result for R_ARM_REL32
14867 relocations against thumb function symbols. */
14868 ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
14869 assemble_name (file, labelpc);
14870 fputs (":\n", file);
14871 fputs ("\tadd\tr12, pc, r12\n", file);
14874 while (mi_delta != 0)
14876 if ((mi_delta & (3 << shift)) == 0)
14877 shift += 2;
14878 else
14880 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14881 mi_op, this_regno, this_regno,
14882 mi_delta & (0xff << shift));
14883 mi_delta &= ~(0xff << shift);
14884 shift += 8;
14887 if (TARGET_THUMB)
14889 fprintf (file, "\tbx\tr12\n");
14890 ASM_OUTPUT_ALIGN (file, 2);
14891 assemble_name (file, label);
14892 fputs (":\n", file);
14893 if (flag_pic)
14895 /* Output ".word .LTHUNKn-7-.LTHUNKPCn". */
14896 rtx tem = XEXP (DECL_RTL (function), 0);
14897 tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (-7));
14898 tem = gen_rtx_MINUS (GET_MODE (tem),
14899 tem,
14900 gen_rtx_SYMBOL_REF (Pmode,
14901 ggc_strdup (labelpc)));
14902 assemble_integer (tem, 4, BITS_PER_WORD, 1);
14904 else
14905 /* Output ".word .LTHUNKn". */
14906 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14908 else
14910 fputs ("\tb\t", file);
14911 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14912 if (NEED_PLT_RELOC)
14913 fputs ("(PLT)", file);
14914 fputc ('\n', file);
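/* Worked example of the addition loop above, assuming the this
   pointer is in r0: a delta of 0x10004 is peeled into two shifted
   8-bit chunks and emitted as

        add     r0, r0, #4
        add     r0, r0, #65536

   since each iteration strips one 0xff << shift slice of the
   remaining delta.  */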
14919 arm_emit_vector_const (FILE *file, rtx x)
14921 int i;
14922 const char * pattern;
14924 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14926 switch (GET_MODE (x))
14928 case V2SImode: pattern = "%08x"; break;
14929 case V4HImode: pattern = "%04x"; break;
14930 case V8QImode: pattern = "%02x"; break;
14931 default: gcc_unreachable ();
14934 fprintf (file, "0x");
14935 for (i = CONST_VECTOR_NUNITS (x); i--;)
14937 rtx element;
14939 element = CONST_VECTOR_ELT (x, i);
14940 fprintf (file, pattern, INTVAL (element));
14943 return 1;
14946 const char *
14947 arm_output_load_gr (rtx *operands)
14949 rtx reg;
14950 rtx offset;
14951 rtx wcgr;
14952 rtx sum;
14954 if (GET_CODE (operands [1]) != MEM
14955 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14956 || GET_CODE (reg = XEXP (sum, 0)) != REG
14957 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14958 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14959 return "wldrw%?\t%0, %1";
14961 /* Fix up an out-of-range load of a GR register. */
14962 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14963 wcgr = operands[0];
14964 operands[0] = reg;
14965 output_asm_insn ("ldr%?\t%0, %1", operands);
14967 operands[0] = wcgr;
14968 operands[1] = reg;
14969 output_asm_insn ("tmcr%?\t%0, %1", operands);
14970 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14972 return "";
14975 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14977 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14978 named arg and all anonymous args onto the stack.
14979 XXX I know the prologue shouldn't be pushing registers, but it is faster
14980 that way. */
14982 static void
14983 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14984 enum machine_mode mode ATTRIBUTE_UNUSED,
14985 tree type ATTRIBUTE_UNUSED,
14986 int *pretend_size,
14987 int second_time ATTRIBUTE_UNUSED)
14989 cfun->machine->uses_anonymous_args = 1;
14990 if (cum->nregs < NUM_ARG_REGS)
14991 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14994 /* Return nonzero if the CONSUMER instruction (a store) does not need
14995 PRODUCER's value to calculate the address. */
14998 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
15000 rtx value = PATTERN (producer);
15001 rtx addr = PATTERN (consumer);
15003 if (GET_CODE (value) == COND_EXEC)
15004 value = COND_EXEC_CODE (value);
15005 if (GET_CODE (value) == PARALLEL)
15006 value = XVECEXP (value, 0, 0);
15007 value = XEXP (value, 0);
15008 if (GET_CODE (addr) == COND_EXEC)
15009 addr = COND_EXEC_CODE (addr);
15010 if (GET_CODE (addr) == PARALLEL)
15011 addr = XVECEXP (addr, 0, 0);
15012 addr = XEXP (addr, 0);
15014 return !reg_overlap_mentioned_p (value, addr);
15017 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
15018 have an early register shift value or amount dependency on the
15019 result of PRODUCER. */
15022 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
15024 rtx value = PATTERN (producer);
15025 rtx op = PATTERN (consumer);
15026 rtx early_op;
15028 if (GET_CODE (value) == COND_EXEC)
15029 value = COND_EXEC_CODE (value);
15030 if (GET_CODE (value) == PARALLEL)
15031 value = XVECEXP (value, 0, 0);
15032 value = XEXP (value, 0);
15033 if (GET_CODE (op) == COND_EXEC)
15034 op = COND_EXEC_CODE (op);
15035 if (GET_CODE (op) == PARALLEL)
15036 op = XVECEXP (op, 0, 0);
15037 op = XEXP (op, 1);
15039 early_op = XEXP (op, 0);
15040 /* This is either an actual independent shift, or a shift applied to
15041 the first operand of another operation. We want the whole shift
15042 operation. */
15043 if (GET_CODE (early_op) == REG)
15044 early_op = op;
15046 return !reg_overlap_mentioned_p (value, early_op);
15049 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
15050 have an early register shift value dependency on the result of
15051 PRODUCER. */
15054 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
15056 rtx value = PATTERN (producer);
15057 rtx op = PATTERN (consumer);
15058 rtx early_op;
15060 if (GET_CODE (value) == COND_EXEC)
15061 value = COND_EXEC_CODE (value);
15062 if (GET_CODE (value) == PARALLEL)
15063 value = XVECEXP (value, 0, 0);
15064 value = XEXP (value, 0);
15065 if (GET_CODE (op) == COND_EXEC)
15066 op = COND_EXEC_CODE (op);
15067 if (GET_CODE (op) == PARALLEL)
15068 op = XVECEXP (op, 0, 0);
15069 op = XEXP (op, 1);
15071 early_op = XEXP (op, 0);
15073 /* This is either an actual independent shift, or a shift applied to
15074 the first operand of another operation. We want the value being
15075 shifted, in either case. */
15076 if (GET_CODE (early_op) != REG)
15077 early_op = XEXP (early_op, 0);
15079 return !reg_overlap_mentioned_p (value, early_op);
15082 /* Return nonzero if the CONSUMER (a mul or mac op) does not
15083 have an early register mult dependency on the result of
15084 PRODUCER. */
15087 arm_no_early_mul_dep (rtx producer, rtx consumer)
15089 rtx value = PATTERN (producer);
15090 rtx op = PATTERN (consumer);
15092 if (GET_CODE (value) == COND_EXEC)
15093 value = COND_EXEC_CODE (value);
15094 if (GET_CODE (value) == PARALLEL)
15095 value = XVECEXP (value, 0, 0);
15096 value = XEXP (value, 0);
15097 if (GET_CODE (op) == COND_EXEC)
15098 op = COND_EXEC_CODE (op);
15099 if (GET_CODE (op) == PARALLEL)
15100 op = XVECEXP (op, 0, 0);
15101 op = XEXP (op, 1);
15103 return (GET_CODE (op) == PLUS
15104 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
15108 /* We can't rely on the caller doing the proper promotion when
15109 using APCS or ATPCS. */
15111 static bool
15112 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
15114 return !TARGET_AAPCS_BASED;
15118 /* AAPCS based ABIs use short enums by default. */
15120 static bool
15121 arm_default_short_enums (void)
15123 return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
15127 /* AAPCS requires that anonymous bitfields affect structure alignment. */
15129 static bool
15130 arm_align_anon_bitfield (void)
15132 return TARGET_AAPCS_BASED;
15136 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
15138 static tree
15139 arm_cxx_guard_type (void)
15141 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
15145 /* The EABI says test the least significant bit of a guard variable. */
15147 static bool
15148 arm_cxx_guard_mask_bit (void)
15150 return TARGET_AAPCS_BASED;
15154 /* The EABI specifies that all array cookies are 8 bytes long. */
15156 static tree
15157 arm_get_cookie_size (tree type)
15159 tree size;
15161 if (!TARGET_AAPCS_BASED)
15162 return default_cxx_get_cookie_size (type);
15164 size = build_int_cst (sizetype, 8);
15165 return size;
15169 /* The EABI says that array cookies should also contain the element size. */
15171 static bool
15172 arm_cookie_has_size (void)
15174 return TARGET_AAPCS_BASED;
15178 /* The EABI says constructors and destructors should return a pointer to
15179 the object constructed/destroyed. */
15181 static bool
15182 arm_cxx_cdtor_returns_this (void)
15184 return TARGET_AAPCS_BASED;
15187 /* The EABI says that an inline function may never be the key
15188 method. */
15190 static bool
15191 arm_cxx_key_method_may_be_inline (void)
15193 return !TARGET_AAPCS_BASED;
15196 static void
15197 arm_cxx_determine_class_data_visibility (tree decl)
15199 if (!TARGET_AAPCS_BASED)
15200 return;
15202 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
15203 is exported. However, on systems without dynamic vague linkage,
15204 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
15205 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
15206 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
15207 else
15208 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
15209 DECL_VISIBILITY_SPECIFIED (decl) = 1;
15212 static bool
15213 arm_cxx_class_data_always_comdat (void)
15215 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
15216 vague linkage if the class has no key function. */
15217 return !TARGET_AAPCS_BASED;
15221 /* The EABI says __aeabi_atexit should be used to register static
15222 destructors. */
15224 static bool
15225 arm_cxx_use_aeabi_atexit (void)
15227 return TARGET_AAPCS_BASED;
15231 void
15232 arm_set_return_address (rtx source, rtx scratch)
15234 arm_stack_offsets *offsets;
15235 HOST_WIDE_INT delta;
15236 rtx addr;
15237 unsigned long saved_regs;
15239 saved_regs = arm_compute_save_reg_mask ();
15241 if ((saved_regs & (1 << LR_REGNUM)) == 0)
15242 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15243 else
15245 if (frame_pointer_needed)
15246 addr = plus_constant (hard_frame_pointer_rtx, -4);
15247 else
15249 /* LR will be the first saved register. */
15250 offsets = arm_get_frame_offsets ();
15251 delta = offsets->outgoing_args - (offsets->frame + 4);
15254 if (delta >= 4096)
15256 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
15257 GEN_INT (delta & ~4095)));
15258 addr = scratch;
15259 delta &= 4095;
15261 else
15262 addr = stack_pointer_rtx;
15264 addr = plus_constant (addr, delta);
15266 emit_move_insn (gen_frame_mem (Pmode, addr), source);
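/* Worked example, for illustration: if LR was saved below the frame
   and delta == 5000, the code above sets scratch = sp + 4096
   (5000 & ~4095) and stores SOURCE at [scratch, #904] (5000 & 4095),
   keeping the displacement within the immediate-offset range.  */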
15271 void
15272 thumb_set_return_address (rtx source, rtx scratch)
15274 arm_stack_offsets *offsets;
15275 HOST_WIDE_INT delta;
15276 int reg;
15277 rtx addr;
15278 unsigned long mask;
15280 emit_insn (gen_rtx_USE (VOIDmode, source));
15282 mask = thumb_compute_save_reg_mask ();
15283 if (mask & (1 << LR_REGNUM))
15285 offsets = arm_get_frame_offsets ();
15287 /* Find the saved regs. */
15288 if (frame_pointer_needed)
15290 delta = offsets->soft_frame - offsets->saved_args;
15291 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
15293 else
15295 delta = offsets->outgoing_args - offsets->saved_args;
15296 reg = SP_REGNUM;
15298 /* Allow for the stack frame. */
15299 if (TARGET_BACKTRACE)
15300 delta -= 16;
15301 /* The link register is always the first saved register. */
15302 delta -= 4;
15304 /* Construct the address. */
15305 addr = gen_rtx_REG (SImode, reg);
15306 if ((reg != SP_REGNUM && delta >= 128)
15307 || delta >= 1024)
15309 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
15310 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
15311 addr = scratch;
15313 else
15314 addr = plus_constant (addr, delta);
15316 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15318 else
15319 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15322 /* Implements target hook vector_mode_supported_p. */
15323 bool
15324 arm_vector_mode_supported_p (enum machine_mode mode)
15326 if ((mode == V2SImode)
15327 || (mode == V4HImode)
15328 || (mode == V8QImode))
15329 return true;
15331 return false;
15334 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
15335 ARM insns and therefore guarantee that the shift count is modulo 256.
15336 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
15337 guarantee no particular behavior for out-of-range counts. */
15339 static unsigned HOST_WIDE_INT
15340 arm_shift_truncation_mask (enum machine_mode mode)
15342 return mode == SImode ? 255 : 0;
15346 /* Map internal gcc register numbers to DWARF2 register numbers. */
15348 unsigned int
15349 arm_dbx_register_number (unsigned int regno)
15351 if (regno < 16)
15352 return regno;
15354 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
15355 compatibility. The EABI defines them as registers 96-103. */
15356 if (IS_FPA_REGNUM (regno))
15357 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
15359 if (IS_VFP_REGNUM (regno))
15360 return 64 + regno - FIRST_VFP_REGNUM;
15362 if (IS_IWMMXT_GR_REGNUM (regno))
15363 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
15365 if (IS_IWMMXT_REGNUM (regno))
15366 return 112 + regno - FIRST_IWMMXT_REGNUM;
15368 gcc_unreachable ();
15372 #ifdef TARGET_UNWIND_INFO
15373 /* Emit unwind directives for a store-multiple instruction. This should
15374 only ever be generated by the function prologue code, so we expect it
15375 to have a particular form. */
15377 static void
15378 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
15380 int i;
15381 HOST_WIDE_INT offset;
15382 HOST_WIDE_INT nregs;
15383 int reg_size;
15384 unsigned reg;
15385 unsigned lastreg;
15386 rtx e;
15388 /* First insn will adjust the stack pointer. */
15389 e = XVECEXP (p, 0, 0);
15390 if (GET_CODE (e) != SET
15391 || GET_CODE (XEXP (e, 0)) != REG
15392 || REGNO (XEXP (e, 0)) != SP_REGNUM
15393 || GET_CODE (XEXP (e, 1)) != PLUS)
15394 abort ();
15396 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
15397 nregs = XVECLEN (p, 0) - 1;
15399 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
15400 if (reg < 16)
15402 /* The function prologue may also push pc, but does not annotate it,
15403 since it is never restored. We turn this into a stack pointer adjustment. */
15404 if (nregs * 4 == offset - 4)
15406 fprintf (asm_out_file, "\t.pad #4\n");
15407 offset -= 4;
15409 reg_size = 4;
15411 else if (IS_VFP_REGNUM (reg))
15413 /* VFP register saves use an additional word. */
15414 offset -= 4;
15415 reg_size = 8;
15417 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
15419 /* FPA registers are done differently. */
15420 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
15421 return;
15423 else
15424 /* Unknown register type. */
15425 abort ();
15427 /* If the stack increment doesn't match the size of the saved registers,
15428 something has gone horribly wrong. */
15429 if (offset != nregs * reg_size)
15430 abort ();
15432 fprintf (asm_out_file, "\t.save {");
15434 offset = 0;
15435 lastreg = 0;
15436 /* The remaining insns will describe the stores. */
15437 for (i = 1; i <= nregs; i++)
15439 /* Expect (set (mem <addr>) (reg)).
15440 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
15441 e = XVECEXP (p, 0, i);
15442 if (GET_CODE (e) != SET
15443 || GET_CODE (XEXP (e, 0)) != MEM
15444 || GET_CODE (XEXP (e, 1)) != REG)
15445 abort ();
15447 reg = REGNO (XEXP (e, 1));
15448 if (reg < lastreg)
15449 abort ();
15451 if (i != 1)
15452 fprintf (asm_out_file, ", ");
15453 /* We can't use %r for vfp because we need to use the
15454 double precision register names. */
15455 if (IS_VFP_REGNUM (reg))
15456 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
15457 else
15458 asm_fprintf (asm_out_file, "%r", reg);
15460 #ifdef ENABLE_CHECKING
15461 /* Check that the addresses are consecutive. */
15462 e = XEXP (XEXP (e, 0), 0);
15463 if (GET_CODE (e) == PLUS)
15465 offset += reg_size;
15466 if (GET_CODE (XEXP (e, 0)) != REG
15467 || REGNO (XEXP (e, 0)) != SP_REGNUM
15468 || GET_CODE (XEXP (e, 1)) != CONST_INT
15469 || offset != INTVAL (XEXP (e, 1)))
15470 abort ();
15472 else if (i != 1
15473 || GET_CODE (e) != REG
15474 || REGNO (e) != SP_REGNUM)
15475 abort ();
15476 #endif
15478 fprintf (asm_out_file, "}\n");
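/* Hedged example: a prologue store-multiple corresponding to
   push {r4, r5, lr} is annotated by the code above as

        .save {r4, r5, lr}

   while a VFP save of d8/d9 comes out as ".save {d8, d9}", using the
   double-precision register names.  */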
15481 /* Emit unwind directives for a SET. */
15483 static void
15484 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
15486 rtx e0;
15487 rtx e1;
15489 e0 = XEXP (p, 0);
15490 e1 = XEXP (p, 1);
15491 switch (GET_CODE (e0))
15493 case MEM:
15494 /* Pushing a single register. */
15495 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
15496 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
15497 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
15498 abort ();
15500 asm_fprintf (asm_out_file, "\t.save ");
15501 if (IS_VFP_REGNUM (REGNO (e1)))
15502 asm_fprintf (asm_out_file, "{d%d}\n",
15503 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
15504 else
15505 asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
15506 break;
15508 case REG:
15509 if (REGNO (e0) == SP_REGNUM)
15511 /* A stack increment. */
15512 if (GET_CODE (e1) != PLUS
15513 || GET_CODE (XEXP (e1, 0)) != REG
15514 || REGNO (XEXP (e1, 0)) != SP_REGNUM
15515 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15516 abort ();
15518 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
15519 -INTVAL (XEXP (e1, 1)));
15521 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
15523 HOST_WIDE_INT offset;
15524 unsigned reg;
15526 if (GET_CODE (e1) == PLUS)
15528 if (GET_CODE (XEXP (e1, 0)) != REG
15529 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15530 abort ();
15531 reg = REGNO (XEXP (e1, 0));
15532 offset = INTVAL (XEXP (e1, 1));
15533 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
15534 HARD_FRAME_POINTER_REGNUM, reg,
15535 INTVAL (XEXP (e1, 1)));
15537 else if (GET_CODE (e1) == REG)
15539 reg = REGNO (e1);
15540 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
15541 HARD_FRAME_POINTER_REGNUM, reg);
15543 else
15544 abort ();
15546 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
15548 /* Move from sp to reg. */
15549 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
15551 else if (GET_CODE (e1) == PLUS
15552 && GET_CODE (XEXP (e1, 0)) == REG
15553 && REGNO (XEXP (e1, 0)) == SP_REGNUM
15554 && GET_CODE (XEXP (e1, 1)) == CONST_INT)
15556 /* Set reg to offset from sp. */
15557 asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
15558 REGNO (e0), (int) INTVAL (XEXP (e1, 1)));
15560 else
15561 abort ();
15562 break;
15564 default:
15565 abort ();
15570 /* Emit unwind directives for the given insn. */
15572 static void
15573 arm_unwind_emit (FILE * asm_out_file, rtx insn)
15575 rtx pat;
15577 if (!ARM_EABI_UNWIND_TABLES)
15578 return;
15580 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
15581 return;
15583 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
15584 if (pat)
15585 pat = XEXP (pat, 0);
15586 else
15587 pat = PATTERN (insn);
15589 switch (GET_CODE (pat))
15591 case SET:
15592 arm_unwind_emit_set (asm_out_file, pat);
15593 break;
15595 case SEQUENCE:
15596 /* Store multiple. */
15597 arm_unwind_emit_stm (asm_out_file, pat);
15598 break;
15600 default:
15601 abort ();
15606 /* Output a reference from a function exception table to the type_info
15607 object X. The EABI specifies that the symbol should be relocated by
15608 an R_ARM_TARGET2 relocation. */
15610 static bool
15611 arm_output_ttype (rtx x)
15613 fputs ("\t.word\t", asm_out_file);
15614 output_addr_const (asm_out_file, x);
15615 /* Use special relocations for symbol references. */
15616 if (GET_CODE (x) != CONST_INT)
15617 fputs ("(TARGET2)", asm_out_file);
15618 fputc ('\n', asm_out_file);
15620 return TRUE;
15622 #endif /* TARGET_UNWIND_INFO */
15625 /* Output unwind directives for the start/end of a function. */
15627 void
15628 arm_output_fn_unwind (FILE * f, bool prologue)
15630 if (!ARM_EABI_UNWIND_TABLES)
15631 return;
15633 if (prologue)
15634 fputs ("\t.fnstart\n", f);
15635 else
15636 fputs ("\t.fnend\n", f);
15639 static bool
15640 arm_emit_tls_decoration (FILE *fp, rtx x)
15642 enum tls_reloc reloc;
15643 rtx val;
15645 val = XVECEXP (x, 0, 0);
15646 reloc = INTVAL (XVECEXP (x, 0, 1));
15648 output_addr_const (fp, val);
15650 switch (reloc)
15652 case TLS_GD32:
15653 fputs ("(tlsgd)", fp);
15654 break;
15655 case TLS_LDM32:
15656 fputs ("(tlsldm)", fp);
15657 break;
15658 case TLS_LDO32:
15659 fputs ("(tlsldo)", fp);
15660 break;
15661 case TLS_IE32:
15662 fputs ("(gottpoff)", fp);
15663 break;
15664 case TLS_LE32:
15665 fputs ("(tpoff)", fp);
15666 break;
15667 default:
15668 gcc_unreachable ();
15671 switch (reloc)
15673 case TLS_GD32:
15674 case TLS_LDM32:
15675 case TLS_IE32:
15676 fputs (" + (. - ", fp);
15677 output_addr_const (fp, XVECEXP (x, 0, 2));
15678 fputs (" - ", fp);
15679 output_addr_const (fp, XVECEXP (x, 0, 3));
15680 fputc (')', fp);
15681 break;
15682 default:
15683 break;
15686 return TRUE;
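/* Hedged example, with made-up label names: a global-dynamic TLS
   reference to `x' is decorated as

        x(tlsgd) + (. - .LPIC0 - .LTLS0)

   where the two trailing labels come from operands 2 and 3 of the
   UNSPEC_TLS vector.  */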
15689 bool
15690 arm_output_addr_const_extra (FILE *fp, rtx x)
15692 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
15693 return arm_emit_tls_decoration (fp, x);
15694 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
15696 char label[256];
15697 int labelno = INTVAL (XVECEXP (x, 0, 0));
15699 ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
15700 assemble_name_raw (fp, label);
15702 return TRUE;
15704 else if (GET_CODE (x) == CONST_VECTOR)
15705 return arm_emit_vector_const (fp, x);
15707 return FALSE;
15710 #include "gt-arm.h"